]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.0-3.13.2-201402131555.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.0-3.13.2-201402131555.patch
CommitLineData
310d4dff
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..e289b9b 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,9 +75,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -80,6 +88,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -92,32 +101,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -125,12 +142,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -145,14 +165,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -162,14 +182,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -185,6 +206,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -194,6 +217,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -203,7 +227,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -213,8 +242,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -224,6 +257,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -235,13 +269,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -249,9 +287,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
238index b9e9bd8..bf49b92 100644
239--- a/Documentation/kernel-parameters.txt
240+++ b/Documentation/kernel-parameters.txt
241@@ -1033,6 +1033,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
242 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
243 Default: 1024
244
245+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
246+ ignore grsecurity's /proc restrictions
247+
248+
249 hashdist= [KNL,NUMA] Large hashes allocated during boot
250 are distributed across NUMA nodes. Defaults on
251 for 64-bit NUMA, off otherwise.
252@@ -2018,6 +2022,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
253 noexec=on: enable non-executable mappings (default)
254 noexec=off: disable non-executable mappings
255
256+ nopcid [X86-64]
257+ Disable PCID (Process-Context IDentifier) even if it
258+ is supported by the processor.
259+
260 nosmap [X86]
261 Disable SMAP (Supervisor Mode Access Prevention)
262 even if it is supported by processor.
263@@ -2285,6 +2293,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
264 the specified number of seconds. This is to be used if
265 your oopses keep scrolling off the screen.
266
267+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
268+ virtualization environments that don't cope well with the
269+ expand down segment used by UDEREF on X86-32 or the frequent
270+ page table updates on X86-64.
271+
272+ pax_sanitize_slab=
273+ 0/1 to disable/enable slab object sanitization (enabled by
274+ default).
275+
276+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
277+
278+ pax_extra_latent_entropy
279+ Enable a very simple form of latent entropy extraction
280+ from the first 4GB of memory as the bootmem allocator
281+ passes the memory pages to the buddy allocator.
282+
283+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
284+ when the processor supports PCID.
285+
286 pcbit= [HW,ISDN]
287
288 pcd. [PARIDE]
289diff --git a/Makefile b/Makefile
290index a7fd5d9..dc8e4db 100644
291--- a/Makefile
292+++ b/Makefile
293@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
294
295 HOSTCC = gcc
296 HOSTCXX = g++
297-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
298-HOSTCXXFLAGS = -O2
299+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
300+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
301+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
302
303 # Decide whether to build built-in, modular, or both.
304 # Normally, just do built-in.
305@@ -311,9 +312,15 @@ endif
306 # If the user is running make -s (silent mode), suppress echoing of
307 # commands
308
309+ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
310+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
311+ quiet=silent_
312+endif
313+else # make-3.8x
314 ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
315 quiet=silent_
316 endif
317+endif
318
319 export quiet Q KBUILD_VERBOSE
320
321@@ -417,8 +424,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
322 # Rules shared between *config targets and build targets
323
324 # Basic helpers built in scripts/
325-PHONY += scripts_basic
326-scripts_basic:
327+PHONY += scripts_basic gcc-plugins
328+scripts_basic: gcc-plugins
329 $(Q)$(MAKE) $(build)=scripts/basic
330 $(Q)rm -f .tmp_quiet_recordmcount
331
332@@ -579,6 +586,72 @@ else
333 KBUILD_CFLAGS += -O2
334 endif
335
336+ifndef DISABLE_PAX_PLUGINS
337+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
338+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
339+else
340+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
341+endif
342+ifneq ($(PLUGINCC),)
343+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
344+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
345+endif
346+ifdef CONFIG_PAX_MEMORY_STACKLEAK
347+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
348+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
349+endif
350+ifdef CONFIG_KALLOCSTAT_PLUGIN
351+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
352+endif
353+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
354+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
355+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
356+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
357+endif
358+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
359+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
360+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
361+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
362+endif
363+endif
364+ifdef CONFIG_CHECKER_PLUGIN
365+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
366+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
367+endif
368+endif
369+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
370+ifdef CONFIG_PAX_SIZE_OVERFLOW
371+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
372+endif
373+ifdef CONFIG_PAX_LATENT_ENTROPY
374+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
375+endif
376+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
377+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
378+endif
379+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
380+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
381+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
382+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
383+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
384+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
385+ifeq ($(KBUILD_EXTMOD),)
386+gcc-plugins:
387+ $(Q)$(MAKE) $(build)=tools/gcc
388+else
389+gcc-plugins: ;
390+endif
391+else
392+gcc-plugins:
393+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
394+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
395+else
396+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
397+endif
398+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
399+endif
400+endif
401+
402 include $(srctree)/arch/$(SRCARCH)/Makefile
403
404 ifdef CONFIG_READABLE_ASM
405@@ -619,7 +692,7 @@ endif
406
407 ifdef CONFIG_DEBUG_INFO
408 KBUILD_CFLAGS += -g
409-KBUILD_AFLAGS += -gdwarf-2
410+KBUILD_AFLAGS += -Wa,--gdwarf-2
411 endif
412
413 ifdef CONFIG_DEBUG_INFO_REDUCED
414@@ -754,7 +827,7 @@ export mod_sign_cmd
415
416
417 ifeq ($(KBUILD_EXTMOD),)
418-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
419+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
420
421 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
422 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
423@@ -803,6 +876,8 @@ endif
424
425 # The actual objects are generated when descending,
426 # make sure no implicit rule kicks in
427+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
428+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
429 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
430
431 # Handle descending into subdirectories listed in $(vmlinux-dirs)
432@@ -812,7 +887,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
433 # Error messages still appears in the original language
434
435 PHONY += $(vmlinux-dirs)
436-$(vmlinux-dirs): prepare scripts
437+$(vmlinux-dirs): gcc-plugins prepare scripts
438 $(Q)$(MAKE) $(build)=$@
439
440 define filechk_kernel.release
441@@ -855,10 +930,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
442
443 archprepare: archheaders archscripts prepare1 scripts_basic
444
445+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
446+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
447 prepare0: archprepare FORCE
448 $(Q)$(MAKE) $(build)=.
449
450 # All the preparing..
451+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
452 prepare: prepare0
453
454 # Generate some files
455@@ -966,6 +1044,8 @@ all: modules
456 # using awk while concatenating to the final file.
457
458 PHONY += modules
459+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
460+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
461 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
462 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
463 @$(kecho) ' Building modules, stage 2.';
464@@ -981,7 +1061,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
465
466 # Target to prepare building external modules
467 PHONY += modules_prepare
468-modules_prepare: prepare scripts
469+modules_prepare: gcc-plugins prepare scripts
470
471 # Target to install modules
472 PHONY += modules_install
473@@ -1047,7 +1127,8 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
474 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
475 signing_key.priv signing_key.x509 x509.genkey \
476 extra_certificates signing_key.x509.keyid \
477- signing_key.x509.signer
478+ signing_key.x509.signer tools/gcc/size_overflow_hash.h \
479+ tools/gcc/randomize_layout_seed.h
480
481 # clean - Delete most, but leave enough to build external modules
482 #
483@@ -1087,6 +1168,7 @@ distclean: mrproper
484 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
485 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
486 -o -name '.*.rej' \
487+ -o -name '.*.rej' -o -name '*.so' \
488 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
489 -type f -print | xargs rm -f
490
491@@ -1248,6 +1330,8 @@ PHONY += $(module-dirs) modules
492 $(module-dirs): crmodverdir $(objtree)/Module.symvers
493 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
494
495+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
496+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
497 modules: $(module-dirs)
498 @$(kecho) ' Building modules, stage 2.';
499 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
500@@ -1387,17 +1471,21 @@ else
501 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
502 endif
503
504-%.s: %.c prepare scripts FORCE
505+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
506+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
507+%.s: %.c gcc-plugins prepare scripts FORCE
508 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
509 %.i: %.c prepare scripts FORCE
510 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
511-%.o: %.c prepare scripts FORCE
512+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
513+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
514+%.o: %.c gcc-plugins prepare scripts FORCE
515 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
516 %.lst: %.c prepare scripts FORCE
517 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
518-%.s: %.S prepare scripts FORCE
519+%.s: %.S gcc-plugins prepare scripts FORCE
520 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
521-%.o: %.S prepare scripts FORCE
522+%.o: %.S gcc-plugins prepare scripts FORCE
523 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
524 %.symtypes: %.c prepare scripts FORCE
525 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
526@@ -1407,11 +1495,15 @@ endif
527 $(cmd_crmodverdir)
528 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
529 $(build)=$(build-dir)
530-%/: prepare scripts FORCE
531+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
532+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
533+%/: gcc-plugins prepare scripts FORCE
534 $(cmd_crmodverdir)
535 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
536 $(build)=$(build-dir)
537-%.ko: prepare scripts FORCE
538+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
539+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
540+%.ko: gcc-plugins prepare scripts FORCE
541 $(cmd_crmodverdir)
542 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
543 $(build)=$(build-dir) $(@:.ko=.o)
544diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
545index 78b03ef..da28a51 100644
546--- a/arch/alpha/include/asm/atomic.h
547+++ b/arch/alpha/include/asm/atomic.h
548@@ -292,6 +292,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
549 #define atomic_dec(v) atomic_sub(1,(v))
550 #define atomic64_dec(v) atomic64_sub(1,(v))
551
552+#define atomic64_read_unchecked(v) atomic64_read(v)
553+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
554+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
555+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
556+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
557+#define atomic64_inc_unchecked(v) atomic64_inc(v)
558+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
559+#define atomic64_dec_unchecked(v) atomic64_dec(v)
560+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
561+
562 #define smp_mb__before_atomic_dec() smp_mb()
563 #define smp_mb__after_atomic_dec() smp_mb()
564 #define smp_mb__before_atomic_inc() smp_mb()
565diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
566index ad368a9..fbe0f25 100644
567--- a/arch/alpha/include/asm/cache.h
568+++ b/arch/alpha/include/asm/cache.h
569@@ -4,19 +4,19 @@
570 #ifndef __ARCH_ALPHA_CACHE_H
571 #define __ARCH_ALPHA_CACHE_H
572
573+#include <linux/const.h>
574
575 /* Bytes per L1 (data) cache line. */
576 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
577-# define L1_CACHE_BYTES 64
578 # define L1_CACHE_SHIFT 6
579 #else
580 /* Both EV4 and EV5 are write-through, read-allocate,
581 direct-mapped, physical.
582 */
583-# define L1_CACHE_BYTES 32
584 # define L1_CACHE_SHIFT 5
585 #endif
586
587+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
588 #define SMP_CACHE_BYTES L1_CACHE_BYTES
589
590 #endif
591diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
592index 968d999..d36b2df 100644
593--- a/arch/alpha/include/asm/elf.h
594+++ b/arch/alpha/include/asm/elf.h
595@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
596
597 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
598
599+#ifdef CONFIG_PAX_ASLR
600+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
601+
602+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
603+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
604+#endif
605+
606 /* $0 is set by ld.so to a pointer to a function which might be
607 registered using atexit. This provides a mean for the dynamic
608 linker to call DT_FINI functions for shared libraries that have
609diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
610index aab14a0..b4fa3e7 100644
611--- a/arch/alpha/include/asm/pgalloc.h
612+++ b/arch/alpha/include/asm/pgalloc.h
613@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
614 pgd_set(pgd, pmd);
615 }
616
617+static inline void
618+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
619+{
620+ pgd_populate(mm, pgd, pmd);
621+}
622+
623 extern pgd_t *pgd_alloc(struct mm_struct *mm);
624
625 static inline void
626diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
627index d8f9b7e..f6222fa 100644
628--- a/arch/alpha/include/asm/pgtable.h
629+++ b/arch/alpha/include/asm/pgtable.h
630@@ -102,6 +102,17 @@ struct vm_area_struct;
631 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
632 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
633 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
634+
635+#ifdef CONFIG_PAX_PAGEEXEC
636+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
637+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
638+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
639+#else
640+# define PAGE_SHARED_NOEXEC PAGE_SHARED
641+# define PAGE_COPY_NOEXEC PAGE_COPY
642+# define PAGE_READONLY_NOEXEC PAGE_READONLY
643+#endif
644+
645 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
646
647 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
648diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
649index 2fd00b7..cfd5069 100644
650--- a/arch/alpha/kernel/module.c
651+++ b/arch/alpha/kernel/module.c
652@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
653
654 /* The small sections were sorted to the end of the segment.
655 The following should definitely cover them. */
656- gp = (u64)me->module_core + me->core_size - 0x8000;
657+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
658 got = sechdrs[me->arch.gotsecindex].sh_addr;
659
660 for (i = 0; i < n; i++) {
661diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
662index 1402fcc..0b1abd2 100644
663--- a/arch/alpha/kernel/osf_sys.c
664+++ b/arch/alpha/kernel/osf_sys.c
665@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
666 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
667
668 static unsigned long
669-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
670- unsigned long limit)
671+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
672+ unsigned long limit, unsigned long flags)
673 {
674 struct vm_unmapped_area_info info;
675+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
676
677 info.flags = 0;
678 info.length = len;
679@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
680 info.high_limit = limit;
681 info.align_mask = 0;
682 info.align_offset = 0;
683+ info.threadstack_offset = offset;
684 return vm_unmapped_area(&info);
685 }
686
687@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
688 merely specific addresses, but regions of memory -- perhaps
689 this feature should be incorporated into all ports? */
690
691+#ifdef CONFIG_PAX_RANDMMAP
692+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
693+#endif
694+
695 if (addr) {
696- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
697+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
698 if (addr != (unsigned long) -ENOMEM)
699 return addr;
700 }
701
702 /* Next, try allocating at TASK_UNMAPPED_BASE. */
703- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
704- len, limit);
705+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
706+
707 if (addr != (unsigned long) -ENOMEM)
708 return addr;
709
710 /* Finally, try allocating in low memory. */
711- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
712+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
713
714 return addr;
715 }
716diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
717index 98838a0..b304fb4 100644
718--- a/arch/alpha/mm/fault.c
719+++ b/arch/alpha/mm/fault.c
720@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
721 __reload_thread(pcb);
722 }
723
724+#ifdef CONFIG_PAX_PAGEEXEC
725+/*
726+ * PaX: decide what to do with offenders (regs->pc = fault address)
727+ *
728+ * returns 1 when task should be killed
729+ * 2 when patched PLT trampoline was detected
730+ * 3 when unpatched PLT trampoline was detected
731+ */
732+static int pax_handle_fetch_fault(struct pt_regs *regs)
733+{
734+
735+#ifdef CONFIG_PAX_EMUPLT
736+ int err;
737+
738+ do { /* PaX: patched PLT emulation #1 */
739+ unsigned int ldah, ldq, jmp;
740+
741+ err = get_user(ldah, (unsigned int *)regs->pc);
742+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
743+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
744+
745+ if (err)
746+ break;
747+
748+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
749+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
750+ jmp == 0x6BFB0000U)
751+ {
752+ unsigned long r27, addr;
753+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
754+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
755+
756+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
757+ err = get_user(r27, (unsigned long *)addr);
758+ if (err)
759+ break;
760+
761+ regs->r27 = r27;
762+ regs->pc = r27;
763+ return 2;
764+ }
765+ } while (0);
766+
767+ do { /* PaX: patched PLT emulation #2 */
768+ unsigned int ldah, lda, br;
769+
770+ err = get_user(ldah, (unsigned int *)regs->pc);
771+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
772+ err |= get_user(br, (unsigned int *)(regs->pc+8));
773+
774+ if (err)
775+ break;
776+
777+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
778+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
779+ (br & 0xFFE00000U) == 0xC3E00000U)
780+ {
781+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
782+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
783+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
784+
785+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
786+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
787+ return 2;
788+ }
789+ } while (0);
790+
791+ do { /* PaX: unpatched PLT emulation */
792+ unsigned int br;
793+
794+ err = get_user(br, (unsigned int *)regs->pc);
795+
796+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
797+ unsigned int br2, ldq, nop, jmp;
798+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
799+
800+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
801+ err = get_user(br2, (unsigned int *)addr);
802+ err |= get_user(ldq, (unsigned int *)(addr+4));
803+ err |= get_user(nop, (unsigned int *)(addr+8));
804+ err |= get_user(jmp, (unsigned int *)(addr+12));
805+ err |= get_user(resolver, (unsigned long *)(addr+16));
806+
807+ if (err)
808+ break;
809+
810+ if (br2 == 0xC3600000U &&
811+ ldq == 0xA77B000CU &&
812+ nop == 0x47FF041FU &&
813+ jmp == 0x6B7B0000U)
814+ {
815+ regs->r28 = regs->pc+4;
816+ regs->r27 = addr+16;
817+ regs->pc = resolver;
818+ return 3;
819+ }
820+ }
821+ } while (0);
822+#endif
823+
824+ return 1;
825+}
826+
827+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
828+{
829+ unsigned long i;
830+
831+ printk(KERN_ERR "PAX: bytes at PC: ");
832+ for (i = 0; i < 5; i++) {
833+ unsigned int c;
834+ if (get_user(c, (unsigned int *)pc+i))
835+ printk(KERN_CONT "???????? ");
836+ else
837+ printk(KERN_CONT "%08x ", c);
838+ }
839+ printk("\n");
840+}
841+#endif
842
843 /*
844 * This routine handles page faults. It determines the address,
845@@ -133,8 +251,29 @@ retry:
846 good_area:
847 si_code = SEGV_ACCERR;
848 if (cause < 0) {
849- if (!(vma->vm_flags & VM_EXEC))
850+ if (!(vma->vm_flags & VM_EXEC)) {
851+
852+#ifdef CONFIG_PAX_PAGEEXEC
853+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
854+ goto bad_area;
855+
856+ up_read(&mm->mmap_sem);
857+ switch (pax_handle_fetch_fault(regs)) {
858+
859+#ifdef CONFIG_PAX_EMUPLT
860+ case 2:
861+ case 3:
862+ return;
863+#endif
864+
865+ }
866+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
867+ do_group_exit(SIGKILL);
868+#else
869 goto bad_area;
870+#endif
871+
872+ }
873 } else if (!cause) {
874 /* Allow reads even for write-only mappings */
875 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
876diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
877index c1f1a7e..554b0cd 100644
878--- a/arch/arm/Kconfig
879+++ b/arch/arm/Kconfig
880@@ -1828,7 +1828,7 @@ config ALIGNMENT_TRAP
881
882 config UACCESS_WITH_MEMCPY
883 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
884- depends on MMU
885+ depends on MMU && !PAX_MEMORY_UDEREF
886 default y if CPU_FEROCEON
887 help
888 Implement faster copy_to_user and clear_user methods for CPU
889@@ -2100,6 +2100,7 @@ config XIP_PHYS_ADDR
890 config KEXEC
891 bool "Kexec system call (EXPERIMENTAL)"
892 depends on (!SMP || PM_SLEEP_SMP)
893+ depends on !GRKERNSEC_KMEM
894 help
895 kexec is a system call that implements the ability to shutdown your
896 current kernel, and to start another kernel. It is like a reboot
897diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
898index 62d2cb5..7a13651 100644
899--- a/arch/arm/include/asm/atomic.h
900+++ b/arch/arm/include/asm/atomic.h
901@@ -18,17 +18,35 @@
902 #include <asm/barrier.h>
903 #include <asm/cmpxchg.h>
904
905+#ifdef CONFIG_GENERIC_ATOMIC64
906+#include <asm-generic/atomic64.h>
907+#endif
908+
909 #define ATOMIC_INIT(i) { (i) }
910
911 #ifdef __KERNEL__
912
913+#define _ASM_EXTABLE(from, to) \
914+" .pushsection __ex_table,\"a\"\n"\
915+" .align 3\n" \
916+" .long " #from ", " #to"\n" \
917+" .popsection"
918+
919 /*
920 * On ARM, ordinary assignment (str instruction) doesn't clear the local
921 * strex/ldrex monitor on some implementations. The reason we can use it for
922 * atomic_set() is the clrex or dummy strex done on every exception return.
923 */
924 #define atomic_read(v) (*(volatile int *)&(v)->counter)
925+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
926+{
927+ return v->counter;
928+}
929 #define atomic_set(v,i) (((v)->counter) = (i))
930+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
931+{
932+ v->counter = i;
933+}
934
935 #if __LINUX_ARM_ARCH__ >= 6
936
937@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)
938
939 prefetchw(&v->counter);
940 __asm__ __volatile__("@ atomic_add\n"
941+"1: ldrex %1, [%3]\n"
942+" adds %0, %1, %4\n"
943+
944+#ifdef CONFIG_PAX_REFCOUNT
945+" bvc 3f\n"
946+"2: bkpt 0xf103\n"
947+"3:\n"
948+#endif
949+
950+" strex %1, %0, [%3]\n"
951+" teq %1, #0\n"
952+" bne 1b"
953+
954+#ifdef CONFIG_PAX_REFCOUNT
955+"\n4:\n"
956+ _ASM_EXTABLE(2b, 4b)
957+#endif
958+
959+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
960+ : "r" (&v->counter), "Ir" (i)
961+ : "cc");
962+}
963+
964+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
965+{
966+ unsigned long tmp;
967+ int result;
968+
969+ prefetchw(&v->counter);
970+ __asm__ __volatile__("@ atomic_add_unchecked\n"
971 "1: ldrex %0, [%3]\n"
972 " add %0, %0, %4\n"
973 " strex %1, %0, [%3]\n"
974@@ -62,6 +110,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
975 smp_mb();
976
977 __asm__ __volatile__("@ atomic_add_return\n"
978+"1: ldrex %1, [%3]\n"
979+" adds %0, %1, %4\n"
980+
981+#ifdef CONFIG_PAX_REFCOUNT
982+" bvc 3f\n"
983+" mov %0, %1\n"
984+"2: bkpt 0xf103\n"
985+"3:\n"
986+#endif
987+
988+" strex %1, %0, [%3]\n"
989+" teq %1, #0\n"
990+" bne 1b"
991+
992+#ifdef CONFIG_PAX_REFCOUNT
993+"\n4:\n"
994+ _ASM_EXTABLE(2b, 4b)
995+#endif
996+
997+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
998+ : "r" (&v->counter), "Ir" (i)
999+ : "cc");
1000+
1001+ smp_mb();
1002+
1003+ return result;
1004+}
1005+
1006+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1007+{
1008+ unsigned long tmp;
1009+ int result;
1010+
1011+ smp_mb();
1012+
1013+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
1014 "1: ldrex %0, [%3]\n"
1015 " add %0, %0, %4\n"
1016 " strex %1, %0, [%3]\n"
1017@@ -83,6 +167,36 @@ static inline void atomic_sub(int i, atomic_t *v)
1018
1019 prefetchw(&v->counter);
1020 __asm__ __volatile__("@ atomic_sub\n"
1021+"1: ldrex %1, [%3]\n"
1022+" subs %0, %1, %4\n"
1023+
1024+#ifdef CONFIG_PAX_REFCOUNT
1025+" bvc 3f\n"
1026+"2: bkpt 0xf103\n"
1027+"3:\n"
1028+#endif
1029+
1030+" strex %1, %0, [%3]\n"
1031+" teq %1, #0\n"
1032+" bne 1b"
1033+
1034+#ifdef CONFIG_PAX_REFCOUNT
1035+"\n4:\n"
1036+ _ASM_EXTABLE(2b, 4b)
1037+#endif
1038+
1039+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1040+ : "r" (&v->counter), "Ir" (i)
1041+ : "cc");
1042+}
1043+
1044+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1045+{
1046+ unsigned long tmp;
1047+ int result;
1048+
1049+ prefetchw(&v->counter);
1050+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1051 "1: ldrex %0, [%3]\n"
1052 " sub %0, %0, %4\n"
1053 " strex %1, %0, [%3]\n"
1054@@ -101,11 +215,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1055 smp_mb();
1056
1057 __asm__ __volatile__("@ atomic_sub_return\n"
1058-"1: ldrex %0, [%3]\n"
1059-" sub %0, %0, %4\n"
1060+"1: ldrex %1, [%3]\n"
1061+" subs %0, %1, %4\n"
1062+
1063+#ifdef CONFIG_PAX_REFCOUNT
1064+" bvc 3f\n"
1065+" mov %0, %1\n"
1066+"2: bkpt 0xf103\n"
1067+"3:\n"
1068+#endif
1069+
1070 " strex %1, %0, [%3]\n"
1071 " teq %1, #0\n"
1072 " bne 1b"
1073+
1074+#ifdef CONFIG_PAX_REFCOUNT
1075+"\n4:\n"
1076+ _ASM_EXTABLE(2b, 4b)
1077+#endif
1078+
1079 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1080 : "r" (&v->counter), "Ir" (i)
1081 : "cc");
1082@@ -138,6 +266,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1083 return oldval;
1084 }
1085
1086+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1087+{
1088+ unsigned long oldval, res;
1089+
1090+ smp_mb();
1091+
1092+ do {
1093+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1094+ "ldrex %1, [%3]\n"
1095+ "mov %0, #0\n"
1096+ "teq %1, %4\n"
1097+ "strexeq %0, %5, [%3]\n"
1098+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1099+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1100+ : "cc");
1101+ } while (res);
1102+
1103+ smp_mb();
1104+
1105+ return oldval;
1106+}
1107+
1108 #else /* ARM_ARCH_6 */
1109
1110 #ifdef CONFIG_SMP
1111@@ -156,7 +306,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1112
1113 return val;
1114 }
1115+
1116+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1117+{
1118+ return atomic_add_return(i, v);
1119+}
1120+
1121 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1122+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1123+{
1124+ (void) atomic_add_return(i, v);
1125+}
1126
1127 static inline int atomic_sub_return(int i, atomic_t *v)
1128 {
1129@@ -171,6 +331,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1130 return val;
1131 }
1132 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1133+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1134+{
1135+ (void) atomic_sub_return(i, v);
1136+}
1137
1138 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1139 {
1140@@ -186,9 +350,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1141 return ret;
1142 }
1143
1144+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1145+{
1146+ return atomic_cmpxchg(v, old, new);
1147+}
1148+
1149 #endif /* __LINUX_ARM_ARCH__ */
1150
1151 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1152+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1153+{
1154+ return xchg(&v->counter, new);
1155+}
1156
1157 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1158 {
1159@@ -201,11 +374,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1160 }
1161
1162 #define atomic_inc(v) atomic_add(1, v)
1163+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1164+{
1165+ atomic_add_unchecked(1, v);
1166+}
1167 #define atomic_dec(v) atomic_sub(1, v)
1168+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1169+{
1170+ atomic_sub_unchecked(1, v);
1171+}
1172
1173 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1174+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1175+{
1176+ return atomic_add_return_unchecked(1, v) == 0;
1177+}
1178 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1179 #define atomic_inc_return(v) (atomic_add_return(1, v))
1180+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1181+{
1182+ return atomic_add_return_unchecked(1, v);
1183+}
1184 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1185 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1186
1187@@ -221,6 +410,14 @@ typedef struct {
1188 long long counter;
1189 } atomic64_t;
1190
1191+#ifdef CONFIG_PAX_REFCOUNT
1192+typedef struct {
1193+ long long counter;
1194+} atomic64_unchecked_t;
1195+#else
1196+typedef atomic64_t atomic64_unchecked_t;
1197+#endif
1198+
1199 #define ATOMIC64_INIT(i) { (i) }
1200
1201 #ifdef CONFIG_ARM_LPAE
1202@@ -237,6 +434,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1203 return result;
1204 }
1205
1206+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1207+{
1208+ long long result;
1209+
1210+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1211+" ldrd %0, %H0, [%1]"
1212+ : "=&r" (result)
1213+ : "r" (&v->counter), "Qo" (v->counter)
1214+ );
1215+
1216+ return result;
1217+}
1218+
1219 static inline void atomic64_set(atomic64_t *v, long long i)
1220 {
1221 __asm__ __volatile__("@ atomic64_set\n"
1222@@ -245,6 +455,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1223 : "r" (&v->counter), "r" (i)
1224 );
1225 }
1226+
1227+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1228+{
1229+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1230+" strd %2, %H2, [%1]"
1231+ : "=Qo" (v->counter)
1232+ : "r" (&v->counter), "r" (i)
1233+ );
1234+}
1235 #else
1236 static inline long long atomic64_read(const atomic64_t *v)
1237 {
1238@@ -259,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1239 return result;
1240 }
1241
1242+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1243+{
1244+ long long result;
1245+
1246+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1247+" ldrexd %0, %H0, [%1]"
1248+ : "=&r" (result)
1249+ : "r" (&v->counter), "Qo" (v->counter)
1250+ );
1251+
1252+ return result;
1253+}
1254+
1255 static inline void atomic64_set(atomic64_t *v, long long i)
1256 {
1257 long long tmp;
1258@@ -273,6 +505,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1259 : "r" (&v->counter), "r" (i)
1260 : "cc");
1261 }
1262+
1263+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1264+{
1265+ long long tmp;
1266+
1267+ prefetchw(&v->counter);
1268+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1269+"1: ldrexd %0, %H0, [%2]\n"
1270+" strexd %0, %3, %H3, [%2]\n"
1271+" teq %0, #0\n"
1272+" bne 1b"
1273+ : "=&r" (tmp), "=Qo" (v->counter)
1274+ : "r" (&v->counter), "r" (i)
1275+ : "cc");
1276+}
1277 #endif
1278
1279 static inline void atomic64_add(long long i, atomic64_t *v)
1280@@ -284,6 +531,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1281 __asm__ __volatile__("@ atomic64_add\n"
1282 "1: ldrexd %0, %H0, [%3]\n"
1283 " adds %Q0, %Q0, %Q4\n"
1284+" adcs %R0, %R0, %R4\n"
1285+
1286+#ifdef CONFIG_PAX_REFCOUNT
1287+" bvc 3f\n"
1288+"2: bkpt 0xf103\n"
1289+"3:\n"
1290+#endif
1291+
1292+" strexd %1, %0, %H0, [%3]\n"
1293+" teq %1, #0\n"
1294+" bne 1b"
1295+
1296+#ifdef CONFIG_PAX_REFCOUNT
1297+"\n4:\n"
1298+ _ASM_EXTABLE(2b, 4b)
1299+#endif
1300+
1301+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1302+ : "r" (&v->counter), "r" (i)
1303+ : "cc");
1304+}
1305+
1306+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1307+{
1308+ long long result;
1309+ unsigned long tmp;
1310+
1311+ prefetchw(&v->counter);
1312+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1313+"1: ldrexd %0, %H0, [%3]\n"
1314+" adds %Q0, %Q0, %Q4\n"
1315 " adc %R0, %R0, %R4\n"
1316 " strexd %1, %0, %H0, [%3]\n"
1317 " teq %1, #0\n"
1318@@ -303,6 +581,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1319 __asm__ __volatile__("@ atomic64_add_return\n"
1320 "1: ldrexd %0, %H0, [%3]\n"
1321 " adds %Q0, %Q0, %Q4\n"
1322+" adcs %R0, %R0, %R4\n"
1323+
1324+#ifdef CONFIG_PAX_REFCOUNT
1325+" bvc 3f\n"
1326+" mov %0, %1\n"
1327+" mov %H0, %H1\n"
1328+"2: bkpt 0xf103\n"
1329+"3:\n"
1330+#endif
1331+
1332+" strexd %1, %0, %H0, [%3]\n"
1333+" teq %1, #0\n"
1334+" bne 1b"
1335+
1336+#ifdef CONFIG_PAX_REFCOUNT
1337+"\n4:\n"
1338+ _ASM_EXTABLE(2b, 4b)
1339+#endif
1340+
1341+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1342+ : "r" (&v->counter), "r" (i)
1343+ : "cc");
1344+
1345+ smp_mb();
1346+
1347+ return result;
1348+}
1349+
1350+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1351+{
1352+ long long result;
1353+ unsigned long tmp;
1354+
1355+ smp_mb();
1356+
1357+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1358+"1: ldrexd %0, %H0, [%3]\n"
1359+" adds %Q0, %Q0, %Q4\n"
1360 " adc %R0, %R0, %R4\n"
1361 " strexd %1, %0, %H0, [%3]\n"
1362 " teq %1, #0\n"
1363@@ -325,6 +641,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1364 __asm__ __volatile__("@ atomic64_sub\n"
1365 "1: ldrexd %0, %H0, [%3]\n"
1366 " subs %Q0, %Q0, %Q4\n"
1367+" sbcs %R0, %R0, %R4\n"
1368+
1369+#ifdef CONFIG_PAX_REFCOUNT
1370+" bvc 3f\n"
1371+"2: bkpt 0xf103\n"
1372+"3:\n"
1373+#endif
1374+
1375+" strexd %1, %0, %H0, [%3]\n"
1376+" teq %1, #0\n"
1377+" bne 1b"
1378+
1379+#ifdef CONFIG_PAX_REFCOUNT
1380+"\n4:\n"
1381+ _ASM_EXTABLE(2b, 4b)
1382+#endif
1383+
1384+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1385+ : "r" (&v->counter), "r" (i)
1386+ : "cc");
1387+}
1388+
1389+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1390+{
1391+ long long result;
1392+ unsigned long tmp;
1393+
1394+ prefetchw(&v->counter);
1395+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1396+"1: ldrexd %0, %H0, [%3]\n"
1397+" subs %Q0, %Q0, %Q4\n"
1398 " sbc %R0, %R0, %R4\n"
1399 " strexd %1, %0, %H0, [%3]\n"
1400 " teq %1, #0\n"
1401@@ -344,17 +691,28 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1402 __asm__ __volatile__("@ atomic64_sub_return\n"
1403 "1: ldrexd %0, %H0, [%3]\n"
1404 " subs %Q0, %Q0, %Q4\n"
1405-" sbc %R0, %R0, %R4\n"
1406+" sbcs %R0, %R0, %R4\n"
1407+
1408+#ifdef CONFIG_PAX_REFCOUNT
1409+" bvc 3f\n"
1410+" mov %0, %1\n"
1411+" mov %H0, %H1\n"
1412+"2: bkpt 0xf103\n"
1413+"3:\n"
1414+#endif
1415+
1416 " strexd %1, %0, %H0, [%3]\n"
1417 " teq %1, #0\n"
1418 " bne 1b"
1419+
1420+#ifdef CONFIG_PAX_REFCOUNT
1421+"\n4:\n"
1422+ _ASM_EXTABLE(2b, 4b)
1423+#endif
1424+
1425 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1426 : "r" (&v->counter), "r" (i)
1427 : "cc");
1428-
1429- smp_mb();
1430-
1431- return result;
1432 }
1433
1434 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1435@@ -382,6 +740,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1436 return oldval;
1437 }
1438
1439+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1440+ long long new)
1441+{
1442+ long long oldval;
1443+ unsigned long res;
1444+
1445+ smp_mb();
1446+
1447+ do {
1448+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1449+ "ldrexd %1, %H1, [%3]\n"
1450+ "mov %0, #0\n"
1451+ "teq %1, %4\n"
1452+ "teqeq %H1, %H4\n"
1453+ "strexdeq %0, %5, %H5, [%3]"
1454+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1455+ : "r" (&ptr->counter), "r" (old), "r" (new)
1456+ : "cc");
1457+ } while (res);
1458+
1459+ smp_mb();
1460+
1461+ return oldval;
1462+}
1463+
1464 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1465 {
1466 long long result;
1467@@ -406,20 +789,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1468 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1469 {
1470 long long result;
1471- unsigned long tmp;
1472+ u64 tmp;
1473
1474 smp_mb();
1475
1476 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1477-"1: ldrexd %0, %H0, [%3]\n"
1478-" subs %Q0, %Q0, #1\n"
1479-" sbc %R0, %R0, #0\n"
1480+"1: ldrexd %1, %H1, [%3]\n"
1481+" subs %Q0, %Q1, #1\n"
1482+" sbcs %R0, %R1, #0\n"
1483+
1484+#ifdef CONFIG_PAX_REFCOUNT
1485+" bvc 3f\n"
1486+" mov %Q0, %Q1\n"
1487+" mov %R0, %R1\n"
1488+"2: bkpt 0xf103\n"
1489+"3:\n"
1490+#endif
1491+
1492 " teq %R0, #0\n"
1493-" bmi 2f\n"
1494+" bmi 4f\n"
1495 " strexd %1, %0, %H0, [%3]\n"
1496 " teq %1, #0\n"
1497 " bne 1b\n"
1498-"2:"
1499+"4:\n"
1500+
1501+#ifdef CONFIG_PAX_REFCOUNT
1502+ _ASM_EXTABLE(2b, 4b)
1503+#endif
1504+
1505 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1506 : "r" (&v->counter)
1507 : "cc");
1508@@ -442,13 +839,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1509 " teq %0, %5\n"
1510 " teqeq %H0, %H5\n"
1511 " moveq %1, #0\n"
1512-" beq 2f\n"
1513+" beq 4f\n"
1514 " adds %Q0, %Q0, %Q6\n"
1515-" adc %R0, %R0, %R6\n"
1516+" adcs %R0, %R0, %R6\n"
1517+
1518+#ifdef CONFIG_PAX_REFCOUNT
1519+" bvc 3f\n"
1520+"2: bkpt 0xf103\n"
1521+"3:\n"
1522+#endif
1523+
1524 " strexd %2, %0, %H0, [%4]\n"
1525 " teq %2, #0\n"
1526 " bne 1b\n"
1527-"2:"
1528+"4:\n"
1529+
1530+#ifdef CONFIG_PAX_REFCOUNT
1531+ _ASM_EXTABLE(2b, 4b)
1532+#endif
1533+
1534 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1535 : "r" (&v->counter), "r" (u), "r" (a)
1536 : "cc");
1537@@ -461,10 +870,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1538
1539 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1540 #define atomic64_inc(v) atomic64_add(1LL, (v))
1541+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1542 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1543+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1544 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1545 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1546 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1547+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1548 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1549 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1550 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1551diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1552index 75fe66b..ba3dee4 100644
1553--- a/arch/arm/include/asm/cache.h
1554+++ b/arch/arm/include/asm/cache.h
1555@@ -4,8 +4,10 @@
1556 #ifndef __ASMARM_CACHE_H
1557 #define __ASMARM_CACHE_H
1558
1559+#include <linux/const.h>
1560+
1561 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1562-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1563+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1564
1565 /*
1566 * Memory returned by kmalloc() may be used for DMA, so we must make
1567@@ -24,5 +26,6 @@
1568 #endif
1569
1570 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1571+#define __read_only __attribute__ ((__section__(".data..read_only")))
1572
1573 #endif
1574diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1575index ee753f1..c9c30a5 100644
1576--- a/arch/arm/include/asm/cacheflush.h
1577+++ b/arch/arm/include/asm/cacheflush.h
1578@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1579 void (*dma_unmap_area)(const void *, size_t, int);
1580
1581 void (*dma_flush_range)(const void *, const void *);
1582-};
1583+} __no_const;
1584
1585 /*
1586 * Select the calling method
1587diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1588index 6dcc164..b14d917 100644
1589--- a/arch/arm/include/asm/checksum.h
1590+++ b/arch/arm/include/asm/checksum.h
1591@@ -37,7 +37,19 @@ __wsum
1592 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1593
1594 __wsum
1595-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1596+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1597+
1598+static inline __wsum
1599+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1600+{
1601+ __wsum ret;
1602+ pax_open_userland();
1603+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1604+ pax_close_userland();
1605+ return ret;
1606+}
1607+
1608+
1609
1610 /*
1611 * Fold a partial checksum without adding pseudo headers
1612diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1613index df2fbba..63fe3e1 100644
1614--- a/arch/arm/include/asm/cmpxchg.h
1615+++ b/arch/arm/include/asm/cmpxchg.h
1616@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1617
1618 #define xchg(ptr,x) \
1619 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1620+#define xchg_unchecked(ptr,x) \
1621+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1622
1623 #include <asm-generic/cmpxchg-local.h>
1624
1625diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1626index 6ddbe44..b5e38b1 100644
1627--- a/arch/arm/include/asm/domain.h
1628+++ b/arch/arm/include/asm/domain.h
1629@@ -48,18 +48,37 @@
1630 * Domain types
1631 */
1632 #define DOMAIN_NOACCESS 0
1633-#define DOMAIN_CLIENT 1
1634 #ifdef CONFIG_CPU_USE_DOMAINS
1635+#define DOMAIN_USERCLIENT 1
1636+#define DOMAIN_KERNELCLIENT 1
1637 #define DOMAIN_MANAGER 3
1638+#define DOMAIN_VECTORS DOMAIN_USER
1639 #else
1640+
1641+#ifdef CONFIG_PAX_KERNEXEC
1642 #define DOMAIN_MANAGER 1
1643+#define DOMAIN_KERNEXEC 3
1644+#else
1645+#define DOMAIN_MANAGER 1
1646+#endif
1647+
1648+#ifdef CONFIG_PAX_MEMORY_UDEREF
1649+#define DOMAIN_USERCLIENT 0
1650+#define DOMAIN_UDEREF 1
1651+#define DOMAIN_VECTORS DOMAIN_KERNEL
1652+#else
1653+#define DOMAIN_USERCLIENT 1
1654+#define DOMAIN_VECTORS DOMAIN_USER
1655+#endif
1656+#define DOMAIN_KERNELCLIENT 1
1657+
1658 #endif
1659
1660 #define domain_val(dom,type) ((type) << (2*(dom)))
1661
1662 #ifndef __ASSEMBLY__
1663
1664-#ifdef CONFIG_CPU_USE_DOMAINS
1665+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1666 static inline void set_domain(unsigned val)
1667 {
1668 asm volatile(
1669@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1670 isb();
1671 }
1672
1673-#define modify_domain(dom,type) \
1674- do { \
1675- struct thread_info *thread = current_thread_info(); \
1676- unsigned int domain = thread->cpu_domain; \
1677- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1678- thread->cpu_domain = domain | domain_val(dom, type); \
1679- set_domain(thread->cpu_domain); \
1680- } while (0)
1681-
1682+extern void modify_domain(unsigned int dom, unsigned int type);
1683 #else
1684 static inline void set_domain(unsigned val) { }
1685 static inline void modify_domain(unsigned dom, unsigned type) { }
1686diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1687index f4b46d3..abc9b2b 100644
1688--- a/arch/arm/include/asm/elf.h
1689+++ b/arch/arm/include/asm/elf.h
1690@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1691 the loader. We need to make sure that it is out of the way of the program
1692 that it will "exec", and that there is sufficient room for the brk. */
1693
1694-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1695+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1696+
1697+#ifdef CONFIG_PAX_ASLR
1698+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1699+
1700+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1701+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1702+#endif
1703
1704 /* When the program starts, a1 contains a pointer to a function to be
1705 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1706@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1707 extern void elf_set_personality(const struct elf32_hdr *);
1708 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1709
1710-struct mm_struct;
1711-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1712-#define arch_randomize_brk arch_randomize_brk
1713-
1714 #ifdef CONFIG_MMU
1715 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1716 struct linux_binprm;
1717diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1718index de53547..52b9a28 100644
1719--- a/arch/arm/include/asm/fncpy.h
1720+++ b/arch/arm/include/asm/fncpy.h
1721@@ -81,7 +81,9 @@
1722 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1723 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1724 \
1725+ pax_open_kernel(); \
1726 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1727+ pax_close_kernel(); \
1728 flush_icache_range((unsigned long)(dest_buf), \
1729 (unsigned long)(dest_buf) + (size)); \
1730 \
1731diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1732index e42cf59..7b94b8f 100644
1733--- a/arch/arm/include/asm/futex.h
1734+++ b/arch/arm/include/asm/futex.h
1735@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1736 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1737 return -EFAULT;
1738
1739+ pax_open_userland();
1740+
1741 smp_mb();
1742 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1743 "1: ldrex %1, [%4]\n"
1744@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1745 : "cc", "memory");
1746 smp_mb();
1747
1748+ pax_close_userland();
1749+
1750 *uval = val;
1751 return ret;
1752 }
1753@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1754 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1755 return -EFAULT;
1756
1757+ pax_open_userland();
1758+
1759 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1760 "1: " TUSER(ldr) " %1, [%4]\n"
1761 " teq %1, %2\n"
1762@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1763 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1764 : "cc", "memory");
1765
1766+ pax_close_userland();
1767+
1768 *uval = val;
1769 return ret;
1770 }
1771@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1772 return -EFAULT;
1773
1774 pagefault_disable(); /* implies preempt_disable() */
1775+ pax_open_userland();
1776
1777 switch (op) {
1778 case FUTEX_OP_SET:
1779@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1780 ret = -ENOSYS;
1781 }
1782
1783+ pax_close_userland();
1784 pagefault_enable(); /* subsumes preempt_enable() */
1785
1786 if (!ret) {
1787diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1788index 83eb2f7..ed77159 100644
1789--- a/arch/arm/include/asm/kmap_types.h
1790+++ b/arch/arm/include/asm/kmap_types.h
1791@@ -4,6 +4,6 @@
1792 /*
1793 * This is the "bare minimum". AIO seems to require this.
1794 */
1795-#define KM_TYPE_NR 16
1796+#define KM_TYPE_NR 17
1797
1798 #endif
1799diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1800index 9e614a1..3302cca 100644
1801--- a/arch/arm/include/asm/mach/dma.h
1802+++ b/arch/arm/include/asm/mach/dma.h
1803@@ -22,7 +22,7 @@ struct dma_ops {
1804 int (*residue)(unsigned int, dma_t *); /* optional */
1805 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1806 const char *type;
1807-};
1808+} __do_const;
1809
1810 struct dma_struct {
1811 void *addr; /* single DMA address */
1812diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1813index 2fe141f..192dc01 100644
1814--- a/arch/arm/include/asm/mach/map.h
1815+++ b/arch/arm/include/asm/mach/map.h
1816@@ -27,13 +27,16 @@ struct map_desc {
1817 #define MT_MINICLEAN 6
1818 #define MT_LOW_VECTORS 7
1819 #define MT_HIGH_VECTORS 8
1820-#define MT_MEMORY 9
1821+#define MT_MEMORY_RWX 9
1822 #define MT_ROM 10
1823-#define MT_MEMORY_NONCACHED 11
1824+#define MT_MEMORY_NONCACHED_RX 11
1825 #define MT_MEMORY_DTCM 12
1826 #define MT_MEMORY_ITCM 13
1827 #define MT_MEMORY_SO 14
1828 #define MT_MEMORY_DMA_READY 15
1829+#define MT_MEMORY_RW 16
1830+#define MT_MEMORY_RX 17
1831+#define MT_MEMORY_NONCACHED_RW 18
1832
1833 #ifdef CONFIG_MMU
1834 extern void iotable_init(struct map_desc *, int);
1835diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1836index f94784f..9a09a4a 100644
1837--- a/arch/arm/include/asm/outercache.h
1838+++ b/arch/arm/include/asm/outercache.h
1839@@ -35,7 +35,7 @@ struct outer_cache_fns {
1840 #endif
1841 void (*set_debug)(unsigned long);
1842 void (*resume)(void);
1843-};
1844+} __no_const;
1845
1846 extern struct outer_cache_fns outer_cache;
1847
1848diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1849index 4355f0e..c229913 100644
1850--- a/arch/arm/include/asm/page.h
1851+++ b/arch/arm/include/asm/page.h
1852@@ -114,7 +114,7 @@ struct cpu_user_fns {
1853 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1854 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1855 unsigned long vaddr, struct vm_area_struct *vma);
1856-};
1857+} __no_const;
1858
1859 #ifdef MULTI_USER
1860 extern struct cpu_user_fns cpu_user;
1861diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1862index 78a7793..e3dc06c 100644
1863--- a/arch/arm/include/asm/pgalloc.h
1864+++ b/arch/arm/include/asm/pgalloc.h
1865@@ -17,6 +17,7 @@
1866 #include <asm/processor.h>
1867 #include <asm/cacheflush.h>
1868 #include <asm/tlbflush.h>
1869+#include <asm/system_info.h>
1870
1871 #define check_pgt_cache() do { } while (0)
1872
1873@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1874 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1875 }
1876
1877+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1878+{
1879+ pud_populate(mm, pud, pmd);
1880+}
1881+
1882 #else /* !CONFIG_ARM_LPAE */
1883
1884 /*
1885@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1886 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1887 #define pmd_free(mm, pmd) do { } while (0)
1888 #define pud_populate(mm,pmd,pte) BUG()
1889+#define pud_populate_kernel(mm,pmd,pte) BUG()
1890
1891 #endif /* CONFIG_ARM_LPAE */
1892
1893@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1894 __free_page(pte);
1895 }
1896
1897+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1898+{
1899+#ifdef CONFIG_ARM_LPAE
1900+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1901+#else
1902+ if (addr & SECTION_SIZE)
1903+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1904+ else
1905+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1906+#endif
1907+ flush_pmd_entry(pmdp);
1908+}
1909+
1910 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1911 pmdval_t prot)
1912 {
1913@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1914 static inline void
1915 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1916 {
1917- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1918+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1919 }
1920 #define pmd_pgtable(pmd) pmd_page(pmd)
1921
1922diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1923index 5cfba15..f415e1a 100644
1924--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1925+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1926@@ -20,12 +20,15 @@
1927 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1928 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1929 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1930+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1931 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1932 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1933 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1934+
1935 /*
1936 * - section
1937 */
1938+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1939 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1940 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1941 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1942@@ -37,6 +40,7 @@
1943 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1944 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1945 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1946+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1947
1948 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1949 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1950@@ -66,6 +70,7 @@
1951 * - extended small page/tiny page
1952 */
1953 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1954+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1955 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1956 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1957 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1958diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1959index 86a659a..70e0120 100644
1960--- a/arch/arm/include/asm/pgtable-2level.h
1961+++ b/arch/arm/include/asm/pgtable-2level.h
1962@@ -126,6 +126,9 @@
1963 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1964 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1965
1966+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1967+#define L_PTE_PXN (_AT(pteval_t, 0))
1968+
1969 /*
1970 * These are the memory types, defined to be compatible with
1971 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1972diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1973index 626989f..9d67a33 100644
1974--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1975+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1976@@ -75,6 +75,7 @@
1977 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1978 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1979 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1980+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1981 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1982
1983 /*
1984diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1985index 4f95039..b2dd513 100644
1986--- a/arch/arm/include/asm/pgtable-3level.h
1987+++ b/arch/arm/include/asm/pgtable-3level.h
1988@@ -82,6 +82,7 @@
1989 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1990 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1991 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1992+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1993 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1994 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1995 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1996@@ -95,6 +96,7 @@
1997 /*
1998 * To be used in assembly code with the upper page attributes.
1999 */
2000+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2001 #define L_PTE_XN_HIGH (1 << (54 - 32))
2002 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2003
2004diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2005index 1571d12..b8a9b43 100644
2006--- a/arch/arm/include/asm/pgtable.h
2007+++ b/arch/arm/include/asm/pgtable.h
2008@@ -33,6 +33,9 @@
2009 #include <asm/pgtable-2level.h>
2010 #endif
2011
2012+#define ktla_ktva(addr) (addr)
2013+#define ktva_ktla(addr) (addr)
2014+
2015 /*
2016 * Just any arbitrary offset to the start of the vmalloc VM area: the
2017 * current 8MB value just means that there will be a 8MB "hole" after the
2018@@ -48,6 +51,9 @@
2019 #define LIBRARY_TEXT_START 0x0c000000
2020
2021 #ifndef __ASSEMBLY__
2022+extern pteval_t __supported_pte_mask;
2023+extern pmdval_t __supported_pmd_mask;
2024+
2025 extern void __pte_error(const char *file, int line, pte_t);
2026 extern void __pmd_error(const char *file, int line, pmd_t);
2027 extern void __pgd_error(const char *file, int line, pgd_t);
2028@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2029 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2030 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2031
2032+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2033+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2034+
2035+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2036+#include <asm/domain.h>
2037+#include <linux/thread_info.h>
2038+#include <linux/preempt.h>
2039+
2040+static inline int test_domain(int domain, int domaintype)
2041+{
2042+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2043+}
2044+#endif
2045+
2046+#ifdef CONFIG_PAX_KERNEXEC
2047+static inline unsigned long pax_open_kernel(void) {
2048+#ifdef CONFIG_ARM_LPAE
2049+ /* TODO */
2050+#else
2051+ preempt_disable();
2052+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2053+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2054+#endif
2055+ return 0;
2056+}
2057+
2058+static inline unsigned long pax_close_kernel(void) {
2059+#ifdef CONFIG_ARM_LPAE
2060+ /* TODO */
2061+#else
2062+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2063+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2064+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2065+ preempt_enable_no_resched();
2066+#endif
2067+ return 0;
2068+}
2069+#else
2070+static inline unsigned long pax_open_kernel(void) { return 0; }
2071+static inline unsigned long pax_close_kernel(void) { return 0; }
2072+#endif
2073+
2074 /*
2075 * This is the lowest virtual address we can permit any user space
2076 * mapping to be mapped at. This is particularly important for
2077@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2078 /*
2079 * The pgprot_* and protection_map entries will be fixed up in runtime
2080 * to include the cachable and bufferable bits based on memory policy,
2081- * as well as any architecture dependent bits like global/ASID and SMP
2082- * shared mapping bits.
2083+ * as well as any architecture dependent bits like global/ASID, PXN,
2084+ * and SMP shared mapping bits.
2085 */
2086 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2087
2088@@ -260,7 +308,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2089 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2090 {
2091 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2092- L_PTE_NONE | L_PTE_VALID;
2093+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2094 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2095 return pte;
2096 }
2097diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2098index 5324c11..bcae5f0 100644
2099--- a/arch/arm/include/asm/proc-fns.h
2100+++ b/arch/arm/include/asm/proc-fns.h
2101@@ -75,7 +75,7 @@ extern struct processor {
2102 unsigned int suspend_size;
2103 void (*do_suspend)(void *);
2104 void (*do_resume)(void *);
2105-} processor;
2106+} __do_const processor;
2107
2108 #ifndef MULTI_CPU
2109 extern void cpu_proc_init(void);
2110diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2111index c4ae171..ea0c0c2 100644
2112--- a/arch/arm/include/asm/psci.h
2113+++ b/arch/arm/include/asm/psci.h
2114@@ -29,7 +29,7 @@ struct psci_operations {
2115 int (*cpu_off)(struct psci_power_state state);
2116 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2117 int (*migrate)(unsigned long cpuid);
2118-};
2119+} __no_const;
2120
2121 extern struct psci_operations psci_ops;
2122 extern struct smp_operations psci_smp_ops;
2123diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2124index 22a3b9b..7f214ee 100644
2125--- a/arch/arm/include/asm/smp.h
2126+++ b/arch/arm/include/asm/smp.h
2127@@ -112,7 +112,7 @@ struct smp_operations {
2128 int (*cpu_disable)(unsigned int cpu);
2129 #endif
2130 #endif
2131-};
2132+} __no_const;
2133
2134 /*
2135 * set platform specific SMP operations
2136diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2137index 71a06b2..8bb9ae1 100644
2138--- a/arch/arm/include/asm/thread_info.h
2139+++ b/arch/arm/include/asm/thread_info.h
2140@@ -88,9 +88,9 @@ struct thread_info {
2141 .flags = 0, \
2142 .preempt_count = INIT_PREEMPT_COUNT, \
2143 .addr_limit = KERNEL_DS, \
2144- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2145- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2146- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2147+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2148+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2149+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2150 .restart_block = { \
2151 .fn = do_no_restart_syscall, \
2152 }, \
2153@@ -157,7 +157,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2154 #define TIF_SYSCALL_AUDIT 9
2155 #define TIF_SYSCALL_TRACEPOINT 10
2156 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2157-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2158+/* within 8 bits of TIF_SYSCALL_TRACE
2159+ * to meet flexible second operand requirements
2160+ */
2161+#define TIF_GRSEC_SETXID 12
2162+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2163 #define TIF_USING_IWMMXT 17
2164 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2165 #define TIF_RESTORE_SIGMASK 20
2166@@ -170,10 +174,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2167 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2168 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2169 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2170+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2171
2172 /* Checks for any syscall work in entry-common.S */
2173 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2174- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2175+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2176
2177 /*
2178 * Change these and you break ASM code in entry-common.S
2179diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2180index 72abdc5..9eba222 100644
2181--- a/arch/arm/include/asm/uaccess.h
2182+++ b/arch/arm/include/asm/uaccess.h
2183@@ -18,6 +18,7 @@
2184 #include <asm/domain.h>
2185 #include <asm/unified.h>
2186 #include <asm/compiler.h>
2187+#include <asm/pgtable.h>
2188
2189 #if __LINUX_ARM_ARCH__ < 6
2190 #include <asm-generic/uaccess-unaligned.h>
2191@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2192 static inline void set_fs(mm_segment_t fs)
2193 {
2194 current_thread_info()->addr_limit = fs;
2195- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2196+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2197 }
2198
2199 #define segment_eq(a,b) ((a) == (b))
2200
2201+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2202+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2203+
2204+static inline void pax_open_userland(void)
2205+{
2206+
2207+#ifdef CONFIG_PAX_MEMORY_UDEREF
2208+ if (segment_eq(get_fs(), USER_DS)) {
2209+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2210+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2211+ }
2212+#endif
2213+
2214+}
2215+
2216+static inline void pax_close_userland(void)
2217+{
2218+
2219+#ifdef CONFIG_PAX_MEMORY_UDEREF
2220+ if (segment_eq(get_fs(), USER_DS)) {
2221+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2222+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2223+ }
2224+#endif
2225+
2226+}
2227+
2228 #define __addr_ok(addr) ({ \
2229 unsigned long flag; \
2230 __asm__("cmp %2, %0; movlo %0, #0" \
2231@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2232
2233 #define get_user(x,p) \
2234 ({ \
2235+ int __e; \
2236 might_fault(); \
2237- __get_user_check(x,p); \
2238+ pax_open_userland(); \
2239+ __e = __get_user_check(x,p); \
2240+ pax_close_userland(); \
2241+ __e; \
2242 })
2243
2244 extern int __put_user_1(void *, unsigned int);
2245@@ -195,8 +227,12 @@ extern int __put_user_8(void *, unsigned long long);
2246
2247 #define put_user(x,p) \
2248 ({ \
2249+ int __e; \
2250 might_fault(); \
2251- __put_user_check(x,p); \
2252+ pax_open_userland(); \
2253+ __e = __put_user_check(x,p); \
2254+ pax_close_userland(); \
2255+ __e; \
2256 })
2257
2258 #else /* CONFIG_MMU */
2259@@ -237,13 +273,17 @@ static inline void set_fs(mm_segment_t fs)
2260 #define __get_user(x,ptr) \
2261 ({ \
2262 long __gu_err = 0; \
2263+ pax_open_userland(); \
2264 __get_user_err((x),(ptr),__gu_err); \
2265+ pax_close_userland(); \
2266 __gu_err; \
2267 })
2268
2269 #define __get_user_error(x,ptr,err) \
2270 ({ \
2271+ pax_open_userland(); \
2272 __get_user_err((x),(ptr),err); \
2273+ pax_close_userland(); \
2274 (void) 0; \
2275 })
2276
2277@@ -319,13 +359,17 @@ do { \
2278 #define __put_user(x,ptr) \
2279 ({ \
2280 long __pu_err = 0; \
2281+ pax_open_userland(); \
2282 __put_user_err((x),(ptr),__pu_err); \
2283+ pax_close_userland(); \
2284 __pu_err; \
2285 })
2286
2287 #define __put_user_error(x,ptr,err) \
2288 ({ \
2289+ pax_open_userland(); \
2290 __put_user_err((x),(ptr),err); \
2291+ pax_close_userland(); \
2292 (void) 0; \
2293 })
2294
2295@@ -425,11 +469,44 @@ do { \
2296
2297
2298 #ifdef CONFIG_MMU
2299-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2300-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2301+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2302+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2303+
2304+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2305+{
2306+ unsigned long ret;
2307+
2308+ check_object_size(to, n, false);
2309+ pax_open_userland();
2310+ ret = ___copy_from_user(to, from, n);
2311+ pax_close_userland();
2312+ return ret;
2313+}
2314+
2315+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2316+{
2317+ unsigned long ret;
2318+
2319+ check_object_size(from, n, true);
2320+ pax_open_userland();
2321+ ret = ___copy_to_user(to, from, n);
2322+ pax_close_userland();
2323+ return ret;
2324+}
2325+
2326 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2327-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2328+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2329 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2330+
2331+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2332+{
2333+ unsigned long ret;
2334+ pax_open_userland();
2335+ ret = ___clear_user(addr, n);
2336+ pax_close_userland();
2337+ return ret;
2338+}
2339+
2340 #else
2341 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2342 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2343@@ -438,6 +515,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2344
2345 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2346 {
2347+ if ((long)n < 0)
2348+ return n;
2349+
2350 if (access_ok(VERIFY_READ, from, n))
2351 n = __copy_from_user(to, from, n);
2352 else /* security hole - plug it */
2353@@ -447,6 +527,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2354
2355 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2356 {
2357+ if ((long)n < 0)
2358+ return n;
2359+
2360 if (access_ok(VERIFY_WRITE, to, n))
2361 n = __copy_to_user(to, from, n);
2362 return n;
2363diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2364index 5af0ed1..cea83883 100644
2365--- a/arch/arm/include/uapi/asm/ptrace.h
2366+++ b/arch/arm/include/uapi/asm/ptrace.h
2367@@ -92,7 +92,7 @@
2368 * ARMv7 groups of PSR bits
2369 */
2370 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2371-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2372+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2373 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2374 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2375
2376diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2377index 1f031dd..d9b5e4a 100644
2378--- a/arch/arm/kernel/armksyms.c
2379+++ b/arch/arm/kernel/armksyms.c
2380@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2381
2382 /* networking */
2383 EXPORT_SYMBOL(csum_partial);
2384-EXPORT_SYMBOL(csum_partial_copy_from_user);
2385+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2386 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2387 EXPORT_SYMBOL(__csum_ipv6_magic);
2388
2389@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2390 #ifdef CONFIG_MMU
2391 EXPORT_SYMBOL(copy_page);
2392
2393-EXPORT_SYMBOL(__copy_from_user);
2394-EXPORT_SYMBOL(__copy_to_user);
2395-EXPORT_SYMBOL(__clear_user);
2396+EXPORT_SYMBOL(___copy_from_user);
2397+EXPORT_SYMBOL(___copy_to_user);
2398+EXPORT_SYMBOL(___clear_user);
2399
2400 EXPORT_SYMBOL(__get_user_1);
2401 EXPORT_SYMBOL(__get_user_2);
2402diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2403index b3fb8c9..59cfab2 100644
2404--- a/arch/arm/kernel/entry-armv.S
2405+++ b/arch/arm/kernel/entry-armv.S
2406@@ -47,6 +47,87 @@
2407 9997:
2408 .endm
2409
2410+ .macro pax_enter_kernel
2411+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2412+ @ make aligned space for saved DACR
2413+ sub sp, sp, #8
2414+ @ save regs
2415+ stmdb sp!, {r1, r2}
2416+ @ read DACR from cpu_domain into r1
2417+ mov r2, sp
2418+ @ assume 8K pages, since we have to split the immediate in two
2419+ bic r2, r2, #(0x1fc0)
2420+ bic r2, r2, #(0x3f)
2421+ ldr r1, [r2, #TI_CPU_DOMAIN]
2422+ @ store old DACR on stack
2423+ str r1, [sp, #8]
2424+#ifdef CONFIG_PAX_KERNEXEC
2425+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2426+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2427+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2428+#endif
2429+#ifdef CONFIG_PAX_MEMORY_UDEREF
2430+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2431+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2432+#endif
2433+ @ write r1 to current_thread_info()->cpu_domain
2434+ str r1, [r2, #TI_CPU_DOMAIN]
2435+ @ write r1 to DACR
2436+ mcr p15, 0, r1, c3, c0, 0
2437+ @ instruction sync
2438+ instr_sync
2439+ @ restore regs
2440+ ldmia sp!, {r1, r2}
2441+#endif
2442+ .endm
2443+
2444+ .macro pax_open_userland
2445+#ifdef CONFIG_PAX_MEMORY_UDEREF
2446+ @ save regs
2447+ stmdb sp!, {r0, r1}
2448+ @ read DACR from cpu_domain into r1
2449+ mov r0, sp
2450+ @ assume 8K pages, since we have to split the immediate in two
2451+ bic r0, r0, #(0x1fc0)
2452+ bic r0, r0, #(0x3f)
2453+ ldr r1, [r0, #TI_CPU_DOMAIN]
2454+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2455+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2456+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2457+ @ write r1 to current_thread_info()->cpu_domain
2458+ str r1, [r0, #TI_CPU_DOMAIN]
2459+ @ write r1 to DACR
2460+ mcr p15, 0, r1, c3, c0, 0
2461+ @ instruction sync
2462+ instr_sync
2463+ @ restore regs
2464+ ldmia sp!, {r0, r1}
2465+#endif
2466+ .endm
2467+
2468+ .macro pax_close_userland
2469+#ifdef CONFIG_PAX_MEMORY_UDEREF
2470+ @ save regs
2471+ stmdb sp!, {r0, r1}
2472+ @ read DACR from cpu_domain into r1
2473+ mov r0, sp
2474+ @ assume 8K pages, since we have to split the immediate in two
2475+ bic r0, r0, #(0x1fc0)
2476+ bic r0, r0, #(0x3f)
2477+ ldr r1, [r0, #TI_CPU_DOMAIN]
2478+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2479+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2480+ @ write r1 to current_thread_info()->cpu_domain
2481+ str r1, [r0, #TI_CPU_DOMAIN]
2482+ @ write r1 to DACR
2483+ mcr p15, 0, r1, c3, c0, 0
2484+ @ instruction sync
2485+ instr_sync
2486+ @ restore regs
2487+ ldmia sp!, {r0, r1}
2488+#endif
2489+ .endm
2490+
2491 .macro pabt_helper
2492 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2493 #ifdef MULTI_PABORT
2494@@ -89,11 +170,15 @@
2495 * Invalid mode handlers
2496 */
2497 .macro inv_entry, reason
2498+
2499+ pax_enter_kernel
2500+
2501 sub sp, sp, #S_FRAME_SIZE
2502 ARM( stmib sp, {r1 - lr} )
2503 THUMB( stmia sp, {r0 - r12} )
2504 THUMB( str sp, [sp, #S_SP] )
2505 THUMB( str lr, [sp, #S_LR] )
2506+
2507 mov r1, #\reason
2508 .endm
2509
2510@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2511 .macro svc_entry, stack_hole=0
2512 UNWIND(.fnstart )
2513 UNWIND(.save {r0 - pc} )
2514+
2515+ pax_enter_kernel
2516+
2517 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2518+
2519 #ifdef CONFIG_THUMB2_KERNEL
2520 SPFIX( str r0, [sp] ) @ temporarily saved
2521 SPFIX( mov r0, sp )
2522@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2523 ldmia r0, {r3 - r5}
2524 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2525 mov r6, #-1 @ "" "" "" ""
2526+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2527+ @ offset sp by 8 as done in pax_enter_kernel
2528+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2529+#else
2530 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2531+#endif
2532 SPFIX( addeq r2, r2, #4 )
2533 str r3, [sp, #-4]! @ save the "real" r0 copied
2534 @ from the exception stack
2535@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2536 .macro usr_entry
2537 UNWIND(.fnstart )
2538 UNWIND(.cantunwind ) @ don't unwind the user space
2539+
2540+ pax_enter_kernel_user
2541+
2542 sub sp, sp, #S_FRAME_SIZE
2543 ARM( stmib sp, {r1 - r12} )
2544 THUMB( stmia sp, {r0 - r12} )
2545@@ -416,7 +513,9 @@ __und_usr:
2546 tst r3, #PSR_T_BIT @ Thumb mode?
2547 bne __und_usr_thumb
2548 sub r4, r2, #4 @ ARM instr at LR - 4
2549+ pax_open_userland
2550 1: ldrt r0, [r4]
2551+ pax_close_userland
2552 ARM_BE8(rev r0, r0) @ little endian instruction
2553
2554 @ r0 = 32-bit ARM instruction which caused the exception
2555@@ -450,10 +549,14 @@ __und_usr_thumb:
2556 */
2557 .arch armv6t2
2558 #endif
2559+ pax_open_userland
2560 2: ldrht r5, [r4]
2561+ pax_close_userland
2562 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2563 blo __und_usr_fault_16 @ 16bit undefined instruction
2564+ pax_open_userland
2565 3: ldrht r0, [r2]
2566+ pax_close_userland
2567 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2568 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2569 orr r0, r0, r5, lsl #16
2570@@ -482,7 +585,8 @@ ENDPROC(__und_usr)
2571 */
2572 .pushsection .fixup, "ax"
2573 .align 2
2574-4: mov pc, r9
2575+4: pax_close_userland
2576+ mov pc, r9
2577 .popsection
2578 .pushsection __ex_table,"a"
2579 .long 1b, 4b
2580@@ -692,7 +796,7 @@ ENTRY(__switch_to)
2581 THUMB( str lr, [ip], #4 )
2582 ldr r4, [r2, #TI_TP_VALUE]
2583 ldr r5, [r2, #TI_TP_VALUE + 4]
2584-#ifdef CONFIG_CPU_USE_DOMAINS
2585+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2586 ldr r6, [r2, #TI_CPU_DOMAIN]
2587 #endif
2588 switch_tls r1, r4, r5, r3, r7
2589@@ -701,7 +805,7 @@ ENTRY(__switch_to)
2590 ldr r8, =__stack_chk_guard
2591 ldr r7, [r7, #TSK_STACK_CANARY]
2592 #endif
2593-#ifdef CONFIG_CPU_USE_DOMAINS
2594+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2595 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2596 #endif
2597 mov r5, r0
2598diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2599index a2dcafd..1048b5a 100644
2600--- a/arch/arm/kernel/entry-common.S
2601+++ b/arch/arm/kernel/entry-common.S
2602@@ -10,18 +10,46 @@
2603
2604 #include <asm/unistd.h>
2605 #include <asm/ftrace.h>
2606+#include <asm/domain.h>
2607 #include <asm/unwind.h>
2608
2609+#include "entry-header.S"
2610+
2611 #ifdef CONFIG_NEED_RET_TO_USER
2612 #include <mach/entry-macro.S>
2613 #else
2614 .macro arch_ret_to_user, tmp1, tmp2
2615+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2616+ @ save regs
2617+ stmdb sp!, {r1, r2}
2618+ @ read DACR from cpu_domain into r1
2619+ mov r2, sp
2620+ @ assume 8K pages, since we have to split the immediate in two
2621+ bic r2, r2, #(0x1fc0)
2622+ bic r2, r2, #(0x3f)
2623+ ldr r1, [r2, #TI_CPU_DOMAIN]
2624+#ifdef CONFIG_PAX_KERNEXEC
2625+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2626+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2627+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2628+#endif
2629+#ifdef CONFIG_PAX_MEMORY_UDEREF
2630+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2631+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2633+#endif
2634+ @ write r1 to current_thread_info()->cpu_domain
2635+ str r1, [r2, #TI_CPU_DOMAIN]
2636+ @ write r1 to DACR
2637+ mcr p15, 0, r1, c3, c0, 0
2638+ @ instruction sync
2639+ instr_sync
2640+ @ restore regs
2641+ ldmia sp!, {r1, r2}
2642+#endif
2643 .endm
2644 #endif
2645
2646-#include "entry-header.S"
2647-
2648-
2649 .align 5
2650 /*
2651 * This is the fast syscall return path. We do as little as
2652@@ -411,6 +439,12 @@ ENTRY(vector_swi)
2653 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2654 #endif
2655
2656+ /*
2657+ * do this here to avoid a performance hit of wrapping the code above
2658+ * that directly dereferences userland to parse the SWI instruction
2659+ */
2660+ pax_enter_kernel_user
2661+
2662 adr tbl, sys_call_table @ load syscall table pointer
2663
2664 #if defined(CONFIG_OABI_COMPAT)
2665diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2666index 39f89fb..d612bd9 100644
2667--- a/arch/arm/kernel/entry-header.S
2668+++ b/arch/arm/kernel/entry-header.S
2669@@ -184,6 +184,60 @@
2670 msr cpsr_c, \rtemp @ switch back to the SVC mode
2671 .endm
2672
2673+ .macro pax_enter_kernel_user
2674+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2675+ @ save regs
2676+ stmdb sp!, {r0, r1}
2677+ @ read DACR from cpu_domain into r1
2678+ mov r0, sp
2679+ @ assume 8K pages, since we have to split the immediate in two
2680+ bic r0, r0, #(0x1fc0)
2681+ bic r0, r0, #(0x3f)
2682+ ldr r1, [r0, #TI_CPU_DOMAIN]
2683+#ifdef CONFIG_PAX_MEMORY_UDEREF
2684+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2685+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2686+#endif
2687+#ifdef CONFIG_PAX_KERNEXEC
2688+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2689+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2690+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2691+#endif
2692+ @ write r1 to current_thread_info()->cpu_domain
2693+ str r1, [r0, #TI_CPU_DOMAIN]
2694+ @ write r1 to DACR
2695+ mcr p15, 0, r1, c3, c0, 0
2696+ @ instruction sync
2697+ instr_sync
2698+ @ restore regs
2699+ ldmia sp!, {r0, r1}
2700+#endif
2701+ .endm
2702+
2703+ .macro pax_exit_kernel
2704+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2705+ @ save regs
2706+ stmdb sp!, {r0, r1}
2707+ @ read old DACR from stack into r1
2708+ ldr r1, [sp, #(8 + S_SP)]
2709+ sub r1, r1, #8
2710+ ldr r1, [r1]
2711+
2712+ @ write r1 to current_thread_info()->cpu_domain
2713+ mov r0, sp
2714+ @ assume 8K pages, since we have to split the immediate in two
2715+ bic r0, r0, #(0x1fc0)
2716+ bic r0, r0, #(0x3f)
2717+ str r1, [r0, #TI_CPU_DOMAIN]
2718+ @ write r1 to DACR
2719+ mcr p15, 0, r1, c3, c0, 0
2720+ @ instruction sync
2721+ instr_sync
2722+ @ restore regs
2723+ ldmia sp!, {r0, r1}
2724+#endif
2725+ .endm
2726+
2727 #ifndef CONFIG_THUMB2_KERNEL
2728 .macro svc_exit, rpsr, irq = 0
2729 .if \irq != 0
2730@@ -203,6 +257,9 @@
2731 blne trace_hardirqs_off
2732 #endif
2733 .endif
2734+
2735+ pax_exit_kernel
2736+
2737 msr spsr_cxsf, \rpsr
2738 #if defined(CONFIG_CPU_V6)
2739 ldr r0, [sp]
2740@@ -266,6 +323,9 @@
2741 blne trace_hardirqs_off
2742 #endif
2743 .endif
2744+
2745+ pax_exit_kernel
2746+
2747 ldr lr, [sp, #S_SP] @ top of the stack
2748 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2749 clrex @ clear the exclusive monitor
2750diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2751index 918875d..cd5fa27 100644
2752--- a/arch/arm/kernel/fiq.c
2753+++ b/arch/arm/kernel/fiq.c
2754@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2755 void *base = vectors_page;
2756 unsigned offset = FIQ_OFFSET;
2757
2758+ pax_open_kernel();
2759 memcpy(base + offset, start, length);
2760+ pax_close_kernel();
2761+
2762 if (!cache_is_vipt_nonaliasing())
2763 flush_icache_range((unsigned long)base + offset, offset +
2764 length);
2765diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2766index 32f317e..710ae07 100644
2767--- a/arch/arm/kernel/head.S
2768+++ b/arch/arm/kernel/head.S
2769@@ -52,7 +52,9 @@
2770 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2771
2772 .macro pgtbl, rd, phys
2773- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2774+ mov \rd, #TEXT_OFFSET
2775+ sub \rd, #PG_DIR_SIZE
2776+ add \rd, \rd, \phys
2777 .endm
2778
2779 /*
2780@@ -436,7 +438,7 @@ __enable_mmu:
2781 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2782 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2783 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2784- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2785+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2786 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2787 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2788 #endif
2789diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2790index 45e4781..8eac93d 100644
2791--- a/arch/arm/kernel/module.c
2792+++ b/arch/arm/kernel/module.c
2793@@ -38,12 +38,39 @@
2794 #endif
2795
2796 #ifdef CONFIG_MMU
2797-void *module_alloc(unsigned long size)
2798+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2799 {
2800+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2801+ return NULL;
2802 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2803- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2804+ GFP_KERNEL, prot, NUMA_NO_NODE,
2805 __builtin_return_address(0));
2806 }
2807+
2808+void *module_alloc(unsigned long size)
2809+{
2810+
2811+#ifdef CONFIG_PAX_KERNEXEC
2812+ return __module_alloc(size, PAGE_KERNEL);
2813+#else
2814+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2815+#endif
2816+
2817+}
2818+
2819+#ifdef CONFIG_PAX_KERNEXEC
2820+void module_free_exec(struct module *mod, void *module_region)
2821+{
2822+ module_free(mod, module_region);
2823+}
2824+EXPORT_SYMBOL(module_free_exec);
2825+
2826+void *module_alloc_exec(unsigned long size)
2827+{
2828+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2829+}
2830+EXPORT_SYMBOL(module_alloc_exec);
2831+#endif
2832 #endif
2833
2834 int
2835diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2836index 07314af..c46655c 100644
2837--- a/arch/arm/kernel/patch.c
2838+++ b/arch/arm/kernel/patch.c
2839@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2840 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2841 int size;
2842
2843+ pax_open_kernel();
2844 if (thumb2 && __opcode_is_thumb16(insn)) {
2845 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2846 size = sizeof(u16);
2847@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2848 *(u32 *)addr = insn;
2849 size = sizeof(u32);
2850 }
2851+ pax_close_kernel();
2852
2853 flush_icache_range((uintptr_t)(addr),
2854 (uintptr_t)(addr) + size);
2855diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2856index 92f7b15..7048500 100644
2857--- a/arch/arm/kernel/process.c
2858+++ b/arch/arm/kernel/process.c
2859@@ -217,6 +217,7 @@ void machine_power_off(void)
2860
2861 if (pm_power_off)
2862 pm_power_off();
2863+ BUG();
2864 }
2865
2866 /*
2867@@ -230,7 +231,7 @@ void machine_power_off(void)
2868 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2869 * to use. Implementing such co-ordination would be essentially impossible.
2870 */
2871-void machine_restart(char *cmd)
2872+__noreturn void machine_restart(char *cmd)
2873 {
2874 local_irq_disable();
2875 smp_send_stop();
2876@@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs)
2877
2878 show_regs_print_info(KERN_DEFAULT);
2879
2880- print_symbol("PC is at %s\n", instruction_pointer(regs));
2881- print_symbol("LR is at %s\n", regs->ARM_lr);
2882+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2883+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2884 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2885 "sp : %08lx ip : %08lx fp : %08lx\n",
2886 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2887@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
2888 return 0;
2889 }
2890
2891-unsigned long arch_randomize_brk(struct mm_struct *mm)
2892-{
2893- unsigned long range_end = mm->brk + 0x02000000;
2894- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2895-}
2896-
2897 #ifdef CONFIG_MMU
2898 #ifdef CONFIG_KUSER_HELPERS
2899 /*
2900@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
2901
2902 static int __init gate_vma_init(void)
2903 {
2904- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2905+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2906 return 0;
2907 }
2908 arch_initcall(gate_vma_init);
2909@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
2910
2911 const char *arch_vma_name(struct vm_area_struct *vma)
2912 {
2913- return is_gate_vma(vma) ? "[vectors]" :
2914- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2915- "[sigpage]" : NULL;
2916+ return is_gate_vma(vma) ? "[vectors]" : NULL;
2917 }
2918
2919-static struct page *signal_page;
2920-extern struct page *get_signal_page(void);
2921-
2922 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2923 {
2924 struct mm_struct *mm = current->mm;
2925- unsigned long addr;
2926- int ret;
2927-
2928- if (!signal_page)
2929- signal_page = get_signal_page();
2930- if (!signal_page)
2931- return -ENOMEM;
2932
2933 down_write(&mm->mmap_sem);
2934- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2935- if (IS_ERR_VALUE(addr)) {
2936- ret = addr;
2937- goto up_fail;
2938- }
2939-
2940- ret = install_special_mapping(mm, addr, PAGE_SIZE,
2941- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2942- &signal_page);
2943-
2944- if (ret == 0)
2945- mm->context.sigpage = addr;
2946-
2947- up_fail:
2948+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2949 up_write(&mm->mmap_sem);
2950- return ret;
2951+ return 0;
2952 }
2953 #endif
2954diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2955index 4693188..4596c5e 100644
2956--- a/arch/arm/kernel/psci.c
2957+++ b/arch/arm/kernel/psci.c
2958@@ -24,7 +24,7 @@
2959 #include <asm/opcodes-virt.h>
2960 #include <asm/psci.h>
2961
2962-struct psci_operations psci_ops;
2963+struct psci_operations psci_ops __read_only;
2964
2965 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2966
2967diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2968index 0dd3b79..e018f64 100644
2969--- a/arch/arm/kernel/ptrace.c
2970+++ b/arch/arm/kernel/ptrace.c
2971@@ -929,10 +929,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2972 return current_thread_info()->syscall;
2973 }
2974
2975+#ifdef CONFIG_GRKERNSEC_SETXID
2976+extern void gr_delayed_cred_worker(void);
2977+#endif
2978+
2979 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2980 {
2981 current_thread_info()->syscall = scno;
2982
2983+#ifdef CONFIG_GRKERNSEC_SETXID
2984+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2985+ gr_delayed_cred_worker();
2986+#endif
2987+
2988 /* Do the secure computing check first; failures should be fast. */
2989 if (secure_computing(scno) == -1)
2990 return -1;
2991diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2992index 987a7f5..d9d6071 100644
2993--- a/arch/arm/kernel/setup.c
2994+++ b/arch/arm/kernel/setup.c
2995@@ -100,21 +100,23 @@ EXPORT_SYMBOL(system_serial_high);
2996 unsigned int elf_hwcap __read_mostly;
2997 EXPORT_SYMBOL(elf_hwcap);
2998
2999+pteval_t __supported_pte_mask __read_only;
3000+pmdval_t __supported_pmd_mask __read_only;
3001
3002 #ifdef MULTI_CPU
3003-struct processor processor __read_mostly;
3004+struct processor processor;
3005 #endif
3006 #ifdef MULTI_TLB
3007-struct cpu_tlb_fns cpu_tlb __read_mostly;
3008+struct cpu_tlb_fns cpu_tlb __read_only;
3009 #endif
3010 #ifdef MULTI_USER
3011-struct cpu_user_fns cpu_user __read_mostly;
3012+struct cpu_user_fns cpu_user __read_only;
3013 #endif
3014 #ifdef MULTI_CACHE
3015-struct cpu_cache_fns cpu_cache __read_mostly;
3016+struct cpu_cache_fns cpu_cache __read_only;
3017 #endif
3018 #ifdef CONFIG_OUTER_CACHE
3019-struct outer_cache_fns outer_cache __read_mostly;
3020+struct outer_cache_fns outer_cache __read_only;
3021 EXPORT_SYMBOL(outer_cache);
3022 #endif
3023
3024@@ -247,9 +249,13 @@ static int __get_cpu_architecture(void)
3025 asm("mrc p15, 0, %0, c0, c1, 4"
3026 : "=r" (mmfr0));
3027 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3028- (mmfr0 & 0x000000f0) >= 0x00000030)
3029+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3030 cpu_arch = CPU_ARCH_ARMv7;
3031- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3032+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3033+ __supported_pte_mask |= L_PTE_PXN;
3034+ __supported_pmd_mask |= PMD_PXNTABLE;
3035+ }
3036+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3037 (mmfr0 & 0x000000f0) == 0x00000020)
3038 cpu_arch = CPU_ARCH_ARMv6;
3039 else
3040@@ -573,7 +579,7 @@ static void __init setup_processor(void)
3041 __cpu_architecture = __get_cpu_architecture();
3042
3043 #ifdef MULTI_CPU
3044- processor = *list->proc;
3045+ memcpy((void *)&processor, list->proc, sizeof processor);
3046 #endif
3047 #ifdef MULTI_TLB
3048 cpu_tlb = *list->tlb;
3049diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3050index 04d6388..5115238 100644
3051--- a/arch/arm/kernel/signal.c
3052+++ b/arch/arm/kernel/signal.c
3053@@ -23,8 +23,6 @@
3054
3055 extern const unsigned long sigreturn_codes[7];
3056
3057-static unsigned long signal_return_offset;
3058-
3059 #ifdef CONFIG_CRUNCH
3060 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3061 {
3062@@ -395,8 +393,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3063 * except when the MPU has protected the vectors
3064 * page from PL0
3065 */
3066- retcode = mm->context.sigpage + signal_return_offset +
3067- (idx << 2) + thumb;
3068+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3069 } else
3070 #endif
3071 {
3072@@ -600,33 +597,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3073 } while (thread_flags & _TIF_WORK_MASK);
3074 return 0;
3075 }
3076-
3077-struct page *get_signal_page(void)
3078-{
3079- unsigned long ptr;
3080- unsigned offset;
3081- struct page *page;
3082- void *addr;
3083-
3084- page = alloc_pages(GFP_KERNEL, 0);
3085-
3086- if (!page)
3087- return NULL;
3088-
3089- addr = page_address(page);
3090-
3091- /* Give the signal return code some randomness */
3092- offset = 0x200 + (get_random_int() & 0x7fc);
3093- signal_return_offset = offset;
3094-
3095- /*
3096- * Copy signal return handlers into the vector page, and
3097- * set sigreturn to be a pointer to these.
3098- */
3099- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3100-
3101- ptr = (unsigned long)addr + offset;
3102- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3103-
3104- return page;
3105-}
3106diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3107index dc894ab..f929a0d 100644
3108--- a/arch/arm/kernel/smp.c
3109+++ b/arch/arm/kernel/smp.c
3110@@ -73,7 +73,7 @@ enum ipi_msg_type {
3111
3112 static DECLARE_COMPLETION(cpu_running);
3113
3114-static struct smp_operations smp_ops;
3115+static struct smp_operations smp_ops __read_only;
3116
3117 void __init smp_set_ops(struct smp_operations *ops)
3118 {
3119diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3120index 4636d56..ce4ec3d 100644
3121--- a/arch/arm/kernel/traps.c
3122+++ b/arch/arm/kernel/traps.c
3123@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3124 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3125 {
3126 #ifdef CONFIG_KALLSYMS
3127- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3128+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3129 #else
3130 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3131 #endif
3132@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3133 static int die_owner = -1;
3134 static unsigned int die_nest_count;
3135
3136+extern void gr_handle_kernel_exploit(void);
3137+
3138 static unsigned long oops_begin(void)
3139 {
3140 int cpu;
3141@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3142 panic("Fatal exception in interrupt");
3143 if (panic_on_oops)
3144 panic("Fatal exception");
3145+
3146+ gr_handle_kernel_exploit();
3147+
3148 if (signr)
3149 do_exit(signr);
3150 }
3151@@ -642,7 +647,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3152 * The user helper at 0xffff0fe0 must be used instead.
3153 * (see entry-armv.S for details)
3154 */
3155+ pax_open_kernel();
3156 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3157+ pax_close_kernel();
3158 }
3159 return 0;
3160
3161@@ -899,7 +906,11 @@ void __init early_trap_init(void *vectors_base)
3162 kuser_init(vectors_base);
3163
3164 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3165- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3166+
3167+#ifndef CONFIG_PAX_MEMORY_UDEREF
3168+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3169+#endif
3170+
3171 #else /* ifndef CONFIG_CPU_V7M */
3172 /*
3173 * on V7-M there is no need to copy the vector table to a dedicated
3174diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3175index 7bcee5c..e2f3249 100644
3176--- a/arch/arm/kernel/vmlinux.lds.S
3177+++ b/arch/arm/kernel/vmlinux.lds.S
3178@@ -8,7 +8,11 @@
3179 #include <asm/thread_info.h>
3180 #include <asm/memory.h>
3181 #include <asm/page.h>
3182-
3183+
3184+#ifdef CONFIG_PAX_KERNEXEC
3185+#include <asm/pgtable.h>
3186+#endif
3187+
3188 #define PROC_INFO \
3189 . = ALIGN(4); \
3190 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3191@@ -34,7 +38,7 @@
3192 #endif
3193
3194 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3195- defined(CONFIG_GENERIC_BUG)
3196+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3197 #define ARM_EXIT_KEEP(x) x
3198 #define ARM_EXIT_DISCARD(x)
3199 #else
3200@@ -90,6 +94,11 @@ SECTIONS
3201 _text = .;
3202 HEAD_TEXT
3203 }
3204+
3205+#ifdef CONFIG_PAX_KERNEXEC
3206+ . = ALIGN(1<<SECTION_SHIFT);
3207+#endif
3208+
3209 .text : { /* Real text segment */
3210 _stext = .; /* Text and read-only data */
3211 __exception_text_start = .;
3212@@ -112,6 +121,8 @@ SECTIONS
3213 ARM_CPU_KEEP(PROC_INFO)
3214 }
3215
3216+ _etext = .; /* End of text section */
3217+
3218 RO_DATA(PAGE_SIZE)
3219
3220 . = ALIGN(4);
3221@@ -142,7 +153,9 @@ SECTIONS
3222
3223 NOTES
3224
3225- _etext = .; /* End of text and rodata section */
3226+#ifdef CONFIG_PAX_KERNEXEC
3227+ . = ALIGN(1<<SECTION_SHIFT);
3228+#endif
3229
3230 #ifndef CONFIG_XIP_KERNEL
3231 . = ALIGN(PAGE_SIZE);
3232@@ -220,6 +233,11 @@ SECTIONS
3233 . = PAGE_OFFSET + TEXT_OFFSET;
3234 #else
3235 __init_end = .;
3236+
3237+#ifdef CONFIG_PAX_KERNEXEC
3238+ . = ALIGN(1<<SECTION_SHIFT);
3239+#endif
3240+
3241 . = ALIGN(THREAD_SIZE);
3242 __data_loc = .;
3243 #endif
3244diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3245index 2a700e0..745b980 100644
3246--- a/arch/arm/kvm/arm.c
3247+++ b/arch/arm/kvm/arm.c
3248@@ -56,7 +56,7 @@ static unsigned long hyp_default_vectors;
3249 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3250
3251 /* The VMID used in the VTTBR */
3252-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3253+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3254 static u8 kvm_next_vmid;
3255 static DEFINE_SPINLOCK(kvm_vmid_lock);
3256
3257@@ -397,7 +397,7 @@ void force_vm_exit(const cpumask_t *mask)
3258 */
3259 static bool need_new_vmid_gen(struct kvm *kvm)
3260 {
3261- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3262+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3263 }
3264
3265 /**
3266@@ -430,7 +430,7 @@ static void update_vttbr(struct kvm *kvm)
3267
3268 /* First user of a new VMID generation? */
3269 if (unlikely(kvm_next_vmid == 0)) {
3270- atomic64_inc(&kvm_vmid_gen);
3271+ atomic64_inc_unchecked(&kvm_vmid_gen);
3272 kvm_next_vmid = 1;
3273
3274 /*
3275@@ -447,7 +447,7 @@ static void update_vttbr(struct kvm *kvm)
3276 kvm_call_hyp(__kvm_flush_vm_context);
3277 }
3278
3279- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3280+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3281 kvm->arch.vmid = kvm_next_vmid;
3282 kvm_next_vmid++;
3283
3284diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3285index 14a0d98..7771a7d 100644
3286--- a/arch/arm/lib/clear_user.S
3287+++ b/arch/arm/lib/clear_user.S
3288@@ -12,14 +12,14 @@
3289
3290 .text
3291
3292-/* Prototype: int __clear_user(void *addr, size_t sz)
3293+/* Prototype: int ___clear_user(void *addr, size_t sz)
3294 * Purpose : clear some user memory
3295 * Params : addr - user memory address to clear
3296 * : sz - number of bytes to clear
3297 * Returns : number of bytes NOT cleared
3298 */
3299 ENTRY(__clear_user_std)
3300-WEAK(__clear_user)
3301+WEAK(___clear_user)
3302 stmfd sp!, {r1, lr}
3303 mov r2, #0
3304 cmp r1, #4
3305@@ -44,7 +44,7 @@ WEAK(__clear_user)
3306 USER( strnebt r2, [r0])
3307 mov r0, #0
3308 ldmfd sp!, {r1, pc}
3309-ENDPROC(__clear_user)
3310+ENDPROC(___clear_user)
3311 ENDPROC(__clear_user_std)
3312
3313 .pushsection .fixup,"ax"
3314diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3315index 66a477a..bee61d3 100644
3316--- a/arch/arm/lib/copy_from_user.S
3317+++ b/arch/arm/lib/copy_from_user.S
3318@@ -16,7 +16,7 @@
3319 /*
3320 * Prototype:
3321 *
3322- * size_t __copy_from_user(void *to, const void *from, size_t n)
3323+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3324 *
3325 * Purpose:
3326 *
3327@@ -84,11 +84,11 @@
3328
3329 .text
3330
3331-ENTRY(__copy_from_user)
3332+ENTRY(___copy_from_user)
3333
3334 #include "copy_template.S"
3335
3336-ENDPROC(__copy_from_user)
3337+ENDPROC(___copy_from_user)
3338
3339 .pushsection .fixup,"ax"
3340 .align 0
3341diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3342index 6ee2f67..d1cce76 100644
3343--- a/arch/arm/lib/copy_page.S
3344+++ b/arch/arm/lib/copy_page.S
3345@@ -10,6 +10,7 @@
3346 * ASM optimised string functions
3347 */
3348 #include <linux/linkage.h>
3349+#include <linux/const.h>
3350 #include <asm/assembler.h>
3351 #include <asm/asm-offsets.h>
3352 #include <asm/cache.h>
3353diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3354index d066df6..df28194 100644
3355--- a/arch/arm/lib/copy_to_user.S
3356+++ b/arch/arm/lib/copy_to_user.S
3357@@ -16,7 +16,7 @@
3358 /*
3359 * Prototype:
3360 *
3361- * size_t __copy_to_user(void *to, const void *from, size_t n)
3362+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3363 *
3364 * Purpose:
3365 *
3366@@ -88,11 +88,11 @@
3367 .text
3368
3369 ENTRY(__copy_to_user_std)
3370-WEAK(__copy_to_user)
3371+WEAK(___copy_to_user)
3372
3373 #include "copy_template.S"
3374
3375-ENDPROC(__copy_to_user)
3376+ENDPROC(___copy_to_user)
3377 ENDPROC(__copy_to_user_std)
3378
3379 .pushsection .fixup,"ax"
3380diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3381index 7d08b43..f7ca7ea 100644
3382--- a/arch/arm/lib/csumpartialcopyuser.S
3383+++ b/arch/arm/lib/csumpartialcopyuser.S
3384@@ -57,8 +57,8 @@
3385 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3386 */
3387
3388-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3389-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3390+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3391+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3392
3393 #include "csumpartialcopygeneric.S"
3394
3395diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3396index 5306de3..aed6d03 100644
3397--- a/arch/arm/lib/delay.c
3398+++ b/arch/arm/lib/delay.c
3399@@ -28,7 +28,7 @@
3400 /*
3401 * Default to the loop-based delay implementation.
3402 */
3403-struct arm_delay_ops arm_delay_ops = {
3404+struct arm_delay_ops arm_delay_ops __read_only = {
3405 .delay = __loop_delay,
3406 .const_udelay = __loop_const_udelay,
3407 .udelay = __loop_udelay,
3408diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3409index 3e58d71..029817c 100644
3410--- a/arch/arm/lib/uaccess_with_memcpy.c
3411+++ b/arch/arm/lib/uaccess_with_memcpy.c
3412@@ -136,7 +136,7 @@ out:
3413 }
3414
3415 unsigned long
3416-__copy_to_user(void __user *to, const void *from, unsigned long n)
3417+___copy_to_user(void __user *to, const void *from, unsigned long n)
3418 {
3419 /*
3420 * This test is stubbed out of the main function above to keep
3421@@ -190,7 +190,7 @@ out:
3422 return n;
3423 }
3424
3425-unsigned long __clear_user(void __user *addr, unsigned long n)
3426+unsigned long ___clear_user(void __user *addr, unsigned long n)
3427 {
3428 /* See rational for this in __copy_to_user() above. */
3429 if (n < 64)
3430diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3431index f3407a5..bd4256f 100644
3432--- a/arch/arm/mach-kirkwood/common.c
3433+++ b/arch/arm/mach-kirkwood/common.c
3434@@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3435 clk_gate_ops.disable(hw);
3436 }
3437
3438-static struct clk_ops clk_gate_fn_ops;
3439+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3440+{
3441+ return clk_gate_ops.is_enabled(hw);
3442+}
3443+
3444+static struct clk_ops clk_gate_fn_ops = {
3445+ .enable = clk_gate_fn_enable,
3446+ .disable = clk_gate_fn_disable,
3447+ .is_enabled = clk_gate_fn_is_enabled,
3448+};
3449
3450 static struct clk __init *clk_register_gate_fn(struct device *dev,
3451 const char *name,
3452@@ -190,14 +199,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3453 gate_fn->fn_en = fn_en;
3454 gate_fn->fn_dis = fn_dis;
3455
3456- /* ops is the gate ops, but with our enable/disable functions */
3457- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3458- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3459- clk_gate_fn_ops = clk_gate_ops;
3460- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3461- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3462- }
3463-
3464 clk = clk_register(dev, &gate_fn->gate.hw);
3465
3466 if (IS_ERR(clk))
3467diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3468index 827d1500..2885dc6 100644
3469--- a/arch/arm/mach-omap2/board-n8x0.c
3470+++ b/arch/arm/mach-omap2/board-n8x0.c
3471@@ -627,7 +627,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3472 }
3473 #endif
3474
3475-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3476+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3477 .late_init = n8x0_menelaus_late_init,
3478 };
3479
3480diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3481index d24926e..a7645a6 100644
3482--- a/arch/arm/mach-omap2/gpmc.c
3483+++ b/arch/arm/mach-omap2/gpmc.c
3484@@ -148,7 +148,6 @@ struct omap3_gpmc_regs {
3485 };
3486
3487 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3488-static struct irq_chip gpmc_irq_chip;
3489 static int gpmc_irq_start;
3490
3491 static struct resource gpmc_mem_root;
3492@@ -716,6 +715,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3493
3494 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3495
3496+static struct irq_chip gpmc_irq_chip = {
3497+ .name = "gpmc",
3498+ .irq_startup = gpmc_irq_noop_ret,
3499+ .irq_enable = gpmc_irq_enable,
3500+ .irq_disable = gpmc_irq_disable,
3501+ .irq_shutdown = gpmc_irq_noop,
3502+ .irq_ack = gpmc_irq_noop,
3503+ .irq_mask = gpmc_irq_noop,
3504+ .irq_unmask = gpmc_irq_noop,
3505+
3506+};
3507+
3508 static int gpmc_setup_irq(void)
3509 {
3510 int i;
3511@@ -730,15 +741,6 @@ static int gpmc_setup_irq(void)
3512 return gpmc_irq_start;
3513 }
3514
3515- gpmc_irq_chip.name = "gpmc";
3516- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3517- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3518- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3519- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3520- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3521- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3522- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3523-
3524 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3525 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3526
3527diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3528index f991016..145ebeb 100644
3529--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3530+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3531@@ -84,7 +84,7 @@ struct cpu_pm_ops {
3532 int (*finish_suspend)(unsigned long cpu_state);
3533 void (*resume)(void);
3534 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3535-};
3536+} __no_const;
3537
3538 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3539 static struct powerdomain *mpuss_pd;
3540@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3541 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3542 {}
3543
3544-struct cpu_pm_ops omap_pm_ops = {
3545+static struct cpu_pm_ops omap_pm_ops __read_only = {
3546 .finish_suspend = default_finish_suspend,
3547 .resume = dummy_cpu_resume,
3548 .scu_prepare = dummy_scu_prepare,
3549diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3550index 3664562..72f85c6 100644
3551--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3552+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3553@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3554 return NOTIFY_OK;
3555 }
3556
3557-static struct notifier_block __refdata irq_hotplug_notifier = {
3558+static struct notifier_block irq_hotplug_notifier = {
3559 .notifier_call = irq_cpu_hotplug_notify,
3560 };
3561
3562diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3563index e0a398c..a470fa5 100644
3564--- a/arch/arm/mach-omap2/omap_device.c
3565+++ b/arch/arm/mach-omap2/omap_device.c
3566@@ -508,7 +508,7 @@ void omap_device_delete(struct omap_device *od)
3567 struct platform_device __init *omap_device_build(const char *pdev_name,
3568 int pdev_id,
3569 struct omap_hwmod *oh,
3570- void *pdata, int pdata_len)
3571+ const void *pdata, int pdata_len)
3572 {
3573 struct omap_hwmod *ohs[] = { oh };
3574
3575@@ -536,7 +536,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3576 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3577 int pdev_id,
3578 struct omap_hwmod **ohs,
3579- int oh_cnt, void *pdata,
3580+ int oh_cnt, const void *pdata,
3581 int pdata_len)
3582 {
3583 int ret = -ENOMEM;
3584diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3585index 78c02b3..c94109a 100644
3586--- a/arch/arm/mach-omap2/omap_device.h
3587+++ b/arch/arm/mach-omap2/omap_device.h
3588@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3589 /* Core code interface */
3590
3591 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3592- struct omap_hwmod *oh, void *pdata,
3593+ struct omap_hwmod *oh, const void *pdata,
3594 int pdata_len);
3595
3596 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3597 struct omap_hwmod **oh, int oh_cnt,
3598- void *pdata, int pdata_len);
3599+ const void *pdata, int pdata_len);
3600
3601 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3602 struct omap_hwmod **ohs, int oh_cnt);
3603diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3604index 8a1b5e0..5f30074 100644
3605--- a/arch/arm/mach-omap2/omap_hwmod.c
3606+++ b/arch/arm/mach-omap2/omap_hwmod.c
3607@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3608 int (*init_clkdm)(struct omap_hwmod *oh);
3609 void (*update_context_lost)(struct omap_hwmod *oh);
3610 int (*get_context_lost)(struct omap_hwmod *oh);
3611-};
3612+} __no_const;
3613
3614 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3615-static struct omap_hwmod_soc_ops soc_ops;
3616+static struct omap_hwmod_soc_ops soc_ops __read_only;
3617
3618 /* omap_hwmod_list contains all registered struct omap_hwmods */
3619 static LIST_HEAD(omap_hwmod_list);
3620diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3621index 95fee54..cfa9cf1 100644
3622--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3623+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3624@@ -10,6 +10,7 @@
3625
3626 #include <linux/kernel.h>
3627 #include <linux/init.h>
3628+#include <asm/pgtable.h>
3629
3630 #include "powerdomain.h"
3631
3632@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3633
3634 void __init am43xx_powerdomains_init(void)
3635 {
3636- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3637+ pax_open_kernel();
3638+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3639+ pax_close_kernel();
3640 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3641 pwrdm_register_pwrdms(powerdomains_am43xx);
3642 pwrdm_complete_init();
3643diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3644index d15c7bb..b2d1f0c 100644
3645--- a/arch/arm/mach-omap2/wd_timer.c
3646+++ b/arch/arm/mach-omap2/wd_timer.c
3647@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3648 struct omap_hwmod *oh;
3649 char *oh_name = "wd_timer2";
3650 char *dev_name = "omap_wdt";
3651- struct omap_wd_timer_platform_data pdata;
3652+ static struct omap_wd_timer_platform_data pdata = {
3653+ .read_reset_sources = prm_read_reset_sources
3654+ };
3655
3656 if (!cpu_class_is_omap2() || of_have_populated_dt())
3657 return 0;
3658@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3659 return -EINVAL;
3660 }
3661
3662- pdata.read_reset_sources = prm_read_reset_sources;
3663-
3664 pdev = omap_device_build(dev_name, id, oh, &pdata,
3665 sizeof(struct omap_wd_timer_platform_data));
3666 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3667diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3668index b82dcae..44ee5b6 100644
3669--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3670+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3671@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3672 bool entered_lp2 = false;
3673
3674 if (tegra_pending_sgi())
3675- ACCESS_ONCE(abort_flag) = true;
3676+ ACCESS_ONCE_RW(abort_flag) = true;
3677
3678 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3679
3680diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3681index bdb3564..cebb96f 100644
3682--- a/arch/arm/mach-ux500/setup.h
3683+++ b/arch/arm/mach-ux500/setup.h
3684@@ -39,13 +39,6 @@ extern void ux500_timer_init(void);
3685 .type = MT_DEVICE, \
3686 }
3687
3688-#define __MEM_DEV_DESC(x, sz) { \
3689- .virtual = IO_ADDRESS(x), \
3690- .pfn = __phys_to_pfn(x), \
3691- .length = sz, \
3692- .type = MT_MEMORY, \
3693-}
3694-
3695 extern struct smp_operations ux500_smp_ops;
3696 extern void ux500_cpu_die(unsigned int cpu);
3697
3698diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3699index 1f8fed9..14d7823 100644
3700--- a/arch/arm/mm/Kconfig
3701+++ b/arch/arm/mm/Kconfig
3702@@ -446,7 +446,7 @@ config CPU_32v5
3703
3704 config CPU_32v6
3705 bool
3706- select CPU_USE_DOMAINS if CPU_V6 && MMU
3707+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3708 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3709
3710 config CPU_32v6K
3711@@ -601,6 +601,7 @@ config CPU_CP15_MPU
3712
3713 config CPU_USE_DOMAINS
3714 bool
3715+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3716 help
3717 This option enables or disables the use of domain switching
3718 via the set_fs() function.
3719@@ -800,6 +801,7 @@ config NEED_KUSER_HELPERS
3720 config KUSER_HELPERS
3721 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3722 default y
3723+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
3724 help
3725 Warning: disabling this option may break user programs.
3726
3727@@ -812,7 +814,7 @@ config KUSER_HELPERS
3728 See Documentation/arm/kernel_user_helpers.txt for details.
3729
3730 However, the fixed address nature of these helpers can be used
3731- by ROP (return orientated programming) authors when creating
3732+ by ROP (Return Oriented Programming) authors when creating
3733 exploits.
3734
3735 If all of the binaries and libraries which run on your platform
3736diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3737index 9240364..a2b8cf3 100644
3738--- a/arch/arm/mm/alignment.c
3739+++ b/arch/arm/mm/alignment.c
3740@@ -212,10 +212,12 @@ union offset_union {
3741 #define __get16_unaligned_check(ins,val,addr) \
3742 do { \
3743 unsigned int err = 0, v, a = addr; \
3744+ pax_open_userland(); \
3745 __get8_unaligned_check(ins,v,a,err); \
3746 val = v << ((BE) ? 8 : 0); \
3747 __get8_unaligned_check(ins,v,a,err); \
3748 val |= v << ((BE) ? 0 : 8); \
3749+ pax_close_userland(); \
3750 if (err) \
3751 goto fault; \
3752 } while (0)
3753@@ -229,6 +231,7 @@ union offset_union {
3754 #define __get32_unaligned_check(ins,val,addr) \
3755 do { \
3756 unsigned int err = 0, v, a = addr; \
3757+ pax_open_userland(); \
3758 __get8_unaligned_check(ins,v,a,err); \
3759 val = v << ((BE) ? 24 : 0); \
3760 __get8_unaligned_check(ins,v,a,err); \
3761@@ -237,6 +240,7 @@ union offset_union {
3762 val |= v << ((BE) ? 8 : 16); \
3763 __get8_unaligned_check(ins,v,a,err); \
3764 val |= v << ((BE) ? 0 : 24); \
3765+ pax_close_userland(); \
3766 if (err) \
3767 goto fault; \
3768 } while (0)
3769@@ -250,6 +254,7 @@ union offset_union {
3770 #define __put16_unaligned_check(ins,val,addr) \
3771 do { \
3772 unsigned int err = 0, v = val, a = addr; \
3773+ pax_open_userland(); \
3774 __asm__( FIRST_BYTE_16 \
3775 ARM( "1: "ins" %1, [%2], #1\n" ) \
3776 THUMB( "1: "ins" %1, [%2]\n" ) \
3777@@ -269,6 +274,7 @@ union offset_union {
3778 " .popsection\n" \
3779 : "=r" (err), "=&r" (v), "=&r" (a) \
3780 : "0" (err), "1" (v), "2" (a)); \
3781+ pax_close_userland(); \
3782 if (err) \
3783 goto fault; \
3784 } while (0)
3785@@ -282,6 +288,7 @@ union offset_union {
3786 #define __put32_unaligned_check(ins,val,addr) \
3787 do { \
3788 unsigned int err = 0, v = val, a = addr; \
3789+ pax_open_userland(); \
3790 __asm__( FIRST_BYTE_32 \
3791 ARM( "1: "ins" %1, [%2], #1\n" ) \
3792 THUMB( "1: "ins" %1, [%2]\n" ) \
3793@@ -311,6 +318,7 @@ union offset_union {
3794 " .popsection\n" \
3795 : "=r" (err), "=&r" (v), "=&r" (a) \
3796 : "0" (err), "1" (v), "2" (a)); \
3797+ pax_close_userland(); \
3798 if (err) \
3799 goto fault; \
3800 } while (0)
3801diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3802index 447da6f..77a5057 100644
3803--- a/arch/arm/mm/cache-l2x0.c
3804+++ b/arch/arm/mm/cache-l2x0.c
3805@@ -45,7 +45,7 @@ struct l2x0_of_data {
3806 void (*setup)(const struct device_node *, u32 *, u32 *);
3807 void (*save)(void);
3808 struct outer_cache_fns outer_cache;
3809-};
3810+} __do_const;
3811
3812 static bool of_init = false;
3813
3814diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3815index 84e6f77..0b52f31 100644
3816--- a/arch/arm/mm/context.c
3817+++ b/arch/arm/mm/context.c
3818@@ -43,7 +43,7 @@
3819 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3820
3821 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3822-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3823+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3824 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3825
3826 static DEFINE_PER_CPU(atomic64_t, active_asids);
3827@@ -180,7 +180,7 @@ static int is_reserved_asid(u64 asid)
3828 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3829 {
3830 u64 asid = atomic64_read(&mm->context.id);
3831- u64 generation = atomic64_read(&asid_generation);
3832+ u64 generation = atomic64_read_unchecked(&asid_generation);
3833
3834 if (asid != 0 && is_reserved_asid(asid)) {
3835 /*
3836@@ -198,7 +198,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3837 */
3838 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3839 if (asid == NUM_USER_ASIDS) {
3840- generation = atomic64_add_return(ASID_FIRST_VERSION,
3841+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3842 &asid_generation);
3843 flush_context(cpu);
3844 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3845@@ -227,14 +227,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3846 cpu_set_reserved_ttbr0();
3847
3848 asid = atomic64_read(&mm->context.id);
3849- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3850+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3851 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3852 goto switch_mm_fastpath;
3853
3854 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3855 /* Check that our ASID belongs to the current generation. */
3856 asid = atomic64_read(&mm->context.id);
3857- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3858+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3859 asid = new_context(mm, cpu);
3860 atomic64_set(&mm->context.id, asid);
3861 }
3862diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3863index eb8830a..5360ce7 100644
3864--- a/arch/arm/mm/fault.c
3865+++ b/arch/arm/mm/fault.c
3866@@ -25,6 +25,7 @@
3867 #include <asm/system_misc.h>
3868 #include <asm/system_info.h>
3869 #include <asm/tlbflush.h>
3870+#include <asm/sections.h>
3871
3872 #include "fault.h"
3873
3874@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3875 if (fixup_exception(regs))
3876 return;
3877
3878+#ifdef CONFIG_PAX_MEMORY_UDEREF
3879+ if (addr < TASK_SIZE) {
3880+ if (current->signal->curr_ip)
3881+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3882+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3883+ else
3884+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3885+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3886+ }
3887+#endif
3888+
3889+#ifdef CONFIG_PAX_KERNEXEC
3890+ if ((fsr & FSR_WRITE) &&
3891+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3892+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3893+ {
3894+ if (current->signal->curr_ip)
3895+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3896+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3897+ else
3898+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3899+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3900+ }
3901+#endif
3902+
3903 /*
3904 * No handler, we'll have to terminate things with extreme prejudice.
3905 */
3906@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3907 }
3908 #endif
3909
3910+#ifdef CONFIG_PAX_PAGEEXEC
3911+ if (fsr & FSR_LNX_PF) {
3912+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3913+ do_group_exit(SIGKILL);
3914+ }
3915+#endif
3916+
3917 tsk->thread.address = addr;
3918 tsk->thread.error_code = fsr;
3919 tsk->thread.trap_no = 14;
3920@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3921 }
3922 #endif /* CONFIG_MMU */
3923
3924+#ifdef CONFIG_PAX_PAGEEXEC
3925+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3926+{
3927+ long i;
3928+
3929+ printk(KERN_ERR "PAX: bytes at PC: ");
3930+ for (i = 0; i < 20; i++) {
3931+ unsigned char c;
3932+ if (get_user(c, (__force unsigned char __user *)pc+i))
3933+ printk(KERN_CONT "?? ");
3934+ else
3935+ printk(KERN_CONT "%02x ", c);
3936+ }
3937+ printk("\n");
3938+
3939+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3940+ for (i = -1; i < 20; i++) {
3941+ unsigned long c;
3942+ if (get_user(c, (__force unsigned long __user *)sp+i))
3943+ printk(KERN_CONT "???????? ");
3944+ else
3945+ printk(KERN_CONT "%08lx ", c);
3946+ }
3947+ printk("\n");
3948+}
3949+#endif
3950+
3951 /*
3952 * First Level Translation Fault Handler
3953 *
3954@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3955 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3956 struct siginfo info;
3957
3958+#ifdef CONFIG_PAX_MEMORY_UDEREF
3959+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3960+ if (current->signal->curr_ip)
3961+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3962+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3963+ else
3964+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3965+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3966+ goto die;
3967+ }
3968+#endif
3969+
3970 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3971 return;
3972
3973+die:
3974 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3975 inf->name, fsr, addr);
3976
3977@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3978 ifsr_info[nr].name = name;
3979 }
3980
3981+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3982+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3983+
3984 asmlinkage void __exception
3985 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3986 {
3987 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3988 struct siginfo info;
3989+ unsigned long pc = instruction_pointer(regs);
3990+
3991+ if (user_mode(regs)) {
3992+ unsigned long sigpage = current->mm->context.sigpage;
3993+
3994+ if (sigpage <= pc && pc < sigpage + 7*4) {
3995+ if (pc < sigpage + 3*4)
3996+ sys_sigreturn(regs);
3997+ else
3998+ sys_rt_sigreturn(regs);
3999+ return;
4000+ }
4001+ if (pc == 0xffff0f60UL) {
4002+ /*
4003+ * PaX: __kuser_cmpxchg64 emulation
4004+ */
4005+ // TODO
4006+ //regs->ARM_pc = regs->ARM_lr;
4007+ //return;
4008+ }
4009+ if (pc == 0xffff0fa0UL) {
4010+ /*
4011+ * PaX: __kuser_memory_barrier emulation
4012+ */
4013+ // dmb(); implied by the exception
4014+ regs->ARM_pc = regs->ARM_lr;
4015+ return;
4016+ }
4017+ if (pc == 0xffff0fc0UL) {
4018+ /*
4019+ * PaX: __kuser_cmpxchg emulation
4020+ */
4021+ // TODO
4022+ //long new;
4023+ //int op;
4024+
4025+ //op = FUTEX_OP_SET << 28;
4026+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4027+ //regs->ARM_r0 = old != new;
4028+ //regs->ARM_pc = regs->ARM_lr;
4029+ //return;
4030+ }
4031+ if (pc == 0xffff0fe0UL) {
4032+ /*
4033+ * PaX: __kuser_get_tls emulation
4034+ */
4035+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4036+ regs->ARM_pc = regs->ARM_lr;
4037+ return;
4038+ }
4039+ }
4040+
4041+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4042+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4043+ if (current->signal->curr_ip)
4044+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4045+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4046+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4047+ else
4048+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4049+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4050+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4051+ goto die;
4052+ }
4053+#endif
4054+
4055+#ifdef CONFIG_PAX_REFCOUNT
4056+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4057+ unsigned int bkpt;
4058+
4059+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4060+ current->thread.error_code = ifsr;
4061+ current->thread.trap_no = 0;
4062+ pax_report_refcount_overflow(regs);
4063+ fixup_exception(regs);
4064+ return;
4065+ }
4066+ }
4067+#endif
4068
4069 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4070 return;
4071
4072+die:
4073 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4074 inf->name, ifsr, addr);
4075
4076diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4077index cf08bdf..772656c 100644
4078--- a/arch/arm/mm/fault.h
4079+++ b/arch/arm/mm/fault.h
4080@@ -3,6 +3,7 @@
4081
4082 /*
4083 * Fault status register encodings. We steal bit 31 for our own purposes.
4084+ * Set when the FSR value is from an instruction fault.
4085 */
4086 #define FSR_LNX_PF (1 << 31)
4087 #define FSR_WRITE (1 << 11)
4088@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4089 }
4090 #endif
4091
4092+/* valid for LPAE and !LPAE */
4093+static inline int is_xn_fault(unsigned int fsr)
4094+{
4095+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4096+}
4097+
4098+static inline int is_domain_fault(unsigned int fsr)
4099+{
4100+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4101+}
4102+
4103 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4104 unsigned long search_exception_table(unsigned long addr);
4105
4106diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4107index 3e8f106..a0a1fe4 100644
4108--- a/arch/arm/mm/init.c
4109+++ b/arch/arm/mm/init.c
4110@@ -30,6 +30,8 @@
4111 #include <asm/setup.h>
4112 #include <asm/tlb.h>
4113 #include <asm/fixmap.h>
4114+#include <asm/system_info.h>
4115+#include <asm/cp15.h>
4116
4117 #include <asm/mach/arch.h>
4118 #include <asm/mach/map.h>
4119@@ -681,7 +683,46 @@ void free_initmem(void)
4120 {
4121 #ifdef CONFIG_HAVE_TCM
4122 extern char __tcm_start, __tcm_end;
4123+#endif
4124
4125+#ifdef CONFIG_PAX_KERNEXEC
4126+ unsigned long addr;
4127+ pgd_t *pgd;
4128+ pud_t *pud;
4129+ pmd_t *pmd;
4130+ int cpu_arch = cpu_architecture();
4131+ unsigned int cr = get_cr();
4132+
4133+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4134+ /* make pages tables, etc before .text NX */
4135+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4136+ pgd = pgd_offset_k(addr);
4137+ pud = pud_offset(pgd, addr);
4138+ pmd = pmd_offset(pud, addr);
4139+ __section_update(pmd, addr, PMD_SECT_XN);
4140+ }
4141+ /* make init NX */
4142+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4143+ pgd = pgd_offset_k(addr);
4144+ pud = pud_offset(pgd, addr);
4145+ pmd = pmd_offset(pud, addr);
4146+ __section_update(pmd, addr, PMD_SECT_XN);
4147+ }
4148+ /* make kernel code/rodata RX */
4149+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4150+ pgd = pgd_offset_k(addr);
4151+ pud = pud_offset(pgd, addr);
4152+ pmd = pmd_offset(pud, addr);
4153+#ifdef CONFIG_ARM_LPAE
4154+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4155+#else
4156+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4157+#endif
4158+ }
4159+ }
4160+#endif
4161+
4162+#ifdef CONFIG_HAVE_TCM
4163 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4164 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4165 #endif
4166diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4167index f123d6e..04bf569 100644
4168--- a/arch/arm/mm/ioremap.c
4169+++ b/arch/arm/mm/ioremap.c
4170@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4171 unsigned int mtype;
4172
4173 if (cached)
4174- mtype = MT_MEMORY;
4175+ mtype = MT_MEMORY_RX;
4176 else
4177- mtype = MT_MEMORY_NONCACHED;
4178+ mtype = MT_MEMORY_NONCACHED_RX;
4179
4180 return __arm_ioremap_caller(phys_addr, size, mtype,
4181 __builtin_return_address(0));
4182diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4183index 5e85ed3..b10a7ed 100644
4184--- a/arch/arm/mm/mmap.c
4185+++ b/arch/arm/mm/mmap.c
4186@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4187 struct vm_area_struct *vma;
4188 int do_align = 0;
4189 int aliasing = cache_is_vipt_aliasing();
4190+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4191 struct vm_unmapped_area_info info;
4192
4193 /*
4194@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4195 if (len > TASK_SIZE)
4196 return -ENOMEM;
4197
4198+#ifdef CONFIG_PAX_RANDMMAP
4199+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4200+#endif
4201+
4202 if (addr) {
4203 if (do_align)
4204 addr = COLOUR_ALIGN(addr, pgoff);
4205@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4206 addr = PAGE_ALIGN(addr);
4207
4208 vma = find_vma(mm, addr);
4209- if (TASK_SIZE - len >= addr &&
4210- (!vma || addr + len <= vma->vm_start))
4211+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4212 return addr;
4213 }
4214
4215@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4216 info.high_limit = TASK_SIZE;
4217 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4218 info.align_offset = pgoff << PAGE_SHIFT;
4219+ info.threadstack_offset = offset;
4220 return vm_unmapped_area(&info);
4221 }
4222
4223@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4224 unsigned long addr = addr0;
4225 int do_align = 0;
4226 int aliasing = cache_is_vipt_aliasing();
4227+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4228 struct vm_unmapped_area_info info;
4229
4230 /*
4231@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4232 return addr;
4233 }
4234
4235+#ifdef CONFIG_PAX_RANDMMAP
4236+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4237+#endif
4238+
4239 /* requesting a specific address */
4240 if (addr) {
4241 if (do_align)
4242@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4243 else
4244 addr = PAGE_ALIGN(addr);
4245 vma = find_vma(mm, addr);
4246- if (TASK_SIZE - len >= addr &&
4247- (!vma || addr + len <= vma->vm_start))
4248+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4249 return addr;
4250 }
4251
4252@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4253 info.high_limit = mm->mmap_base;
4254 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4255 info.align_offset = pgoff << PAGE_SHIFT;
4256+ info.threadstack_offset = offset;
4257 addr = vm_unmapped_area(&info);
4258
4259 /*
4260@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4261 {
4262 unsigned long random_factor = 0UL;
4263
4264+#ifdef CONFIG_PAX_RANDMMAP
4265+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4266+#endif
4267+
4268 /* 8 bits of randomness in 20 address space bits */
4269 if ((current->flags & PF_RANDOMIZE) &&
4270 !(current->personality & ADDR_NO_RANDOMIZE))
4271@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4272
4273 if (mmap_is_legacy()) {
4274 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4275+
4276+#ifdef CONFIG_PAX_RANDMMAP
4277+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4278+ mm->mmap_base += mm->delta_mmap;
4279+#endif
4280+
4281 mm->get_unmapped_area = arch_get_unmapped_area;
4282 } else {
4283 mm->mmap_base = mmap_base(random_factor);
4284+
4285+#ifdef CONFIG_PAX_RANDMMAP
4286+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4287+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4288+#endif
4289+
4290 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4291 }
4292 }
4293diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4294index 580ef2d..2da06ca 100644
4295--- a/arch/arm/mm/mmu.c
4296+++ b/arch/arm/mm/mmu.c
4297@@ -38,6 +38,22 @@
4298 #include "mm.h"
4299 #include "tcm.h"
4300
4301+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4302+void modify_domain(unsigned int dom, unsigned int type)
4303+{
4304+ struct thread_info *thread = current_thread_info();
4305+ unsigned int domain = thread->cpu_domain;
4306+ /*
4307+ * DOMAIN_MANAGER might be defined to some other value,
4308+ * use the arch-defined constant
4309+ */
4310+ domain &= ~domain_val(dom, 3);
4311+ thread->cpu_domain = domain | domain_val(dom, type);
4312+ set_domain(thread->cpu_domain);
4313+}
4314+EXPORT_SYMBOL(modify_domain);
4315+#endif
4316+
4317 /*
4318 * empty_zero_page is a special page that is used for
4319 * zero-initialized data and COW.
4320@@ -230,10 +246,18 @@ __setup("noalign", noalign_setup);
4321
4322 #endif /* ifdef CONFIG_CPU_CP15 / else */
4323
4324-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4325+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4326 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4327
4328-static struct mem_type mem_types[] = {
4329+#ifdef CONFIG_PAX_KERNEXEC
4330+#define L_PTE_KERNEXEC L_PTE_RDONLY
4331+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4332+#else
4333+#define L_PTE_KERNEXEC L_PTE_DIRTY
4334+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4335+#endif
4336+
4337+static struct mem_type mem_types[] __read_only = {
4338 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4339 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4340 L_PTE_SHARED,
4341@@ -262,16 +286,16 @@ static struct mem_type mem_types[] = {
4342 [MT_UNCACHED] = {
4343 .prot_pte = PROT_PTE_DEVICE,
4344 .prot_l1 = PMD_TYPE_TABLE,
4345- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4346+ .prot_sect = PROT_SECT_DEVICE,
4347 .domain = DOMAIN_IO,
4348 },
4349 [MT_CACHECLEAN] = {
4350- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4351+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4352 .domain = DOMAIN_KERNEL,
4353 },
4354 #ifndef CONFIG_ARM_LPAE
4355 [MT_MINICLEAN] = {
4356- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4357+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4358 .domain = DOMAIN_KERNEL,
4359 },
4360 #endif
4361@@ -279,36 +303,54 @@ static struct mem_type mem_types[] = {
4362 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4363 L_PTE_RDONLY,
4364 .prot_l1 = PMD_TYPE_TABLE,
4365- .domain = DOMAIN_USER,
4366+ .domain = DOMAIN_VECTORS,
4367 },
4368 [MT_HIGH_VECTORS] = {
4369 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4370 L_PTE_USER | L_PTE_RDONLY,
4371 .prot_l1 = PMD_TYPE_TABLE,
4372- .domain = DOMAIN_USER,
4373+ .domain = DOMAIN_VECTORS,
4374 },
4375- [MT_MEMORY] = {
4376+ [MT_MEMORY_RWX] = {
4377 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4378 .prot_l1 = PMD_TYPE_TABLE,
4379 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4380 .domain = DOMAIN_KERNEL,
4381 },
4382+ [MT_MEMORY_RW] = {
4383+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4384+ .prot_l1 = PMD_TYPE_TABLE,
4385+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4386+ .domain = DOMAIN_KERNEL,
4387+ },
4388+ [MT_MEMORY_RX] = {
4389+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4390+ .prot_l1 = PMD_TYPE_TABLE,
4391+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4392+ .domain = DOMAIN_KERNEL,
4393+ },
4394 [MT_ROM] = {
4395- .prot_sect = PMD_TYPE_SECT,
4396+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4397 .domain = DOMAIN_KERNEL,
4398 },
4399- [MT_MEMORY_NONCACHED] = {
4400+ [MT_MEMORY_NONCACHED_RW] = {
4401 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4402 L_PTE_MT_BUFFERABLE,
4403 .prot_l1 = PMD_TYPE_TABLE,
4404 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4405 .domain = DOMAIN_KERNEL,
4406 },
4407+ [MT_MEMORY_NONCACHED_RX] = {
4408+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4409+ L_PTE_MT_BUFFERABLE,
4410+ .prot_l1 = PMD_TYPE_TABLE,
4411+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4412+ .domain = DOMAIN_KERNEL,
4413+ },
4414 [MT_MEMORY_DTCM] = {
4415- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4416- L_PTE_XN,
4417+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4418 .prot_l1 = PMD_TYPE_TABLE,
4419- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4420+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4421 .domain = DOMAIN_KERNEL,
4422 },
4423 [MT_MEMORY_ITCM] = {
4424@@ -318,10 +360,10 @@ static struct mem_type mem_types[] = {
4425 },
4426 [MT_MEMORY_SO] = {
4427 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4428- L_PTE_MT_UNCACHED | L_PTE_XN,
4429+ L_PTE_MT_UNCACHED,
4430 .prot_l1 = PMD_TYPE_TABLE,
4431 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4432- PMD_SECT_UNCACHED | PMD_SECT_XN,
4433+ PMD_SECT_UNCACHED,
4434 .domain = DOMAIN_KERNEL,
4435 },
4436 [MT_MEMORY_DMA_READY] = {
4437@@ -407,9 +449,35 @@ static void __init build_mem_type_table(void)
4438 * to prevent speculative instruction fetches.
4439 */
4440 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4441+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4442 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4443+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4444 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4445+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4446 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4447+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4448+
4449+ /* Mark other regions on ARMv6+ as execute-never */
4450+
4451+#ifdef CONFIG_PAX_KERNEXEC
4452+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4453+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4454+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4455+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4456+#ifndef CONFIG_ARM_LPAE
4457+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4458+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4459+#endif
4460+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4461+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4462+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4463+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
4464+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4465+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4466+#endif
4467+
4468+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4469+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4470 }
4471 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4472 /*
4473@@ -470,6 +538,9 @@ static void __init build_mem_type_table(void)
4474 * from SVC mode and no access from userspace.
4475 */
4476 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4477+#ifdef CONFIG_PAX_KERNEXEC
4478+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4479+#endif
4480 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4482 #endif
4483@@ -487,11 +558,17 @@ static void __init build_mem_type_table(void)
4484 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4485 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4486 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4487- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4488- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4489+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4490+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4491+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4492+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4493+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4494+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4495 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4496- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4497- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4498+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4499+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4500+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4501+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4502 }
4503 }
4504
4505@@ -502,15 +579,20 @@ static void __init build_mem_type_table(void)
4506 if (cpu_arch >= CPU_ARCH_ARMv6) {
4507 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4508 /* Non-cacheable Normal is XCB = 001 */
4509- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4510+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4511+ PMD_SECT_BUFFERED;
4512+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4513 PMD_SECT_BUFFERED;
4514 } else {
4515 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4516- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4517+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4518+ PMD_SECT_TEX(1);
4519+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4520 PMD_SECT_TEX(1);
4521 }
4522 } else {
4523- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4524+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4525+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4526 }
4527
4528 #ifdef CONFIG_ARM_LPAE
4529@@ -526,6 +608,8 @@ static void __init build_mem_type_table(void)
4530 vecs_pgprot |= PTE_EXT_AF;
4531 #endif
4532
4533+ user_pgprot |= __supported_pte_mask;
4534+
4535 for (i = 0; i < 16; i++) {
4536 pteval_t v = pgprot_val(protection_map[i]);
4537 protection_map[i] = __pgprot(v | user_pgprot);
4538@@ -543,10 +627,15 @@ static void __init build_mem_type_table(void)
4539
4540 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4541 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4542- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4543- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4544+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4545+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4546+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4547+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4548+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4549+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4550 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4551- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4552+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4553+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4554 mem_types[MT_ROM].prot_sect |= cp->pmd;
4555
4556 switch (cp->pmd) {
4557@@ -1188,18 +1277,15 @@ void __init arm_mm_memblock_reserve(void)
4558 * called function. This means you can't use any function or debugging
4559 * method which may touch any device, otherwise the kernel _will_ crash.
4560 */
4561+
4562+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4563+
4564 static void __init devicemaps_init(const struct machine_desc *mdesc)
4565 {
4566 struct map_desc map;
4567 unsigned long addr;
4568- void *vectors;
4569
4570- /*
4571- * Allocate the vector page early.
4572- */
4573- vectors = early_alloc(PAGE_SIZE * 2);
4574-
4575- early_trap_init(vectors);
4576+ early_trap_init(&vectors);
4577
4578 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4579 pmd_clear(pmd_off_k(addr));
4580@@ -1239,7 +1325,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4581 * location (0xffff0000). If we aren't using high-vectors, also
4582 * create a mapping at the low-vectors virtual address.
4583 */
4584- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4585+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4586 map.virtual = 0xffff0000;
4587 map.length = PAGE_SIZE;
4588 #ifdef CONFIG_KUSER_HELPERS
4589@@ -1311,8 +1397,39 @@ static void __init map_lowmem(void)
4590 map.pfn = __phys_to_pfn(start);
4591 map.virtual = __phys_to_virt(start);
4592 map.length = end - start;
4593- map.type = MT_MEMORY;
4594
4595+#ifdef CONFIG_PAX_KERNEXEC
4596+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4597+ struct map_desc kernel;
4598+ struct map_desc initmap;
4599+
4600+ /* when freeing initmem we will make this RW */
4601+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4602+ initmap.virtual = (unsigned long)__init_begin;
4603+ initmap.length = _sdata - __init_begin;
4604+ initmap.type = MT_MEMORY_RWX;
4605+ create_mapping(&initmap);
4606+
4607+ /* when freeing initmem we will make this RX */
4608+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4609+ kernel.virtual = (unsigned long)_stext;
4610+ kernel.length = __init_begin - _stext;
4611+ kernel.type = MT_MEMORY_RWX;
4612+ create_mapping(&kernel);
4613+
4614+ if (map.virtual < (unsigned long)_stext) {
4615+ map.length = (unsigned long)_stext - map.virtual;
4616+ map.type = MT_MEMORY_RWX;
4617+ create_mapping(&map);
4618+ }
4619+
4620+ map.pfn = __phys_to_pfn(__pa(_sdata));
4621+ map.virtual = (unsigned long)_sdata;
4622+ map.length = end - __pa(_sdata);
4623+ }
4624+#endif
4625+
4626+ map.type = MT_MEMORY_RW;
4627 create_mapping(&map);
4628 }
4629 }
4630diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4631index a5bc92d..0bb4730 100644
4632--- a/arch/arm/plat-omap/sram.c
4633+++ b/arch/arm/plat-omap/sram.c
4634@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4635 * Looks like we need to preserve some bootloader code at the
4636 * beginning of SRAM for jumping to flash for reboot to work...
4637 */
4638+ pax_open_kernel();
4639 memset_io(omap_sram_base + omap_sram_skip, 0,
4640 omap_sram_size - omap_sram_skip);
4641+ pax_close_kernel();
4642 }
4643diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4644index ce6d763..cfea917 100644
4645--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4646+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4647@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4648 int (*started)(unsigned ch);
4649 int (*flush)(unsigned ch);
4650 int (*stop)(unsigned ch);
4651-};
4652+} __no_const;
4653
4654 extern void *samsung_dmadev_get_ops(void);
4655 extern void *s3c_dma_get_ops(void);
4656diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4657index c3a58a1..78fbf54 100644
4658--- a/arch/avr32/include/asm/cache.h
4659+++ b/arch/avr32/include/asm/cache.h
4660@@ -1,8 +1,10 @@
4661 #ifndef __ASM_AVR32_CACHE_H
4662 #define __ASM_AVR32_CACHE_H
4663
4664+#include <linux/const.h>
4665+
4666 #define L1_CACHE_SHIFT 5
4667-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4668+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4669
4670 /*
4671 * Memory returned by kmalloc() may be used for DMA, so we must make
4672diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4673index d232888..87c8df1 100644
4674--- a/arch/avr32/include/asm/elf.h
4675+++ b/arch/avr32/include/asm/elf.h
4676@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4677 the loader. We need to make sure that it is out of the way of the program
4678 that it will "exec", and that there is sufficient room for the brk. */
4679
4680-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4681+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4682
4683+#ifdef CONFIG_PAX_ASLR
4684+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4685+
4686+#define PAX_DELTA_MMAP_LEN 15
4687+#define PAX_DELTA_STACK_LEN 15
4688+#endif
4689
4690 /* This yields a mask that user programs can use to figure out what
4691 instruction set this CPU supports. This could be done in user space,
4692diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4693index 479330b..53717a8 100644
4694--- a/arch/avr32/include/asm/kmap_types.h
4695+++ b/arch/avr32/include/asm/kmap_types.h
4696@@ -2,9 +2,9 @@
4697 #define __ASM_AVR32_KMAP_TYPES_H
4698
4699 #ifdef CONFIG_DEBUG_HIGHMEM
4700-# define KM_TYPE_NR 29
4701+# define KM_TYPE_NR 30
4702 #else
4703-# define KM_TYPE_NR 14
4704+# define KM_TYPE_NR 15
4705 #endif
4706
4707 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4708diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4709index 0eca933..eb78c7b 100644
4710--- a/arch/avr32/mm/fault.c
4711+++ b/arch/avr32/mm/fault.c
4712@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4713
4714 int exception_trace = 1;
4715
4716+#ifdef CONFIG_PAX_PAGEEXEC
4717+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4718+{
4719+ unsigned long i;
4720+
4721+ printk(KERN_ERR "PAX: bytes at PC: ");
4722+ for (i = 0; i < 20; i++) {
4723+ unsigned char c;
4724+ if (get_user(c, (unsigned char *)pc+i))
4725+ printk(KERN_CONT "???????? ");
4726+ else
4727+ printk(KERN_CONT "%02x ", c);
4728+ }
4729+ printk("\n");
4730+}
4731+#endif
4732+
4733 /*
4734 * This routine handles page faults. It determines the address and the
4735 * problem, and then passes it off to one of the appropriate routines.
4736@@ -176,6 +193,16 @@ bad_area:
4737 up_read(&mm->mmap_sem);
4738
4739 if (user_mode(regs)) {
4740+
4741+#ifdef CONFIG_PAX_PAGEEXEC
4742+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4743+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4744+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4745+ do_group_exit(SIGKILL);
4746+ }
4747+ }
4748+#endif
4749+
4750 if (exception_trace && printk_ratelimit())
4751 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4752 "sp %08lx ecr %lu\n",
4753diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4754index 568885a..f8008df 100644
4755--- a/arch/blackfin/include/asm/cache.h
4756+++ b/arch/blackfin/include/asm/cache.h
4757@@ -7,6 +7,7 @@
4758 #ifndef __ARCH_BLACKFIN_CACHE_H
4759 #define __ARCH_BLACKFIN_CACHE_H
4760
4761+#include <linux/const.h>
4762 #include <linux/linkage.h> /* for asmlinkage */
4763
4764 /*
4765@@ -14,7 +15,7 @@
4766 * Blackfin loads 32 bytes for cache
4767 */
4768 #define L1_CACHE_SHIFT 5
4769-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4770+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4771 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4772
4773 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4774diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4775index aea2718..3639a60 100644
4776--- a/arch/cris/include/arch-v10/arch/cache.h
4777+++ b/arch/cris/include/arch-v10/arch/cache.h
4778@@ -1,8 +1,9 @@
4779 #ifndef _ASM_ARCH_CACHE_H
4780 #define _ASM_ARCH_CACHE_H
4781
4782+#include <linux/const.h>
4783 /* Etrax 100LX have 32-byte cache-lines. */
4784-#define L1_CACHE_BYTES 32
4785 #define L1_CACHE_SHIFT 5
4786+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4787
4788 #endif /* _ASM_ARCH_CACHE_H */
4789diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4790index 7caf25d..ee65ac5 100644
4791--- a/arch/cris/include/arch-v32/arch/cache.h
4792+++ b/arch/cris/include/arch-v32/arch/cache.h
4793@@ -1,11 +1,12 @@
4794 #ifndef _ASM_CRIS_ARCH_CACHE_H
4795 #define _ASM_CRIS_ARCH_CACHE_H
4796
4797+#include <linux/const.h>
4798 #include <arch/hwregs/dma.h>
4799
4800 /* A cache-line is 32 bytes. */
4801-#define L1_CACHE_BYTES 32
4802 #define L1_CACHE_SHIFT 5
4803+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4804
4805 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4806
4807diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4808index b86329d..6709906 100644
4809--- a/arch/frv/include/asm/atomic.h
4810+++ b/arch/frv/include/asm/atomic.h
4811@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4812 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4813 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4814
4815+#define atomic64_read_unchecked(v) atomic64_read(v)
4816+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4817+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4818+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4819+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4820+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4821+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4822+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4823+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4824+
4825 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4826 {
4827 int c, old;
4828diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4829index 2797163..c2a401d 100644
4830--- a/arch/frv/include/asm/cache.h
4831+++ b/arch/frv/include/asm/cache.h
4832@@ -12,10 +12,11 @@
4833 #ifndef __ASM_CACHE_H
4834 #define __ASM_CACHE_H
4835
4836+#include <linux/const.h>
4837
4838 /* bytes per L1 cache line */
4839 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4840-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4841+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4842
4843 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4844 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4845diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4846index 43901f2..0d8b865 100644
4847--- a/arch/frv/include/asm/kmap_types.h
4848+++ b/arch/frv/include/asm/kmap_types.h
4849@@ -2,6 +2,6 @@
4850 #ifndef _ASM_KMAP_TYPES_H
4851 #define _ASM_KMAP_TYPES_H
4852
4853-#define KM_TYPE_NR 17
4854+#define KM_TYPE_NR 18
4855
4856 #endif
4857diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4858index 836f147..4cf23f5 100644
4859--- a/arch/frv/mm/elf-fdpic.c
4860+++ b/arch/frv/mm/elf-fdpic.c
4861@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4862 {
4863 struct vm_area_struct *vma;
4864 struct vm_unmapped_area_info info;
4865+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4866
4867 if (len > TASK_SIZE)
4868 return -ENOMEM;
4869@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4870 if (addr) {
4871 addr = PAGE_ALIGN(addr);
4872 vma = find_vma(current->mm, addr);
4873- if (TASK_SIZE - len >= addr &&
4874- (!vma || addr + len <= vma->vm_start))
4875+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4876 goto success;
4877 }
4878
4879@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4880 info.high_limit = (current->mm->start_stack - 0x00200000);
4881 info.align_mask = 0;
4882 info.align_offset = 0;
4883+ info.threadstack_offset = offset;
4884 addr = vm_unmapped_area(&info);
4885 if (!(addr & ~PAGE_MASK))
4886 goto success;
4887diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4888index f4ca594..adc72fd6 100644
4889--- a/arch/hexagon/include/asm/cache.h
4890+++ b/arch/hexagon/include/asm/cache.h
4891@@ -21,9 +21,11 @@
4892 #ifndef __ASM_CACHE_H
4893 #define __ASM_CACHE_H
4894
4895+#include <linux/const.h>
4896+
4897 /* Bytes per L1 cache line */
4898-#define L1_CACHE_SHIFT (5)
4899-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4900+#define L1_CACHE_SHIFT 5
4901+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4902
4903 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4904 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4905diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
4906index 4e4119b..dd7de0a 100644
4907--- a/arch/ia64/Kconfig
4908+++ b/arch/ia64/Kconfig
4909@@ -554,6 +554,7 @@ source "drivers/sn/Kconfig"
4910 config KEXEC
4911 bool "kexec system call"
4912 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
4913+ depends on !GRKERNSEC_KMEM
4914 help
4915 kexec is a system call that implements the ability to shutdown your
4916 current kernel, and to start another kernel. It is like a reboot
4917diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4918index 6e6fe18..a6ae668 100644
4919--- a/arch/ia64/include/asm/atomic.h
4920+++ b/arch/ia64/include/asm/atomic.h
4921@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4922 #define atomic64_inc(v) atomic64_add(1, (v))
4923 #define atomic64_dec(v) atomic64_sub(1, (v))
4924
4925+#define atomic64_read_unchecked(v) atomic64_read(v)
4926+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4927+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4928+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4929+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4930+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4931+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4932+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4933+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4934+
4935 /* Atomic operations are already serializing */
4936 #define smp_mb__before_atomic_dec() barrier()
4937 #define smp_mb__after_atomic_dec() barrier()
4938diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4939index 988254a..e1ee885 100644
4940--- a/arch/ia64/include/asm/cache.h
4941+++ b/arch/ia64/include/asm/cache.h
4942@@ -1,6 +1,7 @@
4943 #ifndef _ASM_IA64_CACHE_H
4944 #define _ASM_IA64_CACHE_H
4945
4946+#include <linux/const.h>
4947
4948 /*
4949 * Copyright (C) 1998-2000 Hewlett-Packard Co
4950@@ -9,7 +10,7 @@
4951
4952 /* Bytes per L1 (data) cache line. */
4953 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4954-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4955+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4956
4957 #ifdef CONFIG_SMP
4958 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4959diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4960index 5a83c5c..4d7f553 100644
4961--- a/arch/ia64/include/asm/elf.h
4962+++ b/arch/ia64/include/asm/elf.h
4963@@ -42,6 +42,13 @@
4964 */
4965 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4966
4967+#ifdef CONFIG_PAX_ASLR
4968+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4969+
4970+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4971+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4972+#endif
4973+
4974 #define PT_IA_64_UNWIND 0x70000001
4975
4976 /* IA-64 relocations: */
4977diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4978index 5767cdf..7462574 100644
4979--- a/arch/ia64/include/asm/pgalloc.h
4980+++ b/arch/ia64/include/asm/pgalloc.h
4981@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4982 pgd_val(*pgd_entry) = __pa(pud);
4983 }
4984
4985+static inline void
4986+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4987+{
4988+ pgd_populate(mm, pgd_entry, pud);
4989+}
4990+
4991 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4992 {
4993 return quicklist_alloc(0, GFP_KERNEL, NULL);
4994@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4995 pud_val(*pud_entry) = __pa(pmd);
4996 }
4997
4998+static inline void
4999+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5000+{
5001+ pud_populate(mm, pud_entry, pmd);
5002+}
5003+
5004 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5005 {
5006 return quicklist_alloc(0, GFP_KERNEL, NULL);
5007diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5008index 7935115..c0eca6a 100644
5009--- a/arch/ia64/include/asm/pgtable.h
5010+++ b/arch/ia64/include/asm/pgtable.h
5011@@ -12,7 +12,7 @@
5012 * David Mosberger-Tang <davidm@hpl.hp.com>
5013 */
5014
5015-
5016+#include <linux/const.h>
5017 #include <asm/mman.h>
5018 #include <asm/page.h>
5019 #include <asm/processor.h>
5020@@ -142,6 +142,17 @@
5021 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5022 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5023 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5024+
5025+#ifdef CONFIG_PAX_PAGEEXEC
5026+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5027+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5028+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5029+#else
5030+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5031+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5032+# define PAGE_COPY_NOEXEC PAGE_COPY
5033+#endif
5034+
5035 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5036 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5037 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5038diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5039index 45698cd..e8e2dbc 100644
5040--- a/arch/ia64/include/asm/spinlock.h
5041+++ b/arch/ia64/include/asm/spinlock.h
5042@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5043 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5044
5045 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5046- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5047+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5048 }
5049
5050 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5051diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5052index 449c8c0..18965fb 100644
5053--- a/arch/ia64/include/asm/uaccess.h
5054+++ b/arch/ia64/include/asm/uaccess.h
5055@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5056 static inline unsigned long
5057 __copy_to_user (void __user *to, const void *from, unsigned long count)
5058 {
5059+ if (count > INT_MAX)
5060+ return count;
5061+
5062+ if (!__builtin_constant_p(count))
5063+ check_object_size(from, count, true);
5064+
5065 return __copy_user(to, (__force void __user *) from, count);
5066 }
5067
5068 static inline unsigned long
5069 __copy_from_user (void *to, const void __user *from, unsigned long count)
5070 {
5071+ if (count > INT_MAX)
5072+ return count;
5073+
5074+ if (!__builtin_constant_p(count))
5075+ check_object_size(to, count, false);
5076+
5077 return __copy_user((__force void __user *) to, from, count);
5078 }
5079
5080@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5081 ({ \
5082 void __user *__cu_to = (to); \
5083 const void *__cu_from = (from); \
5084- long __cu_len = (n); \
5085+ unsigned long __cu_len = (n); \
5086 \
5087- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5088+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5089+ if (!__builtin_constant_p(n)) \
5090+ check_object_size(__cu_from, __cu_len, true); \
5091 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5092+ } \
5093 __cu_len; \
5094 })
5095
5096@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5097 ({ \
5098 void *__cu_to = (to); \
5099 const void __user *__cu_from = (from); \
5100- long __cu_len = (n); \
5101+ unsigned long __cu_len = (n); \
5102 \
5103 __chk_user_ptr(__cu_from); \
5104- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5105+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5106+ if (!__builtin_constant_p(n)) \
5107+ check_object_size(__cu_to, __cu_len, false); \
5108 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5109+ } \
5110 __cu_len; \
5111 })
5112
5113diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5114index 24603be..948052d 100644
5115--- a/arch/ia64/kernel/module.c
5116+++ b/arch/ia64/kernel/module.c
5117@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5118 void
5119 module_free (struct module *mod, void *module_region)
5120 {
5121- if (mod && mod->arch.init_unw_table &&
5122- module_region == mod->module_init) {
5123+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5124 unw_remove_unwind_table(mod->arch.init_unw_table);
5125 mod->arch.init_unw_table = NULL;
5126 }
5127@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5128 }
5129
5130 static inline int
5131+in_init_rx (const struct module *mod, uint64_t addr)
5132+{
5133+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5134+}
5135+
5136+static inline int
5137+in_init_rw (const struct module *mod, uint64_t addr)
5138+{
5139+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5140+}
5141+
5142+static inline int
5143 in_init (const struct module *mod, uint64_t addr)
5144 {
5145- return addr - (uint64_t) mod->module_init < mod->init_size;
5146+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5147+}
5148+
5149+static inline int
5150+in_core_rx (const struct module *mod, uint64_t addr)
5151+{
5152+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5153+}
5154+
5155+static inline int
5156+in_core_rw (const struct module *mod, uint64_t addr)
5157+{
5158+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5159 }
5160
5161 static inline int
5162 in_core (const struct module *mod, uint64_t addr)
5163 {
5164- return addr - (uint64_t) mod->module_core < mod->core_size;
5165+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5166 }
5167
5168 static inline int
5169@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5170 break;
5171
5172 case RV_BDREL:
5173- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5174+ if (in_init_rx(mod, val))
5175+ val -= (uint64_t) mod->module_init_rx;
5176+ else if (in_init_rw(mod, val))
5177+ val -= (uint64_t) mod->module_init_rw;
5178+ else if (in_core_rx(mod, val))
5179+ val -= (uint64_t) mod->module_core_rx;
5180+ else if (in_core_rw(mod, val))
5181+ val -= (uint64_t) mod->module_core_rw;
5182 break;
5183
5184 case RV_LTV:
5185@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5186 * addresses have been selected...
5187 */
5188 uint64_t gp;
5189- if (mod->core_size > MAX_LTOFF)
5190+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5191 /*
5192 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5193 * at the end of the module.
5194 */
5195- gp = mod->core_size - MAX_LTOFF / 2;
5196+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5197 else
5198- gp = mod->core_size / 2;
5199- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5200+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5201+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5202 mod->arch.gp = gp;
5203 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5204 }
5205diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5206index ab33328..f39506c 100644
5207--- a/arch/ia64/kernel/palinfo.c
5208+++ b/arch/ia64/kernel/palinfo.c
5209@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5210 return NOTIFY_OK;
5211 }
5212
5213-static struct notifier_block __refdata palinfo_cpu_notifier =
5214+static struct notifier_block palinfo_cpu_notifier =
5215 {
5216 .notifier_call = palinfo_cpu_callback,
5217 .priority = 0,
5218diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5219index 41e33f8..65180b2a 100644
5220--- a/arch/ia64/kernel/sys_ia64.c
5221+++ b/arch/ia64/kernel/sys_ia64.c
5222@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5223 unsigned long align_mask = 0;
5224 struct mm_struct *mm = current->mm;
5225 struct vm_unmapped_area_info info;
5226+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5227
5228 if (len > RGN_MAP_LIMIT)
5229 return -ENOMEM;
5230@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5231 if (REGION_NUMBER(addr) == RGN_HPAGE)
5232 addr = 0;
5233 #endif
5234+
5235+#ifdef CONFIG_PAX_RANDMMAP
5236+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5237+ addr = mm->free_area_cache;
5238+ else
5239+#endif
5240+
5241 if (!addr)
5242 addr = TASK_UNMAPPED_BASE;
5243
5244@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5245 info.high_limit = TASK_SIZE;
5246 info.align_mask = align_mask;
5247 info.align_offset = 0;
5248+ info.threadstack_offset = offset;
5249 return vm_unmapped_area(&info);
5250 }
5251
5252diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5253index 0ccb28f..8992469 100644
5254--- a/arch/ia64/kernel/vmlinux.lds.S
5255+++ b/arch/ia64/kernel/vmlinux.lds.S
5256@@ -198,7 +198,7 @@ SECTIONS {
5257 /* Per-cpu data: */
5258 . = ALIGN(PERCPU_PAGE_SIZE);
5259 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5260- __phys_per_cpu_start = __per_cpu_load;
5261+ __phys_per_cpu_start = per_cpu_load;
5262 /*
5263 * ensure percpu data fits
5264 * into percpu page size
5265diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5266index 7225dad..2a7c8256 100644
5267--- a/arch/ia64/mm/fault.c
5268+++ b/arch/ia64/mm/fault.c
5269@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5270 return pte_present(pte);
5271 }
5272
5273+#ifdef CONFIG_PAX_PAGEEXEC
5274+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5275+{
5276+ unsigned long i;
5277+
5278+ printk(KERN_ERR "PAX: bytes at PC: ");
5279+ for (i = 0; i < 8; i++) {
5280+ unsigned int c;
5281+ if (get_user(c, (unsigned int *)pc+i))
5282+ printk(KERN_CONT "???????? ");
5283+ else
5284+ printk(KERN_CONT "%08x ", c);
5285+ }
5286+ printk("\n");
5287+}
5288+#endif
5289+
5290 # define VM_READ_BIT 0
5291 # define VM_WRITE_BIT 1
5292 # define VM_EXEC_BIT 2
5293@@ -151,8 +168,21 @@ retry:
5294 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5295 goto bad_area;
5296
5297- if ((vma->vm_flags & mask) != mask)
5298+ if ((vma->vm_flags & mask) != mask) {
5299+
5300+#ifdef CONFIG_PAX_PAGEEXEC
5301+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5302+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5303+ goto bad_area;
5304+
5305+ up_read(&mm->mmap_sem);
5306+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5307+ do_group_exit(SIGKILL);
5308+ }
5309+#endif
5310+
5311 goto bad_area;
5312+ }
5313
5314 /*
5315 * If for any reason at all we couldn't handle the fault, make
5316diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5317index 68232db..6ca80af 100644
5318--- a/arch/ia64/mm/hugetlbpage.c
5319+++ b/arch/ia64/mm/hugetlbpage.c
5320@@ -154,6 +154,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5321 unsigned long pgoff, unsigned long flags)
5322 {
5323 struct vm_unmapped_area_info info;
5324+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5325
5326 if (len > RGN_MAP_LIMIT)
5327 return -ENOMEM;
5328@@ -177,6 +178,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5329 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5330 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5331 info.align_offset = 0;
5332+ info.threadstack_offset = offset;
5333 return vm_unmapped_area(&info);
5334 }
5335
5336diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5337index 88504ab..cbb6c9f 100644
5338--- a/arch/ia64/mm/init.c
5339+++ b/arch/ia64/mm/init.c
5340@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5341 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5342 vma->vm_end = vma->vm_start + PAGE_SIZE;
5343 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5344+
5345+#ifdef CONFIG_PAX_PAGEEXEC
5346+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5347+ vma->vm_flags &= ~VM_EXEC;
5348+
5349+#ifdef CONFIG_PAX_MPROTECT
5350+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5351+ vma->vm_flags &= ~VM_MAYEXEC;
5352+#endif
5353+
5354+ }
5355+#endif
5356+
5357 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5358 down_write(&current->mm->mmap_sem);
5359 if (insert_vm_struct(current->mm, vma)) {
5360diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5361index 40b3ee9..8c2c112 100644
5362--- a/arch/m32r/include/asm/cache.h
5363+++ b/arch/m32r/include/asm/cache.h
5364@@ -1,8 +1,10 @@
5365 #ifndef _ASM_M32R_CACHE_H
5366 #define _ASM_M32R_CACHE_H
5367
5368+#include <linux/const.h>
5369+
5370 /* L1 cache line size */
5371 #define L1_CACHE_SHIFT 4
5372-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5373+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5374
5375 #endif /* _ASM_M32R_CACHE_H */
5376diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5377index 82abd15..d95ae5d 100644
5378--- a/arch/m32r/lib/usercopy.c
5379+++ b/arch/m32r/lib/usercopy.c
5380@@ -14,6 +14,9 @@
5381 unsigned long
5382 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5383 {
5384+ if ((long)n < 0)
5385+ return n;
5386+
5387 prefetch(from);
5388 if (access_ok(VERIFY_WRITE, to, n))
5389 __copy_user(to,from,n);
5390@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5391 unsigned long
5392 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5393 {
5394+ if ((long)n < 0)
5395+ return n;
5396+
5397 prefetchw(to);
5398 if (access_ok(VERIFY_READ, from, n))
5399 __copy_user_zeroing(to,from,n);
5400diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5401index 0395c51..5f26031 100644
5402--- a/arch/m68k/include/asm/cache.h
5403+++ b/arch/m68k/include/asm/cache.h
5404@@ -4,9 +4,11 @@
5405 #ifndef __ARCH_M68K_CACHE_H
5406 #define __ARCH_M68K_CACHE_H
5407
5408+#include <linux/const.h>
5409+
5410 /* bytes per L1 cache line */
5411 #define L1_CACHE_SHIFT 4
5412-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5413+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5414
5415 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5416
5417diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5418index 0424315..defcca9 100644
5419--- a/arch/metag/mm/hugetlbpage.c
5420+++ b/arch/metag/mm/hugetlbpage.c
5421@@ -205,6 +205,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5422 info.high_limit = TASK_SIZE;
5423 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5424 info.align_offset = 0;
5425+ info.threadstack_offset = 0;
5426 return vm_unmapped_area(&info);
5427 }
5428
5429diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5430index 4efe96a..60e8699 100644
5431--- a/arch/microblaze/include/asm/cache.h
5432+++ b/arch/microblaze/include/asm/cache.h
5433@@ -13,11 +13,12 @@
5434 #ifndef _ASM_MICROBLAZE_CACHE_H
5435 #define _ASM_MICROBLAZE_CACHE_H
5436
5437+#include <linux/const.h>
5438 #include <asm/registers.h>
5439
5440 #define L1_CACHE_SHIFT 5
5441 /* word-granular cache in microblaze */
5442-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5443+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5444
5445 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5446
5447diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5448index 650de39..6982b02 100644
5449--- a/arch/mips/Kconfig
5450+++ b/arch/mips/Kconfig
5451@@ -2268,6 +2268,7 @@ source "kernel/Kconfig.preempt"
5452
5453 config KEXEC
5454 bool "Kexec system call"
5455+ depends on !GRKERNSEC_KMEM
5456 help
5457 kexec is a system call that implements the ability to shutdown your
5458 current kernel, and to start another kernel. It is like a reboot
5459diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5460index 7eed2f2..c4e385d 100644
5461--- a/arch/mips/include/asm/atomic.h
5462+++ b/arch/mips/include/asm/atomic.h
5463@@ -21,15 +21,39 @@
5464 #include <asm/cmpxchg.h>
5465 #include <asm/war.h>
5466
5467+#ifdef CONFIG_GENERIC_ATOMIC64
5468+#include <asm-generic/atomic64.h>
5469+#endif
5470+
5471 #define ATOMIC_INIT(i) { (i) }
5472
5473+#ifdef CONFIG_64BIT
5474+#define _ASM_EXTABLE(from, to) \
5475+" .section __ex_table,\"a\"\n" \
5476+" .dword " #from ", " #to"\n" \
5477+" .previous\n"
5478+#else
5479+#define _ASM_EXTABLE(from, to) \
5480+" .section __ex_table,\"a\"\n" \
5481+" .word " #from ", " #to"\n" \
5482+" .previous\n"
5483+#endif
5484+
5485 /*
5486 * atomic_read - read atomic variable
5487 * @v: pointer of type atomic_t
5488 *
5489 * Atomically reads the value of @v.
5490 */
5491-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5492+static inline int atomic_read(const atomic_t *v)
5493+{
5494+ return (*(volatile const int *) &v->counter);
5495+}
5496+
5497+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5498+{
5499+ return (*(volatile const int *) &v->counter);
5500+}
5501
5502 /*
5503 * atomic_set - set atomic variable
5504@@ -38,7 +62,15 @@
5505 *
5506 * Atomically sets the value of @v to @i.
5507 */
5508-#define atomic_set(v, i) ((v)->counter = (i))
5509+static inline void atomic_set(atomic_t *v, int i)
5510+{
5511+ v->counter = i;
5512+}
5513+
5514+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5515+{
5516+ v->counter = i;
5517+}
5518
5519 /*
5520 * atomic_add - add integer to atomic variable
5521@@ -47,7 +79,67 @@
5522 *
5523 * Atomically adds @i to @v.
5524 */
5525-static __inline__ void atomic_add(int i, atomic_t * v)
5526+static __inline__ void atomic_add(int i, atomic_t *v)
5527+{
5528+ int temp;
5529+
5530+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5531+ __asm__ __volatile__(
5532+ " .set mips3 \n"
5533+ "1: ll %0, %1 # atomic_add \n"
5534+#ifdef CONFIG_PAX_REFCOUNT
5535+ /* Exception on overflow. */
5536+ "2: add %0, %2 \n"
5537+#else
5538+ " addu %0, %2 \n"
5539+#endif
5540+ " sc %0, %1 \n"
5541+ " beqzl %0, 1b \n"
5542+#ifdef CONFIG_PAX_REFCOUNT
5543+ "3: \n"
5544+ _ASM_EXTABLE(2b, 3b)
5545+#endif
5546+ " .set mips0 \n"
5547+ : "=&r" (temp), "+m" (v->counter)
5548+ : "Ir" (i));
5549+ } else if (kernel_uses_llsc) {
5550+ __asm__ __volatile__(
5551+ " .set mips3 \n"
5552+ "1: ll %0, %1 # atomic_add \n"
5553+#ifdef CONFIG_PAX_REFCOUNT
5554+ /* Exception on overflow. */
5555+ "2: add %0, %2 \n"
5556+#else
5557+ " addu %0, %2 \n"
5558+#endif
5559+ " sc %0, %1 \n"
5560+ " beqz %0, 1b \n"
5561+#ifdef CONFIG_PAX_REFCOUNT
5562+ "3: \n"
5563+ _ASM_EXTABLE(2b, 3b)
5564+#endif
5565+ " .set mips0 \n"
5566+ : "=&r" (temp), "+m" (v->counter)
5567+ : "Ir" (i));
5568+ } else {
5569+ unsigned long flags;
5570+
5571+ raw_local_irq_save(flags);
5572+ __asm__ __volatile__(
5573+#ifdef CONFIG_PAX_REFCOUNT
5574+ /* Exception on overflow. */
5575+ "1: add %0, %1 \n"
5576+ "2: \n"
5577+ _ASM_EXTABLE(1b, 2b)
5578+#else
5579+ " addu %0, %1 \n"
5580+#endif
5581+ : "+r" (v->counter) : "Ir" (i));
5582+ raw_local_irq_restore(flags);
5583+ }
5584+}
5585+
5586+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5587 {
5588 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5589 int temp;
5590@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5591 *
5592 * Atomically subtracts @i from @v.
5593 */
5594-static __inline__ void atomic_sub(int i, atomic_t * v)
5595+static __inline__ void atomic_sub(int i, atomic_t *v)
5596+{
5597+ int temp;
5598+
5599+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5600+ __asm__ __volatile__(
5601+ " .set mips3 \n"
5602+ "1: ll %0, %1 # atomic64_sub \n"
5603+#ifdef CONFIG_PAX_REFCOUNT
5604+ /* Exception on overflow. */
5605+ "2: sub %0, %2 \n"
5606+#else
5607+ " subu %0, %2 \n"
5608+#endif
5609+ " sc %0, %1 \n"
5610+ " beqzl %0, 1b \n"
5611+#ifdef CONFIG_PAX_REFCOUNT
5612+ "3: \n"
5613+ _ASM_EXTABLE(2b, 3b)
5614+#endif
5615+ " .set mips0 \n"
5616+ : "=&r" (temp), "+m" (v->counter)
5617+ : "Ir" (i));
5618+ } else if (kernel_uses_llsc) {
5619+ __asm__ __volatile__(
5620+ " .set mips3 \n"
5621+ "1: ll %0, %1 # atomic64_sub \n"
5622+#ifdef CONFIG_PAX_REFCOUNT
5623+ /* Exception on overflow. */
5624+ "2: sub %0, %2 \n"
5625+#else
5626+ " subu %0, %2 \n"
5627+#endif
5628+ " sc %0, %1 \n"
5629+ " beqz %0, 1b \n"
5630+#ifdef CONFIG_PAX_REFCOUNT
5631+ "3: \n"
5632+ _ASM_EXTABLE(2b, 3b)
5633+#endif
5634+ " .set mips0 \n"
5635+ : "=&r" (temp), "+m" (v->counter)
5636+ : "Ir" (i));
5637+ } else {
5638+ unsigned long flags;
5639+
5640+ raw_local_irq_save(flags);
5641+ __asm__ __volatile__(
5642+#ifdef CONFIG_PAX_REFCOUNT
5643+ /* Exception on overflow. */
5644+ "1: sub %0, %1 \n"
5645+ "2: \n"
5646+ _ASM_EXTABLE(1b, 2b)
5647+#else
5648+ " subu %0, %1 \n"
5649+#endif
5650+ : "+r" (v->counter) : "Ir" (i));
5651+ raw_local_irq_restore(flags);
5652+ }
5653+}
5654+
5655+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
5656 {
5657 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5658 int temp;
5659@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5660 /*
5661 * Same as above, but return the result value
5662 */
5663-static __inline__ int atomic_add_return(int i, atomic_t * v)
5664+static __inline__ int atomic_add_return(int i, atomic_t *v)
5665+{
5666+ int result;
5667+ int temp;
5668+
5669+ smp_mb__before_llsc();
5670+
5671+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5672+ __asm__ __volatile__(
5673+ " .set mips3 \n"
5674+ "1: ll %1, %2 # atomic_add_return \n"
5675+#ifdef CONFIG_PAX_REFCOUNT
5676+ "2: add %0, %1, %3 \n"
5677+#else
5678+ " addu %0, %1, %3 \n"
5679+#endif
5680+ " sc %0, %2 \n"
5681+ " beqzl %0, 1b \n"
5682+#ifdef CONFIG_PAX_REFCOUNT
5683+ " b 4f \n"
5684+ " .set noreorder \n"
5685+ "3: b 5f \n"
5686+ " move %0, %1 \n"
5687+ " .set reorder \n"
5688+ _ASM_EXTABLE(2b, 3b)
5689+#endif
5690+ "4: addu %0, %1, %3 \n"
5691+#ifdef CONFIG_PAX_REFCOUNT
5692+ "5: \n"
5693+#endif
5694+ " .set mips0 \n"
5695+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5696+ : "Ir" (i));
5697+ } else if (kernel_uses_llsc) {
5698+ __asm__ __volatile__(
5699+ " .set mips3 \n"
5700+ "1: ll %1, %2 # atomic_add_return \n"
5701+#ifdef CONFIG_PAX_REFCOUNT
5702+ "2: add %0, %1, %3 \n"
5703+#else
5704+ " addu %0, %1, %3 \n"
5705+#endif
5706+ " sc %0, %2 \n"
5707+ " bnez %0, 4f \n"
5708+ " b 1b \n"
5709+#ifdef CONFIG_PAX_REFCOUNT
5710+ " .set noreorder \n"
5711+ "3: b 5f \n"
5712+ " move %0, %1 \n"
5713+ " .set reorder \n"
5714+ _ASM_EXTABLE(2b, 3b)
5715+#endif
5716+ "4: addu %0, %1, %3 \n"
5717+#ifdef CONFIG_PAX_REFCOUNT
5718+ "5: \n"
5719+#endif
5720+ " .set mips0 \n"
5721+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5722+ : "Ir" (i));
5723+ } else {
5724+ unsigned long flags;
5725+
5726+ raw_local_irq_save(flags);
5727+ __asm__ __volatile__(
5728+ " lw %0, %1 \n"
5729+#ifdef CONFIG_PAX_REFCOUNT
5730+ /* Exception on overflow. */
5731+ "1: add %0, %2 \n"
5732+#else
5733+ " addu %0, %2 \n"
5734+#endif
5735+ " sw %0, %1 \n"
5736+#ifdef CONFIG_PAX_REFCOUNT
5737+ /* Note: Dest reg is not modified on overflow */
5738+ "2: \n"
5739+ _ASM_EXTABLE(1b, 2b)
5740+#endif
5741+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5742+ raw_local_irq_restore(flags);
5743+ }
5744+
5745+ smp_llsc_mb();
5746+
5747+ return result;
5748+}
5749+
5750+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5751 {
5752 int result;
5753
5754@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5755 return result;
5756 }
5757
5758-static __inline__ int atomic_sub_return(int i, atomic_t * v)
5759+static __inline__ int atomic_sub_return(int i, atomic_t *v)
5760+{
5761+ int result;
5762+ int temp;
5763+
5764+ smp_mb__before_llsc();
5765+
5766+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5767+ __asm__ __volatile__(
5768+ " .set mips3 \n"
5769+ "1: ll %1, %2 # atomic_sub_return \n"
5770+#ifdef CONFIG_PAX_REFCOUNT
5771+ "2: sub %0, %1, %3 \n"
5772+#else
5773+ " subu %0, %1, %3 \n"
5774+#endif
5775+ " sc %0, %2 \n"
5776+ " beqzl %0, 1b \n"
5777+#ifdef CONFIG_PAX_REFCOUNT
5778+ " b 4f \n"
5779+ " .set noreorder \n"
5780+ "3: b 5f \n"
5781+ " move %0, %1 \n"
5782+ " .set reorder \n"
5783+ _ASM_EXTABLE(2b, 3b)
5784+#endif
5785+ "4: subu %0, %1, %3 \n"
5786+#ifdef CONFIG_PAX_REFCOUNT
5787+ "5: \n"
5788+#endif
5789+ " .set mips0 \n"
5790+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5791+ : "Ir" (i), "m" (v->counter)
5792+ : "memory");
5793+ } else if (kernel_uses_llsc) {
5794+ __asm__ __volatile__(
5795+ " .set mips3 \n"
5796+ "1: ll %1, %2 # atomic_sub_return \n"
5797+#ifdef CONFIG_PAX_REFCOUNT
5798+ "2: sub %0, %1, %3 \n"
5799+#else
5800+ " subu %0, %1, %3 \n"
5801+#endif
5802+ " sc %0, %2 \n"
5803+ " bnez %0, 4f \n"
5804+ " b 1b \n"
5805+#ifdef CONFIG_PAX_REFCOUNT
5806+ " .set noreorder \n"
5807+ "3: b 5f \n"
5808+ " move %0, %1 \n"
5809+ " .set reorder \n"
5810+ _ASM_EXTABLE(2b, 3b)
5811+#endif
5812+ "4: subu %0, %1, %3 \n"
5813+#ifdef CONFIG_PAX_REFCOUNT
5814+ "5: \n"
5815+#endif
5816+ " .set mips0 \n"
5817+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5818+ : "Ir" (i));
5819+ } else {
5820+ unsigned long flags;
5821+
5822+ raw_local_irq_save(flags);
5823+ __asm__ __volatile__(
5824+ " lw %0, %1 \n"
5825+#ifdef CONFIG_PAX_REFCOUNT
5826+ /* Exception on overflow. */
5827+ "1: sub %0, %2 \n"
5828+#else
5829+ " subu %0, %2 \n"
5830+#endif
5831+ " sw %0, %1 \n"
5832+#ifdef CONFIG_PAX_REFCOUNT
5833+ /* Note: Dest reg is not modified on overflow */
5834+ "2: \n"
5835+ _ASM_EXTABLE(1b, 2b)
5836+#endif
5837+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5838+ raw_local_irq_restore(flags);
5839+ }
5840+
5841+ smp_llsc_mb();
5842+
5843+ return result;
5844+}
5845+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5846 {
5847 int result;
5848
5849@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5850 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5851 * The function returns the old value of @v minus @i.
5852 */
5853-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5854+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5855 {
5856 int result;
5857
5858@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5859 return result;
5860 }
5861
5862-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5863-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5864+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5865+{
5866+ return cmpxchg(&v->counter, old, new);
5867+}
5868+
5869+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5870+ int new)
5871+{
5872+ return cmpxchg(&(v->counter), old, new);
5873+}
5874+
5875+static inline int atomic_xchg(atomic_t *v, int new)
5876+{
5877+ return xchg(&v->counter, new);
5878+}
5879+
5880+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5881+{
5882+ return xchg(&(v->counter), new);
5883+}
5884
5885 /**
5886 * __atomic_add_unless - add unless the number is a given value
5887@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5888
5889 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5890 #define atomic_inc_return(v) atomic_add_return(1, (v))
5891+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5892+{
5893+ return atomic_add_return_unchecked(1, v);
5894+}
5895
5896 /*
5897 * atomic_sub_and_test - subtract value from variable and test result
5898@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5899 * other cases.
5900 */
5901 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5902+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5903+{
5904+ return atomic_add_return_unchecked(1, v) == 0;
5905+}
5906
5907 /*
5908 * atomic_dec_and_test - decrement by 1 and test
5909@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5910 * Atomically increments @v by 1.
5911 */
5912 #define atomic_inc(v) atomic_add(1, (v))
5913+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5914+{
5915+ atomic_add_unchecked(1, v);
5916+}
5917
5918 /*
5919 * atomic_dec - decrement and test
5920@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5921 * Atomically decrements @v by 1.
5922 */
5923 #define atomic_dec(v) atomic_sub(1, (v))
5924+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
5925+{
5926+ atomic_sub_unchecked(1, v);
5927+}
5928
5929 /*
5930 * atomic_add_negative - add and test if negative
5931@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5932 * @v: pointer of type atomic64_t
5933 *
5934 */
5935-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
5936+static inline long atomic64_read(const atomic64_t *v)
5937+{
5938+ return (*(volatile const long *) &v->counter);
5939+}
5940+
5941+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5942+{
5943+ return (*(volatile const long *) &v->counter);
5944+}
5945
5946 /*
5947 * atomic64_set - set atomic variable
5948 * @v: pointer of type atomic64_t
5949 * @i: required value
5950 */
5951-#define atomic64_set(v, i) ((v)->counter = (i))
5952+static inline void atomic64_set(atomic64_t *v, long i)
5953+{
5954+ v->counter = i;
5955+}
5956+
5957+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5958+{
5959+ v->counter = i;
5960+}
5961
5962 /*
5963 * atomic64_add - add integer to atomic variable
5964@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5965 *
5966 * Atomically adds @i to @v.
5967 */
5968-static __inline__ void atomic64_add(long i, atomic64_t * v)
5969+static __inline__ void atomic64_add(long i, atomic64_t *v)
5970+{
5971+ long temp;
5972+
5973+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5974+ __asm__ __volatile__(
5975+ " .set mips3 \n"
5976+ "1: lld %0, %1 # atomic64_add \n"
5977+#ifdef CONFIG_PAX_REFCOUNT
5978+ /* Exception on overflow. */
5979+ "2: dadd %0, %2 \n"
5980+#else
5981+ " daddu %0, %2 \n"
5982+#endif
5983+ " scd %0, %1 \n"
5984+ " beqzl %0, 1b \n"
5985+#ifdef CONFIG_PAX_REFCOUNT
5986+ "3: \n"
5987+ _ASM_EXTABLE(2b, 3b)
5988+#endif
5989+ " .set mips0 \n"
5990+ : "=&r" (temp), "+m" (v->counter)
5991+ : "Ir" (i));
5992+ } else if (kernel_uses_llsc) {
5993+ __asm__ __volatile__(
5994+ " .set mips3 \n"
5995+ "1: lld %0, %1 # atomic64_add \n"
5996+#ifdef CONFIG_PAX_REFCOUNT
5997+ /* Exception on overflow. */
5998+ "2: dadd %0, %2 \n"
5999+#else
6000+ " daddu %0, %2 \n"
6001+#endif
6002+ " scd %0, %1 \n"
6003+ " beqz %0, 1b \n"
6004+#ifdef CONFIG_PAX_REFCOUNT
6005+ "3: \n"
6006+ _ASM_EXTABLE(2b, 3b)
6007+#endif
6008+ " .set mips0 \n"
6009+ : "=&r" (temp), "+m" (v->counter)
6010+ : "Ir" (i));
6011+ } else {
6012+ unsigned long flags;
6013+
6014+ raw_local_irq_save(flags);
6015+ __asm__ __volatile__(
6016+#ifdef CONFIG_PAX_REFCOUNT
6017+ /* Exception on overflow. */
6018+ "1: dadd %0, %1 \n"
6019+ "2: \n"
6020+ _ASM_EXTABLE(1b, 2b)
6021+#else
6022+ " daddu %0, %1 \n"
6023+#endif
6024+ : "+r" (v->counter) : "Ir" (i));
6025+ raw_local_irq_restore(flags);
6026+ }
6027+}
6028+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6029 {
6030 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6031 long temp;
6032@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6033 *
6034 * Atomically subtracts @i from @v.
6035 */
6036-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6037+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6038+{
6039+ long temp;
6040+
6041+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6042+ __asm__ __volatile__(
6043+ " .set mips3 \n"
6044+ "1: lld %0, %1 # atomic64_sub \n"
6045+#ifdef CONFIG_PAX_REFCOUNT
6046+ /* Exception on overflow. */
6047+ "2: dsub %0, %2 \n"
6048+#else
6049+ " dsubu %0, %2 \n"
6050+#endif
6051+ " scd %0, %1 \n"
6052+ " beqzl %0, 1b \n"
6053+#ifdef CONFIG_PAX_REFCOUNT
6054+ "3: \n"
6055+ _ASM_EXTABLE(2b, 3b)
6056+#endif
6057+ " .set mips0 \n"
6058+ : "=&r" (temp), "+m" (v->counter)
6059+ : "Ir" (i));
6060+ } else if (kernel_uses_llsc) {
6061+ __asm__ __volatile__(
6062+ " .set mips3 \n"
6063+ "1: lld %0, %1 # atomic64_sub \n"
6064+#ifdef CONFIG_PAX_REFCOUNT
6065+ /* Exception on overflow. */
6066+ "2: dsub %0, %2 \n"
6067+#else
6068+ " dsubu %0, %2 \n"
6069+#endif
6070+ " scd %0, %1 \n"
6071+ " beqz %0, 1b \n"
6072+#ifdef CONFIG_PAX_REFCOUNT
6073+ "3: \n"
6074+ _ASM_EXTABLE(2b, 3b)
6075+#endif
6076+ " .set mips0 \n"
6077+ : "=&r" (temp), "+m" (v->counter)
6078+ : "Ir" (i));
6079+ } else {
6080+ unsigned long flags;
6081+
6082+ raw_local_irq_save(flags);
6083+ __asm__ __volatile__(
6084+#ifdef CONFIG_PAX_REFCOUNT
6085+ /* Exception on overflow. */
6086+ "1: dsub %0, %1 \n"
6087+ "2: \n"
6088+ _ASM_EXTABLE(1b, 2b)
6089+#else
6090+ " dsubu %0, %1 \n"
6091+#endif
6092+ : "+r" (v->counter) : "Ir" (i));
6093+ raw_local_irq_restore(flags);
6094+ }
6095+}
6096+
6097+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6098 {
6099 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6100 long temp;
6101@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6102 /*
6103 * Same as above, but return the result value
6104 */
6105-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6106+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6107+{
6108+ long result;
6109+ long temp;
6110+
6111+ smp_mb__before_llsc();
6112+
6113+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6114+ __asm__ __volatile__(
6115+ " .set mips3 \n"
6116+ "1: lld %1, %2 # atomic64_add_return \n"
6117+#ifdef CONFIG_PAX_REFCOUNT
6118+ "2: dadd %0, %1, %3 \n"
6119+#else
6120+ " daddu %0, %1, %3 \n"
6121+#endif
6122+ " scd %0, %2 \n"
6123+ " beqzl %0, 1b \n"
6124+#ifdef CONFIG_PAX_REFCOUNT
6125+ " b 4f \n"
6126+ " .set noreorder \n"
6127+ "3: b 5f \n"
6128+ " move %0, %1 \n"
6129+ " .set reorder \n"
6130+ _ASM_EXTABLE(2b, 3b)
6131+#endif
6132+ "4: daddu %0, %1, %3 \n"
6133+#ifdef CONFIG_PAX_REFCOUNT
6134+ "5: \n"
6135+#endif
6136+ " .set mips0 \n"
6137+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6138+ : "Ir" (i));
6139+ } else if (kernel_uses_llsc) {
6140+ __asm__ __volatile__(
6141+ " .set mips3 \n"
6142+ "1: lld %1, %2 # atomic64_add_return \n"
6143+#ifdef CONFIG_PAX_REFCOUNT
6144+ "2: dadd %0, %1, %3 \n"
6145+#else
6146+ " daddu %0, %1, %3 \n"
6147+#endif
6148+ " scd %0, %2 \n"
6149+ " bnez %0, 4f \n"
6150+ " b 1b \n"
6151+#ifdef CONFIG_PAX_REFCOUNT
6152+ " .set noreorder \n"
6153+ "3: b 5f \n"
6154+ " move %0, %1 \n"
6155+ " .set reorder \n"
6156+ _ASM_EXTABLE(2b, 3b)
6157+#endif
6158+ "4: daddu %0, %1, %3 \n"
6159+#ifdef CONFIG_PAX_REFCOUNT
6160+ "5: \n"
6161+#endif
6162+ " .set mips0 \n"
6163+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6164+ : "Ir" (i), "m" (v->counter)
6165+ : "memory");
6166+ } else {
6167+ unsigned long flags;
6168+
6169+ raw_local_irq_save(flags);
6170+ __asm__ __volatile__(
6171+ " ld %0, %1 \n"
6172+#ifdef CONFIG_PAX_REFCOUNT
6173+ /* Exception on overflow. */
6174+ "1: dadd %0, %2 \n"
6175+#else
6176+ " daddu %0, %2 \n"
6177+#endif
6178+ " sd %0, %1 \n"
6179+#ifdef CONFIG_PAX_REFCOUNT
6180+ /* Note: Dest reg is not modified on overflow */
6181+ "2: \n"
6182+ _ASM_EXTABLE(1b, 2b)
6183+#endif
6184+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6185+ raw_local_irq_restore(flags);
6186+ }
6187+
6188+ smp_llsc_mb();
6189+
6190+ return result;
6191+}
6192+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6193 {
6194 long result;
6195
6196@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6197 return result;
6198 }
6199
6200-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6201+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6202+{
6203+ long result;
6204+ long temp;
6205+
6206+ smp_mb__before_llsc();
6207+
6208+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6209+ long temp;
6210+
6211+ __asm__ __volatile__(
6212+ " .set mips3 \n"
6213+ "1: lld %1, %2 # atomic64_sub_return \n"
6214+#ifdef CONFIG_PAX_REFCOUNT
6215+ "2: dsub %0, %1, %3 \n"
6216+#else
6217+ " dsubu %0, %1, %3 \n"
6218+#endif
6219+ " scd %0, %2 \n"
6220+ " beqzl %0, 1b \n"
6221+#ifdef CONFIG_PAX_REFCOUNT
6222+ " b 4f \n"
6223+ " .set noreorder \n"
6224+ "3: b 5f \n"
6225+ " move %0, %1 \n"
6226+ " .set reorder \n"
6227+ _ASM_EXTABLE(2b, 3b)
6228+#endif
6229+ "4: dsubu %0, %1, %3 \n"
6230+#ifdef CONFIG_PAX_REFCOUNT
6231+ "5: \n"
6232+#endif
6233+ " .set mips0 \n"
6234+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6235+ : "Ir" (i), "m" (v->counter)
6236+ : "memory");
6237+ } else if (kernel_uses_llsc) {
6238+ __asm__ __volatile__(
6239+ " .set mips3 \n"
6240+ "1: lld %1, %2 # atomic64_sub_return \n"
6241+#ifdef CONFIG_PAX_REFCOUNT
6242+ "2: dsub %0, %1, %3 \n"
6243+#else
6244+ " dsubu %0, %1, %3 \n"
6245+#endif
6246+ " scd %0, %2 \n"
6247+ " bnez %0, 4f \n"
6248+ " b 1b \n"
6249+#ifdef CONFIG_PAX_REFCOUNT
6250+ " .set noreorder \n"
6251+ "3: b 5f \n"
6252+ " move %0, %1 \n"
6253+ " .set reorder \n"
6254+ _ASM_EXTABLE(2b, 3b)
6255+#endif
6256+ "4: dsubu %0, %1, %3 \n"
6257+#ifdef CONFIG_PAX_REFCOUNT
6258+ "5: \n"
6259+#endif
6260+ " .set mips0 \n"
6261+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6262+ : "Ir" (i), "m" (v->counter)
6263+ : "memory");
6264+ } else {
6265+ unsigned long flags;
6266+
6267+ raw_local_irq_save(flags);
6268+ __asm__ __volatile__(
6269+ " ld %0, %1 \n"
6270+#ifdef CONFIG_PAX_REFCOUNT
6271+ /* Exception on overflow. */
6272+ "1: dsub %0, %2 \n"
6273+#else
6274+ " dsubu %0, %2 \n"
6275+#endif
6276+ " sd %0, %1 \n"
6277+#ifdef CONFIG_PAX_REFCOUNT
6278+ /* Note: Dest reg is not modified on overflow */
6279+ "2: \n"
6280+ _ASM_EXTABLE(1b, 2b)
6281+#endif
6282+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6283+ raw_local_irq_restore(flags);
6284+ }
6285+
6286+ smp_llsc_mb();
6287+
6288+ return result;
6289+}
6290+
6291+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6292 {
6293 long result;
6294
6295@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6296 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6297 * The function returns the old value of @v minus @i.
6298 */
6299-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6300+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6301 {
6302 long result;
6303
6304@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6305 return result;
6306 }
6307
6308-#define atomic64_cmpxchg(v, o, n) \
6309- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6310-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6311+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6312+{
6313+ return cmpxchg(&v->counter, old, new);
6314+}
6315+
6316+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6317+ long new)
6318+{
6319+ return cmpxchg(&(v->counter), old, new);
6320+}
6321+
6322+static inline long atomic64_xchg(atomic64_t *v, long new)
6323+{
6324+ return xchg(&v->counter, new);
6325+}
6326+
6327+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6328+{
6329+ return xchg(&(v->counter), new);
6330+}
6331
6332 /**
6333 * atomic64_add_unless - add unless the number is a given value
6334@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6335
6336 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6337 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6338+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6339
6340 /*
6341 * atomic64_sub_and_test - subtract value from variable and test result
6342@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6343 * other cases.
6344 */
6345 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6346+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6347
6348 /*
6349 * atomic64_dec_and_test - decrement by 1 and test
6350@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6351 * Atomically increments @v by 1.
6352 */
6353 #define atomic64_inc(v) atomic64_add(1, (v))
6354+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6355
6356 /*
6357 * atomic64_dec - decrement and test
6358@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6359 * Atomically decrements @v by 1.
6360 */
6361 #define atomic64_dec(v) atomic64_sub(1, (v))
6362+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6363
6364 /*
6365 * atomic64_add_negative - add and test if negative
6366diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6367index b4db69f..8f3b093 100644
6368--- a/arch/mips/include/asm/cache.h
6369+++ b/arch/mips/include/asm/cache.h
6370@@ -9,10 +9,11 @@
6371 #ifndef _ASM_CACHE_H
6372 #define _ASM_CACHE_H
6373
6374+#include <linux/const.h>
6375 #include <kmalloc.h>
6376
6377 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6378-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6379+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6380
6381 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6382 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6383diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6384index a66359e..d3d474a 100644
6385--- a/arch/mips/include/asm/elf.h
6386+++ b/arch/mips/include/asm/elf.h
6387@@ -373,13 +373,16 @@ extern const char *__elf_platform;
6388 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6389 #endif
6390
6391+#ifdef CONFIG_PAX_ASLR
6392+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6393+
6394+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6395+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6396+#endif
6397+
6398 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6399 struct linux_binprm;
6400 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6401 int uses_interp);
6402
6403-struct mm_struct;
6404-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6405-#define arch_randomize_brk arch_randomize_brk
6406-
6407 #endif /* _ASM_ELF_H */
6408diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6409index c1f6afa..38cc6e9 100644
6410--- a/arch/mips/include/asm/exec.h
6411+++ b/arch/mips/include/asm/exec.h
6412@@ -12,6 +12,6 @@
6413 #ifndef _ASM_EXEC_H
6414 #define _ASM_EXEC_H
6415
6416-extern unsigned long arch_align_stack(unsigned long sp);
6417+#define arch_align_stack(x) ((x) & ~0xfUL)
6418
6419 #endif /* _ASM_EXEC_H */
6420diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6421index d44622c..64990d2 100644
6422--- a/arch/mips/include/asm/local.h
6423+++ b/arch/mips/include/asm/local.h
6424@@ -12,15 +12,25 @@ typedef struct
6425 atomic_long_t a;
6426 } local_t;
6427
6428+typedef struct {
6429+ atomic_long_unchecked_t a;
6430+} local_unchecked_t;
6431+
6432 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6433
6434 #define local_read(l) atomic_long_read(&(l)->a)
6435+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6436 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6437+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6438
6439 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6440+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6441 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6442+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6443 #define local_inc(l) atomic_long_inc(&(l)->a)
6444+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6445 #define local_dec(l) atomic_long_dec(&(l)->a)
6446+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6447
6448 /*
6449 * Same as above, but return the result value
6450@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6451 return result;
6452 }
6453
6454+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6455+{
6456+ unsigned long result;
6457+
6458+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6459+ unsigned long temp;
6460+
6461+ __asm__ __volatile__(
6462+ " .set mips3 \n"
6463+ "1:" __LL "%1, %2 # local_add_return \n"
6464+ " addu %0, %1, %3 \n"
6465+ __SC "%0, %2 \n"
6466+ " beqzl %0, 1b \n"
6467+ " addu %0, %1, %3 \n"
6468+ " .set mips0 \n"
6469+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6470+ : "Ir" (i), "m" (l->a.counter)
6471+ : "memory");
6472+ } else if (kernel_uses_llsc) {
6473+ unsigned long temp;
6474+
6475+ __asm__ __volatile__(
6476+ " .set mips3 \n"
6477+ "1:" __LL "%1, %2 # local_add_return \n"
6478+ " addu %0, %1, %3 \n"
6479+ __SC "%0, %2 \n"
6480+ " beqz %0, 1b \n"
6481+ " addu %0, %1, %3 \n"
6482+ " .set mips0 \n"
6483+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6484+ : "Ir" (i), "m" (l->a.counter)
6485+ : "memory");
6486+ } else {
6487+ unsigned long flags;
6488+
6489+ local_irq_save(flags);
6490+ result = l->a.counter;
6491+ result += i;
6492+ l->a.counter = result;
6493+ local_irq_restore(flags);
6494+ }
6495+
6496+ return result;
6497+}
6498+
6499 static __inline__ long local_sub_return(long i, local_t * l)
6500 {
6501 unsigned long result;
6502@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6503
6504 #define local_cmpxchg(l, o, n) \
6505 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6506+#define local_cmpxchg_unchecked(l, o, n) \
6507+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6508 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6509
6510 /**
6511diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6512index f6be474..12ad554 100644
6513--- a/arch/mips/include/asm/page.h
6514+++ b/arch/mips/include/asm/page.h
6515@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6516 #ifdef CONFIG_CPU_MIPS32
6517 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6518 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6519- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6520+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6521 #else
6522 typedef struct { unsigned long long pte; } pte_t;
6523 #define pte_val(x) ((x).pte)
6524diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6525index b336037..5b874cc 100644
6526--- a/arch/mips/include/asm/pgalloc.h
6527+++ b/arch/mips/include/asm/pgalloc.h
6528@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6529 {
6530 set_pud(pud, __pud((unsigned long)pmd));
6531 }
6532+
6533+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6534+{
6535+ pud_populate(mm, pud, pmd);
6536+}
6537 #endif
6538
6539 /*
6540diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
6541index 25da651..ae2a259 100644
6542--- a/arch/mips/include/asm/smtc_proc.h
6543+++ b/arch/mips/include/asm/smtc_proc.h
6544@@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6545
6546 /* Count of number of recoveries of "stolen" FPU access rights on 34K */
6547
6548-extern atomic_t smtc_fpu_recoveries;
6549+extern atomic_unchecked_t smtc_fpu_recoveries;
6550
6551 #endif /* __ASM_SMTC_PROC_H */
6552diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6553index 4f58ef6..5e7081b 100644
6554--- a/arch/mips/include/asm/thread_info.h
6555+++ b/arch/mips/include/asm/thread_info.h
6556@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
6557 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
6558 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6559 #define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
6560+/* li takes a 32bit immediate */
6561+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6562 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6563
6564 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6565@@ -132,13 +134,14 @@ static inline struct thread_info *current_thread_info(void)
6566 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
6567 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6568 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6569+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6570
6571 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6572- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6573+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6574
6575 /* work to do in syscall_trace_leave() */
6576 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6577- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6578+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6579
6580 /* work to do on interrupt/exception return */
6581 #define _TIF_WORK_MASK \
6582@@ -146,7 +149,7 @@ static inline struct thread_info *current_thread_info(void)
6583 /* work to do on any return to u-space */
6584 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6585 _TIF_WORK_SYSCALL_EXIT | \
6586- _TIF_SYSCALL_TRACEPOINT)
6587+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6588
6589 /*
6590 * We stash processor id into a COP0 register to retrieve it fast
6591diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6592index 1188e00..41cf144 100644
6593--- a/arch/mips/kernel/binfmt_elfn32.c
6594+++ b/arch/mips/kernel/binfmt_elfn32.c
6595@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6596 #undef ELF_ET_DYN_BASE
6597 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6598
6599+#ifdef CONFIG_PAX_ASLR
6600+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6601+
6602+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6603+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6604+#endif
6605+
6606 #include <asm/processor.h>
6607 #include <linux/module.h>
6608 #include <linux/elfcore.h>
6609diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6610index 202e581..689ca79 100644
6611--- a/arch/mips/kernel/binfmt_elfo32.c
6612+++ b/arch/mips/kernel/binfmt_elfo32.c
6613@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6614 #undef ELF_ET_DYN_BASE
6615 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6616
6617+#ifdef CONFIG_PAX_ASLR
6618+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6619+
6620+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6621+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6622+#endif
6623+
6624 #include <asm/processor.h>
6625
6626 /*
6627diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6628index d1fea7a..45602ea 100644
6629--- a/arch/mips/kernel/irq.c
6630+++ b/arch/mips/kernel/irq.c
6631@@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq)
6632 printk("unexpected IRQ # %d\n", irq);
6633 }
6634
6635-atomic_t irq_err_count;
6636+atomic_unchecked_t irq_err_count;
6637
6638 int arch_show_interrupts(struct seq_file *p, int prec)
6639 {
6640- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6641+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6642 return 0;
6643 }
6644
6645 asmlinkage void spurious_interrupt(void)
6646 {
6647- atomic_inc(&irq_err_count);
6648+ atomic_inc_unchecked(&irq_err_count);
6649 }
6650
6651 void __init init_IRQ(void)
6652diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6653index ddc7610..8c58f17 100644
6654--- a/arch/mips/kernel/process.c
6655+++ b/arch/mips/kernel/process.c
6656@@ -566,15 +566,3 @@ unsigned long get_wchan(struct task_struct *task)
6657 out:
6658 return pc;
6659 }
6660-
6661-/*
6662- * Don't forget that the stack pointer must be aligned on a 8 bytes
6663- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6664- */
6665-unsigned long arch_align_stack(unsigned long sp)
6666-{
6667- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6668- sp -= get_random_int() & ~PAGE_MASK;
6669-
6670- return sp & ALMASK;
6671-}
6672diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6673index b52e1d2..1a3ca09 100644
6674--- a/arch/mips/kernel/ptrace.c
6675+++ b/arch/mips/kernel/ptrace.c
6676@@ -652,6 +652,10 @@ long arch_ptrace(struct task_struct *child, long request,
6677 return ret;
6678 }
6679
6680+#ifdef CONFIG_GRKERNSEC_SETXID
6681+extern void gr_delayed_cred_worker(void);
6682+#endif
6683+
6684 /*
6685 * Notification of system call entry/exit
6686 * - triggered by current->work.syscall_trace
6687@@ -668,6 +672,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6688 tracehook_report_syscall_entry(regs))
6689 ret = -1;
6690
6691+#ifdef CONFIG_GRKERNSEC_SETXID
6692+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6693+ gr_delayed_cred_worker();
6694+#endif
6695+
6696 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6697 trace_sys_enter(regs, regs->regs[2]);
6698
6699diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
6700index c10aa84..9ec2e60 100644
6701--- a/arch/mips/kernel/smtc-proc.c
6702+++ b/arch/mips/kernel/smtc-proc.c
6703@@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS];
6704
6705 struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6706
6707-atomic_t smtc_fpu_recoveries;
6708+atomic_unchecked_t smtc_fpu_recoveries;
6709
6710 static int smtc_proc_show(struct seq_file *m, void *v)
6711 {
6712@@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_file *m, void *v)
6713 for(i = 0; i < NR_CPUS; i++)
6714 seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
6715 seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
6716- atomic_read(&smtc_fpu_recoveries));
6717+ atomic_read_unchecked(&smtc_fpu_recoveries));
6718 return 0;
6719 }
6720
6721@@ -73,7 +73,7 @@ void init_smtc_stats(void)
6722 smtc_cpu_stats[i].selfipis = 0;
6723 }
6724
6725- atomic_set(&smtc_fpu_recoveries, 0);
6726+ atomic_set_unchecked(&smtc_fpu_recoveries, 0);
6727
6728 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
6729 }
6730diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
6731index dfc1b91..11a2c07 100644
6732--- a/arch/mips/kernel/smtc.c
6733+++ b/arch/mips/kernel/smtc.c
6734@@ -1359,7 +1359,7 @@ void smtc_soft_dump(void)
6735 }
6736 smtc_ipi_qdump();
6737 printk("%d Recoveries of \"stolen\" FPU\n",
6738- atomic_read(&smtc_fpu_recoveries));
6739+ atomic_read_unchecked(&smtc_fpu_recoveries));
6740 }
6741
6742
6743diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6744index 84536bf..79caa4d 100644
6745--- a/arch/mips/kernel/sync-r4k.c
6746+++ b/arch/mips/kernel/sync-r4k.c
6747@@ -21,8 +21,8 @@
6748 #include <asm/mipsregs.h>
6749
6750 static atomic_t count_start_flag = ATOMIC_INIT(0);
6751-static atomic_t count_count_start = ATOMIC_INIT(0);
6752-static atomic_t count_count_stop = ATOMIC_INIT(0);
6753+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6754+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6755 static atomic_t count_reference = ATOMIC_INIT(0);
6756
6757 #define COUNTON 100
6758@@ -69,13 +69,13 @@ void synchronise_count_master(int cpu)
6759
6760 for (i = 0; i < NR_LOOPS; i++) {
6761 /* slaves loop on '!= 2' */
6762- while (atomic_read(&count_count_start) != 1)
6763+ while (atomic_read_unchecked(&count_count_start) != 1)
6764 mb();
6765- atomic_set(&count_count_stop, 0);
6766+ atomic_set_unchecked(&count_count_stop, 0);
6767 smp_wmb();
6768
6769 /* this lets the slaves write their count register */
6770- atomic_inc(&count_count_start);
6771+ atomic_inc_unchecked(&count_count_start);
6772
6773 /*
6774 * Everyone initialises count in the last loop:
6775@@ -86,11 +86,11 @@ void synchronise_count_master(int cpu)
6776 /*
6777 * Wait for all slaves to leave the synchronization point:
6778 */
6779- while (atomic_read(&count_count_stop) != 1)
6780+ while (atomic_read_unchecked(&count_count_stop) != 1)
6781 mb();
6782- atomic_set(&count_count_start, 0);
6783+ atomic_set_unchecked(&count_count_start, 0);
6784 smp_wmb();
6785- atomic_inc(&count_count_stop);
6786+ atomic_inc_unchecked(&count_count_stop);
6787 }
6788 /* Arrange for an interrupt in a short while */
6789 write_c0_compare(read_c0_count() + COUNTON);
6790@@ -131,8 +131,8 @@ void synchronise_count_slave(int cpu)
6791 initcount = atomic_read(&count_reference);
6792
6793 for (i = 0; i < NR_LOOPS; i++) {
6794- atomic_inc(&count_count_start);
6795- while (atomic_read(&count_count_start) != 2)
6796+ atomic_inc_unchecked(&count_count_start);
6797+ while (atomic_read_unchecked(&count_count_start) != 2)
6798 mb();
6799
6800 /*
6801@@ -141,8 +141,8 @@ void synchronise_count_slave(int cpu)
6802 if (i == NR_LOOPS-1)
6803 write_c0_count(initcount);
6804
6805- atomic_inc(&count_count_stop);
6806- while (atomic_read(&count_count_stop) != 2)
6807+ atomic_inc_unchecked(&count_count_stop);
6808+ while (atomic_read_unchecked(&count_count_stop) != 2)
6809 mb();
6810 }
6811 /* Arrange for an interrupt in a short while */
6812diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6813index f9c8746..78b64e3 100644
6814--- a/arch/mips/kernel/traps.c
6815+++ b/arch/mips/kernel/traps.c
6816@@ -690,7 +690,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6817 siginfo_t info;
6818
6819 prev_state = exception_enter();
6820- die_if_kernel("Integer overflow", regs);
6821+ if (unlikely(!user_mode(regs))) {
6822+
6823+#ifdef CONFIG_PAX_REFCOUNT
6824+ if (fixup_exception(regs)) {
6825+ pax_report_refcount_overflow(regs);
6826+ exception_exit(prev_state);
6827+ return;
6828+ }
6829+#endif
6830+
6831+ die("Integer overflow", regs);
6832+ }
6833
6834 info.si_code = FPE_INTOVF;
6835 info.si_signo = SIGFPE;
6836diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6837index becc42b..9e43d4b 100644
6838--- a/arch/mips/mm/fault.c
6839+++ b/arch/mips/mm/fault.c
6840@@ -28,6 +28,23 @@
6841 #include <asm/highmem.h> /* For VMALLOC_END */
6842 #include <linux/kdebug.h>
6843
6844+#ifdef CONFIG_PAX_PAGEEXEC
6845+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6846+{
6847+ unsigned long i;
6848+
6849+ printk(KERN_ERR "PAX: bytes at PC: ");
6850+ for (i = 0; i < 5; i++) {
6851+ unsigned int c;
6852+ if (get_user(c, (unsigned int *)pc+i))
6853+ printk(KERN_CONT "???????? ");
6854+ else
6855+ printk(KERN_CONT "%08x ", c);
6856+ }
6857+ printk("\n");
6858+}
6859+#endif
6860+
6861 /*
6862 * This routine handles page faults. It determines the address,
6863 * and the problem, and then passes it off to one of the appropriate
6864@@ -199,6 +216,14 @@ bad_area:
6865 bad_area_nosemaphore:
6866 /* User mode accesses just cause a SIGSEGV */
6867 if (user_mode(regs)) {
6868+
6869+#ifdef CONFIG_PAX_PAGEEXEC
6870+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6871+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6872+ do_group_exit(SIGKILL);
6873+ }
6874+#endif
6875+
6876 tsk->thread.cp0_badvaddr = address;
6877 tsk->thread.error_code = write;
6878 #if 0
6879diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6880index f1baadd..8537544 100644
6881--- a/arch/mips/mm/mmap.c
6882+++ b/arch/mips/mm/mmap.c
6883@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6884 struct vm_area_struct *vma;
6885 unsigned long addr = addr0;
6886 int do_color_align;
6887+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6888 struct vm_unmapped_area_info info;
6889
6890 if (unlikely(len > TASK_SIZE))
6891@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6892 do_color_align = 1;
6893
6894 /* requesting a specific address */
6895+
6896+#ifdef CONFIG_PAX_RANDMMAP
6897+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6898+#endif
6899+
6900 if (addr) {
6901 if (do_color_align)
6902 addr = COLOUR_ALIGN(addr, pgoff);
6903@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6904 addr = PAGE_ALIGN(addr);
6905
6906 vma = find_vma(mm, addr);
6907- if (TASK_SIZE - len >= addr &&
6908- (!vma || addr + len <= vma->vm_start))
6909+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6910 return addr;
6911 }
6912
6913 info.length = len;
6914 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6915 info.align_offset = pgoff << PAGE_SHIFT;
6916+ info.threadstack_offset = offset;
6917
6918 if (dir == DOWN) {
6919 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6920@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6921 {
6922 unsigned long random_factor = 0UL;
6923
6924+#ifdef CONFIG_PAX_RANDMMAP
6925+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6926+#endif
6927+
6928 if (current->flags & PF_RANDOMIZE) {
6929 random_factor = get_random_int();
6930 random_factor = random_factor << PAGE_SHIFT;
6931@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6932
6933 if (mmap_is_legacy()) {
6934 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6935+
6936+#ifdef CONFIG_PAX_RANDMMAP
6937+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6938+ mm->mmap_base += mm->delta_mmap;
6939+#endif
6940+
6941 mm->get_unmapped_area = arch_get_unmapped_area;
6942 } else {
6943 mm->mmap_base = mmap_base(random_factor);
6944+
6945+#ifdef CONFIG_PAX_RANDMMAP
6946+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6947+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6948+#endif
6949+
6950 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6951 }
6952 }
6953
6954-static inline unsigned long brk_rnd(void)
6955-{
6956- unsigned long rnd = get_random_int();
6957-
6958- rnd = rnd << PAGE_SHIFT;
6959- /* 8MB for 32bit, 256MB for 64bit */
6960- if (TASK_IS_32BIT_ADDR)
6961- rnd = rnd & 0x7ffffful;
6962- else
6963- rnd = rnd & 0xffffffful;
6964-
6965- return rnd;
6966-}
6967-
6968-unsigned long arch_randomize_brk(struct mm_struct *mm)
6969-{
6970- unsigned long base = mm->brk;
6971- unsigned long ret;
6972-
6973- ret = PAGE_ALIGN(base + brk_rnd());
6974-
6975- if (ret < mm->brk)
6976- return mm->brk;
6977-
6978- return ret;
6979-}
6980-
6981 int __virt_addr_valid(const volatile void *kaddr)
6982 {
6983 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
6984diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
6985index a2358b4..7cead4f 100644
6986--- a/arch/mips/sgi-ip27/ip27-nmi.c
6987+++ b/arch/mips/sgi-ip27/ip27-nmi.c
6988@@ -187,9 +187,9 @@ void
6989 cont_nmi_dump(void)
6990 {
6991 #ifndef REAL_NMI_SIGNAL
6992- static atomic_t nmied_cpus = ATOMIC_INIT(0);
6993+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
6994
6995- atomic_inc(&nmied_cpus);
6996+ atomic_inc_unchecked(&nmied_cpus);
6997 #endif
6998 /*
6999 * Only allow 1 cpu to proceed
7000@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7001 udelay(10000);
7002 }
7003 #else
7004- while (atomic_read(&nmied_cpus) != num_online_cpus());
7005+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7006 #endif
7007
7008 /*
7009diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7010index 967d144..db12197 100644
7011--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7012+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7013@@ -11,12 +11,14 @@
7014 #ifndef _ASM_PROC_CACHE_H
7015 #define _ASM_PROC_CACHE_H
7016
7017+#include <linux/const.h>
7018+
7019 /* L1 cache */
7020
7021 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7022 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7023-#define L1_CACHE_BYTES 16 /* bytes per entry */
7024 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7025+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7026 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7027
7028 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7029diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7030index bcb5df2..84fabd2 100644
7031--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7032+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7033@@ -16,13 +16,15 @@
7034 #ifndef _ASM_PROC_CACHE_H
7035 #define _ASM_PROC_CACHE_H
7036
7037+#include <linux/const.h>
7038+
7039 /*
7040 * L1 cache
7041 */
7042 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7043 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7044-#define L1_CACHE_BYTES 32 /* bytes per entry */
7045 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7046+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7047 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7048
7049 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7050diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7051index 4ce7a01..449202a 100644
7052--- a/arch/openrisc/include/asm/cache.h
7053+++ b/arch/openrisc/include/asm/cache.h
7054@@ -19,11 +19,13 @@
7055 #ifndef __ASM_OPENRISC_CACHE_H
7056 #define __ASM_OPENRISC_CACHE_H
7057
7058+#include <linux/const.h>
7059+
7060 /* FIXME: How can we replace these with values from the CPU...
7061 * they shouldn't be hard-coded!
7062 */
7063
7064-#define L1_CACHE_BYTES 16
7065 #define L1_CACHE_SHIFT 4
7066+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7067
7068 #endif /* __ASM_OPENRISC_CACHE_H */
7069diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7070index 472886c..00e7df9 100644
7071--- a/arch/parisc/include/asm/atomic.h
7072+++ b/arch/parisc/include/asm/atomic.h
7073@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7074 return dec;
7075 }
7076
7077+#define atomic64_read_unchecked(v) atomic64_read(v)
7078+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7079+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7080+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7081+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7082+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7083+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7084+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7085+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7086+
7087 #endif /* !CONFIG_64BIT */
7088
7089
7090diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7091index 47f11c7..3420df2 100644
7092--- a/arch/parisc/include/asm/cache.h
7093+++ b/arch/parisc/include/asm/cache.h
7094@@ -5,6 +5,7 @@
7095 #ifndef __ARCH_PARISC_CACHE_H
7096 #define __ARCH_PARISC_CACHE_H
7097
7098+#include <linux/const.h>
7099
7100 /*
7101 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7102@@ -15,13 +16,13 @@
7103 * just ruin performance.
7104 */
7105 #ifdef CONFIG_PA20
7106-#define L1_CACHE_BYTES 64
7107 #define L1_CACHE_SHIFT 6
7108 #else
7109-#define L1_CACHE_BYTES 32
7110 #define L1_CACHE_SHIFT 5
7111 #endif
7112
7113+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7114+
7115 #ifndef __ASSEMBLY__
7116
7117 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7118diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7119index ad2b503..bdf1651 100644
7120--- a/arch/parisc/include/asm/elf.h
7121+++ b/arch/parisc/include/asm/elf.h
7122@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7123
7124 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7125
7126+#ifdef CONFIG_PAX_ASLR
7127+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7128+
7129+#define PAX_DELTA_MMAP_LEN 16
7130+#define PAX_DELTA_STACK_LEN 16
7131+#endif
7132+
7133 /* This yields a mask that user programs can use to figure out what
7134 instruction set this CPU supports. This could be done in user space,
7135 but it's not easy, and we've already done it here. */
7136diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7137index f213f5b..0af3e8e 100644
7138--- a/arch/parisc/include/asm/pgalloc.h
7139+++ b/arch/parisc/include/asm/pgalloc.h
7140@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7141 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7142 }
7143
7144+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7145+{
7146+ pgd_populate(mm, pgd, pmd);
7147+}
7148+
7149 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7150 {
7151 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7152@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7153 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7154 #define pmd_free(mm, x) do { } while (0)
7155 #define pgd_populate(mm, pmd, pte) BUG()
7156+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7157
7158 #endif
7159
7160diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7161index 34899b5..02dd060 100644
7162--- a/arch/parisc/include/asm/pgtable.h
7163+++ b/arch/parisc/include/asm/pgtable.h
7164@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7165 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7166 #define PAGE_COPY PAGE_EXECREAD
7167 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7168+
7169+#ifdef CONFIG_PAX_PAGEEXEC
7170+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7171+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7172+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7173+#else
7174+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7175+# define PAGE_COPY_NOEXEC PAGE_COPY
7176+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7177+#endif
7178+
7179 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7180 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7181 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7182diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7183index 4006964..fcb3cc2 100644
7184--- a/arch/parisc/include/asm/uaccess.h
7185+++ b/arch/parisc/include/asm/uaccess.h
7186@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7187 const void __user *from,
7188 unsigned long n)
7189 {
7190- int sz = __compiletime_object_size(to);
7191+ size_t sz = __compiletime_object_size(to);
7192 int ret = -EFAULT;
7193
7194- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7195+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7196 ret = __copy_from_user(to, from, n);
7197 else
7198 copy_from_user_overflow();
7199diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7200index 50dfafc..b9fc230 100644
7201--- a/arch/parisc/kernel/module.c
7202+++ b/arch/parisc/kernel/module.c
7203@@ -98,16 +98,38 @@
7204
7205 /* three functions to determine where in the module core
7206 * or init pieces the location is */
7207+static inline int in_init_rx(struct module *me, void *loc)
7208+{
7209+ return (loc >= me->module_init_rx &&
7210+ loc < (me->module_init_rx + me->init_size_rx));
7211+}
7212+
7213+static inline int in_init_rw(struct module *me, void *loc)
7214+{
7215+ return (loc >= me->module_init_rw &&
7216+ loc < (me->module_init_rw + me->init_size_rw));
7217+}
7218+
7219 static inline int in_init(struct module *me, void *loc)
7220 {
7221- return (loc >= me->module_init &&
7222- loc <= (me->module_init + me->init_size));
7223+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7224+}
7225+
7226+static inline int in_core_rx(struct module *me, void *loc)
7227+{
7228+ return (loc >= me->module_core_rx &&
7229+ loc < (me->module_core_rx + me->core_size_rx));
7230+}
7231+
7232+static inline int in_core_rw(struct module *me, void *loc)
7233+{
7234+ return (loc >= me->module_core_rw &&
7235+ loc < (me->module_core_rw + me->core_size_rw));
7236 }
7237
7238 static inline int in_core(struct module *me, void *loc)
7239 {
7240- return (loc >= me->module_core &&
7241- loc <= (me->module_core + me->core_size));
7242+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7243 }
7244
7245 static inline int in_local(struct module *me, void *loc)
7246@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7247 }
7248
7249 /* align things a bit */
7250- me->core_size = ALIGN(me->core_size, 16);
7251- me->arch.got_offset = me->core_size;
7252- me->core_size += gots * sizeof(struct got_entry);
7253+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7254+ me->arch.got_offset = me->core_size_rw;
7255+ me->core_size_rw += gots * sizeof(struct got_entry);
7256
7257- me->core_size = ALIGN(me->core_size, 16);
7258- me->arch.fdesc_offset = me->core_size;
7259- me->core_size += fdescs * sizeof(Elf_Fdesc);
7260+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7261+ me->arch.fdesc_offset = me->core_size_rw;
7262+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7263
7264 me->arch.got_max = gots;
7265 me->arch.fdesc_max = fdescs;
7266@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7267
7268 BUG_ON(value == 0);
7269
7270- got = me->module_core + me->arch.got_offset;
7271+ got = me->module_core_rw + me->arch.got_offset;
7272 for (i = 0; got[i].addr; i++)
7273 if (got[i].addr == value)
7274 goto out;
7275@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7276 #ifdef CONFIG_64BIT
7277 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7278 {
7279- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7280+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7281
7282 if (!value) {
7283 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7284@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7285
7286 /* Create new one */
7287 fdesc->addr = value;
7288- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7289+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7290 return (Elf_Addr)fdesc;
7291 }
7292 #endif /* CONFIG_64BIT */
7293@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7294
7295 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7296 end = table + sechdrs[me->arch.unwind_section].sh_size;
7297- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7298+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7299
7300 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7301 me->arch.unwind_section, table, end, gp);
7302diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7303index 0d3a9d4..20a99b0 100644
7304--- a/arch/parisc/kernel/sys_parisc.c
7305+++ b/arch/parisc/kernel/sys_parisc.c
7306@@ -33,9 +33,11 @@
7307 #include <linux/utsname.h>
7308 #include <linux/personality.h>
7309
7310-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7311+static unsigned long get_unshared_area(unsigned long addr, unsigned long len,
7312+ unsigned long flags)
7313 {
7314 struct vm_unmapped_area_info info;
7315+ unsigned long offset = gr_rand_threadstack_offset(current->mm, NULL, flags);
7316
7317 info.flags = 0;
7318 info.length = len;
7319@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7320 info.high_limit = TASK_SIZE;
7321 info.align_mask = 0;
7322 info.align_offset = 0;
7323+ info.threadstack_offset = offset;
7324 return vm_unmapped_area(&info);
7325 }
7326
7327@@ -69,9 +72,10 @@ static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
7328 }
7329
7330 static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7331- unsigned long len, unsigned long pgoff)
7332+ unsigned long len, unsigned long pgoff, unsigned long flags)
7333 {
7334 struct vm_unmapped_area_info info;
7335+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7336
7337 info.flags = 0;
7338 info.length = len;
7339@@ -79,6 +83,7 @@ static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7340 info.high_limit = TASK_SIZE;
7341 info.align_mask = PAGE_MASK & (SHMLBA - 1);
7342 info.align_offset = shared_align_offset(filp, pgoff);
7343+ info.threadstack_offset = offset;
7344 return vm_unmapped_area(&info);
7345 }
7346
7347@@ -93,13 +98,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7348 return -EINVAL;
7349 return addr;
7350 }
7351- if (!addr)
7352+ if (!addr) {
7353 addr = TASK_UNMAPPED_BASE;
7354
7355+#ifdef CONFIG_PAX_RANDMMAP
7356+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
7357+ addr += current->mm->delta_mmap;
7358+#endif
7359+
7360+ }
7361+
7362 if (filp || (flags & MAP_SHARED))
7363- addr = get_shared_area(filp, addr, len, pgoff);
7364+ addr = get_shared_area(filp, addr, len, pgoff, flags);
7365 else
7366- addr = get_unshared_area(addr, len);
7367+ addr = get_unshared_area(addr, len, flags);
7368
7369 return addr;
7370 }
7371diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7372index 1cd1d0c..44ec918 100644
7373--- a/arch/parisc/kernel/traps.c
7374+++ b/arch/parisc/kernel/traps.c
7375@@ -722,9 +722,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7376
7377 down_read(&current->mm->mmap_sem);
7378 vma = find_vma(current->mm,regs->iaoq[0]);
7379- if (vma && (regs->iaoq[0] >= vma->vm_start)
7380- && (vma->vm_flags & VM_EXEC)) {
7381-
7382+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7383 fault_address = regs->iaoq[0];
7384 fault_space = regs->iasq[0];
7385
7386diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7387index 9d08c71..e2b4d20 100644
7388--- a/arch/parisc/mm/fault.c
7389+++ b/arch/parisc/mm/fault.c
7390@@ -15,6 +15,7 @@
7391 #include <linux/sched.h>
7392 #include <linux/interrupt.h>
7393 #include <linux/module.h>
7394+#include <linux/unistd.h>
7395
7396 #include <asm/uaccess.h>
7397 #include <asm/traps.h>
7398@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
7399 static unsigned long
7400 parisc_acctyp(unsigned long code, unsigned int inst)
7401 {
7402- if (code == 6 || code == 16)
7403+ if (code == 6 || code == 7 || code == 16)
7404 return VM_EXEC;
7405
7406 switch (inst & 0xf0000000) {
7407@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7408 }
7409 #endif
7410
7411+#ifdef CONFIG_PAX_PAGEEXEC
7412+/*
7413+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7414+ *
7415+ * returns 1 when task should be killed
7416+ * 2 when rt_sigreturn trampoline was detected
7417+ * 3 when unpatched PLT trampoline was detected
7418+ */
7419+static int pax_handle_fetch_fault(struct pt_regs *regs)
7420+{
7421+
7422+#ifdef CONFIG_PAX_EMUPLT
7423+ int err;
7424+
7425+ do { /* PaX: unpatched PLT emulation */
7426+ unsigned int bl, depwi;
7427+
7428+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7429+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7430+
7431+ if (err)
7432+ break;
7433+
7434+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7435+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7436+
7437+ err = get_user(ldw, (unsigned int *)addr);
7438+ err |= get_user(bv, (unsigned int *)(addr+4));
7439+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7440+
7441+ if (err)
7442+ break;
7443+
7444+ if (ldw == 0x0E801096U &&
7445+ bv == 0xEAC0C000U &&
7446+ ldw2 == 0x0E881095U)
7447+ {
7448+ unsigned int resolver, map;
7449+
7450+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7451+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7452+ if (err)
7453+ break;
7454+
7455+ regs->gr[20] = instruction_pointer(regs)+8;
7456+ regs->gr[21] = map;
7457+ regs->gr[22] = resolver;
7458+ regs->iaoq[0] = resolver | 3UL;
7459+ regs->iaoq[1] = regs->iaoq[0] + 4;
7460+ return 3;
7461+ }
7462+ }
7463+ } while (0);
7464+#endif
7465+
7466+#ifdef CONFIG_PAX_EMUTRAMP
7467+
7468+#ifndef CONFIG_PAX_EMUSIGRT
7469+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7470+ return 1;
7471+#endif
7472+
7473+ do { /* PaX: rt_sigreturn emulation */
7474+ unsigned int ldi1, ldi2, bel, nop;
7475+
7476+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7477+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7478+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7479+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7480+
7481+ if (err)
7482+ break;
7483+
7484+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7485+ ldi2 == 0x3414015AU &&
7486+ bel == 0xE4008200U &&
7487+ nop == 0x08000240U)
7488+ {
7489+ regs->gr[25] = (ldi1 & 2) >> 1;
7490+ regs->gr[20] = __NR_rt_sigreturn;
7491+ regs->gr[31] = regs->iaoq[1] + 16;
7492+ regs->sr[0] = regs->iasq[1];
7493+ regs->iaoq[0] = 0x100UL;
7494+ regs->iaoq[1] = regs->iaoq[0] + 4;
7495+ regs->iasq[0] = regs->sr[2];
7496+ regs->iasq[1] = regs->sr[2];
7497+ return 2;
7498+ }
7499+ } while (0);
7500+#endif
7501+
7502+ return 1;
7503+}
7504+
7505+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7506+{
7507+ unsigned long i;
7508+
7509+ printk(KERN_ERR "PAX: bytes at PC: ");
7510+ for (i = 0; i < 5; i++) {
7511+ unsigned int c;
7512+ if (get_user(c, (unsigned int *)pc+i))
7513+ printk(KERN_CONT "???????? ");
7514+ else
7515+ printk(KERN_CONT "%08x ", c);
7516+ }
7517+ printk("\n");
7518+}
7519+#endif
7520+
7521 int fixup_exception(struct pt_regs *regs)
7522 {
7523 const struct exception_table_entry *fix;
7524@@ -210,8 +321,33 @@ retry:
7525
7526 good_area:
7527
7528- if ((vma->vm_flags & acc_type) != acc_type)
7529+ if ((vma->vm_flags & acc_type) != acc_type) {
7530+
7531+#ifdef CONFIG_PAX_PAGEEXEC
7532+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7533+ (address & ~3UL) == instruction_pointer(regs))
7534+ {
7535+ up_read(&mm->mmap_sem);
7536+ switch (pax_handle_fetch_fault(regs)) {
7537+
7538+#ifdef CONFIG_PAX_EMUPLT
7539+ case 3:
7540+ return;
7541+#endif
7542+
7543+#ifdef CONFIG_PAX_EMUTRAMP
7544+ case 2:
7545+ return;
7546+#endif
7547+
7548+ }
7549+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7550+ do_group_exit(SIGKILL);
7551+ }
7552+#endif
7553+
7554 goto bad_area;
7555+ }
7556
7557 /*
7558 * If for any reason at all we couldn't handle the fault, make
7559diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7560index b44b52c..4cd253c 100644
7561--- a/arch/powerpc/Kconfig
7562+++ b/arch/powerpc/Kconfig
7563@@ -382,6 +382,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
7564 config KEXEC
7565 bool "kexec system call"
7566 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7567+ depends on !GRKERNSEC_KMEM
7568 help
7569 kexec is a system call that implements the ability to shutdown your
7570 current kernel, and to start another kernel. It is like a reboot
7571diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7572index e3b1d41..8e81edf 100644
7573--- a/arch/powerpc/include/asm/atomic.h
7574+++ b/arch/powerpc/include/asm/atomic.h
7575@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7576 return t1;
7577 }
7578
7579+#define atomic64_read_unchecked(v) atomic64_read(v)
7580+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7581+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7582+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7583+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7584+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7585+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7586+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7587+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7588+
7589 #endif /* __powerpc64__ */
7590
7591 #endif /* __KERNEL__ */
7592diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7593index 9e495c9..b6878e5 100644
7594--- a/arch/powerpc/include/asm/cache.h
7595+++ b/arch/powerpc/include/asm/cache.h
7596@@ -3,6 +3,7 @@
7597
7598 #ifdef __KERNEL__
7599
7600+#include <linux/const.h>
7601
7602 /* bytes per L1 cache line */
7603 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7604@@ -22,7 +23,7 @@
7605 #define L1_CACHE_SHIFT 7
7606 #endif
7607
7608-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7609+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7610
7611 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7612
7613diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
7614index 935b5e7..7001d2d 100644
7615--- a/arch/powerpc/include/asm/elf.h
7616+++ b/arch/powerpc/include/asm/elf.h
7617@@ -28,8 +28,19 @@
7618 the loader. We need to make sure that it is out of the way of the program
7619 that it will "exec", and that there is sufficient room for the brk. */
7620
7621-extern unsigned long randomize_et_dyn(unsigned long base);
7622-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
7623+#define ELF_ET_DYN_BASE (0x20000000)
7624+
7625+#ifdef CONFIG_PAX_ASLR
7626+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
7627+
7628+#ifdef __powerpc64__
7629+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
7630+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
7631+#else
7632+#define PAX_DELTA_MMAP_LEN 15
7633+#define PAX_DELTA_STACK_LEN 15
7634+#endif
7635+#endif
7636
7637 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
7638
7639@@ -127,10 +138,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7640 (0x7ff >> (PAGE_SHIFT - 12)) : \
7641 (0x3ffff >> (PAGE_SHIFT - 12)))
7642
7643-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7644-#define arch_randomize_brk arch_randomize_brk
7645-
7646-
7647 #ifdef CONFIG_SPU_BASE
7648 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
7649 #define NT_SPU 1
7650diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
7651index 8196e9c..d83a9f3 100644
7652--- a/arch/powerpc/include/asm/exec.h
7653+++ b/arch/powerpc/include/asm/exec.h
7654@@ -4,6 +4,6 @@
7655 #ifndef _ASM_POWERPC_EXEC_H
7656 #define _ASM_POWERPC_EXEC_H
7657
7658-extern unsigned long arch_align_stack(unsigned long sp);
7659+#define arch_align_stack(x) ((x) & ~0xfUL)
7660
7661 #endif /* _ASM_POWERPC_EXEC_H */
7662diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
7663index 5acabbd..7ea14fa 100644
7664--- a/arch/powerpc/include/asm/kmap_types.h
7665+++ b/arch/powerpc/include/asm/kmap_types.h
7666@@ -10,7 +10,7 @@
7667 * 2 of the License, or (at your option) any later version.
7668 */
7669
7670-#define KM_TYPE_NR 16
7671+#define KM_TYPE_NR 17
7672
7673 #endif /* __KERNEL__ */
7674 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
7675diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
7676index 8565c25..2865190 100644
7677--- a/arch/powerpc/include/asm/mman.h
7678+++ b/arch/powerpc/include/asm/mman.h
7679@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
7680 }
7681 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
7682
7683-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
7684+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
7685 {
7686 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
7687 }
7688diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
7689index 32e4e21..62afb12 100644
7690--- a/arch/powerpc/include/asm/page.h
7691+++ b/arch/powerpc/include/asm/page.h
7692@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
7693 * and needs to be executable. This means the whole heap ends
7694 * up being executable.
7695 */
7696-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7697- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7698+#define VM_DATA_DEFAULT_FLAGS32 \
7699+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7700+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7701
7702 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7703 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7704@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
7705 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
7706 #endif
7707
7708+#define ktla_ktva(addr) (addr)
7709+#define ktva_ktla(addr) (addr)
7710+
7711 #ifndef CONFIG_PPC_BOOK3S_64
7712 /*
7713 * Use the top bit of the higher-level page table entries to indicate whether
7714diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
7715index 88693ce..ac6f9ab 100644
7716--- a/arch/powerpc/include/asm/page_64.h
7717+++ b/arch/powerpc/include/asm/page_64.h
7718@@ -153,15 +153,18 @@ do { \
7719 * stack by default, so in the absence of a PT_GNU_STACK program header
7720 * we turn execute permission off.
7721 */
7722-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7723- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7724+#define VM_STACK_DEFAULT_FLAGS32 \
7725+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7726+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7727
7728 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7729 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7730
7731+#ifndef CONFIG_PAX_PAGEEXEC
7732 #define VM_STACK_DEFAULT_FLAGS \
7733 (is_32bit_task() ? \
7734 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
7735+#endif
7736
7737 #include <asm-generic/getorder.h>
7738
7739diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
7740index 4b0be20..c15a27d 100644
7741--- a/arch/powerpc/include/asm/pgalloc-64.h
7742+++ b/arch/powerpc/include/asm/pgalloc-64.h
7743@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7744 #ifndef CONFIG_PPC_64K_PAGES
7745
7746 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
7747+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
7748
7749 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
7750 {
7751@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7752 pud_set(pud, (unsigned long)pmd);
7753 }
7754
7755+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7756+{
7757+ pud_populate(mm, pud, pmd);
7758+}
7759+
7760 #define pmd_populate(mm, pmd, pte_page) \
7761 pmd_populate_kernel(mm, pmd, page_address(pte_page))
7762 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
7763@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
7764 #endif
7765
7766 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
7767+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7768
7769 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7770 pte_t *pte)
7771diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
7772index 7d6eacf..14c0240 100644
7773--- a/arch/powerpc/include/asm/pgtable.h
7774+++ b/arch/powerpc/include/asm/pgtable.h
7775@@ -2,6 +2,7 @@
7776 #define _ASM_POWERPC_PGTABLE_H
7777 #ifdef __KERNEL__
7778
7779+#include <linux/const.h>
7780 #ifndef __ASSEMBLY__
7781 #include <asm/processor.h> /* For TASK_SIZE */
7782 #include <asm/mmu.h>
7783diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
7784index 4aad413..85d86bf 100644
7785--- a/arch/powerpc/include/asm/pte-hash32.h
7786+++ b/arch/powerpc/include/asm/pte-hash32.h
7787@@ -21,6 +21,7 @@
7788 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
7789 #define _PAGE_USER 0x004 /* usermode access allowed */
7790 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
7791+#define _PAGE_EXEC _PAGE_GUARDED
7792 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
7793 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
7794 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
7795diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
7796index fa8388e..f985549 100644
7797--- a/arch/powerpc/include/asm/reg.h
7798+++ b/arch/powerpc/include/asm/reg.h
7799@@ -239,6 +239,7 @@
7800 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
7801 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
7802 #define DSISR_NOHPTE 0x40000000 /* no translation found */
7803+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
7804 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
7805 #define DSISR_ISSTORE 0x02000000 /* access was a store */
7806 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
7807diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
7808index 084e080..9415a3d 100644
7809--- a/arch/powerpc/include/asm/smp.h
7810+++ b/arch/powerpc/include/asm/smp.h
7811@@ -51,7 +51,7 @@ struct smp_ops_t {
7812 int (*cpu_disable)(void);
7813 void (*cpu_die)(unsigned int nr);
7814 int (*cpu_bootable)(unsigned int nr);
7815-};
7816+} __no_const;
7817
7818 extern void smp_send_debugger_break(void);
7819 extern void start_secondary_resume(void);
7820diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
7821index 9854c56..7517190 100644
7822--- a/arch/powerpc/include/asm/thread_info.h
7823+++ b/arch/powerpc/include/asm/thread_info.h
7824@@ -91,7 +91,6 @@ static inline struct thread_info *current_thread_info(void)
7825 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
7826 TIF_NEED_RESCHED */
7827 #define TIF_32BIT 4 /* 32 bit binary */
7828-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
7829 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
7830 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
7831 #define TIF_SINGLESTEP 8 /* singlestepping active */
7832@@ -108,6 +107,9 @@ static inline struct thread_info *current_thread_info(void)
7833 #if defined(CONFIG_PPC64)
7834 #define TIF_ELF2ABI 18 /* function descriptors must die! */
7835 #endif
7836+#define TIF_PERFMON_WORK 19 /* work for pfm_handle_work() */
7837+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
7838+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
7839
7840 /* as above, but as bit values */
7841 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
7842@@ -127,9 +129,10 @@ static inline struct thread_info *current_thread_info(void)
7843 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7844 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
7845 #define _TIF_NOHZ (1<<TIF_NOHZ)
7846+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7847 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
7848 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
7849- _TIF_NOHZ)
7850+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
7851
7852 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
7853 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
7854diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
7855index 9485b43..4718d50 100644
7856--- a/arch/powerpc/include/asm/uaccess.h
7857+++ b/arch/powerpc/include/asm/uaccess.h
7858@@ -318,52 +318,6 @@ do { \
7859 extern unsigned long __copy_tofrom_user(void __user *to,
7860 const void __user *from, unsigned long size);
7861
7862-#ifndef __powerpc64__
7863-
7864-static inline unsigned long copy_from_user(void *to,
7865- const void __user *from, unsigned long n)
7866-{
7867- unsigned long over;
7868-
7869- if (access_ok(VERIFY_READ, from, n))
7870- return __copy_tofrom_user((__force void __user *)to, from, n);
7871- if ((unsigned long)from < TASK_SIZE) {
7872- over = (unsigned long)from + n - TASK_SIZE;
7873- return __copy_tofrom_user((__force void __user *)to, from,
7874- n - over) + over;
7875- }
7876- return n;
7877-}
7878-
7879-static inline unsigned long copy_to_user(void __user *to,
7880- const void *from, unsigned long n)
7881-{
7882- unsigned long over;
7883-
7884- if (access_ok(VERIFY_WRITE, to, n))
7885- return __copy_tofrom_user(to, (__force void __user *)from, n);
7886- if ((unsigned long)to < TASK_SIZE) {
7887- over = (unsigned long)to + n - TASK_SIZE;
7888- return __copy_tofrom_user(to, (__force void __user *)from,
7889- n - over) + over;
7890- }
7891- return n;
7892-}
7893-
7894-#else /* __powerpc64__ */
7895-
7896-#define __copy_in_user(to, from, size) \
7897- __copy_tofrom_user((to), (from), (size))
7898-
7899-extern unsigned long copy_from_user(void *to, const void __user *from,
7900- unsigned long n);
7901-extern unsigned long copy_to_user(void __user *to, const void *from,
7902- unsigned long n);
7903-extern unsigned long copy_in_user(void __user *to, const void __user *from,
7904- unsigned long n);
7905-
7906-#endif /* __powerpc64__ */
7907-
7908 static inline unsigned long __copy_from_user_inatomic(void *to,
7909 const void __user *from, unsigned long n)
7910 {
7911@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
7912 if (ret == 0)
7913 return 0;
7914 }
7915+
7916+ if (!__builtin_constant_p(n))
7917+ check_object_size(to, n, false);
7918+
7919 return __copy_tofrom_user((__force void __user *)to, from, n);
7920 }
7921
7922@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
7923 if (ret == 0)
7924 return 0;
7925 }
7926+
7927+ if (!__builtin_constant_p(n))
7928+ check_object_size(from, n, true);
7929+
7930 return __copy_tofrom_user(to, (__force const void __user *)from, n);
7931 }
7932
7933@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
7934 return __copy_to_user_inatomic(to, from, size);
7935 }
7936
7937+#ifndef __powerpc64__
7938+
7939+static inline unsigned long __must_check copy_from_user(void *to,
7940+ const void __user *from, unsigned long n)
7941+{
7942+ unsigned long over;
7943+
7944+ if ((long)n < 0)
7945+ return n;
7946+
7947+ if (access_ok(VERIFY_READ, from, n)) {
7948+ if (!__builtin_constant_p(n))
7949+ check_object_size(to, n, false);
7950+ return __copy_tofrom_user((__force void __user *)to, from, n);
7951+ }
7952+ if ((unsigned long)from < TASK_SIZE) {
7953+ over = (unsigned long)from + n - TASK_SIZE;
7954+ if (!__builtin_constant_p(n - over))
7955+ check_object_size(to, n - over, false);
7956+ return __copy_tofrom_user((__force void __user *)to, from,
7957+ n - over) + over;
7958+ }
7959+ return n;
7960+}
7961+
7962+static inline unsigned long __must_check copy_to_user(void __user *to,
7963+ const void *from, unsigned long n)
7964+{
7965+ unsigned long over;
7966+
7967+ if ((long)n < 0)
7968+ return n;
7969+
7970+ if (access_ok(VERIFY_WRITE, to, n)) {
7971+ if (!__builtin_constant_p(n))
7972+ check_object_size(from, n, true);
7973+ return __copy_tofrom_user(to, (__force void __user *)from, n);
7974+ }
7975+ if ((unsigned long)to < TASK_SIZE) {
7976+ over = (unsigned long)to + n - TASK_SIZE;
7977+ if (!__builtin_constant_p(n))
7978+ check_object_size(from, n - over, true);
7979+ return __copy_tofrom_user(to, (__force void __user *)from,
7980+ n - over) + over;
7981+ }
7982+ return n;
7983+}
7984+
7985+#else /* __powerpc64__ */
7986+
7987+#define __copy_in_user(to, from, size) \
7988+ __copy_tofrom_user((to), (from), (size))
7989+
7990+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
7991+{
7992+ if ((long)n < 0 || n > INT_MAX)
7993+ return n;
7994+
7995+ if (!__builtin_constant_p(n))
7996+ check_object_size(to, n, false);
7997+
7998+ if (likely(access_ok(VERIFY_READ, from, n)))
7999+ n = __copy_from_user(to, from, n);
8000+ else
8001+ memset(to, 0, n);
8002+ return n;
8003+}
8004+
8005+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8006+{
8007+ if ((long)n < 0 || n > INT_MAX)
8008+ return n;
8009+
8010+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8011+ if (!__builtin_constant_p(n))
8012+ check_object_size(from, n, true);
8013+ n = __copy_to_user(to, from, n);
8014+ }
8015+ return n;
8016+}
8017+
8018+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8019+ unsigned long n);
8020+
8021+#endif /* __powerpc64__ */
8022+
8023 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8024
8025 static inline unsigned long clear_user(void __user *addr, unsigned long size)
8026diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8027index e775156..af2d1c0 100644
8028--- a/arch/powerpc/kernel/exceptions-64e.S
8029+++ b/arch/powerpc/kernel/exceptions-64e.S
8030@@ -759,6 +759,7 @@ storage_fault_common:
8031 std r14,_DAR(r1)
8032 std r15,_DSISR(r1)
8033 addi r3,r1,STACK_FRAME_OVERHEAD
8034+ bl .save_nvgprs
8035 mr r4,r14
8036 mr r5,r15
8037 ld r14,PACA_EXGEN+EX_R14(r13)
8038@@ -767,8 +768,7 @@ storage_fault_common:
8039 cmpdi r3,0
8040 bne- 1f
8041 b .ret_from_except_lite
8042-1: bl .save_nvgprs
8043- mr r5,r3
8044+1: mr r5,r3
8045 addi r3,r1,STACK_FRAME_OVERHEAD
8046 ld r4,_DAR(r1)
8047 bl .bad_page_fault
8048diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8049index 9f905e4..1d6b3d2 100644
8050--- a/arch/powerpc/kernel/exceptions-64s.S
8051+++ b/arch/powerpc/kernel/exceptions-64s.S
8052@@ -1390,10 +1390,10 @@ handle_page_fault:
8053 11: ld r4,_DAR(r1)
8054 ld r5,_DSISR(r1)
8055 addi r3,r1,STACK_FRAME_OVERHEAD
8056+ bl .save_nvgprs
8057 bl .do_page_fault
8058 cmpdi r3,0
8059 beq+ 12f
8060- bl .save_nvgprs
8061 mr r5,r3
8062 addi r3,r1,STACK_FRAME_OVERHEAD
8063 lwz r4,_DAR(r1)
8064diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8065index 6cff040..74ac5d1 100644
8066--- a/arch/powerpc/kernel/module_32.c
8067+++ b/arch/powerpc/kernel/module_32.c
8068@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8069 me->arch.core_plt_section = i;
8070 }
8071 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8072- printk("Module doesn't contain .plt or .init.plt sections.\n");
8073+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8074 return -ENOEXEC;
8075 }
8076
8077@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
8078
8079 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8080 /* Init, or core PLT? */
8081- if (location >= mod->module_core
8082- && location < mod->module_core + mod->core_size)
8083+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8084+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8085 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8086- else
8087+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8088+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8089 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8090+ else {
8091+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8092+ return ~0UL;
8093+ }
8094
8095 /* Find this entry, or if that fails, the next avail. entry */
8096 while (entry->jump[0]) {
8097@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8098 }
8099 #ifdef CONFIG_DYNAMIC_FTRACE
8100 module->arch.tramp =
8101- do_plt_call(module->module_core,
8102+ do_plt_call(module->module_core_rx,
8103 (unsigned long)ftrace_caller,
8104 sechdrs, module);
8105 #endif
8106diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8107index 4a96556..dd95f6c 100644
8108--- a/arch/powerpc/kernel/process.c
8109+++ b/arch/powerpc/kernel/process.c
8110@@ -888,8 +888,8 @@ void show_regs(struct pt_regs * regs)
8111 * Lookup NIP late so we have the best change of getting the
8112 * above info out without failing
8113 */
8114- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8115- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8116+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8117+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8118 #endif
8119 show_stack(current, (unsigned long *) regs->gpr[1]);
8120 if (!user_mode(regs))
8121@@ -1376,10 +1376,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8122 newsp = stack[0];
8123 ip = stack[STACK_FRAME_LR_SAVE];
8124 if (!firstframe || ip != lr) {
8125- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8126+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8127 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8128 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
8129- printk(" (%pS)",
8130+ printk(" (%pA)",
8131 (void *)current->ret_stack[curr_frame].ret);
8132 curr_frame--;
8133 }
8134@@ -1399,7 +1399,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8135 struct pt_regs *regs = (struct pt_regs *)
8136 (sp + STACK_FRAME_OVERHEAD);
8137 lr = regs->link;
8138- printk("--- Exception: %lx at %pS\n LR = %pS\n",
8139+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
8140 regs->trap, (void *)regs->nip, (void *)lr);
8141 firstframe = 1;
8142 }
8143@@ -1435,58 +1435,3 @@ void notrace __ppc64_runlatch_off(void)
8144 mtspr(SPRN_CTRLT, ctrl);
8145 }
8146 #endif /* CONFIG_PPC64 */
8147-
8148-unsigned long arch_align_stack(unsigned long sp)
8149-{
8150- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8151- sp -= get_random_int() & ~PAGE_MASK;
8152- return sp & ~0xf;
8153-}
8154-
8155-static inline unsigned long brk_rnd(void)
8156-{
8157- unsigned long rnd = 0;
8158-
8159- /* 8MB for 32bit, 1GB for 64bit */
8160- if (is_32bit_task())
8161- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8162- else
8163- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8164-
8165- return rnd << PAGE_SHIFT;
8166-}
8167-
8168-unsigned long arch_randomize_brk(struct mm_struct *mm)
8169-{
8170- unsigned long base = mm->brk;
8171- unsigned long ret;
8172-
8173-#ifdef CONFIG_PPC_STD_MMU_64
8174- /*
8175- * If we are using 1TB segments and we are allowed to randomise
8176- * the heap, we can put it above 1TB so it is backed by a 1TB
8177- * segment. Otherwise the heap will be in the bottom 1TB
8178- * which always uses 256MB segments and this may result in a
8179- * performance penalty.
8180- */
8181- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8182- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8183-#endif
8184-
8185- ret = PAGE_ALIGN(base + brk_rnd());
8186-
8187- if (ret < mm->brk)
8188- return mm->brk;
8189-
8190- return ret;
8191-}
8192-
8193-unsigned long randomize_et_dyn(unsigned long base)
8194-{
8195- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8196-
8197- if (ret < base)
8198- return base;
8199-
8200- return ret;
8201-}
8202diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
8203index 2e3d2bf..35df241 100644
8204--- a/arch/powerpc/kernel/ptrace.c
8205+++ b/arch/powerpc/kernel/ptrace.c
8206@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
8207 return ret;
8208 }
8209
8210+#ifdef CONFIG_GRKERNSEC_SETXID
8211+extern void gr_delayed_cred_worker(void);
8212+#endif
8213+
8214 /*
8215 * We must return the syscall number to actually look up in the table.
8216 * This can be -1L to skip running any syscall at all.
8217@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8218
8219 secure_computing_strict(regs->gpr[0]);
8220
8221+#ifdef CONFIG_GRKERNSEC_SETXID
8222+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8223+ gr_delayed_cred_worker();
8224+#endif
8225+
8226 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
8227 tracehook_report_syscall_entry(regs))
8228 /*
8229@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8230 {
8231 int step;
8232
8233+#ifdef CONFIG_GRKERNSEC_SETXID
8234+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8235+ gr_delayed_cred_worker();
8236+#endif
8237+
8238 audit_syscall_exit(regs);
8239
8240 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8241diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
8242index 68027bf..b26fd31 100644
8243--- a/arch/powerpc/kernel/signal_32.c
8244+++ b/arch/powerpc/kernel/signal_32.c
8245@@ -1004,7 +1004,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
8246 /* Save user registers on the stack */
8247 frame = &rt_sf->uc.uc_mcontext;
8248 addr = frame;
8249- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
8250+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8251 sigret = 0;
8252 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
8253 } else {
8254diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8255index 4299104..29e2c51 100644
8256--- a/arch/powerpc/kernel/signal_64.c
8257+++ b/arch/powerpc/kernel/signal_64.c
8258@@ -758,7 +758,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
8259 #endif
8260
8261 /* Set up to return from userspace. */
8262- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
8263+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8264 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
8265 } else {
8266 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8267diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8268index 907a472..4ba206f 100644
8269--- a/arch/powerpc/kernel/traps.c
8270+++ b/arch/powerpc/kernel/traps.c
8271@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8272 return flags;
8273 }
8274
8275+extern void gr_handle_kernel_exploit(void);
8276+
8277 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8278 int signr)
8279 {
8280@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8281 panic("Fatal exception in interrupt");
8282 if (panic_on_oops)
8283 panic("Fatal exception");
8284+
8285+ gr_handle_kernel_exploit();
8286+
8287 do_exit(signr);
8288 }
8289
8290diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8291index 094e45c..d82b848 100644
8292--- a/arch/powerpc/kernel/vdso.c
8293+++ b/arch/powerpc/kernel/vdso.c
8294@@ -35,6 +35,7 @@
8295 #include <asm/vdso.h>
8296 #include <asm/vdso_datapage.h>
8297 #include <asm/setup.h>
8298+#include <asm/mman.h>
8299
8300 #undef DEBUG
8301
8302@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8303 vdso_base = VDSO32_MBASE;
8304 #endif
8305
8306- current->mm->context.vdso_base = 0;
8307+ current->mm->context.vdso_base = ~0UL;
8308
8309 /* vDSO has a problem and was disabled, just don't "enable" it for the
8310 * process
8311@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8312 vdso_base = get_unmapped_area(NULL, vdso_base,
8313 (vdso_pages << PAGE_SHIFT) +
8314 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8315- 0, 0);
8316+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
8317 if (IS_ERR_VALUE(vdso_base)) {
8318 rc = vdso_base;
8319 goto fail_mmapsem;
8320diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8321index 5eea6f3..5d10396 100644
8322--- a/arch/powerpc/lib/usercopy_64.c
8323+++ b/arch/powerpc/lib/usercopy_64.c
8324@@ -9,22 +9,6 @@
8325 #include <linux/module.h>
8326 #include <asm/uaccess.h>
8327
8328-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8329-{
8330- if (likely(access_ok(VERIFY_READ, from, n)))
8331- n = __copy_from_user(to, from, n);
8332- else
8333- memset(to, 0, n);
8334- return n;
8335-}
8336-
8337-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8338-{
8339- if (likely(access_ok(VERIFY_WRITE, to, n)))
8340- n = __copy_to_user(to, from, n);
8341- return n;
8342-}
8343-
8344 unsigned long copy_in_user(void __user *to, const void __user *from,
8345 unsigned long n)
8346 {
8347@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8348 return n;
8349 }
8350
8351-EXPORT_SYMBOL(copy_from_user);
8352-EXPORT_SYMBOL(copy_to_user);
8353 EXPORT_SYMBOL(copy_in_user);
8354
8355diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8356index 51ab9e7..7d3c78b 100644
8357--- a/arch/powerpc/mm/fault.c
8358+++ b/arch/powerpc/mm/fault.c
8359@@ -33,6 +33,10 @@
8360 #include <linux/magic.h>
8361 #include <linux/ratelimit.h>
8362 #include <linux/context_tracking.h>
8363+#include <linux/slab.h>
8364+#include <linux/pagemap.h>
8365+#include <linux/compiler.h>
8366+#include <linux/unistd.h>
8367
8368 #include <asm/firmware.h>
8369 #include <asm/page.h>
8370@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8371 }
8372 #endif
8373
8374+#ifdef CONFIG_PAX_PAGEEXEC
8375+/*
8376+ * PaX: decide what to do with offenders (regs->nip = fault address)
8377+ *
8378+ * returns 1 when task should be killed
8379+ */
8380+static int pax_handle_fetch_fault(struct pt_regs *regs)
8381+{
8382+ return 1;
8383+}
8384+
8385+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8386+{
8387+ unsigned long i;
8388+
8389+ printk(KERN_ERR "PAX: bytes at PC: ");
8390+ for (i = 0; i < 5; i++) {
8391+ unsigned int c;
8392+ if (get_user(c, (unsigned int __user *)pc+i))
8393+ printk(KERN_CONT "???????? ");
8394+ else
8395+ printk(KERN_CONT "%08x ", c);
8396+ }
8397+ printk("\n");
8398+}
8399+#endif
8400+
8401 /*
8402 * Check whether the instruction at regs->nip is a store using
8403 * an update addressing form which will update r1.
8404@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8405 * indicate errors in DSISR but can validly be set in SRR1.
8406 */
8407 if (trap == 0x400)
8408- error_code &= 0x48200000;
8409+ error_code &= 0x58200000;
8410 else
8411 is_write = error_code & DSISR_ISSTORE;
8412 #else
8413@@ -378,7 +409,7 @@ good_area:
8414 * "undefined". Of those that can be set, this is the only
8415 * one which seems bad.
8416 */
8417- if (error_code & 0x10000000)
8418+ if (error_code & DSISR_GUARDED)
8419 /* Guarded storage error. */
8420 goto bad_area;
8421 #endif /* CONFIG_8xx */
8422@@ -393,7 +424,7 @@ good_area:
8423 * processors use the same I/D cache coherency mechanism
8424 * as embedded.
8425 */
8426- if (error_code & DSISR_PROTFAULT)
8427+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8428 goto bad_area;
8429 #endif /* CONFIG_PPC_STD_MMU */
8430
8431@@ -483,6 +514,23 @@ bad_area:
8432 bad_area_nosemaphore:
8433 /* User mode accesses cause a SIGSEGV */
8434 if (user_mode(regs)) {
8435+
8436+#ifdef CONFIG_PAX_PAGEEXEC
8437+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8438+#ifdef CONFIG_PPC_STD_MMU
8439+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8440+#else
8441+ if (is_exec && regs->nip == address) {
8442+#endif
8443+ switch (pax_handle_fetch_fault(regs)) {
8444+ }
8445+
8446+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8447+ do_group_exit(SIGKILL);
8448+ }
8449+ }
8450+#endif
8451+
8452 _exception(SIGSEGV, regs, code, address);
8453 goto bail;
8454 }
8455diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
8456index cb8bdbe..d770680 100644
8457--- a/arch/powerpc/mm/mmap.c
8458+++ b/arch/powerpc/mm/mmap.c
8459@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
8460 {
8461 unsigned long rnd = 0;
8462
8463+#ifdef CONFIG_PAX_RANDMMAP
8464+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8465+#endif
8466+
8467 if (current->flags & PF_RANDOMIZE) {
8468 /* 8MB for 32bit, 1GB for 64bit */
8469 if (is_32bit_task())
8470@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8471 */
8472 if (mmap_is_legacy()) {
8473 mm->mmap_base = TASK_UNMAPPED_BASE;
8474+
8475+#ifdef CONFIG_PAX_RANDMMAP
8476+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8477+ mm->mmap_base += mm->delta_mmap;
8478+#endif
8479+
8480 mm->get_unmapped_area = arch_get_unmapped_area;
8481 } else {
8482 mm->mmap_base = mmap_base();
8483+
8484+#ifdef CONFIG_PAX_RANDMMAP
8485+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8486+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8487+#endif
8488+
8489 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8490 }
8491 }
8492diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8493index 7ce9cf3..a964087 100644
8494--- a/arch/powerpc/mm/slice.c
8495+++ b/arch/powerpc/mm/slice.c
8496@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8497 if ((mm->task_size - len) < addr)
8498 return 0;
8499 vma = find_vma(mm, addr);
8500- return (!vma || (addr + len) <= vma->vm_start);
8501+ return check_heap_stack_gap(vma, addr, len, 0);
8502 }
8503
8504 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
8505@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
8506 info.align_offset = 0;
8507
8508 addr = TASK_UNMAPPED_BASE;
8509+
8510+#ifdef CONFIG_PAX_RANDMMAP
8511+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8512+ addr += mm->delta_mmap;
8513+#endif
8514+
8515 while (addr < TASK_SIZE) {
8516 info.low_limit = addr;
8517 if (!slice_scan_available(addr, available, 1, &addr))
8518@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
8519 if (fixed && addr > (mm->task_size - len))
8520 return -EINVAL;
8521
8522+#ifdef CONFIG_PAX_RANDMMAP
8523+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
8524+ addr = 0;
8525+#endif
8526+
8527 /* If hint, make sure it matches our alignment restrictions */
8528 if (!fixed && addr) {
8529 addr = _ALIGN_UP(addr, 1ul << pshift);
8530diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
8531index 9098692..3d54cd1 100644
8532--- a/arch/powerpc/platforms/cell/spufs/file.c
8533+++ b/arch/powerpc/platforms/cell/spufs/file.c
8534@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8535 return VM_FAULT_NOPAGE;
8536 }
8537
8538-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
8539+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
8540 unsigned long address,
8541- void *buf, int len, int write)
8542+ void *buf, size_t len, int write)
8543 {
8544 struct spu_context *ctx = vma->vm_file->private_data;
8545 unsigned long offset = address - vma->vm_start;
8546diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
8547index fa9aaf7..3f5d836 100644
8548--- a/arch/s390/include/asm/atomic.h
8549+++ b/arch/s390/include/asm/atomic.h
8550@@ -398,6 +398,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
8551 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
8552 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8553
8554+#define atomic64_read_unchecked(v) atomic64_read(v)
8555+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8556+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8557+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8558+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8559+#define atomic64_inc_unchecked(v) atomic64_inc(v)
8560+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8561+#define atomic64_dec_unchecked(v) atomic64_dec(v)
8562+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8563+
8564 #define smp_mb__before_atomic_dec() smp_mb()
8565 #define smp_mb__after_atomic_dec() smp_mb()
8566 #define smp_mb__before_atomic_inc() smp_mb()
8567diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
8568index 4d7ccac..d03d0ad 100644
8569--- a/arch/s390/include/asm/cache.h
8570+++ b/arch/s390/include/asm/cache.h
8571@@ -9,8 +9,10 @@
8572 #ifndef __ARCH_S390_CACHE_H
8573 #define __ARCH_S390_CACHE_H
8574
8575-#define L1_CACHE_BYTES 256
8576+#include <linux/const.h>
8577+
8578 #define L1_CACHE_SHIFT 8
8579+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8580 #define NET_SKB_PAD 32
8581
8582 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8583diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
8584index 78f4f87..598ce39 100644
8585--- a/arch/s390/include/asm/elf.h
8586+++ b/arch/s390/include/asm/elf.h
8587@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
8588 the loader. We need to make sure that it is out of the way of the program
8589 that it will "exec", and that there is sufficient room for the brk. */
8590
8591-extern unsigned long randomize_et_dyn(unsigned long base);
8592-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
8593+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
8594+
8595+#ifdef CONFIG_PAX_ASLR
8596+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
8597+
8598+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8599+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8600+#endif
8601
8602 /* This yields a mask that user programs can use to figure out what
8603 instruction set this CPU supports. */
8604@@ -222,9 +228,6 @@ struct linux_binprm;
8605 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
8606 int arch_setup_additional_pages(struct linux_binprm *, int);
8607
8608-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8609-#define arch_randomize_brk arch_randomize_brk
8610-
8611 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
8612
8613 #endif
8614diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
8615index c4a93d6..4d2a9b4 100644
8616--- a/arch/s390/include/asm/exec.h
8617+++ b/arch/s390/include/asm/exec.h
8618@@ -7,6 +7,6 @@
8619 #ifndef __ASM_EXEC_H
8620 #define __ASM_EXEC_H
8621
8622-extern unsigned long arch_align_stack(unsigned long sp);
8623+#define arch_align_stack(x) ((x) & ~0xfUL)
8624
8625 #endif /* __ASM_EXEC_H */
8626diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
8627index 79330af..a3a7b06 100644
8628--- a/arch/s390/include/asm/uaccess.h
8629+++ b/arch/s390/include/asm/uaccess.h
8630@@ -245,6 +245,10 @@ static inline unsigned long __must_check
8631 copy_to_user(void __user *to, const void *from, unsigned long n)
8632 {
8633 might_fault();
8634+
8635+ if ((long)n < 0)
8636+ return n;
8637+
8638 return __copy_to_user(to, from, n);
8639 }
8640
8641@@ -268,6 +272,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
8642 static inline unsigned long __must_check
8643 __copy_from_user(void *to, const void __user *from, unsigned long n)
8644 {
8645+ if ((long)n < 0)
8646+ return n;
8647+
8648 return uaccess.copy_from_user(n, from, to);
8649 }
8650
8651@@ -296,10 +303,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
8652 static inline unsigned long __must_check
8653 copy_from_user(void *to, const void __user *from, unsigned long n)
8654 {
8655- unsigned int sz = __compiletime_object_size(to);
8656+ size_t sz = __compiletime_object_size(to);
8657
8658 might_fault();
8659- if (unlikely(sz != -1 && sz < n)) {
8660+
8661+ if ((long)n < 0)
8662+ return n;
8663+
8664+ if (unlikely(sz != (size_t)-1 && sz < n)) {
8665 copy_from_user_overflow();
8666 return n;
8667 }
8668diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
8669index b89b591..fd9609d 100644
8670--- a/arch/s390/kernel/module.c
8671+++ b/arch/s390/kernel/module.c
8672@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
8673
8674 /* Increase core size by size of got & plt and set start
8675 offsets for got and plt. */
8676- me->core_size = ALIGN(me->core_size, 4);
8677- me->arch.got_offset = me->core_size;
8678- me->core_size += me->arch.got_size;
8679- me->arch.plt_offset = me->core_size;
8680- me->core_size += me->arch.plt_size;
8681+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
8682+ me->arch.got_offset = me->core_size_rw;
8683+ me->core_size_rw += me->arch.got_size;
8684+ me->arch.plt_offset = me->core_size_rx;
8685+ me->core_size_rx += me->arch.plt_size;
8686 return 0;
8687 }
8688
8689@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8690 if (info->got_initialized == 0) {
8691 Elf_Addr *gotent;
8692
8693- gotent = me->module_core + me->arch.got_offset +
8694+ gotent = me->module_core_rw + me->arch.got_offset +
8695 info->got_offset;
8696 *gotent = val;
8697 info->got_initialized = 1;
8698@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8699 rc = apply_rela_bits(loc, val, 0, 64, 0);
8700 else if (r_type == R_390_GOTENT ||
8701 r_type == R_390_GOTPLTENT) {
8702- val += (Elf_Addr) me->module_core - loc;
8703+ val += (Elf_Addr) me->module_core_rw - loc;
8704 rc = apply_rela_bits(loc, val, 1, 32, 1);
8705 }
8706 break;
8707@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8708 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
8709 if (info->plt_initialized == 0) {
8710 unsigned int *ip;
8711- ip = me->module_core + me->arch.plt_offset +
8712+ ip = me->module_core_rx + me->arch.plt_offset +
8713 info->plt_offset;
8714 #ifndef CONFIG_64BIT
8715 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
8716@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8717 val - loc + 0xffffUL < 0x1ffffeUL) ||
8718 (r_type == R_390_PLT32DBL &&
8719 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
8720- val = (Elf_Addr) me->module_core +
8721+ val = (Elf_Addr) me->module_core_rx +
8722 me->arch.plt_offset +
8723 info->plt_offset;
8724 val += rela->r_addend - loc;
8725@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8726 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
8727 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
8728 val = val + rela->r_addend -
8729- ((Elf_Addr) me->module_core + me->arch.got_offset);
8730+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
8731 if (r_type == R_390_GOTOFF16)
8732 rc = apply_rela_bits(loc, val, 0, 16, 0);
8733 else if (r_type == R_390_GOTOFF32)
8734@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8735 break;
8736 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
8737 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
8738- val = (Elf_Addr) me->module_core + me->arch.got_offset +
8739+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
8740 rela->r_addend - loc;
8741 if (r_type == R_390_GOTPC)
8742 rc = apply_rela_bits(loc, val, 1, 32, 0);
8743diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
8744index 7ed0d4e..1dfc145 100644
8745--- a/arch/s390/kernel/process.c
8746+++ b/arch/s390/kernel/process.c
8747@@ -242,39 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
8748 }
8749 return 0;
8750 }
8751-
8752-unsigned long arch_align_stack(unsigned long sp)
8753-{
8754- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8755- sp -= get_random_int() & ~PAGE_MASK;
8756- return sp & ~0xf;
8757-}
8758-
8759-static inline unsigned long brk_rnd(void)
8760-{
8761- /* 8MB for 32bit, 1GB for 64bit */
8762- if (is_32bit_task())
8763- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
8764- else
8765- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
8766-}
8767-
8768-unsigned long arch_randomize_brk(struct mm_struct *mm)
8769-{
8770- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
8771-
8772- if (ret < mm->brk)
8773- return mm->brk;
8774- return ret;
8775-}
8776-
8777-unsigned long randomize_et_dyn(unsigned long base)
8778-{
8779- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8780-
8781- if (!(current->flags & PF_RANDOMIZE))
8782- return base;
8783- if (ret < base)
8784- return base;
8785- return ret;
8786-}
8787diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
8788index 9b436c2..54fbf0a 100644
8789--- a/arch/s390/mm/mmap.c
8790+++ b/arch/s390/mm/mmap.c
8791@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8792 */
8793 if (mmap_is_legacy()) {
8794 mm->mmap_base = mmap_base_legacy();
8795+
8796+#ifdef CONFIG_PAX_RANDMMAP
8797+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8798+ mm->mmap_base += mm->delta_mmap;
8799+#endif
8800+
8801 mm->get_unmapped_area = arch_get_unmapped_area;
8802 } else {
8803 mm->mmap_base = mmap_base();
8804+
8805+#ifdef CONFIG_PAX_RANDMMAP
8806+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8807+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8808+#endif
8809+
8810 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8811 }
8812 }
8813@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8814 */
8815 if (mmap_is_legacy()) {
8816 mm->mmap_base = mmap_base_legacy();
8817+
8818+#ifdef CONFIG_PAX_RANDMMAP
8819+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8820+ mm->mmap_base += mm->delta_mmap;
8821+#endif
8822+
8823 mm->get_unmapped_area = s390_get_unmapped_area;
8824 } else {
8825 mm->mmap_base = mmap_base();
8826+
8827+#ifdef CONFIG_PAX_RANDMMAP
8828+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8829+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8830+#endif
8831+
8832 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
8833 }
8834 }
8835diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
8836index ae3d59f..f65f075 100644
8837--- a/arch/score/include/asm/cache.h
8838+++ b/arch/score/include/asm/cache.h
8839@@ -1,7 +1,9 @@
8840 #ifndef _ASM_SCORE_CACHE_H
8841 #define _ASM_SCORE_CACHE_H
8842
8843+#include <linux/const.h>
8844+
8845 #define L1_CACHE_SHIFT 4
8846-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8847+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8848
8849 #endif /* _ASM_SCORE_CACHE_H */
8850diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
8851index f9f3cd5..58ff438 100644
8852--- a/arch/score/include/asm/exec.h
8853+++ b/arch/score/include/asm/exec.h
8854@@ -1,6 +1,6 @@
8855 #ifndef _ASM_SCORE_EXEC_H
8856 #define _ASM_SCORE_EXEC_H
8857
8858-extern unsigned long arch_align_stack(unsigned long sp);
8859+#define arch_align_stack(x) (x)
8860
8861 #endif /* _ASM_SCORE_EXEC_H */
8862diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
8863index a1519ad3..e8ac1ff 100644
8864--- a/arch/score/kernel/process.c
8865+++ b/arch/score/kernel/process.c
8866@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
8867
8868 return task_pt_regs(task)->cp0_epc;
8869 }
8870-
8871-unsigned long arch_align_stack(unsigned long sp)
8872-{
8873- return sp;
8874-}
8875diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
8876index ef9e555..331bd29 100644
8877--- a/arch/sh/include/asm/cache.h
8878+++ b/arch/sh/include/asm/cache.h
8879@@ -9,10 +9,11 @@
8880 #define __ASM_SH_CACHE_H
8881 #ifdef __KERNEL__
8882
8883+#include <linux/const.h>
8884 #include <linux/init.h>
8885 #include <cpu/cache.h>
8886
8887-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8888+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8889
8890 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8891
8892diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
8893index 6777177..cb5e44f 100644
8894--- a/arch/sh/mm/mmap.c
8895+++ b/arch/sh/mm/mmap.c
8896@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8897 struct mm_struct *mm = current->mm;
8898 struct vm_area_struct *vma;
8899 int do_colour_align;
8900+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8901 struct vm_unmapped_area_info info;
8902
8903 if (flags & MAP_FIXED) {
8904@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8905 if (filp || (flags & MAP_SHARED))
8906 do_colour_align = 1;
8907
8908+#ifdef CONFIG_PAX_RANDMMAP
8909+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8910+#endif
8911+
8912 if (addr) {
8913 if (do_colour_align)
8914 addr = COLOUR_ALIGN(addr, pgoff);
8915@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8916 addr = PAGE_ALIGN(addr);
8917
8918 vma = find_vma(mm, addr);
8919- if (TASK_SIZE - len >= addr &&
8920- (!vma || addr + len <= vma->vm_start))
8921+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8922 return addr;
8923 }
8924
8925 info.flags = 0;
8926 info.length = len;
8927- info.low_limit = TASK_UNMAPPED_BASE;
8928+ info.low_limit = mm->mmap_base;
8929 info.high_limit = TASK_SIZE;
8930 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8931 info.align_offset = pgoff << PAGE_SHIFT;
8932@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8933 struct mm_struct *mm = current->mm;
8934 unsigned long addr = addr0;
8935 int do_colour_align;
8936+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8937 struct vm_unmapped_area_info info;
8938
8939 if (flags & MAP_FIXED) {
8940@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8941 if (filp || (flags & MAP_SHARED))
8942 do_colour_align = 1;
8943
8944+#ifdef CONFIG_PAX_RANDMMAP
8945+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8946+#endif
8947+
8948 /* requesting a specific address */
8949 if (addr) {
8950 if (do_colour_align)
8951@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8952 addr = PAGE_ALIGN(addr);
8953
8954 vma = find_vma(mm, addr);
8955- if (TASK_SIZE - len >= addr &&
8956- (!vma || addr + len <= vma->vm_start))
8957+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8958 return addr;
8959 }
8960
8961@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8962 VM_BUG_ON(addr != -ENOMEM);
8963 info.flags = 0;
8964 info.low_limit = TASK_UNMAPPED_BASE;
8965+
8966+#ifdef CONFIG_PAX_RANDMMAP
8967+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8968+ info.low_limit += mm->delta_mmap;
8969+#endif
8970+
8971 info.high_limit = TASK_SIZE;
8972 addr = vm_unmapped_area(&info);
8973 }
8974diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8975index be56a24..443328f 100644
8976--- a/arch/sparc/include/asm/atomic_64.h
8977+++ b/arch/sparc/include/asm/atomic_64.h
8978@@ -14,18 +14,40 @@
8979 #define ATOMIC64_INIT(i) { (i) }
8980
8981 #define atomic_read(v) (*(volatile int *)&(v)->counter)
8982+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8983+{
8984+ return v->counter;
8985+}
8986 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8987+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8988+{
8989+ return v->counter;
8990+}
8991
8992 #define atomic_set(v, i) (((v)->counter) = i)
8993+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8994+{
8995+ v->counter = i;
8996+}
8997 #define atomic64_set(v, i) (((v)->counter) = i)
8998+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8999+{
9000+ v->counter = i;
9001+}
9002
9003 extern void atomic_add(int, atomic_t *);
9004+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
9005 extern void atomic64_add(long, atomic64_t *);
9006+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
9007 extern void atomic_sub(int, atomic_t *);
9008+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
9009 extern void atomic64_sub(long, atomic64_t *);
9010+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
9011
9012 extern int atomic_add_ret(int, atomic_t *);
9013+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
9014 extern long atomic64_add_ret(long, atomic64_t *);
9015+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
9016 extern int atomic_sub_ret(int, atomic_t *);
9017 extern long atomic64_sub_ret(long, atomic64_t *);
9018
9019@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9020 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
9021
9022 #define atomic_inc_return(v) atomic_add_ret(1, v)
9023+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9024+{
9025+ return atomic_add_ret_unchecked(1, v);
9026+}
9027 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
9028+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9029+{
9030+ return atomic64_add_ret_unchecked(1, v);
9031+}
9032
9033 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
9034 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
9035
9036 #define atomic_add_return(i, v) atomic_add_ret(i, v)
9037+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9038+{
9039+ return atomic_add_ret_unchecked(i, v);
9040+}
9041 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
9042+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9043+{
9044+ return atomic64_add_ret_unchecked(i, v);
9045+}
9046
9047 /*
9048 * atomic_inc_and_test - increment and test
9049@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9050 * other cases.
9051 */
9052 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9053+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9054+{
9055+ return atomic_inc_return_unchecked(v) == 0;
9056+}
9057 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9058
9059 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
9060@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9061 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
9062
9063 #define atomic_inc(v) atomic_add(1, v)
9064+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9065+{
9066+ atomic_add_unchecked(1, v);
9067+}
9068 #define atomic64_inc(v) atomic64_add(1, v)
9069+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9070+{
9071+ atomic64_add_unchecked(1, v);
9072+}
9073
9074 #define atomic_dec(v) atomic_sub(1, v)
9075+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9076+{
9077+ atomic_sub_unchecked(1, v);
9078+}
9079 #define atomic64_dec(v) atomic64_sub(1, v)
9080+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9081+{
9082+ atomic64_sub_unchecked(1, v);
9083+}
9084
9085 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
9086 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
9087
9088 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9089+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9090+{
9091+ return cmpxchg(&v->counter, old, new);
9092+}
9093 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9094+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9095+{
9096+ return xchg(&v->counter, new);
9097+}
9098
9099 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9100 {
9101- int c, old;
9102+ int c, old, new;
9103 c = atomic_read(v);
9104 for (;;) {
9105- if (unlikely(c == (u)))
9106+ if (unlikely(c == u))
9107 break;
9108- old = atomic_cmpxchg((v), c, c + (a));
9109+
9110+ asm volatile("addcc %2, %0, %0\n"
9111+
9112+#ifdef CONFIG_PAX_REFCOUNT
9113+ "tvs %%icc, 6\n"
9114+#endif
9115+
9116+ : "=r" (new)
9117+ : "0" (c), "ir" (a)
9118+ : "cc");
9119+
9120+ old = atomic_cmpxchg(v, c, new);
9121 if (likely(old == c))
9122 break;
9123 c = old;
9124@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9125 #define atomic64_cmpxchg(v, o, n) \
9126 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9127 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9128+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9129+{
9130+ return xchg(&v->counter, new);
9131+}
9132
9133 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9134 {
9135- long c, old;
9136+ long c, old, new;
9137 c = atomic64_read(v);
9138 for (;;) {
9139- if (unlikely(c == (u)))
9140+ if (unlikely(c == u))
9141 break;
9142- old = atomic64_cmpxchg((v), c, c + (a));
9143+
9144+ asm volatile("addcc %2, %0, %0\n"
9145+
9146+#ifdef CONFIG_PAX_REFCOUNT
9147+ "tvs %%xcc, 6\n"
9148+#endif
9149+
9150+ : "=r" (new)
9151+ : "0" (c), "ir" (a)
9152+ : "cc");
9153+
9154+ old = atomic64_cmpxchg(v, c, new);
9155 if (likely(old == c))
9156 break;
9157 c = old;
9158 }
9159- return c != (u);
9160+ return c != u;
9161 }
9162
9163 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9164diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
9165index 5bb6991..5c2132e 100644
9166--- a/arch/sparc/include/asm/cache.h
9167+++ b/arch/sparc/include/asm/cache.h
9168@@ -7,10 +7,12 @@
9169 #ifndef _SPARC_CACHE_H
9170 #define _SPARC_CACHE_H
9171
9172+#include <linux/const.h>
9173+
9174 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
9175
9176 #define L1_CACHE_SHIFT 5
9177-#define L1_CACHE_BYTES 32
9178+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9179
9180 #ifdef CONFIG_SPARC32
9181 #define SMP_CACHE_BYTES_SHIFT 5
9182diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
9183index a24e41f..47677ff 100644
9184--- a/arch/sparc/include/asm/elf_32.h
9185+++ b/arch/sparc/include/asm/elf_32.h
9186@@ -114,6 +114,13 @@ typedef struct {
9187
9188 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
9189
9190+#ifdef CONFIG_PAX_ASLR
9191+#define PAX_ELF_ET_DYN_BASE 0x10000UL
9192+
9193+#define PAX_DELTA_MMAP_LEN 16
9194+#define PAX_DELTA_STACK_LEN 16
9195+#endif
9196+
9197 /* This yields a mask that user programs can use to figure out what
9198 instruction set this cpu supports. This can NOT be done in userspace
9199 on Sparc. */
9200diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9201index 370ca1e..d4f4a98 100644
9202--- a/arch/sparc/include/asm/elf_64.h
9203+++ b/arch/sparc/include/asm/elf_64.h
9204@@ -189,6 +189,13 @@ typedef struct {
9205 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9206 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9207
9208+#ifdef CONFIG_PAX_ASLR
9209+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9210+
9211+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9212+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9213+#endif
9214+
9215 extern unsigned long sparc64_elf_hwcap;
9216 #define ELF_HWCAP sparc64_elf_hwcap
9217
9218diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9219index 9b1c36d..209298b 100644
9220--- a/arch/sparc/include/asm/pgalloc_32.h
9221+++ b/arch/sparc/include/asm/pgalloc_32.h
9222@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9223 }
9224
9225 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9226+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9227
9228 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9229 unsigned long address)
9230diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9231index bcfe063..b333142 100644
9232--- a/arch/sparc/include/asm/pgalloc_64.h
9233+++ b/arch/sparc/include/asm/pgalloc_64.h
9234@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9235 }
9236
9237 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9238+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9239
9240 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9241 {
9242diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9243index 502f632..da1917f 100644
9244--- a/arch/sparc/include/asm/pgtable_32.h
9245+++ b/arch/sparc/include/asm/pgtable_32.h
9246@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9247 #define PAGE_SHARED SRMMU_PAGE_SHARED
9248 #define PAGE_COPY SRMMU_PAGE_COPY
9249 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9250+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9251+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9252+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9253 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9254
9255 /* Top-level page directory - dummy used by init-mm.
9256@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9257
9258 /* xwr */
9259 #define __P000 PAGE_NONE
9260-#define __P001 PAGE_READONLY
9261-#define __P010 PAGE_COPY
9262-#define __P011 PAGE_COPY
9263+#define __P001 PAGE_READONLY_NOEXEC
9264+#define __P010 PAGE_COPY_NOEXEC
9265+#define __P011 PAGE_COPY_NOEXEC
9266 #define __P100 PAGE_READONLY
9267 #define __P101 PAGE_READONLY
9268 #define __P110 PAGE_COPY
9269 #define __P111 PAGE_COPY
9270
9271 #define __S000 PAGE_NONE
9272-#define __S001 PAGE_READONLY
9273-#define __S010 PAGE_SHARED
9274-#define __S011 PAGE_SHARED
9275+#define __S001 PAGE_READONLY_NOEXEC
9276+#define __S010 PAGE_SHARED_NOEXEC
9277+#define __S011 PAGE_SHARED_NOEXEC
9278 #define __S100 PAGE_READONLY
9279 #define __S101 PAGE_READONLY
9280 #define __S110 PAGE_SHARED
9281diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9282index 79da178..c2eede8 100644
9283--- a/arch/sparc/include/asm/pgtsrmmu.h
9284+++ b/arch/sparc/include/asm/pgtsrmmu.h
9285@@ -115,6 +115,11 @@
9286 SRMMU_EXEC | SRMMU_REF)
9287 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
9288 SRMMU_EXEC | SRMMU_REF)
9289+
9290+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
9291+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9292+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9293+
9294 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
9295 SRMMU_DIRTY | SRMMU_REF)
9296
9297diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
9298index 9689176..63c18ea 100644
9299--- a/arch/sparc/include/asm/spinlock_64.h
9300+++ b/arch/sparc/include/asm/spinlock_64.h
9301@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
9302
9303 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
9304
9305-static void inline arch_read_lock(arch_rwlock_t *lock)
9306+static inline void arch_read_lock(arch_rwlock_t *lock)
9307 {
9308 unsigned long tmp1, tmp2;
9309
9310 __asm__ __volatile__ (
9311 "1: ldsw [%2], %0\n"
9312 " brlz,pn %0, 2f\n"
9313-"4: add %0, 1, %1\n"
9314+"4: addcc %0, 1, %1\n"
9315+
9316+#ifdef CONFIG_PAX_REFCOUNT
9317+" tvs %%icc, 6\n"
9318+#endif
9319+
9320 " cas [%2], %0, %1\n"
9321 " cmp %0, %1\n"
9322 " bne,pn %%icc, 1b\n"
9323@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
9324 " .previous"
9325 : "=&r" (tmp1), "=&r" (tmp2)
9326 : "r" (lock)
9327- : "memory");
9328+ : "memory", "cc");
9329 }
9330
9331-static int inline arch_read_trylock(arch_rwlock_t *lock)
9332+static inline int arch_read_trylock(arch_rwlock_t *lock)
9333 {
9334 int tmp1, tmp2;
9335
9336@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9337 "1: ldsw [%2], %0\n"
9338 " brlz,a,pn %0, 2f\n"
9339 " mov 0, %0\n"
9340-" add %0, 1, %1\n"
9341+" addcc %0, 1, %1\n"
9342+
9343+#ifdef CONFIG_PAX_REFCOUNT
9344+" tvs %%icc, 6\n"
9345+#endif
9346+
9347 " cas [%2], %0, %1\n"
9348 " cmp %0, %1\n"
9349 " bne,pn %%icc, 1b\n"
9350@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9351 return tmp1;
9352 }
9353
9354-static void inline arch_read_unlock(arch_rwlock_t *lock)
9355+static inline void arch_read_unlock(arch_rwlock_t *lock)
9356 {
9357 unsigned long tmp1, tmp2;
9358
9359 __asm__ __volatile__(
9360 "1: lduw [%2], %0\n"
9361-" sub %0, 1, %1\n"
9362+" subcc %0, 1, %1\n"
9363+
9364+#ifdef CONFIG_PAX_REFCOUNT
9365+" tvs %%icc, 6\n"
9366+#endif
9367+
9368 " cas [%2], %0, %1\n"
9369 " cmp %0, %1\n"
9370 " bne,pn %%xcc, 1b\n"
9371@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
9372 : "memory");
9373 }
9374
9375-static void inline arch_write_lock(arch_rwlock_t *lock)
9376+static inline void arch_write_lock(arch_rwlock_t *lock)
9377 {
9378 unsigned long mask, tmp1, tmp2;
9379
9380@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
9381 : "memory");
9382 }
9383
9384-static void inline arch_write_unlock(arch_rwlock_t *lock)
9385+static inline void arch_write_unlock(arch_rwlock_t *lock)
9386 {
9387 __asm__ __volatile__(
9388 " stw %%g0, [%0]"
9389@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
9390 : "memory");
9391 }
9392
9393-static int inline arch_write_trylock(arch_rwlock_t *lock)
9394+static inline int arch_write_trylock(arch_rwlock_t *lock)
9395 {
9396 unsigned long mask, tmp1, tmp2, result;
9397
9398diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
9399index 96efa7a..16858bf 100644
9400--- a/arch/sparc/include/asm/thread_info_32.h
9401+++ b/arch/sparc/include/asm/thread_info_32.h
9402@@ -49,6 +49,8 @@ struct thread_info {
9403 unsigned long w_saved;
9404
9405 struct restart_block restart_block;
9406+
9407+ unsigned long lowest_stack;
9408 };
9409
9410 /*
9411diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
9412index a5f01ac..703b554 100644
9413--- a/arch/sparc/include/asm/thread_info_64.h
9414+++ b/arch/sparc/include/asm/thread_info_64.h
9415@@ -63,6 +63,8 @@ struct thread_info {
9416 struct pt_regs *kern_una_regs;
9417 unsigned int kern_una_insn;
9418
9419+ unsigned long lowest_stack;
9420+
9421 unsigned long fpregs[0] __attribute__ ((aligned(64)));
9422 };
9423
9424@@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
9425 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
9426 /* flag bit 4 is available */
9427 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
9428-/* flag bit 6 is available */
9429+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
9430 #define TIF_32BIT 7 /* 32-bit binary */
9431 #define TIF_NOHZ 8 /* in adaptive nohz mode */
9432 #define TIF_SECCOMP 9 /* secure computing */
9433 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
9434 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
9435+
9436 /* NOTE: Thread flags >= 12 should be ones we have no interest
9437 * in using in assembly, else we can't use the mask as
9438 * an immediate value in instructions such as andcc.
9439@@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
9440 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
9441 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9442 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
9443+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9444
9445 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
9446 _TIF_DO_NOTIFY_RESUME_MASK | \
9447 _TIF_NEED_RESCHED)
9448 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
9449
9450+#define _TIF_WORK_SYSCALL \
9451+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
9452+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
9453+
9454+
9455 /*
9456 * Thread-synchronous status.
9457 *
9458diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
9459index 0167d26..767bb0c 100644
9460--- a/arch/sparc/include/asm/uaccess.h
9461+++ b/arch/sparc/include/asm/uaccess.h
9462@@ -1,5 +1,6 @@
9463 #ifndef ___ASM_SPARC_UACCESS_H
9464 #define ___ASM_SPARC_UACCESS_H
9465+
9466 #if defined(__sparc__) && defined(__arch64__)
9467 #include <asm/uaccess_64.h>
9468 #else
9469diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
9470index 53a28dd..50c38c3 100644
9471--- a/arch/sparc/include/asm/uaccess_32.h
9472+++ b/arch/sparc/include/asm/uaccess_32.h
9473@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
9474
9475 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9476 {
9477- if (n && __access_ok((unsigned long) to, n))
9478+ if ((long)n < 0)
9479+ return n;
9480+
9481+ if (n && __access_ok((unsigned long) to, n)) {
9482+ if (!__builtin_constant_p(n))
9483+ check_object_size(from, n, true);
9484 return __copy_user(to, (__force void __user *) from, n);
9485- else
9486+ } else
9487 return n;
9488 }
9489
9490 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
9491 {
9492+ if ((long)n < 0)
9493+ return n;
9494+
9495+ if (!__builtin_constant_p(n))
9496+ check_object_size(from, n, true);
9497+
9498 return __copy_user(to, (__force void __user *) from, n);
9499 }
9500
9501 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9502 {
9503- if (n && __access_ok((unsigned long) from, n))
9504+ if ((long)n < 0)
9505+ return n;
9506+
9507+ if (n && __access_ok((unsigned long) from, n)) {
9508+ if (!__builtin_constant_p(n))
9509+ check_object_size(to, n, false);
9510 return __copy_user((__force void __user *) to, from, n);
9511- else
9512+ } else
9513 return n;
9514 }
9515
9516 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
9517 {
9518+ if ((long)n < 0)
9519+ return n;
9520+
9521 return __copy_user((__force void __user *) to, from, n);
9522 }
9523
9524diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
9525index ad7e178..c9e7423 100644
9526--- a/arch/sparc/include/asm/uaccess_64.h
9527+++ b/arch/sparc/include/asm/uaccess_64.h
9528@@ -10,6 +10,7 @@
9529 #include <linux/compiler.h>
9530 #include <linux/string.h>
9531 #include <linux/thread_info.h>
9532+#include <linux/kernel.h>
9533 #include <asm/asi.h>
9534 #include <asm/spitfire.h>
9535 #include <asm-generic/uaccess-unaligned.h>
9536@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
9537 static inline unsigned long __must_check
9538 copy_from_user(void *to, const void __user *from, unsigned long size)
9539 {
9540- unsigned long ret = ___copy_from_user(to, from, size);
9541+ unsigned long ret;
9542
9543+ if ((long)size < 0 || size > INT_MAX)
9544+ return size;
9545+
9546+ if (!__builtin_constant_p(size))
9547+ check_object_size(to, size, false);
9548+
9549+ ret = ___copy_from_user(to, from, size);
9550 if (unlikely(ret))
9551 ret = copy_from_user_fixup(to, from, size);
9552
9553@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
9554 static inline unsigned long __must_check
9555 copy_to_user(void __user *to, const void *from, unsigned long size)
9556 {
9557- unsigned long ret = ___copy_to_user(to, from, size);
9558+ unsigned long ret;
9559
9560+ if ((long)size < 0 || size > INT_MAX)
9561+ return size;
9562+
9563+ if (!__builtin_constant_p(size))
9564+ check_object_size(from, size, true);
9565+
9566+ ret = ___copy_to_user(to, from, size);
9567 if (unlikely(ret))
9568 ret = copy_to_user_fixup(to, from, size);
9569 return ret;
9570diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
9571index d15cc17..d0ae796 100644
9572--- a/arch/sparc/kernel/Makefile
9573+++ b/arch/sparc/kernel/Makefile
9574@@ -4,7 +4,7 @@
9575 #
9576
9577 asflags-y := -ansi
9578-ccflags-y := -Werror
9579+#ccflags-y := -Werror
9580
9581 extra-y := head_$(BITS).o
9582
9583diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
9584index fdd819d..5af08c8 100644
9585--- a/arch/sparc/kernel/process_32.c
9586+++ b/arch/sparc/kernel/process_32.c
9587@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
9588
9589 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
9590 r->psr, r->pc, r->npc, r->y, print_tainted());
9591- printk("PC: <%pS>\n", (void *) r->pc);
9592+ printk("PC: <%pA>\n", (void *) r->pc);
9593 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9594 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
9595 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
9596 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9597 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
9598 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
9599- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
9600+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
9601
9602 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9603 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
9604@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9605 rw = (struct reg_window32 *) fp;
9606 pc = rw->ins[7];
9607 printk("[%08lx : ", pc);
9608- printk("%pS ] ", (void *) pc);
9609+ printk("%pA ] ", (void *) pc);
9610 fp = rw->ins[6];
9611 } while (++count < 16);
9612 printk("\n");
9613diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
9614index 32a280e..84fc6a9 100644
9615--- a/arch/sparc/kernel/process_64.c
9616+++ b/arch/sparc/kernel/process_64.c
9617@@ -159,7 +159,7 @@ static void show_regwindow(struct pt_regs *regs)
9618 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
9619 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
9620 if (regs->tstate & TSTATE_PRIV)
9621- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
9622+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
9623 }
9624
9625 void show_regs(struct pt_regs *regs)
9626@@ -168,7 +168,7 @@ void show_regs(struct pt_regs *regs)
9627
9628 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
9629 regs->tpc, regs->tnpc, regs->y, print_tainted());
9630- printk("TPC: <%pS>\n", (void *) regs->tpc);
9631+ printk("TPC: <%pA>\n", (void *) regs->tpc);
9632 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
9633 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
9634 regs->u_regs[3]);
9635@@ -181,7 +181,7 @@ void show_regs(struct pt_regs *regs)
9636 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
9637 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
9638 regs->u_regs[15]);
9639- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
9640+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
9641 show_regwindow(regs);
9642 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
9643 }
9644@@ -270,7 +270,7 @@ void arch_trigger_all_cpu_backtrace(void)
9645 ((tp && tp->task) ? tp->task->pid : -1));
9646
9647 if (gp->tstate & TSTATE_PRIV) {
9648- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
9649+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
9650 (void *) gp->tpc,
9651 (void *) gp->o7,
9652 (void *) gp->i7,
9653diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
9654index 79cc0d1..ec62734 100644
9655--- a/arch/sparc/kernel/prom_common.c
9656+++ b/arch/sparc/kernel/prom_common.c
9657@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
9658
9659 unsigned int prom_early_allocated __initdata;
9660
9661-static struct of_pdt_ops prom_sparc_ops __initdata = {
9662+static struct of_pdt_ops prom_sparc_ops __initconst = {
9663 .nextprop = prom_common_nextprop,
9664 .getproplen = prom_getproplen,
9665 .getproperty = prom_getproperty,
9666diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
9667index c13c9f2..d572c34 100644
9668--- a/arch/sparc/kernel/ptrace_64.c
9669+++ b/arch/sparc/kernel/ptrace_64.c
9670@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
9671 return ret;
9672 }
9673
9674+#ifdef CONFIG_GRKERNSEC_SETXID
9675+extern void gr_delayed_cred_worker(void);
9676+#endif
9677+
9678 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9679 {
9680 int ret = 0;
9681@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9682 if (test_thread_flag(TIF_NOHZ))
9683 user_exit();
9684
9685+#ifdef CONFIG_GRKERNSEC_SETXID
9686+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9687+ gr_delayed_cred_worker();
9688+#endif
9689+
9690 if (test_thread_flag(TIF_SYSCALL_TRACE))
9691 ret = tracehook_report_syscall_entry(regs);
9692
9693@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
9694 if (test_thread_flag(TIF_NOHZ))
9695 user_exit();
9696
9697+#ifdef CONFIG_GRKERNSEC_SETXID
9698+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9699+ gr_delayed_cred_worker();
9700+#endif
9701+
9702 audit_syscall_exit(regs);
9703
9704 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9705diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
9706index b085311..6f885f7 100644
9707--- a/arch/sparc/kernel/smp_64.c
9708+++ b/arch/sparc/kernel/smp_64.c
9709@@ -870,8 +870,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
9710 extern unsigned long xcall_flush_dcache_page_spitfire;
9711
9712 #ifdef CONFIG_DEBUG_DCFLUSH
9713-extern atomic_t dcpage_flushes;
9714-extern atomic_t dcpage_flushes_xcall;
9715+extern atomic_unchecked_t dcpage_flushes;
9716+extern atomic_unchecked_t dcpage_flushes_xcall;
9717 #endif
9718
9719 static inline void __local_flush_dcache_page(struct page *page)
9720@@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
9721 return;
9722
9723 #ifdef CONFIG_DEBUG_DCFLUSH
9724- atomic_inc(&dcpage_flushes);
9725+ atomic_inc_unchecked(&dcpage_flushes);
9726 #endif
9727
9728 this_cpu = get_cpu();
9729@@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
9730 xcall_deliver(data0, __pa(pg_addr),
9731 (u64) pg_addr, cpumask_of(cpu));
9732 #ifdef CONFIG_DEBUG_DCFLUSH
9733- atomic_inc(&dcpage_flushes_xcall);
9734+ atomic_inc_unchecked(&dcpage_flushes_xcall);
9735 #endif
9736 }
9737 }
9738@@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
9739 preempt_disable();
9740
9741 #ifdef CONFIG_DEBUG_DCFLUSH
9742- atomic_inc(&dcpage_flushes);
9743+ atomic_inc_unchecked(&dcpage_flushes);
9744 #endif
9745 data0 = 0;
9746 pg_addr = page_address(page);
9747@@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
9748 xcall_deliver(data0, __pa(pg_addr),
9749 (u64) pg_addr, cpu_online_mask);
9750 #ifdef CONFIG_DEBUG_DCFLUSH
9751- atomic_inc(&dcpage_flushes_xcall);
9752+ atomic_inc_unchecked(&dcpage_flushes_xcall);
9753 #endif
9754 }
9755 __local_flush_dcache_page(page);
9756diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
9757index 3a8d184..49498a8 100644
9758--- a/arch/sparc/kernel/sys_sparc_32.c
9759+++ b/arch/sparc/kernel/sys_sparc_32.c
9760@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9761 if (len > TASK_SIZE - PAGE_SIZE)
9762 return -ENOMEM;
9763 if (!addr)
9764- addr = TASK_UNMAPPED_BASE;
9765+ addr = current->mm->mmap_base;
9766
9767 info.flags = 0;
9768 info.length = len;
9769diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
9770index beb0b5a..5a153f7 100644
9771--- a/arch/sparc/kernel/sys_sparc_64.c
9772+++ b/arch/sparc/kernel/sys_sparc_64.c
9773@@ -88,13 +88,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9774 struct vm_area_struct * vma;
9775 unsigned long task_size = TASK_SIZE;
9776 int do_color_align;
9777+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9778 struct vm_unmapped_area_info info;
9779
9780 if (flags & MAP_FIXED) {
9781 /* We do not accept a shared mapping if it would violate
9782 * cache aliasing constraints.
9783 */
9784- if ((flags & MAP_SHARED) &&
9785+ if ((filp || (flags & MAP_SHARED)) &&
9786 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9787 return -EINVAL;
9788 return addr;
9789@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9790 if (filp || (flags & MAP_SHARED))
9791 do_color_align = 1;
9792
9793+#ifdef CONFIG_PAX_RANDMMAP
9794+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9795+#endif
9796+
9797 if (addr) {
9798 if (do_color_align)
9799 addr = COLOR_ALIGN(addr, pgoff);
9800@@ -116,22 +121,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9801 addr = PAGE_ALIGN(addr);
9802
9803 vma = find_vma(mm, addr);
9804- if (task_size - len >= addr &&
9805- (!vma || addr + len <= vma->vm_start))
9806+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9807 return addr;
9808 }
9809
9810 info.flags = 0;
9811 info.length = len;
9812- info.low_limit = TASK_UNMAPPED_BASE;
9813+ info.low_limit = mm->mmap_base;
9814 info.high_limit = min(task_size, VA_EXCLUDE_START);
9815 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9816 info.align_offset = pgoff << PAGE_SHIFT;
9817+ info.threadstack_offset = offset;
9818 addr = vm_unmapped_area(&info);
9819
9820 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9821 VM_BUG_ON(addr != -ENOMEM);
9822 info.low_limit = VA_EXCLUDE_END;
9823+
9824+#ifdef CONFIG_PAX_RANDMMAP
9825+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9826+ info.low_limit += mm->delta_mmap;
9827+#endif
9828+
9829 info.high_limit = task_size;
9830 addr = vm_unmapped_area(&info);
9831 }
9832@@ -149,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9833 unsigned long task_size = STACK_TOP32;
9834 unsigned long addr = addr0;
9835 int do_color_align;
9836+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9837 struct vm_unmapped_area_info info;
9838
9839 /* This should only ever run for 32-bit processes. */
9840@@ -158,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9841 /* We do not accept a shared mapping if it would violate
9842 * cache aliasing constraints.
9843 */
9844- if ((flags & MAP_SHARED) &&
9845+ if ((filp || (flags & MAP_SHARED)) &&
9846 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9847 return -EINVAL;
9848 return addr;
9849@@ -171,6 +183,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9850 if (filp || (flags & MAP_SHARED))
9851 do_color_align = 1;
9852
9853+#ifdef CONFIG_PAX_RANDMMAP
9854+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9855+#endif
9856+
9857 /* requesting a specific address */
9858 if (addr) {
9859 if (do_color_align)
9860@@ -179,8 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9861 addr = PAGE_ALIGN(addr);
9862
9863 vma = find_vma(mm, addr);
9864- if (task_size - len >= addr &&
9865- (!vma || addr + len <= vma->vm_start))
9866+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9867 return addr;
9868 }
9869
9870@@ -190,6 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9871 info.high_limit = mm->mmap_base;
9872 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9873 info.align_offset = pgoff << PAGE_SHIFT;
9874+ info.threadstack_offset = offset;
9875 addr = vm_unmapped_area(&info);
9876
9877 /*
9878@@ -202,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9879 VM_BUG_ON(addr != -ENOMEM);
9880 info.flags = 0;
9881 info.low_limit = TASK_UNMAPPED_BASE;
9882+
9883+#ifdef CONFIG_PAX_RANDMMAP
9884+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9885+ info.low_limit += mm->delta_mmap;
9886+#endif
9887+
9888 info.high_limit = STACK_TOP32;
9889 addr = vm_unmapped_area(&info);
9890 }
9891@@ -258,10 +280,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
9892 EXPORT_SYMBOL(get_fb_unmapped_area);
9893
9894 /* Essentially the same as PowerPC. */
9895-static unsigned long mmap_rnd(void)
9896+static unsigned long mmap_rnd(struct mm_struct *mm)
9897 {
9898 unsigned long rnd = 0UL;
9899
9900+#ifdef CONFIG_PAX_RANDMMAP
9901+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9902+#endif
9903+
9904 if (current->flags & PF_RANDOMIZE) {
9905 unsigned long val = get_random_int();
9906 if (test_thread_flag(TIF_32BIT))
9907@@ -274,7 +300,7 @@ static unsigned long mmap_rnd(void)
9908
9909 void arch_pick_mmap_layout(struct mm_struct *mm)
9910 {
9911- unsigned long random_factor = mmap_rnd();
9912+ unsigned long random_factor = mmap_rnd(mm);
9913 unsigned long gap;
9914
9915 /*
9916@@ -287,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9917 gap == RLIM_INFINITY ||
9918 sysctl_legacy_va_layout) {
9919 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
9920+
9921+#ifdef CONFIG_PAX_RANDMMAP
9922+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9923+ mm->mmap_base += mm->delta_mmap;
9924+#endif
9925+
9926 mm->get_unmapped_area = arch_get_unmapped_area;
9927 } else {
9928 /* We know it's 32-bit */
9929@@ -298,6 +330,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9930 gap = (task_size / 6 * 5);
9931
9932 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
9933+
9934+#ifdef CONFIG_PAX_RANDMMAP
9935+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9936+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9937+#endif
9938+
9939 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9940 }
9941 }
9942diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
9943index 87729ff..192f9d8 100644
9944--- a/arch/sparc/kernel/syscalls.S
9945+++ b/arch/sparc/kernel/syscalls.S
9946@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
9947 #endif
9948 .align 32
9949 1: ldx [%g6 + TI_FLAGS], %l5
9950- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
9951+ andcc %l5, _TIF_WORK_SYSCALL, %g0
9952 be,pt %icc, rtrap
9953 nop
9954 call syscall_trace_leave
9955@@ -184,7 +184,7 @@ linux_sparc_syscall32:
9956
9957 srl %i3, 0, %o3 ! IEU0
9958 srl %i2, 0, %o2 ! IEU0 Group
9959- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
9960+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9961 bne,pn %icc, linux_syscall_trace32 ! CTI
9962 mov %i0, %l5 ! IEU1
9963 5: call %l7 ! CTI Group brk forced
9964@@ -207,7 +207,7 @@ linux_sparc_syscall:
9965
9966 mov %i3, %o3 ! IEU1
9967 mov %i4, %o4 ! IEU0 Group
9968- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
9969+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9970 bne,pn %icc, linux_syscall_trace ! CTI Group
9971 mov %i0, %l5 ! IEU0
9972 2: call %l7 ! CTI Group brk forced
9973@@ -223,7 +223,7 @@ ret_sys_call:
9974
9975 cmp %o0, -ERESTART_RESTARTBLOCK
9976 bgeu,pn %xcc, 1f
9977- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
9978+ andcc %l0, _TIF_WORK_SYSCALL, %g0
9979 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9980
9981 2:
9982diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9983index 6629829..036032d 100644
9984--- a/arch/sparc/kernel/traps_32.c
9985+++ b/arch/sparc/kernel/traps_32.c
9986@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9987 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9988 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9989
9990+extern void gr_handle_kernel_exploit(void);
9991+
9992 void die_if_kernel(char *str, struct pt_regs *regs)
9993 {
9994 static int die_counter;
9995@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9996 count++ < 30 &&
9997 (((unsigned long) rw) >= PAGE_OFFSET) &&
9998 !(((unsigned long) rw) & 0x7)) {
9999- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10000+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10001 (void *) rw->ins[7]);
10002 rw = (struct reg_window32 *)rw->ins[6];
10003 }
10004 }
10005 printk("Instruction DUMP:");
10006 instruction_dump ((unsigned long *) regs->pc);
10007- if(regs->psr & PSR_PS)
10008+ if(regs->psr & PSR_PS) {
10009+ gr_handle_kernel_exploit();
10010 do_exit(SIGKILL);
10011+ }
10012 do_exit(SIGSEGV);
10013 }
10014
10015diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10016index 4ced92f..965eeed 100644
10017--- a/arch/sparc/kernel/traps_64.c
10018+++ b/arch/sparc/kernel/traps_64.c
10019@@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10020 i + 1,
10021 p->trapstack[i].tstate, p->trapstack[i].tpc,
10022 p->trapstack[i].tnpc, p->trapstack[i].tt);
10023- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10024+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10025 }
10026 }
10027
10028@@ -97,6 +97,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10029
10030 lvl -= 0x100;
10031 if (regs->tstate & TSTATE_PRIV) {
10032+
10033+#ifdef CONFIG_PAX_REFCOUNT
10034+ if (lvl == 6)
10035+ pax_report_refcount_overflow(regs);
10036+#endif
10037+
10038 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10039 die_if_kernel(buffer, regs);
10040 }
10041@@ -115,11 +121,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10042 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10043 {
10044 char buffer[32];
10045-
10046+
10047 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10048 0, lvl, SIGTRAP) == NOTIFY_STOP)
10049 return;
10050
10051+#ifdef CONFIG_PAX_REFCOUNT
10052+ if (lvl == 6)
10053+ pax_report_refcount_overflow(regs);
10054+#endif
10055+
10056 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10057
10058 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10059@@ -1149,7 +1160,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10060 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10061 printk("%s" "ERROR(%d): ",
10062 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10063- printk("TPC<%pS>\n", (void *) regs->tpc);
10064+ printk("TPC<%pA>\n", (void *) regs->tpc);
10065 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10066 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10067 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10068@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10069 smp_processor_id(),
10070 (type & 0x1) ? 'I' : 'D',
10071 regs->tpc);
10072- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
10073+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
10074 panic("Irrecoverable Cheetah+ parity error.");
10075 }
10076
10077@@ -1764,7 +1775,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10078 smp_processor_id(),
10079 (type & 0x1) ? 'I' : 'D',
10080 regs->tpc);
10081- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
10082+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
10083 }
10084
10085 struct sun4v_error_entry {
10086@@ -1837,8 +1848,8 @@ struct sun4v_error_entry {
10087 /*0x38*/u64 reserved_5;
10088 };
10089
10090-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10091-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10092+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10093+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10094
10095 static const char *sun4v_err_type_to_str(u8 type)
10096 {
10097@@ -1930,7 +1941,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
10098 }
10099
10100 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10101- int cpu, const char *pfx, atomic_t *ocnt)
10102+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
10103 {
10104 u64 *raw_ptr = (u64 *) ent;
10105 u32 attrs;
10106@@ -1988,8 +1999,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10107
10108 show_regs(regs);
10109
10110- if ((cnt = atomic_read(ocnt)) != 0) {
10111- atomic_set(ocnt, 0);
10112+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
10113+ atomic_set_unchecked(ocnt, 0);
10114 wmb();
10115 printk("%s: Queue overflowed %d times.\n",
10116 pfx, cnt);
10117@@ -2046,7 +2057,7 @@ out:
10118 */
10119 void sun4v_resum_overflow(struct pt_regs *regs)
10120 {
10121- atomic_inc(&sun4v_resum_oflow_cnt);
10122+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
10123 }
10124
10125 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
10126@@ -2099,7 +2110,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
10127 /* XXX Actually even this can make not that much sense. Perhaps
10128 * XXX we should just pull the plug and panic directly from here?
10129 */
10130- atomic_inc(&sun4v_nonresum_oflow_cnt);
10131+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
10132 }
10133
10134 unsigned long sun4v_err_itlb_vaddr;
10135@@ -2114,9 +2125,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
10136
10137 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
10138 regs->tpc, tl);
10139- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
10140+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
10141 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10142- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
10143+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
10144 (void *) regs->u_regs[UREG_I7]);
10145 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
10146 "pte[%lx] error[%lx]\n",
10147@@ -2138,9 +2149,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
10148
10149 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
10150 regs->tpc, tl);
10151- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
10152+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
10153 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10154- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
10155+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
10156 (void *) regs->u_regs[UREG_I7]);
10157 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
10158 "pte[%lx] error[%lx]\n",
10159@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10160 fp = (unsigned long)sf->fp + STACK_BIAS;
10161 }
10162
10163- printk(" [%016lx] %pS\n", pc, (void *) pc);
10164+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10165 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10166 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
10167 int index = tsk->curr_ret_stack;
10168 if (tsk->ret_stack && index >= graph) {
10169 pc = tsk->ret_stack[index - graph].ret;
10170- printk(" [%016lx] %pS\n", pc, (void *) pc);
10171+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10172 graph++;
10173 }
10174 }
10175@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
10176 return (struct reg_window *) (fp + STACK_BIAS);
10177 }
10178
10179+extern void gr_handle_kernel_exploit(void);
10180+
10181 void die_if_kernel(char *str, struct pt_regs *regs)
10182 {
10183 static int die_counter;
10184@@ -2411,7 +2424,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10185 while (rw &&
10186 count++ < 30 &&
10187 kstack_valid(tp, (unsigned long) rw)) {
10188- printk("Caller[%016lx]: %pS\n", rw->ins[7],
10189+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
10190 (void *) rw->ins[7]);
10191
10192 rw = kernel_stack_up(rw);
10193@@ -2424,8 +2437,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10194 }
10195 user_instruction_dump ((unsigned int __user *) regs->tpc);
10196 }
10197- if (regs->tstate & TSTATE_PRIV)
10198+ if (regs->tstate & TSTATE_PRIV) {
10199+ gr_handle_kernel_exploit();
10200 do_exit(SIGKILL);
10201+ }
10202 do_exit(SIGSEGV);
10203 }
10204 EXPORT_SYMBOL(die_if_kernel);
10205diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
10206index 3c1a7cb..73e1923 100644
10207--- a/arch/sparc/kernel/unaligned_64.c
10208+++ b/arch/sparc/kernel/unaligned_64.c
10209@@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs *regs)
10210 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
10211
10212 if (__ratelimit(&ratelimit)) {
10213- printk("Kernel unaligned access at TPC[%lx] %pS\n",
10214+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
10215 regs->tpc, (void *) regs->tpc);
10216 }
10217 }
10218diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
10219index dbe119b..089c7c1 100644
10220--- a/arch/sparc/lib/Makefile
10221+++ b/arch/sparc/lib/Makefile
10222@@ -2,7 +2,7 @@
10223 #
10224
10225 asflags-y := -ansi -DST_DIV0=0x02
10226-ccflags-y := -Werror
10227+#ccflags-y := -Werror
10228
10229 lib-$(CONFIG_SPARC32) += ashrdi3.o
10230 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
10231diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
10232index 85c233d..68500e0 100644
10233--- a/arch/sparc/lib/atomic_64.S
10234+++ b/arch/sparc/lib/atomic_64.S
10235@@ -17,7 +17,12 @@
10236 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10237 BACKOFF_SETUP(%o2)
10238 1: lduw [%o1], %g1
10239- add %g1, %o0, %g7
10240+ addcc %g1, %o0, %g7
10241+
10242+#ifdef CONFIG_PAX_REFCOUNT
10243+ tvs %icc, 6
10244+#endif
10245+
10246 cas [%o1], %g1, %g7
10247 cmp %g1, %g7
10248 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10249@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10250 2: BACKOFF_SPIN(%o2, %o3, 1b)
10251 ENDPROC(atomic_add)
10252
10253+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10254+ BACKOFF_SETUP(%o2)
10255+1: lduw [%o1], %g1
10256+ add %g1, %o0, %g7
10257+ cas [%o1], %g1, %g7
10258+ cmp %g1, %g7
10259+ bne,pn %icc, 2f
10260+ nop
10261+ retl
10262+ nop
10263+2: BACKOFF_SPIN(%o2, %o3, 1b)
10264+ENDPROC(atomic_add_unchecked)
10265+
10266 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10267 BACKOFF_SETUP(%o2)
10268 1: lduw [%o1], %g1
10269- sub %g1, %o0, %g7
10270+ subcc %g1, %o0, %g7
10271+
10272+#ifdef CONFIG_PAX_REFCOUNT
10273+ tvs %icc, 6
10274+#endif
10275+
10276 cas [%o1], %g1, %g7
10277 cmp %g1, %g7
10278 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10279@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10280 2: BACKOFF_SPIN(%o2, %o3, 1b)
10281 ENDPROC(atomic_sub)
10282
10283+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10284+ BACKOFF_SETUP(%o2)
10285+1: lduw [%o1], %g1
10286+ sub %g1, %o0, %g7
10287+ cas [%o1], %g1, %g7
10288+ cmp %g1, %g7
10289+ bne,pn %icc, 2f
10290+ nop
10291+ retl
10292+ nop
10293+2: BACKOFF_SPIN(%o2, %o3, 1b)
10294+ENDPROC(atomic_sub_unchecked)
10295+
10296 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10297 BACKOFF_SETUP(%o2)
10298 1: lduw [%o1], %g1
10299- add %g1, %o0, %g7
10300+ addcc %g1, %o0, %g7
10301+
10302+#ifdef CONFIG_PAX_REFCOUNT
10303+ tvs %icc, 6
10304+#endif
10305+
10306 cas [%o1], %g1, %g7
10307 cmp %g1, %g7
10308 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10309@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10310 2: BACKOFF_SPIN(%o2, %o3, 1b)
10311 ENDPROC(atomic_add_ret)
10312
10313+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10314+ BACKOFF_SETUP(%o2)
10315+1: lduw [%o1], %g1
10316+ addcc %g1, %o0, %g7
10317+ cas [%o1], %g1, %g7
10318+ cmp %g1, %g7
10319+ bne,pn %icc, 2f
10320+ add %g7, %o0, %g7
10321+ sra %g7, 0, %o0
10322+ retl
10323+ nop
10324+2: BACKOFF_SPIN(%o2, %o3, 1b)
10325+ENDPROC(atomic_add_ret_unchecked)
10326+
10327 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10328 BACKOFF_SETUP(%o2)
10329 1: lduw [%o1], %g1
10330- sub %g1, %o0, %g7
10331+ subcc %g1, %o0, %g7
10332+
10333+#ifdef CONFIG_PAX_REFCOUNT
10334+ tvs %icc, 6
10335+#endif
10336+
10337 cas [%o1], %g1, %g7
10338 cmp %g1, %g7
10339 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10340@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
10341 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10342 BACKOFF_SETUP(%o2)
10343 1: ldx [%o1], %g1
10344- add %g1, %o0, %g7
10345+ addcc %g1, %o0, %g7
10346+
10347+#ifdef CONFIG_PAX_REFCOUNT
10348+ tvs %xcc, 6
10349+#endif
10350+
10351 casx [%o1], %g1, %g7
10352 cmp %g1, %g7
10353 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10354@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10355 2: BACKOFF_SPIN(%o2, %o3, 1b)
10356 ENDPROC(atomic64_add)
10357
10358+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10359+ BACKOFF_SETUP(%o2)
10360+1: ldx [%o1], %g1
10361+ addcc %g1, %o0, %g7
10362+ casx [%o1], %g1, %g7
10363+ cmp %g1, %g7
10364+ bne,pn %xcc, 2f
10365+ nop
10366+ retl
10367+ nop
10368+2: BACKOFF_SPIN(%o2, %o3, 1b)
10369+ENDPROC(atomic64_add_unchecked)
10370+
10371 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10372 BACKOFF_SETUP(%o2)
10373 1: ldx [%o1], %g1
10374- sub %g1, %o0, %g7
10375+ subcc %g1, %o0, %g7
10376+
10377+#ifdef CONFIG_PAX_REFCOUNT
10378+ tvs %xcc, 6
10379+#endif
10380+
10381 casx [%o1], %g1, %g7
10382 cmp %g1, %g7
10383 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10384@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10385 2: BACKOFF_SPIN(%o2, %o3, 1b)
10386 ENDPROC(atomic64_sub)
10387
10388+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10389+ BACKOFF_SETUP(%o2)
10390+1: ldx [%o1], %g1
10391+ subcc %g1, %o0, %g7
10392+ casx [%o1], %g1, %g7
10393+ cmp %g1, %g7
10394+ bne,pn %xcc, 2f
10395+ nop
10396+ retl
10397+ nop
10398+2: BACKOFF_SPIN(%o2, %o3, 1b)
10399+ENDPROC(atomic64_sub_unchecked)
10400+
10401 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10402 BACKOFF_SETUP(%o2)
10403 1: ldx [%o1], %g1
10404- add %g1, %o0, %g7
10405+ addcc %g1, %o0, %g7
10406+
10407+#ifdef CONFIG_PAX_REFCOUNT
10408+ tvs %xcc, 6
10409+#endif
10410+
10411 casx [%o1], %g1, %g7
10412 cmp %g1, %g7
10413 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10414@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10415 2: BACKOFF_SPIN(%o2, %o3, 1b)
10416 ENDPROC(atomic64_add_ret)
10417
10418+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10419+ BACKOFF_SETUP(%o2)
10420+1: ldx [%o1], %g1
10421+ addcc %g1, %o0, %g7
10422+ casx [%o1], %g1, %g7
10423+ cmp %g1, %g7
10424+ bne,pn %xcc, 2f
10425+ add %g7, %o0, %g7
10426+ mov %g7, %o0
10427+ retl
10428+ nop
10429+2: BACKOFF_SPIN(%o2, %o3, 1b)
10430+ENDPROC(atomic64_add_ret_unchecked)
10431+
10432 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10433 BACKOFF_SETUP(%o2)
10434 1: ldx [%o1], %g1
10435- sub %g1, %o0, %g7
10436+ subcc %g1, %o0, %g7
10437+
10438+#ifdef CONFIG_PAX_REFCOUNT
10439+ tvs %xcc, 6
10440+#endif
10441+
10442 casx [%o1], %g1, %g7
10443 cmp %g1, %g7
10444 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10445diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
10446index 323335b..ed85ea2 100644
10447--- a/arch/sparc/lib/ksyms.c
10448+++ b/arch/sparc/lib/ksyms.c
10449@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
10450
10451 /* Atomic counter implementation. */
10452 EXPORT_SYMBOL(atomic_add);
10453+EXPORT_SYMBOL(atomic_add_unchecked);
10454 EXPORT_SYMBOL(atomic_add_ret);
10455+EXPORT_SYMBOL(atomic_add_ret_unchecked);
10456 EXPORT_SYMBOL(atomic_sub);
10457+EXPORT_SYMBOL(atomic_sub_unchecked);
10458 EXPORT_SYMBOL(atomic_sub_ret);
10459 EXPORT_SYMBOL(atomic64_add);
10460+EXPORT_SYMBOL(atomic64_add_unchecked);
10461 EXPORT_SYMBOL(atomic64_add_ret);
10462+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
10463 EXPORT_SYMBOL(atomic64_sub);
10464+EXPORT_SYMBOL(atomic64_sub_unchecked);
10465 EXPORT_SYMBOL(atomic64_sub_ret);
10466 EXPORT_SYMBOL(atomic64_dec_if_positive);
10467
10468diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
10469index 30c3ecc..736f015 100644
10470--- a/arch/sparc/mm/Makefile
10471+++ b/arch/sparc/mm/Makefile
10472@@ -2,7 +2,7 @@
10473 #
10474
10475 asflags-y := -ansi
10476-ccflags-y := -Werror
10477+#ccflags-y := -Werror
10478
10479 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
10480 obj-y += fault_$(BITS).o
10481diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
10482index 59dbd46..1dd7f5e 100644
10483--- a/arch/sparc/mm/fault_32.c
10484+++ b/arch/sparc/mm/fault_32.c
10485@@ -21,6 +21,9 @@
10486 #include <linux/perf_event.h>
10487 #include <linux/interrupt.h>
10488 #include <linux/kdebug.h>
10489+#include <linux/slab.h>
10490+#include <linux/pagemap.h>
10491+#include <linux/compiler.h>
10492
10493 #include <asm/page.h>
10494 #include <asm/pgtable.h>
10495@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
10496 return safe_compute_effective_address(regs, insn);
10497 }
10498
10499+#ifdef CONFIG_PAX_PAGEEXEC
10500+#ifdef CONFIG_PAX_DLRESOLVE
10501+static void pax_emuplt_close(struct vm_area_struct *vma)
10502+{
10503+ vma->vm_mm->call_dl_resolve = 0UL;
10504+}
10505+
10506+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10507+{
10508+ unsigned int *kaddr;
10509+
10510+ vmf->page = alloc_page(GFP_HIGHUSER);
10511+ if (!vmf->page)
10512+ return VM_FAULT_OOM;
10513+
10514+ kaddr = kmap(vmf->page);
10515+ memset(kaddr, 0, PAGE_SIZE);
10516+ kaddr[0] = 0x9DE3BFA8U; /* save */
10517+ flush_dcache_page(vmf->page);
10518+ kunmap(vmf->page);
10519+ return VM_FAULT_MAJOR;
10520+}
10521+
10522+static const struct vm_operations_struct pax_vm_ops = {
10523+ .close = pax_emuplt_close,
10524+ .fault = pax_emuplt_fault
10525+};
10526+
10527+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10528+{
10529+ int ret;
10530+
10531+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10532+ vma->vm_mm = current->mm;
10533+ vma->vm_start = addr;
10534+ vma->vm_end = addr + PAGE_SIZE;
10535+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10536+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10537+ vma->vm_ops = &pax_vm_ops;
10538+
10539+ ret = insert_vm_struct(current->mm, vma);
10540+ if (ret)
10541+ return ret;
10542+
10543+ ++current->mm->total_vm;
10544+ return 0;
10545+}
10546+#endif
10547+
10548+/*
10549+ * PaX: decide what to do with offenders (regs->pc = fault address)
10550+ *
10551+ * returns 1 when task should be killed
10552+ * 2 when patched PLT trampoline was detected
10553+ * 3 when unpatched PLT trampoline was detected
10554+ */
10555+static int pax_handle_fetch_fault(struct pt_regs *regs)
10556+{
10557+
10558+#ifdef CONFIG_PAX_EMUPLT
10559+ int err;
10560+
10561+ do { /* PaX: patched PLT emulation #1 */
10562+ unsigned int sethi1, sethi2, jmpl;
10563+
10564+ err = get_user(sethi1, (unsigned int *)regs->pc);
10565+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
10566+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
10567+
10568+ if (err)
10569+ break;
10570+
10571+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10572+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10573+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10574+ {
10575+ unsigned int addr;
10576+
10577+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10578+ addr = regs->u_regs[UREG_G1];
10579+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10580+ regs->pc = addr;
10581+ regs->npc = addr+4;
10582+ return 2;
10583+ }
10584+ } while (0);
10585+
10586+ do { /* PaX: patched PLT emulation #2 */
10587+ unsigned int ba;
10588+
10589+ err = get_user(ba, (unsigned int *)regs->pc);
10590+
10591+ if (err)
10592+ break;
10593+
10594+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10595+ unsigned int addr;
10596+
10597+ if ((ba & 0xFFC00000U) == 0x30800000U)
10598+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10599+ else
10600+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10601+ regs->pc = addr;
10602+ regs->npc = addr+4;
10603+ return 2;
10604+ }
10605+ } while (0);
10606+
10607+ do { /* PaX: patched PLT emulation #3 */
10608+ unsigned int sethi, bajmpl, nop;
10609+
10610+ err = get_user(sethi, (unsigned int *)regs->pc);
10611+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
10612+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10613+
10614+ if (err)
10615+ break;
10616+
10617+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10618+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10619+ nop == 0x01000000U)
10620+ {
10621+ unsigned int addr;
10622+
10623+ addr = (sethi & 0x003FFFFFU) << 10;
10624+ regs->u_regs[UREG_G1] = addr;
10625+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10626+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10627+ else
10628+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10629+ regs->pc = addr;
10630+ regs->npc = addr+4;
10631+ return 2;
10632+ }
10633+ } while (0);
10634+
10635+ do { /* PaX: unpatched PLT emulation step 1 */
10636+ unsigned int sethi, ba, nop;
10637+
10638+ err = get_user(sethi, (unsigned int *)regs->pc);
10639+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
10640+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
10641+
10642+ if (err)
10643+ break;
10644+
10645+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10646+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10647+ nop == 0x01000000U)
10648+ {
10649+ unsigned int addr, save, call;
10650+
10651+ if ((ba & 0xFFC00000U) == 0x30800000U)
10652+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10653+ else
10654+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10655+
10656+ err = get_user(save, (unsigned int *)addr);
10657+ err |= get_user(call, (unsigned int *)(addr+4));
10658+ err |= get_user(nop, (unsigned int *)(addr+8));
10659+ if (err)
10660+ break;
10661+
10662+#ifdef CONFIG_PAX_DLRESOLVE
10663+ if (save == 0x9DE3BFA8U &&
10664+ (call & 0xC0000000U) == 0x40000000U &&
10665+ nop == 0x01000000U)
10666+ {
10667+ struct vm_area_struct *vma;
10668+ unsigned long call_dl_resolve;
10669+
10670+ down_read(&current->mm->mmap_sem);
10671+ call_dl_resolve = current->mm->call_dl_resolve;
10672+ up_read(&current->mm->mmap_sem);
10673+ if (likely(call_dl_resolve))
10674+ goto emulate;
10675+
10676+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10677+
10678+ down_write(&current->mm->mmap_sem);
10679+ if (current->mm->call_dl_resolve) {
10680+ call_dl_resolve = current->mm->call_dl_resolve;
10681+ up_write(&current->mm->mmap_sem);
10682+ if (vma)
10683+ kmem_cache_free(vm_area_cachep, vma);
10684+ goto emulate;
10685+ }
10686+
10687+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10688+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10689+ up_write(&current->mm->mmap_sem);
10690+ if (vma)
10691+ kmem_cache_free(vm_area_cachep, vma);
10692+ return 1;
10693+ }
10694+
10695+ if (pax_insert_vma(vma, call_dl_resolve)) {
10696+ up_write(&current->mm->mmap_sem);
10697+ kmem_cache_free(vm_area_cachep, vma);
10698+ return 1;
10699+ }
10700+
10701+ current->mm->call_dl_resolve = call_dl_resolve;
10702+ up_write(&current->mm->mmap_sem);
10703+
10704+emulate:
10705+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10706+ regs->pc = call_dl_resolve;
10707+ regs->npc = addr+4;
10708+ return 3;
10709+ }
10710+#endif
10711+
10712+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10713+ if ((save & 0xFFC00000U) == 0x05000000U &&
10714+ (call & 0xFFFFE000U) == 0x85C0A000U &&
10715+ nop == 0x01000000U)
10716+ {
10717+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10718+ regs->u_regs[UREG_G2] = addr + 4;
10719+ addr = (save & 0x003FFFFFU) << 10;
10720+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10721+ regs->pc = addr;
10722+ regs->npc = addr+4;
10723+ return 3;
10724+ }
10725+ }
10726+ } while (0);
10727+
10728+ do { /* PaX: unpatched PLT emulation step 2 */
10729+ unsigned int save, call, nop;
10730+
10731+ err = get_user(save, (unsigned int *)(regs->pc-4));
10732+ err |= get_user(call, (unsigned int *)regs->pc);
10733+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
10734+ if (err)
10735+ break;
10736+
10737+ if (save == 0x9DE3BFA8U &&
10738+ (call & 0xC0000000U) == 0x40000000U &&
10739+ nop == 0x01000000U)
10740+ {
10741+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
10742+
10743+ regs->u_regs[UREG_RETPC] = regs->pc;
10744+ regs->pc = dl_resolve;
10745+ regs->npc = dl_resolve+4;
10746+ return 3;
10747+ }
10748+ } while (0);
10749+#endif
10750+
10751+ return 1;
10752+}
10753+
10754+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10755+{
10756+ unsigned long i;
10757+
10758+ printk(KERN_ERR "PAX: bytes at PC: ");
10759+ for (i = 0; i < 8; i++) {
10760+ unsigned int c;
10761+ if (get_user(c, (unsigned int *)pc+i))
10762+ printk(KERN_CONT "???????? ");
10763+ else
10764+ printk(KERN_CONT "%08x ", c);
10765+ }
10766+ printk("\n");
10767+}
10768+#endif
10769+
10770 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
10771 int text_fault)
10772 {
10773@@ -229,6 +503,24 @@ good_area:
10774 if (!(vma->vm_flags & VM_WRITE))
10775 goto bad_area;
10776 } else {
10777+
10778+#ifdef CONFIG_PAX_PAGEEXEC
10779+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
10780+ up_read(&mm->mmap_sem);
10781+ switch (pax_handle_fetch_fault(regs)) {
10782+
10783+#ifdef CONFIG_PAX_EMUPLT
10784+ case 2:
10785+ case 3:
10786+ return;
10787+#endif
10788+
10789+ }
10790+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
10791+ do_group_exit(SIGKILL);
10792+ }
10793+#endif
10794+
10795 /* Allow reads even for write-only mappings */
10796 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
10797 goto bad_area;
10798diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
10799index 69bb818..6ca35c8 100644
10800--- a/arch/sparc/mm/fault_64.c
10801+++ b/arch/sparc/mm/fault_64.c
10802@@ -22,6 +22,9 @@
10803 #include <linux/kdebug.h>
10804 #include <linux/percpu.h>
10805 #include <linux/context_tracking.h>
10806+#include <linux/slab.h>
10807+#include <linux/pagemap.h>
10808+#include <linux/compiler.h>
10809
10810 #include <asm/page.h>
10811 #include <asm/pgtable.h>
10812@@ -75,7 +78,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
10813 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
10814 regs->tpc);
10815 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
10816- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
10817+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
10818 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
10819 dump_stack();
10820 unhandled_fault(regs->tpc, current, regs);
10821@@ -271,6 +274,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
10822 show_regs(regs);
10823 }
10824
10825+#ifdef CONFIG_PAX_PAGEEXEC
10826+#ifdef CONFIG_PAX_DLRESOLVE
10827+static void pax_emuplt_close(struct vm_area_struct *vma)
10828+{
10829+ vma->vm_mm->call_dl_resolve = 0UL;
10830+}
10831+
10832+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10833+{
10834+ unsigned int *kaddr;
10835+
10836+ vmf->page = alloc_page(GFP_HIGHUSER);
10837+ if (!vmf->page)
10838+ return VM_FAULT_OOM;
10839+
10840+ kaddr = kmap(vmf->page);
10841+ memset(kaddr, 0, PAGE_SIZE);
10842+ kaddr[0] = 0x9DE3BFA8U; /* save */
10843+ flush_dcache_page(vmf->page);
10844+ kunmap(vmf->page);
10845+ return VM_FAULT_MAJOR;
10846+}
10847+
10848+static const struct vm_operations_struct pax_vm_ops = {
10849+ .close = pax_emuplt_close,
10850+ .fault = pax_emuplt_fault
10851+};
10852+
10853+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10854+{
10855+ int ret;
10856+
10857+ INIT_LIST_HEAD(&vma->anon_vma_chain);
10858+ vma->vm_mm = current->mm;
10859+ vma->vm_start = addr;
10860+ vma->vm_end = addr + PAGE_SIZE;
10861+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10862+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10863+ vma->vm_ops = &pax_vm_ops;
10864+
10865+ ret = insert_vm_struct(current->mm, vma);
10866+ if (ret)
10867+ return ret;
10868+
10869+ ++current->mm->total_vm;
10870+ return 0;
10871+}
10872+#endif
10873+
10874+/*
10875+ * PaX: decide what to do with offenders (regs->tpc = fault address)
10876+ *
10877+ * returns 1 when task should be killed
10878+ * 2 when patched PLT trampoline was detected
10879+ * 3 when unpatched PLT trampoline was detected
10880+ */
10881+static int pax_handle_fetch_fault(struct pt_regs *regs)
10882+{
10883+
10884+#ifdef CONFIG_PAX_EMUPLT
10885+ int err;
10886+
10887+ do { /* PaX: patched PLT emulation #1 */
10888+ unsigned int sethi1, sethi2, jmpl;
10889+
10890+ err = get_user(sethi1, (unsigned int *)regs->tpc);
10891+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
10892+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
10893+
10894+ if (err)
10895+ break;
10896+
10897+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10898+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
10899+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
10900+ {
10901+ unsigned long addr;
10902+
10903+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10904+ addr = regs->u_regs[UREG_G1];
10905+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10906+
10907+ if (test_thread_flag(TIF_32BIT))
10908+ addr &= 0xFFFFFFFFUL;
10909+
10910+ regs->tpc = addr;
10911+ regs->tnpc = addr+4;
10912+ return 2;
10913+ }
10914+ } while (0);
10915+
10916+ do { /* PaX: patched PLT emulation #2 */
10917+ unsigned int ba;
10918+
10919+ err = get_user(ba, (unsigned int *)regs->tpc);
10920+
10921+ if (err)
10922+ break;
10923+
10924+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10925+ unsigned long addr;
10926+
10927+ if ((ba & 0xFFC00000U) == 0x30800000U)
10928+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10929+ else
10930+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10931+
10932+ if (test_thread_flag(TIF_32BIT))
10933+ addr &= 0xFFFFFFFFUL;
10934+
10935+ regs->tpc = addr;
10936+ regs->tnpc = addr+4;
10937+ return 2;
10938+ }
10939+ } while (0);
10940+
10941+ do { /* PaX: patched PLT emulation #3 */
10942+ unsigned int sethi, bajmpl, nop;
10943+
10944+ err = get_user(sethi, (unsigned int *)regs->tpc);
10945+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
10946+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10947+
10948+ if (err)
10949+ break;
10950+
10951+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10952+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10953+ nop == 0x01000000U)
10954+ {
10955+ unsigned long addr;
10956+
10957+ addr = (sethi & 0x003FFFFFU) << 10;
10958+ regs->u_regs[UREG_G1] = addr;
10959+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10960+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10961+ else
10962+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10963+
10964+ if (test_thread_flag(TIF_32BIT))
10965+ addr &= 0xFFFFFFFFUL;
10966+
10967+ regs->tpc = addr;
10968+ regs->tnpc = addr+4;
10969+ return 2;
10970+ }
10971+ } while (0);
10972+
10973+ do { /* PaX: patched PLT emulation #4 */
10974+ unsigned int sethi, mov1, call, mov2;
10975+
10976+ err = get_user(sethi, (unsigned int *)regs->tpc);
10977+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
10978+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
10979+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
10980+
10981+ if (err)
10982+ break;
10983+
10984+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
10985+ mov1 == 0x8210000FU &&
10986+ (call & 0xC0000000U) == 0x40000000U &&
10987+ mov2 == 0x9E100001U)
10988+ {
10989+ unsigned long addr;
10990+
10991+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10992+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10993+
10994+ if (test_thread_flag(TIF_32BIT))
10995+ addr &= 0xFFFFFFFFUL;
10996+
10997+ regs->tpc = addr;
10998+ regs->tnpc = addr+4;
10999+ return 2;
11000+ }
11001+ } while (0);
11002+
11003+ do { /* PaX: patched PLT emulation #5 */
11004+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11005+
11006+ err = get_user(sethi, (unsigned int *)regs->tpc);
11007+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11008+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11009+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11010+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11011+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11012+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11013+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11014+
11015+ if (err)
11016+ break;
11017+
11018+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11019+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11020+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11021+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11022+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11023+ sllx == 0x83287020U &&
11024+ jmpl == 0x81C04005U &&
11025+ nop == 0x01000000U)
11026+ {
11027+ unsigned long addr;
11028+
11029+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11030+ regs->u_regs[UREG_G1] <<= 32;
11031+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11032+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11033+ regs->tpc = addr;
11034+ regs->tnpc = addr+4;
11035+ return 2;
11036+ }
11037+ } while (0);
11038+
11039+ do { /* PaX: patched PLT emulation #6 */
11040+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11041+
11042+ err = get_user(sethi, (unsigned int *)regs->tpc);
11043+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11044+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11045+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11046+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11047+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11048+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11049+
11050+ if (err)
11051+ break;
11052+
11053+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11054+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11055+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11056+ sllx == 0x83287020U &&
11057+ (or & 0xFFFFE000U) == 0x8A116000U &&
11058+ jmpl == 0x81C04005U &&
11059+ nop == 0x01000000U)
11060+ {
11061+ unsigned long addr;
11062+
11063+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11064+ regs->u_regs[UREG_G1] <<= 32;
11065+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11066+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11067+ regs->tpc = addr;
11068+ regs->tnpc = addr+4;
11069+ return 2;
11070+ }
11071+ } while (0);
11072+
11073+ do { /* PaX: unpatched PLT emulation step 1 */
11074+ unsigned int sethi, ba, nop;
11075+
11076+ err = get_user(sethi, (unsigned int *)regs->tpc);
11077+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11078+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11079+
11080+ if (err)
11081+ break;
11082+
11083+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11084+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11085+ nop == 0x01000000U)
11086+ {
11087+ unsigned long addr;
11088+ unsigned int save, call;
11089+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11090+
11091+ if ((ba & 0xFFC00000U) == 0x30800000U)
11092+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11093+ else
11094+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11095+
11096+ if (test_thread_flag(TIF_32BIT))
11097+ addr &= 0xFFFFFFFFUL;
11098+
11099+ err = get_user(save, (unsigned int *)addr);
11100+ err |= get_user(call, (unsigned int *)(addr+4));
11101+ err |= get_user(nop, (unsigned int *)(addr+8));
11102+ if (err)
11103+ break;
11104+
11105+#ifdef CONFIG_PAX_DLRESOLVE
11106+ if (save == 0x9DE3BFA8U &&
11107+ (call & 0xC0000000U) == 0x40000000U &&
11108+ nop == 0x01000000U)
11109+ {
11110+ struct vm_area_struct *vma;
11111+ unsigned long call_dl_resolve;
11112+
11113+ down_read(&current->mm->mmap_sem);
11114+ call_dl_resolve = current->mm->call_dl_resolve;
11115+ up_read(&current->mm->mmap_sem);
11116+ if (likely(call_dl_resolve))
11117+ goto emulate;
11118+
11119+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11120+
11121+ down_write(&current->mm->mmap_sem);
11122+ if (current->mm->call_dl_resolve) {
11123+ call_dl_resolve = current->mm->call_dl_resolve;
11124+ up_write(&current->mm->mmap_sem);
11125+ if (vma)
11126+ kmem_cache_free(vm_area_cachep, vma);
11127+ goto emulate;
11128+ }
11129+
11130+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11131+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11132+ up_write(&current->mm->mmap_sem);
11133+ if (vma)
11134+ kmem_cache_free(vm_area_cachep, vma);
11135+ return 1;
11136+ }
11137+
11138+ if (pax_insert_vma(vma, call_dl_resolve)) {
11139+ up_write(&current->mm->mmap_sem);
11140+ kmem_cache_free(vm_area_cachep, vma);
11141+ return 1;
11142+ }
11143+
11144+ current->mm->call_dl_resolve = call_dl_resolve;
11145+ up_write(&current->mm->mmap_sem);
11146+
11147+emulate:
11148+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11149+ regs->tpc = call_dl_resolve;
11150+ regs->tnpc = addr+4;
11151+ return 3;
11152+ }
11153+#endif
11154+
11155+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11156+ if ((save & 0xFFC00000U) == 0x05000000U &&
11157+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11158+ nop == 0x01000000U)
11159+ {
11160+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11161+ regs->u_regs[UREG_G2] = addr + 4;
11162+ addr = (save & 0x003FFFFFU) << 10;
11163+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11164+
11165+ if (test_thread_flag(TIF_32BIT))
11166+ addr &= 0xFFFFFFFFUL;
11167+
11168+ regs->tpc = addr;
11169+ regs->tnpc = addr+4;
11170+ return 3;
11171+ }
11172+
11173+ /* PaX: 64-bit PLT stub */
11174+ err = get_user(sethi1, (unsigned int *)addr);
11175+ err |= get_user(sethi2, (unsigned int *)(addr+4));
11176+ err |= get_user(or1, (unsigned int *)(addr+8));
11177+ err |= get_user(or2, (unsigned int *)(addr+12));
11178+ err |= get_user(sllx, (unsigned int *)(addr+16));
11179+ err |= get_user(add, (unsigned int *)(addr+20));
11180+ err |= get_user(jmpl, (unsigned int *)(addr+24));
11181+ err |= get_user(nop, (unsigned int *)(addr+28));
11182+ if (err)
11183+ break;
11184+
11185+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
11186+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11187+ (or1 & 0xFFFFE000U) == 0x88112000U &&
11188+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11189+ sllx == 0x89293020U &&
11190+ add == 0x8A010005U &&
11191+ jmpl == 0x89C14000U &&
11192+ nop == 0x01000000U)
11193+ {
11194+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11195+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11196+ regs->u_regs[UREG_G4] <<= 32;
11197+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11198+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
11199+ regs->u_regs[UREG_G4] = addr + 24;
11200+ addr = regs->u_regs[UREG_G5];
11201+ regs->tpc = addr;
11202+ regs->tnpc = addr+4;
11203+ return 3;
11204+ }
11205+ }
11206+ } while (0);
11207+
11208+#ifdef CONFIG_PAX_DLRESOLVE
11209+ do { /* PaX: unpatched PLT emulation step 2 */
11210+ unsigned int save, call, nop;
11211+
11212+ err = get_user(save, (unsigned int *)(regs->tpc-4));
11213+ err |= get_user(call, (unsigned int *)regs->tpc);
11214+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
11215+ if (err)
11216+ break;
11217+
11218+ if (save == 0x9DE3BFA8U &&
11219+ (call & 0xC0000000U) == 0x40000000U &&
11220+ nop == 0x01000000U)
11221+ {
11222+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11223+
11224+ if (test_thread_flag(TIF_32BIT))
11225+ dl_resolve &= 0xFFFFFFFFUL;
11226+
11227+ regs->u_regs[UREG_RETPC] = regs->tpc;
11228+ regs->tpc = dl_resolve;
11229+ regs->tnpc = dl_resolve+4;
11230+ return 3;
11231+ }
11232+ } while (0);
11233+#endif
11234+
11235+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
11236+ unsigned int sethi, ba, nop;
11237+
11238+ err = get_user(sethi, (unsigned int *)regs->tpc);
11239+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11240+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11241+
11242+ if (err)
11243+ break;
11244+
11245+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11246+ (ba & 0xFFF00000U) == 0x30600000U &&
11247+ nop == 0x01000000U)
11248+ {
11249+ unsigned long addr;
11250+
11251+ addr = (sethi & 0x003FFFFFU) << 10;
11252+ regs->u_regs[UREG_G1] = addr;
11253+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11254+
11255+ if (test_thread_flag(TIF_32BIT))
11256+ addr &= 0xFFFFFFFFUL;
11257+
11258+ regs->tpc = addr;
11259+ regs->tnpc = addr+4;
11260+ return 2;
11261+ }
11262+ } while (0);
11263+
11264+#endif
11265+
11266+ return 1;
11267+}
11268+
11269+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11270+{
11271+ unsigned long i;
11272+
11273+ printk(KERN_ERR "PAX: bytes at PC: ");
11274+ for (i = 0; i < 8; i++) {
11275+ unsigned int c;
11276+ if (get_user(c, (unsigned int *)pc+i))
11277+ printk(KERN_CONT "???????? ");
11278+ else
11279+ printk(KERN_CONT "%08x ", c);
11280+ }
11281+ printk("\n");
11282+}
11283+#endif
11284+
11285 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
11286 {
11287 enum ctx_state prev_state = exception_enter();
11288@@ -344,6 +807,29 @@ retry:
11289 if (!vma)
11290 goto bad_area;
11291
11292+#ifdef CONFIG_PAX_PAGEEXEC
11293+ /* PaX: detect ITLB misses on non-exec pages */
11294+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
11295+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
11296+ {
11297+ if (address != regs->tpc)
11298+ goto good_area;
11299+
11300+ up_read(&mm->mmap_sem);
11301+ switch (pax_handle_fetch_fault(regs)) {
11302+
11303+#ifdef CONFIG_PAX_EMUPLT
11304+ case 2:
11305+ case 3:
11306+ return;
11307+#endif
11308+
11309+ }
11310+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
11311+ do_group_exit(SIGKILL);
11312+ }
11313+#endif
11314+
11315 /* Pure DTLB misses do not tell us whether the fault causing
11316 * load/store/atomic was a write or not, it only says that there
11317 * was no match. So in such a case we (carefully) read the
11318diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
11319index 3096317..a7b7654 100644
11320--- a/arch/sparc/mm/hugetlbpage.c
11321+++ b/arch/sparc/mm/hugetlbpage.c
11322@@ -26,7 +26,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11323 unsigned long addr,
11324 unsigned long len,
11325 unsigned long pgoff,
11326- unsigned long flags)
11327+ unsigned long flags,
11328+ unsigned long offset)
11329 {
11330 unsigned long task_size = TASK_SIZE;
11331 struct vm_unmapped_area_info info;
11332@@ -36,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11333
11334 info.flags = 0;
11335 info.length = len;
11336- info.low_limit = TASK_UNMAPPED_BASE;
11337+ info.low_limit = mm->mmap_base;
11338 info.high_limit = min(task_size, VA_EXCLUDE_START);
11339 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11340 info.align_offset = 0;
11341+ info.threadstack_offset = offset;
11342 addr = vm_unmapped_area(&info);
11343
11344 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11345 VM_BUG_ON(addr != -ENOMEM);
11346 info.low_limit = VA_EXCLUDE_END;
11347+
11348+#ifdef CONFIG_PAX_RANDMMAP
11349+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11350+ info.low_limit += mm->delta_mmap;
11351+#endif
11352+
11353 info.high_limit = task_size;
11354 addr = vm_unmapped_area(&info);
11355 }
11356@@ -56,7 +64,8 @@ static unsigned long
11357 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11358 const unsigned long len,
11359 const unsigned long pgoff,
11360- const unsigned long flags)
11361+ const unsigned long flags,
11362+ const unsigned long offset)
11363 {
11364 struct mm_struct *mm = current->mm;
11365 unsigned long addr = addr0;
11366@@ -71,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11367 info.high_limit = mm->mmap_base;
11368 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11369 info.align_offset = 0;
11370+ info.threadstack_offset = offset;
11371 addr = vm_unmapped_area(&info);
11372
11373 /*
11374@@ -83,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11375 VM_BUG_ON(addr != -ENOMEM);
11376 info.flags = 0;
11377 info.low_limit = TASK_UNMAPPED_BASE;
11378+
11379+#ifdef CONFIG_PAX_RANDMMAP
11380+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11381+ info.low_limit += mm->delta_mmap;
11382+#endif
11383+
11384 info.high_limit = STACK_TOP32;
11385 addr = vm_unmapped_area(&info);
11386 }
11387@@ -97,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11388 struct mm_struct *mm = current->mm;
11389 struct vm_area_struct *vma;
11390 unsigned long task_size = TASK_SIZE;
11391+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
11392
11393 if (test_thread_flag(TIF_32BIT))
11394 task_size = STACK_TOP32;
11395@@ -112,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11396 return addr;
11397 }
11398
11399+#ifdef CONFIG_PAX_RANDMMAP
11400+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11401+#endif
11402+
11403 if (addr) {
11404 addr = ALIGN(addr, HPAGE_SIZE);
11405 vma = find_vma(mm, addr);
11406- if (task_size - len >= addr &&
11407- (!vma || addr + len <= vma->vm_start))
11408+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11409 return addr;
11410 }
11411 if (mm->get_unmapped_area == arch_get_unmapped_area)
11412 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
11413- pgoff, flags);
11414+ pgoff, flags, offset);
11415 else
11416 return hugetlb_get_unmapped_area_topdown(file, addr, len,
11417- pgoff, flags);
11418+ pgoff, flags, offset);
11419 }
11420
11421 pte_t *huge_pte_alloc(struct mm_struct *mm,
11422diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
11423index 5322e53..f820c5e 100644
11424--- a/arch/sparc/mm/init_64.c
11425+++ b/arch/sparc/mm/init_64.c
11426@@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
11427 int num_kernel_image_mappings;
11428
11429 #ifdef CONFIG_DEBUG_DCFLUSH
11430-atomic_t dcpage_flushes = ATOMIC_INIT(0);
11431+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
11432 #ifdef CONFIG_SMP
11433-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11434+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11435 #endif
11436 #endif
11437
11438@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page)
11439 {
11440 BUG_ON(tlb_type == hypervisor);
11441 #ifdef CONFIG_DEBUG_DCFLUSH
11442- atomic_inc(&dcpage_flushes);
11443+ atomic_inc_unchecked(&dcpage_flushes);
11444 #endif
11445
11446 #ifdef DCACHE_ALIASING_POSSIBLE
11447@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
11448
11449 #ifdef CONFIG_DEBUG_DCFLUSH
11450 seq_printf(m, "DCPageFlushes\t: %d\n",
11451- atomic_read(&dcpage_flushes));
11452+ atomic_read_unchecked(&dcpage_flushes));
11453 #ifdef CONFIG_SMP
11454 seq_printf(m, "DCPageFlushesXC\t: %d\n",
11455- atomic_read(&dcpage_flushes_xcall));
11456+ atomic_read_unchecked(&dcpage_flushes_xcall));
11457 #endif /* CONFIG_SMP */
11458 #endif /* CONFIG_DEBUG_DCFLUSH */
11459 }
11460diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
11461index b3692ce..e4517c9 100644
11462--- a/arch/tile/Kconfig
11463+++ b/arch/tile/Kconfig
11464@@ -184,6 +184,7 @@ source "kernel/Kconfig.hz"
11465
11466 config KEXEC
11467 bool "kexec system call"
11468+ depends on !GRKERNSEC_KMEM
11469 ---help---
11470 kexec is a system call that implements the ability to shutdown your
11471 current kernel, and to start another kernel. It is like a reboot
11472diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
11473index ad220ee..2f537b3 100644
11474--- a/arch/tile/include/asm/atomic_64.h
11475+++ b/arch/tile/include/asm/atomic_64.h
11476@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
11477
11478 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
11479
11480+#define atomic64_read_unchecked(v) atomic64_read(v)
11481+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
11482+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
11483+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
11484+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
11485+#define atomic64_inc_unchecked(v) atomic64_inc(v)
11486+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
11487+#define atomic64_dec_unchecked(v) atomic64_dec(v)
11488+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
11489+
11490 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
11491 #define smp_mb__before_atomic_dec() smp_mb()
11492 #define smp_mb__after_atomic_dec() smp_mb()
11493diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
11494index 6160761..00cac88 100644
11495--- a/arch/tile/include/asm/cache.h
11496+++ b/arch/tile/include/asm/cache.h
11497@@ -15,11 +15,12 @@
11498 #ifndef _ASM_TILE_CACHE_H
11499 #define _ASM_TILE_CACHE_H
11500
11501+#include <linux/const.h>
11502 #include <arch/chip.h>
11503
11504 /* bytes per L1 data cache line */
11505 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
11506-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11507+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11508
11509 /* bytes per L2 cache line */
11510 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
11511diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
11512index b6cde32..c0cb736 100644
11513--- a/arch/tile/include/asm/uaccess.h
11514+++ b/arch/tile/include/asm/uaccess.h
11515@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
11516 const void __user *from,
11517 unsigned long n)
11518 {
11519- int sz = __compiletime_object_size(to);
11520+ size_t sz = __compiletime_object_size(to);
11521
11522- if (likely(sz == -1 || sz >= n))
11523+ if (likely(sz == (size_t)-1 || sz >= n))
11524 n = _copy_from_user(to, from, n);
11525 else
11526 copy_from_user_overflow();
11527diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
11528index 0cb3bba..7338b2d 100644
11529--- a/arch/tile/mm/hugetlbpage.c
11530+++ b/arch/tile/mm/hugetlbpage.c
11531@@ -212,6 +212,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
11532 info.high_limit = TASK_SIZE;
11533 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11534 info.align_offset = 0;
11535+ info.threadstack_offset = 0;
11536 return vm_unmapped_area(&info);
11537 }
11538
11539@@ -229,6 +230,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
11540 info.high_limit = current->mm->mmap_base;
11541 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11542 info.align_offset = 0;
11543+ info.threadstack_offset = 0;
11544 addr = vm_unmapped_area(&info);
11545
11546 /*
11547diff --git a/arch/um/Makefile b/arch/um/Makefile
11548index 36e658a..71a5c5a 100644
11549--- a/arch/um/Makefile
11550+++ b/arch/um/Makefile
11551@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
11552 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
11553 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
11554
11555+ifdef CONSTIFY_PLUGIN
11556+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11557+endif
11558+
11559 #This will adjust *FLAGS accordingly to the platform.
11560 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
11561
11562diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
11563index 19e1bdd..3665b77 100644
11564--- a/arch/um/include/asm/cache.h
11565+++ b/arch/um/include/asm/cache.h
11566@@ -1,6 +1,7 @@
11567 #ifndef __UM_CACHE_H
11568 #define __UM_CACHE_H
11569
11570+#include <linux/const.h>
11571
11572 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
11573 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11574@@ -12,6 +13,6 @@
11575 # define L1_CACHE_SHIFT 5
11576 #endif
11577
11578-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11579+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11580
11581 #endif
11582diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
11583index 2e0a6b1..a64d0f5 100644
11584--- a/arch/um/include/asm/kmap_types.h
11585+++ b/arch/um/include/asm/kmap_types.h
11586@@ -8,6 +8,6 @@
11587
11588 /* No more #include "asm/arch/kmap_types.h" ! */
11589
11590-#define KM_TYPE_NR 14
11591+#define KM_TYPE_NR 15
11592
11593 #endif
11594diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
11595index 5ff53d9..5850cdf 100644
11596--- a/arch/um/include/asm/page.h
11597+++ b/arch/um/include/asm/page.h
11598@@ -14,6 +14,9 @@
11599 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
11600 #define PAGE_MASK (~(PAGE_SIZE-1))
11601
11602+#define ktla_ktva(addr) (addr)
11603+#define ktva_ktla(addr) (addr)
11604+
11605 #ifndef __ASSEMBLY__
11606
11607 struct page;
11608diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
11609index 0032f92..cd151e0 100644
11610--- a/arch/um/include/asm/pgtable-3level.h
11611+++ b/arch/um/include/asm/pgtable-3level.h
11612@@ -58,6 +58,7 @@
11613 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
11614 #define pud_populate(mm, pud, pmd) \
11615 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
11616+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
11617
11618 #ifdef CONFIG_64BIT
11619 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
11620diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
11621index eecc414..48adb87 100644
11622--- a/arch/um/kernel/process.c
11623+++ b/arch/um/kernel/process.c
11624@@ -356,22 +356,6 @@ int singlestepping(void * t)
11625 return 2;
11626 }
11627
11628-/*
11629- * Only x86 and x86_64 have an arch_align_stack().
11630- * All other arches have "#define arch_align_stack(x) (x)"
11631- * in their asm/system.h
11632- * As this is included in UML from asm-um/system-generic.h,
11633- * we can use it to behave as the subarch does.
11634- */
11635-#ifndef arch_align_stack
11636-unsigned long arch_align_stack(unsigned long sp)
11637-{
11638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
11639- sp -= get_random_int() % 8192;
11640- return sp & ~0xf;
11641-}
11642-#endif
11643-
11644 unsigned long get_wchan(struct task_struct *p)
11645 {
11646 unsigned long stack_page, sp, ip;
11647diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
11648index ad8f795..2c7eec6 100644
11649--- a/arch/unicore32/include/asm/cache.h
11650+++ b/arch/unicore32/include/asm/cache.h
11651@@ -12,8 +12,10 @@
11652 #ifndef __UNICORE_CACHE_H__
11653 #define __UNICORE_CACHE_H__
11654
11655-#define L1_CACHE_SHIFT (5)
11656-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11657+#include <linux/const.h>
11658+
11659+#define L1_CACHE_SHIFT 5
11660+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11661
11662 /*
11663 * Memory returned by kmalloc() may be used for DMA, so we must make
11664diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
11665index 0952ecd..d31bcc7 100644
11666--- a/arch/x86/Kconfig
11667+++ b/arch/x86/Kconfig
11668@@ -249,7 +249,7 @@ config X86_HT
11669
11670 config X86_32_LAZY_GS
11671 def_bool y
11672- depends on X86_32 && !CC_STACKPROTECTOR
11673+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11674
11675 config ARCH_HWEIGHT_CFLAGS
11676 string
11677@@ -1127,7 +1127,7 @@ choice
11678
11679 config NOHIGHMEM
11680 bool "off"
11681- depends on !X86_NUMAQ
11682+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11683 ---help---
11684 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11685 However, the address space of 32-bit x86 processors is only 4
11686@@ -1164,7 +1164,7 @@ config NOHIGHMEM
11687
11688 config HIGHMEM4G
11689 bool "4GB"
11690- depends on !X86_NUMAQ
11691+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11692 ---help---
11693 Select this if you have a 32-bit processor and between 1 and 4
11694 gigabytes of physical RAM.
11695@@ -1217,7 +1217,7 @@ config PAGE_OFFSET
11696 hex
11697 default 0xB0000000 if VMSPLIT_3G_OPT
11698 default 0x80000000 if VMSPLIT_2G
11699- default 0x78000000 if VMSPLIT_2G_OPT
11700+ default 0x70000000 if VMSPLIT_2G_OPT
11701 default 0x40000000 if VMSPLIT_1G
11702 default 0xC0000000
11703 depends on X86_32
11704@@ -1619,6 +1619,7 @@ config SECCOMP
11705
11706 config CC_STACKPROTECTOR
11707 bool "Enable -fstack-protector buffer overflow detection"
11708+ depends on X86_64 || !PAX_MEMORY_UDEREF
11709 ---help---
11710 This option turns on the -fstack-protector GCC feature. This
11711 feature puts, at the beginning of functions, a canary value on
11712@@ -1637,6 +1638,7 @@ source kernel/Kconfig.hz
11713
11714 config KEXEC
11715 bool "kexec system call"
11716+ depends on !GRKERNSEC_KMEM
11717 ---help---
11718 kexec is a system call that implements the ability to shutdown your
11719 current kernel, and to start another kernel. It is like a reboot
11720@@ -1738,6 +1740,8 @@ config X86_NEED_RELOCS
11721 config PHYSICAL_ALIGN
11722 hex "Alignment value to which kernel should be aligned"
11723 default "0x1000000"
11724+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
11725+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
11726 range 0x2000 0x1000000 if X86_32
11727 range 0x200000 0x1000000 if X86_64
11728 ---help---
11729@@ -1817,9 +1821,10 @@ config DEBUG_HOTPLUG_CPU0
11730 If unsure, say N.
11731
11732 config COMPAT_VDSO
11733- def_bool y
11734+ def_bool n
11735 prompt "Compat VDSO support"
11736 depends on X86_32 || IA32_EMULATION
11737+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
11738 ---help---
11739 Map the 32-bit VDSO to the predictable old-style address too.
11740
11741diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
11742index c026cca..14657ae 100644
11743--- a/arch/x86/Kconfig.cpu
11744+++ b/arch/x86/Kconfig.cpu
11745@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
11746
11747 config X86_F00F_BUG
11748 def_bool y
11749- depends on M586MMX || M586TSC || M586 || M486
11750+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
11751
11752 config X86_INVD_BUG
11753 def_bool y
11754@@ -327,7 +327,7 @@ config X86_INVD_BUG
11755
11756 config X86_ALIGNMENT_16
11757 def_bool y
11758- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11759+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11760
11761 config X86_INTEL_USERCOPY
11762 def_bool y
11763@@ -373,7 +373,7 @@ config X86_CMPXCHG64
11764 # generates cmov.
11765 config X86_CMOV
11766 def_bool y
11767- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11768+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11769
11770 config X86_MINIMUM_CPU_FAMILY
11771 int
11772diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
11773index 0f3621e..282f24b 100644
11774--- a/arch/x86/Kconfig.debug
11775+++ b/arch/x86/Kconfig.debug
11776@@ -84,7 +84,7 @@ config X86_PTDUMP
11777 config DEBUG_RODATA
11778 bool "Write protect kernel read-only data structures"
11779 default y
11780- depends on DEBUG_KERNEL
11781+ depends on DEBUG_KERNEL && BROKEN
11782 ---help---
11783 Mark the kernel read-only data as write-protected in the pagetables,
11784 in order to catch accidental (and incorrect) writes to such const
11785@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
11786
11787 config DEBUG_SET_MODULE_RONX
11788 bool "Set loadable kernel module data as NX and text as RO"
11789- depends on MODULES
11790+ depends on MODULES && BROKEN
11791 ---help---
11792 This option helps catch unintended modifications to loadable
11793 kernel module's text and read-only data. It also prevents execution
11794diff --git a/arch/x86/Makefile b/arch/x86/Makefile
11795index 57d0215..b4373fb 100644
11796--- a/arch/x86/Makefile
11797+++ b/arch/x86/Makefile
11798@@ -49,14 +49,12 @@ ifeq ($(CONFIG_X86_32),y)
11799 # CPU-specific tuning. Anything which can be shared with UML should go here.
11800 include $(srctree)/arch/x86/Makefile_32.cpu
11801 KBUILD_CFLAGS += $(cflags-y)
11802-
11803- # temporary until string.h is fixed
11804- KBUILD_CFLAGS += -ffreestanding
11805 else
11806 BITS := 64
11807 UTS_MACHINE := x86_64
11808 CHECKFLAGS += -D__x86_64__ -m64
11809
11810+ biarch := $(call cc-option,-m64)
11811 KBUILD_AFLAGS += -m64
11812 KBUILD_CFLAGS += -m64
11813
11814@@ -89,6 +87,9 @@ else
11815 KBUILD_CFLAGS += -maccumulate-outgoing-args
11816 endif
11817
11818+# temporary until string.h is fixed
11819+KBUILD_CFLAGS += -ffreestanding
11820+
11821 ifdef CONFIG_CC_STACKPROTECTOR
11822 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
11823 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
11824@@ -247,3 +248,12 @@ define archhelp
11825 echo ' FDINITRD=file initrd for the booted kernel'
11826 echo ' kvmconfig - Enable additional options for guest kernel support'
11827 endef
11828+
11829+define OLD_LD
11830+
11831+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
11832+*** Please upgrade your binutils to 2.18 or newer
11833+endef
11834+
11835+archprepare:
11836+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
11837diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
11838index d9c1195..a26ca0d 100644
11839--- a/arch/x86/boot/Makefile
11840+++ b/arch/x86/boot/Makefile
11841@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
11842 $(call cc-option, -fno-unit-at-a-time)) \
11843 $(call cc-option, -fno-stack-protector) \
11844 $(call cc-option, -mpreferred-stack-boundary=2)
11845+ifdef CONSTIFY_PLUGIN
11846+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11847+endif
11848 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11849 GCOV_PROFILE := n
11850
11851diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
11852index 878e4b9..20537ab 100644
11853--- a/arch/x86/boot/bitops.h
11854+++ b/arch/x86/boot/bitops.h
11855@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11856 u8 v;
11857 const u32 *p = (const u32 *)addr;
11858
11859- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11860+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11861 return v;
11862 }
11863
11864@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11865
11866 static inline void set_bit(int nr, void *addr)
11867 {
11868- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11869+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11870 }
11871
11872 #endif /* BOOT_BITOPS_H */
11873diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
11874index ef72bae..353a184 100644
11875--- a/arch/x86/boot/boot.h
11876+++ b/arch/x86/boot/boot.h
11877@@ -85,7 +85,7 @@ static inline void io_delay(void)
11878 static inline u16 ds(void)
11879 {
11880 u16 seg;
11881- asm("movw %%ds,%0" : "=rm" (seg));
11882+ asm volatile("movw %%ds,%0" : "=rm" (seg));
11883 return seg;
11884 }
11885
11886@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
11887 static inline int memcmp(const void *s1, const void *s2, size_t len)
11888 {
11889 u8 diff;
11890- asm("repe; cmpsb; setnz %0"
11891+ asm volatile("repe; cmpsb; setnz %0"
11892 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
11893 return diff;
11894 }
11895diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
11896index c8a6792..2402765 100644
11897--- a/arch/x86/boot/compressed/Makefile
11898+++ b/arch/x86/boot/compressed/Makefile
11899@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
11900 KBUILD_CFLAGS += -mno-mmx -mno-sse
11901 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
11902 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
11903+ifdef CONSTIFY_PLUGIN
11904+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11905+endif
11906
11907 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11908 GCOV_PROFILE := n
11909diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
11910index a53440e..c3dbf1e 100644
11911--- a/arch/x86/boot/compressed/efi_stub_32.S
11912+++ b/arch/x86/boot/compressed/efi_stub_32.S
11913@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
11914 * parameter 2, ..., param n. To make things easy, we save the return
11915 * address of efi_call_phys in a global variable.
11916 */
11917- popl %ecx
11918- movl %ecx, saved_return_addr(%edx)
11919- /* get the function pointer into ECX*/
11920- popl %ecx
11921- movl %ecx, efi_rt_function_ptr(%edx)
11922+ popl saved_return_addr(%edx)
11923+ popl efi_rt_function_ptr(%edx)
11924
11925 /*
11926 * 3. Call the physical function.
11927 */
11928- call *%ecx
11929+ call *efi_rt_function_ptr(%edx)
11930
11931 /*
11932 * 4. Balance the stack. And because EAX contain the return value,
11933@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
11934 1: popl %edx
11935 subl $1b, %edx
11936
11937- movl efi_rt_function_ptr(%edx), %ecx
11938- pushl %ecx
11939+ pushl efi_rt_function_ptr(%edx)
11940
11941 /*
11942 * 10. Push the saved return address onto the stack and return.
11943 */
11944- movl saved_return_addr(%edx), %ecx
11945- pushl %ecx
11946- ret
11947+ jmpl *saved_return_addr(%edx)
11948 ENDPROC(efi_call_phys)
11949 .previous
11950
11951diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
11952index 5d6f689..9d06730 100644
11953--- a/arch/x86/boot/compressed/head_32.S
11954+++ b/arch/x86/boot/compressed/head_32.S
11955@@ -118,7 +118,7 @@ preferred_addr:
11956 notl %eax
11957 andl %eax, %ebx
11958 #else
11959- movl $LOAD_PHYSICAL_ADDR, %ebx
11960+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11961 #endif
11962
11963 /* Target address to relocate to for decompression */
11964diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
11965index c337422..2c5be72 100644
11966--- a/arch/x86/boot/compressed/head_64.S
11967+++ b/arch/x86/boot/compressed/head_64.S
11968@@ -95,7 +95,7 @@ ENTRY(startup_32)
11969 notl %eax
11970 andl %eax, %ebx
11971 #else
11972- movl $LOAD_PHYSICAL_ADDR, %ebx
11973+ movl $____LOAD_PHYSICAL_ADDR, %ebx
11974 #endif
11975
11976 /* Target address to relocate to for decompression */
11977@@ -270,7 +270,7 @@ preferred_addr:
11978 notq %rax
11979 andq %rax, %rbp
11980 #else
11981- movq $LOAD_PHYSICAL_ADDR, %rbp
11982+ movq $____LOAD_PHYSICAL_ADDR, %rbp
11983 #endif
11984
11985 /* Target address to relocate to for decompression */
11986@@ -362,8 +362,8 @@ gdt:
11987 .long gdt
11988 .word 0
11989 .quad 0x0000000000000000 /* NULL descriptor */
11990- .quad 0x00af9a000000ffff /* __KERNEL_CS */
11991- .quad 0x00cf92000000ffff /* __KERNEL_DS */
11992+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
11993+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
11994 .quad 0x0080890000000000 /* TS descriptor */
11995 .quad 0x0000000000000000 /* TS continued */
11996 gdt_end:
11997diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
11998index 434f077..b6b4b38 100644
11999--- a/arch/x86/boot/compressed/misc.c
12000+++ b/arch/x86/boot/compressed/misc.c
12001@@ -283,7 +283,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12002 * Calculate the delta between where vmlinux was linked to load
12003 * and where it was actually loaded.
12004 */
12005- delta = min_addr - LOAD_PHYSICAL_ADDR;
12006+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12007 if (!delta) {
12008 debug_putstr("No relocation needed... ");
12009 return;
12010@@ -380,7 +380,7 @@ static void parse_elf(void *output)
12011 case PT_LOAD:
12012 #ifdef CONFIG_RELOCATABLE
12013 dest = output;
12014- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12015+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12016 #else
12017 dest = (void *)(phdr->p_paddr);
12018 #endif
12019@@ -432,7 +432,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
12020 error("Destination address too large");
12021 #endif
12022 #ifndef CONFIG_RELOCATABLE
12023- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12024+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12025 error("Wrong destination address");
12026 #endif
12027
12028diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12029index 4d3ff03..e4972ff 100644
12030--- a/arch/x86/boot/cpucheck.c
12031+++ b/arch/x86/boot/cpucheck.c
12032@@ -74,7 +74,7 @@ static int has_fpu(void)
12033 u16 fcw = -1, fsw = -1;
12034 u32 cr0;
12035
12036- asm("movl %%cr0,%0" : "=r" (cr0));
12037+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
12038 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
12039 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
12040 asm volatile("movl %0,%%cr0" : : "r" (cr0));
12041@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
12042 {
12043 u32 f0, f1;
12044
12045- asm("pushfl ; "
12046+ asm volatile("pushfl ; "
12047 "pushfl ; "
12048 "popl %0 ; "
12049 "movl %0,%1 ; "
12050@@ -115,7 +115,7 @@ static void get_flags(void)
12051 set_bit(X86_FEATURE_FPU, cpu.flags);
12052
12053 if (has_eflag(X86_EFLAGS_ID)) {
12054- asm("cpuid"
12055+ asm volatile("cpuid"
12056 : "=a" (max_intel_level),
12057 "=b" (cpu_vendor[0]),
12058 "=d" (cpu_vendor[1]),
12059@@ -124,7 +124,7 @@ static void get_flags(void)
12060
12061 if (max_intel_level >= 0x00000001 &&
12062 max_intel_level <= 0x0000ffff) {
12063- asm("cpuid"
12064+ asm volatile("cpuid"
12065 : "=a" (tfms),
12066 "=c" (cpu.flags[4]),
12067 "=d" (cpu.flags[0])
12068@@ -136,7 +136,7 @@ static void get_flags(void)
12069 cpu.model += ((tfms >> 16) & 0xf) << 4;
12070 }
12071
12072- asm("cpuid"
12073+ asm volatile("cpuid"
12074 : "=a" (max_amd_level)
12075 : "a" (0x80000000)
12076 : "ebx", "ecx", "edx");
12077@@ -144,7 +144,7 @@ static void get_flags(void)
12078 if (max_amd_level >= 0x80000001 &&
12079 max_amd_level <= 0x8000ffff) {
12080 u32 eax = 0x80000001;
12081- asm("cpuid"
12082+ asm volatile("cpuid"
12083 : "+a" (eax),
12084 "=c" (cpu.flags[6]),
12085 "=d" (cpu.flags[1])
12086@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12087 u32 ecx = MSR_K7_HWCR;
12088 u32 eax, edx;
12089
12090- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12091+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12092 eax &= ~(1 << 15);
12093- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12094+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12095
12096 get_flags(); /* Make sure it really did something */
12097 err = check_flags();
12098@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12099 u32 ecx = MSR_VIA_FCR;
12100 u32 eax, edx;
12101
12102- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12103+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12104 eax |= (1<<1)|(1<<7);
12105- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12106+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12107
12108 set_bit(X86_FEATURE_CX8, cpu.flags);
12109 err = check_flags();
12110@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12111 u32 eax, edx;
12112 u32 level = 1;
12113
12114- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12115- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12116- asm("cpuid"
12117+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12118+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12119+ asm volatile("cpuid"
12120 : "+a" (level), "=d" (cpu.flags[0])
12121 : : "ecx", "ebx");
12122- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12123+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12124
12125 err = check_flags();
12126 }
12127diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12128index 9ec06a1..2c25e79 100644
12129--- a/arch/x86/boot/header.S
12130+++ b/arch/x86/boot/header.S
12131@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12132 # single linked list of
12133 # struct setup_data
12134
12135-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12136+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12137
12138 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12139+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12140+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12141+#else
12142 #define VO_INIT_SIZE (VO__end - VO__text)
12143+#endif
12144 #if ZO_INIT_SIZE > VO_INIT_SIZE
12145 #define INIT_SIZE ZO_INIT_SIZE
12146 #else
12147diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12148index db75d07..8e6d0af 100644
12149--- a/arch/x86/boot/memory.c
12150+++ b/arch/x86/boot/memory.c
12151@@ -19,7 +19,7 @@
12152
12153 static int detect_memory_e820(void)
12154 {
12155- int count = 0;
12156+ unsigned int count = 0;
12157 struct biosregs ireg, oreg;
12158 struct e820entry *desc = boot_params.e820_map;
12159 static struct e820entry buf; /* static so it is zeroed */
12160diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12161index 11e8c6e..fdbb1ed 100644
12162--- a/arch/x86/boot/video-vesa.c
12163+++ b/arch/x86/boot/video-vesa.c
12164@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
12165
12166 boot_params.screen_info.vesapm_seg = oreg.es;
12167 boot_params.screen_info.vesapm_off = oreg.di;
12168+ boot_params.screen_info.vesapm_size = oreg.cx;
12169 }
12170
12171 /*
12172diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
12173index 43eda28..5ab5fdb 100644
12174--- a/arch/x86/boot/video.c
12175+++ b/arch/x86/boot/video.c
12176@@ -96,7 +96,7 @@ static void store_mode_params(void)
12177 static unsigned int get_entry(void)
12178 {
12179 char entry_buf[4];
12180- int i, len = 0;
12181+ unsigned int i, len = 0;
12182 int key;
12183 unsigned int v;
12184
12185diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
12186index 9105655..41779c1 100644
12187--- a/arch/x86/crypto/aes-x86_64-asm_64.S
12188+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
12189@@ -8,6 +8,8 @@
12190 * including this sentence is retained in full.
12191 */
12192
12193+#include <asm/alternative-asm.h>
12194+
12195 .extern crypto_ft_tab
12196 .extern crypto_it_tab
12197 .extern crypto_fl_tab
12198@@ -70,6 +72,8 @@
12199 je B192; \
12200 leaq 32(r9),r9;
12201
12202+#define ret pax_force_retaddr; ret
12203+
12204 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
12205 movq r1,r2; \
12206 movq r3,r4; \
12207diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
12208index 477e9d7..c92c7d8 100644
12209--- a/arch/x86/crypto/aesni-intel_asm.S
12210+++ b/arch/x86/crypto/aesni-intel_asm.S
12211@@ -31,6 +31,7 @@
12212
12213 #include <linux/linkage.h>
12214 #include <asm/inst.h>
12215+#include <asm/alternative-asm.h>
12216
12217 #ifdef __x86_64__
12218 .data
12219@@ -205,7 +206,7 @@ enc: .octa 0x2
12220 * num_initial_blocks = b mod 4
12221 * encrypt the initial num_initial_blocks blocks and apply ghash on
12222 * the ciphertext
12223-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12224+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12225 * are clobbered
12226 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12227 */
12228@@ -214,8 +215,8 @@ enc: .octa 0x2
12229 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12230 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12231 mov arg7, %r10 # %r10 = AAD
12232- mov arg8, %r12 # %r12 = aadLen
12233- mov %r12, %r11
12234+ mov arg8, %r15 # %r15 = aadLen
12235+ mov %r15, %r11
12236 pxor %xmm\i, %xmm\i
12237 _get_AAD_loop\num_initial_blocks\operation:
12238 movd (%r10), \TMP1
12239@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12240 psrldq $4, %xmm\i
12241 pxor \TMP1, %xmm\i
12242 add $4, %r10
12243- sub $4, %r12
12244+ sub $4, %r15
12245 jne _get_AAD_loop\num_initial_blocks\operation
12246 cmp $16, %r11
12247 je _get_AAD_loop2_done\num_initial_blocks\operation
12248- mov $16, %r12
12249+ mov $16, %r15
12250 _get_AAD_loop2\num_initial_blocks\operation:
12251 psrldq $4, %xmm\i
12252- sub $4, %r12
12253- cmp %r11, %r12
12254+ sub $4, %r15
12255+ cmp %r11, %r15
12256 jne _get_AAD_loop2\num_initial_blocks\operation
12257 _get_AAD_loop2_done\num_initial_blocks\operation:
12258 movdqa SHUF_MASK(%rip), %xmm14
12259@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
12260 * num_initial_blocks = b mod 4
12261 * encrypt the initial num_initial_blocks blocks and apply ghash on
12262 * the ciphertext
12263-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12264+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12265 * are clobbered
12266 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12267 */
12268@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
12269 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12270 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12271 mov arg7, %r10 # %r10 = AAD
12272- mov arg8, %r12 # %r12 = aadLen
12273- mov %r12, %r11
12274+ mov arg8, %r15 # %r15 = aadLen
12275+ mov %r15, %r11
12276 pxor %xmm\i, %xmm\i
12277 _get_AAD_loop\num_initial_blocks\operation:
12278 movd (%r10), \TMP1
12279@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12280 psrldq $4, %xmm\i
12281 pxor \TMP1, %xmm\i
12282 add $4, %r10
12283- sub $4, %r12
12284+ sub $4, %r15
12285 jne _get_AAD_loop\num_initial_blocks\operation
12286 cmp $16, %r11
12287 je _get_AAD_loop2_done\num_initial_blocks\operation
12288- mov $16, %r12
12289+ mov $16, %r15
12290 _get_AAD_loop2\num_initial_blocks\operation:
12291 psrldq $4, %xmm\i
12292- sub $4, %r12
12293- cmp %r11, %r12
12294+ sub $4, %r15
12295+ cmp %r11, %r15
12296 jne _get_AAD_loop2\num_initial_blocks\operation
12297 _get_AAD_loop2_done\num_initial_blocks\operation:
12298 movdqa SHUF_MASK(%rip), %xmm14
12299@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
12300 *
12301 *****************************************************************************/
12302 ENTRY(aesni_gcm_dec)
12303- push %r12
12304+ push %r15
12305 push %r13
12306 push %r14
12307 mov %rsp, %r14
12308@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
12309 */
12310 sub $VARIABLE_OFFSET, %rsp
12311 and $~63, %rsp # align rsp to 64 bytes
12312- mov %arg6, %r12
12313- movdqu (%r12), %xmm13 # %xmm13 = HashKey
12314+ mov %arg6, %r15
12315+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
12316 movdqa SHUF_MASK(%rip), %xmm2
12317 PSHUFB_XMM %xmm2, %xmm13
12318
12319@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
12320 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
12321 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
12322 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
12323- mov %r13, %r12
12324- and $(3<<4), %r12
12325+ mov %r13, %r15
12326+ and $(3<<4), %r15
12327 jz _initial_num_blocks_is_0_decrypt
12328- cmp $(2<<4), %r12
12329+ cmp $(2<<4), %r15
12330 jb _initial_num_blocks_is_1_decrypt
12331 je _initial_num_blocks_is_2_decrypt
12332 _initial_num_blocks_is_3_decrypt:
12333@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
12334 sub $16, %r11
12335 add %r13, %r11
12336 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
12337- lea SHIFT_MASK+16(%rip), %r12
12338- sub %r13, %r12
12339+ lea SHIFT_MASK+16(%rip), %r15
12340+ sub %r13, %r15
12341 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
12342 # (%r13 is the number of bytes in plaintext mod 16)
12343- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12344+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12345 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
12346
12347 movdqa %xmm1, %xmm2
12348 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
12349- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12350+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12351 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
12352 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
12353 pand %xmm1, %xmm2
12354@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
12355 sub $1, %r13
12356 jne _less_than_8_bytes_left_decrypt
12357 _multiple_of_16_bytes_decrypt:
12358- mov arg8, %r12 # %r13 = aadLen (number of bytes)
12359- shl $3, %r12 # convert into number of bits
12360- movd %r12d, %xmm15 # len(A) in %xmm15
12361+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
12362+ shl $3, %r15 # convert into number of bits
12363+ movd %r15d, %xmm15 # len(A) in %xmm15
12364 shl $3, %arg4 # len(C) in bits (*128)
12365 MOVQ_R64_XMM %arg4, %xmm1
12366 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12367@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
12368 mov %r14, %rsp
12369 pop %r14
12370 pop %r13
12371- pop %r12
12372+ pop %r15
12373+ pax_force_retaddr
12374 ret
12375 ENDPROC(aesni_gcm_dec)
12376
12377@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
12378 * poly = x^128 + x^127 + x^126 + x^121 + 1
12379 ***************************************************************************/
12380 ENTRY(aesni_gcm_enc)
12381- push %r12
12382+ push %r15
12383 push %r13
12384 push %r14
12385 mov %rsp, %r14
12386@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
12387 #
12388 sub $VARIABLE_OFFSET, %rsp
12389 and $~63, %rsp
12390- mov %arg6, %r12
12391- movdqu (%r12), %xmm13
12392+ mov %arg6, %r15
12393+ movdqu (%r15), %xmm13
12394 movdqa SHUF_MASK(%rip), %xmm2
12395 PSHUFB_XMM %xmm2, %xmm13
12396
12397@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
12398 movdqa %xmm13, HashKey(%rsp)
12399 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
12400 and $-16, %r13
12401- mov %r13, %r12
12402+ mov %r13, %r15
12403
12404 # Encrypt first few blocks
12405
12406- and $(3<<4), %r12
12407+ and $(3<<4), %r15
12408 jz _initial_num_blocks_is_0_encrypt
12409- cmp $(2<<4), %r12
12410+ cmp $(2<<4), %r15
12411 jb _initial_num_blocks_is_1_encrypt
12412 je _initial_num_blocks_is_2_encrypt
12413 _initial_num_blocks_is_3_encrypt:
12414@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
12415 sub $16, %r11
12416 add %r13, %r11
12417 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
12418- lea SHIFT_MASK+16(%rip), %r12
12419- sub %r13, %r12
12420+ lea SHIFT_MASK+16(%rip), %r15
12421+ sub %r13, %r15
12422 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
12423 # (%r13 is the number of bytes in plaintext mod 16)
12424- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12425+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12426 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
12427 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
12428- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12429+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12430 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
12431 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
12432 movdqa SHUF_MASK(%rip), %xmm10
12433@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
12434 sub $1, %r13
12435 jne _less_than_8_bytes_left_encrypt
12436 _multiple_of_16_bytes_encrypt:
12437- mov arg8, %r12 # %r12 = addLen (number of bytes)
12438- shl $3, %r12
12439- movd %r12d, %xmm15 # len(A) in %xmm15
12440+ mov arg8, %r15 # %r15 = addLen (number of bytes)
12441+ shl $3, %r15
12442+ movd %r15d, %xmm15 # len(A) in %xmm15
12443 shl $3, %arg4 # len(C) in bits (*128)
12444 MOVQ_R64_XMM %arg4, %xmm1
12445 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12446@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
12447 mov %r14, %rsp
12448 pop %r14
12449 pop %r13
12450- pop %r12
12451+ pop %r15
12452+ pax_force_retaddr
12453 ret
12454 ENDPROC(aesni_gcm_enc)
12455
12456@@ -1722,6 +1725,7 @@ _key_expansion_256a:
12457 pxor %xmm1, %xmm0
12458 movaps %xmm0, (TKEYP)
12459 add $0x10, TKEYP
12460+ pax_force_retaddr
12461 ret
12462 ENDPROC(_key_expansion_128)
12463 ENDPROC(_key_expansion_256a)
12464@@ -1748,6 +1752,7 @@ _key_expansion_192a:
12465 shufps $0b01001110, %xmm2, %xmm1
12466 movaps %xmm1, 0x10(TKEYP)
12467 add $0x20, TKEYP
12468+ pax_force_retaddr
12469 ret
12470 ENDPROC(_key_expansion_192a)
12471
12472@@ -1768,6 +1773,7 @@ _key_expansion_192b:
12473
12474 movaps %xmm0, (TKEYP)
12475 add $0x10, TKEYP
12476+ pax_force_retaddr
12477 ret
12478 ENDPROC(_key_expansion_192b)
12479
12480@@ -1781,6 +1787,7 @@ _key_expansion_256b:
12481 pxor %xmm1, %xmm2
12482 movaps %xmm2, (TKEYP)
12483 add $0x10, TKEYP
12484+ pax_force_retaddr
12485 ret
12486 ENDPROC(_key_expansion_256b)
12487
12488@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
12489 #ifndef __x86_64__
12490 popl KEYP
12491 #endif
12492+ pax_force_retaddr
12493 ret
12494 ENDPROC(aesni_set_key)
12495
12496@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
12497 popl KLEN
12498 popl KEYP
12499 #endif
12500+ pax_force_retaddr
12501 ret
12502 ENDPROC(aesni_enc)
12503
12504@@ -1974,6 +1983,7 @@ _aesni_enc1:
12505 AESENC KEY STATE
12506 movaps 0x70(TKEYP), KEY
12507 AESENCLAST KEY STATE
12508+ pax_force_retaddr
12509 ret
12510 ENDPROC(_aesni_enc1)
12511
12512@@ -2083,6 +2093,7 @@ _aesni_enc4:
12513 AESENCLAST KEY STATE2
12514 AESENCLAST KEY STATE3
12515 AESENCLAST KEY STATE4
12516+ pax_force_retaddr
12517 ret
12518 ENDPROC(_aesni_enc4)
12519
12520@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
12521 popl KLEN
12522 popl KEYP
12523 #endif
12524+ pax_force_retaddr
12525 ret
12526 ENDPROC(aesni_dec)
12527
12528@@ -2164,6 +2176,7 @@ _aesni_dec1:
12529 AESDEC KEY STATE
12530 movaps 0x70(TKEYP), KEY
12531 AESDECLAST KEY STATE
12532+ pax_force_retaddr
12533 ret
12534 ENDPROC(_aesni_dec1)
12535
12536@@ -2273,6 +2286,7 @@ _aesni_dec4:
12537 AESDECLAST KEY STATE2
12538 AESDECLAST KEY STATE3
12539 AESDECLAST KEY STATE4
12540+ pax_force_retaddr
12541 ret
12542 ENDPROC(_aesni_dec4)
12543
12544@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
12545 popl KEYP
12546 popl LEN
12547 #endif
12548+ pax_force_retaddr
12549 ret
12550 ENDPROC(aesni_ecb_enc)
12551
12552@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
12553 popl KEYP
12554 popl LEN
12555 #endif
12556+ pax_force_retaddr
12557 ret
12558 ENDPROC(aesni_ecb_dec)
12559
12560@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
12561 popl LEN
12562 popl IVP
12563 #endif
12564+ pax_force_retaddr
12565 ret
12566 ENDPROC(aesni_cbc_enc)
12567
12568@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
12569 popl LEN
12570 popl IVP
12571 #endif
12572+ pax_force_retaddr
12573 ret
12574 ENDPROC(aesni_cbc_dec)
12575
12576@@ -2550,6 +2568,7 @@ _aesni_inc_init:
12577 mov $1, TCTR_LOW
12578 MOVQ_R64_XMM TCTR_LOW INC
12579 MOVQ_R64_XMM CTR TCTR_LOW
12580+ pax_force_retaddr
12581 ret
12582 ENDPROC(_aesni_inc_init)
12583
12584@@ -2579,6 +2598,7 @@ _aesni_inc:
12585 .Linc_low:
12586 movaps CTR, IV
12587 PSHUFB_XMM BSWAP_MASK IV
12588+ pax_force_retaddr
12589 ret
12590 ENDPROC(_aesni_inc)
12591
12592@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
12593 .Lctr_enc_ret:
12594 movups IV, (IVP)
12595 .Lctr_enc_just_ret:
12596+ pax_force_retaddr
12597 ret
12598 ENDPROC(aesni_ctr_enc)
12599
12600@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
12601 pxor INC, STATE4
12602 movdqu STATE4, 0x70(OUTP)
12603
12604+ pax_force_retaddr
12605 ret
12606 ENDPROC(aesni_xts_crypt8)
12607
12608diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12609index 246c670..466e2d6 100644
12610--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
12611+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12612@@ -21,6 +21,7 @@
12613 */
12614
12615 #include <linux/linkage.h>
12616+#include <asm/alternative-asm.h>
12617
12618 .file "blowfish-x86_64-asm.S"
12619 .text
12620@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
12621 jnz .L__enc_xor;
12622
12623 write_block();
12624+ pax_force_retaddr
12625 ret;
12626 .L__enc_xor:
12627 xor_block();
12628+ pax_force_retaddr
12629 ret;
12630 ENDPROC(__blowfish_enc_blk)
12631
12632@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
12633
12634 movq %r11, %rbp;
12635
12636+ pax_force_retaddr
12637 ret;
12638 ENDPROC(blowfish_dec_blk)
12639
12640@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
12641
12642 popq %rbx;
12643 popq %rbp;
12644+ pax_force_retaddr
12645 ret;
12646
12647 .L__enc_xor4:
12648@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
12649
12650 popq %rbx;
12651 popq %rbp;
12652+ pax_force_retaddr
12653 ret;
12654 ENDPROC(__blowfish_enc_blk_4way)
12655
12656@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
12657 popq %rbx;
12658 popq %rbp;
12659
12660+ pax_force_retaddr
12661 ret;
12662 ENDPROC(blowfish_dec_blk_4way)
12663diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12664index ce71f92..1dce7ec 100644
12665--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12666+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12667@@ -16,6 +16,7 @@
12668 */
12669
12670 #include <linux/linkage.h>
12671+#include <asm/alternative-asm.h>
12672
12673 #define CAMELLIA_TABLE_BYTE_LEN 272
12674
12675@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12676 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
12677 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
12678 %rcx, (%r9));
12679+ pax_force_retaddr
12680 ret;
12681 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12682
12683@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12684 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
12685 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
12686 %rax, (%r9));
12687+ pax_force_retaddr
12688 ret;
12689 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12690
12691@@ -780,6 +783,7 @@ __camellia_enc_blk16:
12692 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12693 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
12694
12695+ pax_force_retaddr
12696 ret;
12697
12698 .align 8
12699@@ -865,6 +869,7 @@ __camellia_dec_blk16:
12700 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12701 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
12702
12703+ pax_force_retaddr
12704 ret;
12705
12706 .align 8
12707@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
12708 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12709 %xmm8, %rsi);
12710
12711+ pax_force_retaddr
12712 ret;
12713 ENDPROC(camellia_ecb_enc_16way)
12714
12715@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
12716 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12717 %xmm8, %rsi);
12718
12719+ pax_force_retaddr
12720 ret;
12721 ENDPROC(camellia_ecb_dec_16way)
12722
12723@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
12724 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12725 %xmm8, %rsi);
12726
12727+ pax_force_retaddr
12728 ret;
12729 ENDPROC(camellia_cbc_dec_16way)
12730
12731@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
12732 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12733 %xmm8, %rsi);
12734
12735+ pax_force_retaddr
12736 ret;
12737 ENDPROC(camellia_ctr_16way)
12738
12739@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
12740 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12741 %xmm8, %rsi);
12742
12743+ pax_force_retaddr
12744 ret;
12745 ENDPROC(camellia_xts_crypt_16way)
12746
12747diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12748index 0e0b886..5a3123c 100644
12749--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12750+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12751@@ -11,6 +11,7 @@
12752 */
12753
12754 #include <linux/linkage.h>
12755+#include <asm/alternative-asm.h>
12756
12757 #define CAMELLIA_TABLE_BYTE_LEN 272
12758
12759@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12760 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
12761 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
12762 %rcx, (%r9));
12763+ pax_force_retaddr
12764 ret;
12765 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12766
12767@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12768 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
12769 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
12770 %rax, (%r9));
12771+ pax_force_retaddr
12772 ret;
12773 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12774
12775@@ -820,6 +823,7 @@ __camellia_enc_blk32:
12776 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12777 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
12778
12779+ pax_force_retaddr
12780 ret;
12781
12782 .align 8
12783@@ -905,6 +909,7 @@ __camellia_dec_blk32:
12784 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12785 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
12786
12787+ pax_force_retaddr
12788 ret;
12789
12790 .align 8
12791@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
12792
12793 vzeroupper;
12794
12795+ pax_force_retaddr
12796 ret;
12797 ENDPROC(camellia_ecb_enc_32way)
12798
12799@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
12800
12801 vzeroupper;
12802
12803+ pax_force_retaddr
12804 ret;
12805 ENDPROC(camellia_ecb_dec_32way)
12806
12807@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
12808
12809 vzeroupper;
12810
12811+ pax_force_retaddr
12812 ret;
12813 ENDPROC(camellia_cbc_dec_32way)
12814
12815@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
12816
12817 vzeroupper;
12818
12819+ pax_force_retaddr
12820 ret;
12821 ENDPROC(camellia_ctr_32way)
12822
12823@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
12824
12825 vzeroupper;
12826
12827+ pax_force_retaddr
12828 ret;
12829 ENDPROC(camellia_xts_crypt_32way)
12830
12831diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
12832index 310319c..db3d7b5 100644
12833--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
12834+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
12835@@ -21,6 +21,7 @@
12836 */
12837
12838 #include <linux/linkage.h>
12839+#include <asm/alternative-asm.h>
12840
12841 .file "camellia-x86_64-asm_64.S"
12842 .text
12843@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
12844 enc_outunpack(mov, RT1);
12845
12846 movq RRBP, %rbp;
12847+ pax_force_retaddr
12848 ret;
12849
12850 .L__enc_xor:
12851 enc_outunpack(xor, RT1);
12852
12853 movq RRBP, %rbp;
12854+ pax_force_retaddr
12855 ret;
12856 ENDPROC(__camellia_enc_blk)
12857
12858@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
12859 dec_outunpack();
12860
12861 movq RRBP, %rbp;
12862+ pax_force_retaddr
12863 ret;
12864 ENDPROC(camellia_dec_blk)
12865
12866@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
12867
12868 movq RRBP, %rbp;
12869 popq %rbx;
12870+ pax_force_retaddr
12871 ret;
12872
12873 .L__enc2_xor:
12874@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
12875
12876 movq RRBP, %rbp;
12877 popq %rbx;
12878+ pax_force_retaddr
12879 ret;
12880 ENDPROC(__camellia_enc_blk_2way)
12881
12882@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
12883
12884 movq RRBP, %rbp;
12885 movq RXOR, %rbx;
12886+ pax_force_retaddr
12887 ret;
12888 ENDPROC(camellia_dec_blk_2way)
12889diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12890index c35fd5d..2d8c7db 100644
12891--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12892+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12893@@ -24,6 +24,7 @@
12894 */
12895
12896 #include <linux/linkage.h>
12897+#include <asm/alternative-asm.h>
12898
12899 .file "cast5-avx-x86_64-asm_64.S"
12900
12901@@ -281,6 +282,7 @@ __cast5_enc_blk16:
12902 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12903 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12904
12905+ pax_force_retaddr
12906 ret;
12907 ENDPROC(__cast5_enc_blk16)
12908
12909@@ -352,6 +354,7 @@ __cast5_dec_blk16:
12910 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12911 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12912
12913+ pax_force_retaddr
12914 ret;
12915
12916 .L__skip_dec:
12917@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
12918 vmovdqu RR4, (6*4*4)(%r11);
12919 vmovdqu RL4, (7*4*4)(%r11);
12920
12921+ pax_force_retaddr
12922 ret;
12923 ENDPROC(cast5_ecb_enc_16way)
12924
12925@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
12926 vmovdqu RR4, (6*4*4)(%r11);
12927 vmovdqu RL4, (7*4*4)(%r11);
12928
12929+ pax_force_retaddr
12930 ret;
12931 ENDPROC(cast5_ecb_dec_16way)
12932
12933@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
12934 * %rdx: src
12935 */
12936
12937- pushq %r12;
12938+ pushq %r14;
12939
12940 movq %rsi, %r11;
12941- movq %rdx, %r12;
12942+ movq %rdx, %r14;
12943
12944 vmovdqu (0*16)(%rdx), RL1;
12945 vmovdqu (1*16)(%rdx), RR1;
12946@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
12947 call __cast5_dec_blk16;
12948
12949 /* xor with src */
12950- vmovq (%r12), RX;
12951+ vmovq (%r14), RX;
12952 vpshufd $0x4f, RX, RX;
12953 vpxor RX, RR1, RR1;
12954- vpxor 0*16+8(%r12), RL1, RL1;
12955- vpxor 1*16+8(%r12), RR2, RR2;
12956- vpxor 2*16+8(%r12), RL2, RL2;
12957- vpxor 3*16+8(%r12), RR3, RR3;
12958- vpxor 4*16+8(%r12), RL3, RL3;
12959- vpxor 5*16+8(%r12), RR4, RR4;
12960- vpxor 6*16+8(%r12), RL4, RL4;
12961+ vpxor 0*16+8(%r14), RL1, RL1;
12962+ vpxor 1*16+8(%r14), RR2, RR2;
12963+ vpxor 2*16+8(%r14), RL2, RL2;
12964+ vpxor 3*16+8(%r14), RR3, RR3;
12965+ vpxor 4*16+8(%r14), RL3, RL3;
12966+ vpxor 5*16+8(%r14), RR4, RR4;
12967+ vpxor 6*16+8(%r14), RL4, RL4;
12968
12969 vmovdqu RR1, (0*16)(%r11);
12970 vmovdqu RL1, (1*16)(%r11);
12971@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
12972 vmovdqu RR4, (6*16)(%r11);
12973 vmovdqu RL4, (7*16)(%r11);
12974
12975- popq %r12;
12976+ popq %r14;
12977
12978+ pax_force_retaddr
12979 ret;
12980 ENDPROC(cast5_cbc_dec_16way)
12981
12982@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
12983 * %rcx: iv (big endian, 64bit)
12984 */
12985
12986- pushq %r12;
12987+ pushq %r14;
12988
12989 movq %rsi, %r11;
12990- movq %rdx, %r12;
12991+ movq %rdx, %r14;
12992
12993 vpcmpeqd RTMP, RTMP, RTMP;
12994 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
12995@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
12996 call __cast5_enc_blk16;
12997
12998 /* dst = src ^ iv */
12999- vpxor (0*16)(%r12), RR1, RR1;
13000- vpxor (1*16)(%r12), RL1, RL1;
13001- vpxor (2*16)(%r12), RR2, RR2;
13002- vpxor (3*16)(%r12), RL2, RL2;
13003- vpxor (4*16)(%r12), RR3, RR3;
13004- vpxor (5*16)(%r12), RL3, RL3;
13005- vpxor (6*16)(%r12), RR4, RR4;
13006- vpxor (7*16)(%r12), RL4, RL4;
13007+ vpxor (0*16)(%r14), RR1, RR1;
13008+ vpxor (1*16)(%r14), RL1, RL1;
13009+ vpxor (2*16)(%r14), RR2, RR2;
13010+ vpxor (3*16)(%r14), RL2, RL2;
13011+ vpxor (4*16)(%r14), RR3, RR3;
13012+ vpxor (5*16)(%r14), RL3, RL3;
13013+ vpxor (6*16)(%r14), RR4, RR4;
13014+ vpxor (7*16)(%r14), RL4, RL4;
13015 vmovdqu RR1, (0*16)(%r11);
13016 vmovdqu RL1, (1*16)(%r11);
13017 vmovdqu RR2, (2*16)(%r11);
13018@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13019 vmovdqu RR4, (6*16)(%r11);
13020 vmovdqu RL4, (7*16)(%r11);
13021
13022- popq %r12;
13023+ popq %r14;
13024
13025+ pax_force_retaddr
13026 ret;
13027 ENDPROC(cast5_ctr_16way)
13028diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13029index e3531f8..e123f35 100644
13030--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13031+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13032@@ -24,6 +24,7 @@
13033 */
13034
13035 #include <linux/linkage.h>
13036+#include <asm/alternative-asm.h>
13037 #include "glue_helper-asm-avx.S"
13038
13039 .file "cast6-avx-x86_64-asm_64.S"
13040@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13041 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13042 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13043
13044+ pax_force_retaddr
13045 ret;
13046 ENDPROC(__cast6_enc_blk8)
13047
13048@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13049 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13050 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13051
13052+ pax_force_retaddr
13053 ret;
13054 ENDPROC(__cast6_dec_blk8)
13055
13056@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13057
13058 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13059
13060+ pax_force_retaddr
13061 ret;
13062 ENDPROC(cast6_ecb_enc_8way)
13063
13064@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13065
13066 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13067
13068+ pax_force_retaddr
13069 ret;
13070 ENDPROC(cast6_ecb_dec_8way)
13071
13072@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13073 * %rdx: src
13074 */
13075
13076- pushq %r12;
13077+ pushq %r14;
13078
13079 movq %rsi, %r11;
13080- movq %rdx, %r12;
13081+ movq %rdx, %r14;
13082
13083 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13084
13085 call __cast6_dec_blk8;
13086
13087- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13088+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13089
13090- popq %r12;
13091+ popq %r14;
13092
13093+ pax_force_retaddr
13094 ret;
13095 ENDPROC(cast6_cbc_dec_8way)
13096
13097@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13098 * %rcx: iv (little endian, 128bit)
13099 */
13100
13101- pushq %r12;
13102+ pushq %r14;
13103
13104 movq %rsi, %r11;
13105- movq %rdx, %r12;
13106+ movq %rdx, %r14;
13107
13108 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13109 RD2, RX, RKR, RKM);
13110
13111 call __cast6_enc_blk8;
13112
13113- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13114+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13115
13116- popq %r12;
13117+ popq %r14;
13118
13119+ pax_force_retaddr
13120 ret;
13121 ENDPROC(cast6_ctr_8way)
13122
13123@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13124 /* dst <= regs xor IVs(in dst) */
13125 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13126
13127+ pax_force_retaddr
13128 ret;
13129 ENDPROC(cast6_xts_enc_8way)
13130
13131@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13132 /* dst <= regs xor IVs(in dst) */
13133 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13134
13135+ pax_force_retaddr
13136 ret;
13137 ENDPROC(cast6_xts_dec_8way)
13138diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13139index dbc4339..de6e120 100644
13140--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13141+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13142@@ -45,6 +45,7 @@
13143
13144 #include <asm/inst.h>
13145 #include <linux/linkage.h>
13146+#include <asm/alternative-asm.h>
13147
13148 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13149
13150@@ -312,6 +313,7 @@ do_return:
13151 popq %rsi
13152 popq %rdi
13153 popq %rbx
13154+ pax_force_retaddr
13155 ret
13156
13157 ################################################################
13158diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13159index 586f41a..d02851e 100644
13160--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13161+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13162@@ -18,6 +18,7 @@
13163
13164 #include <linux/linkage.h>
13165 #include <asm/inst.h>
13166+#include <asm/alternative-asm.h>
13167
13168 .data
13169
13170@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
13171 psrlq $1, T2
13172 pxor T2, T1
13173 pxor T1, DATA
13174+ pax_force_retaddr
13175 ret
13176 ENDPROC(__clmul_gf128mul_ble)
13177
13178@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
13179 call __clmul_gf128mul_ble
13180 PSHUFB_XMM BSWAP DATA
13181 movups DATA, (%rdi)
13182+ pax_force_retaddr
13183 ret
13184 ENDPROC(clmul_ghash_mul)
13185
13186@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
13187 PSHUFB_XMM BSWAP DATA
13188 movups DATA, (%rdi)
13189 .Lupdate_just_ret:
13190+ pax_force_retaddr
13191 ret
13192 ENDPROC(clmul_ghash_update)
13193
13194@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
13195 pand .Lpoly, %xmm1
13196 pxor %xmm1, %xmm0
13197 movups %xmm0, (%rdi)
13198+ pax_force_retaddr
13199 ret
13200 ENDPROC(clmul_ghash_setkey)
13201diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13202index 9279e0b..c4b3d2c 100644
13203--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
13204+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13205@@ -1,4 +1,5 @@
13206 #include <linux/linkage.h>
13207+#include <asm/alternative-asm.h>
13208
13209 # enter salsa20_encrypt_bytes
13210 ENTRY(salsa20_encrypt_bytes)
13211@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
13212 add %r11,%rsp
13213 mov %rdi,%rax
13214 mov %rsi,%rdx
13215+ pax_force_retaddr
13216 ret
13217 # bytesatleast65:
13218 ._bytesatleast65:
13219@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
13220 add %r11,%rsp
13221 mov %rdi,%rax
13222 mov %rsi,%rdx
13223+ pax_force_retaddr
13224 ret
13225 ENDPROC(salsa20_keysetup)
13226
13227@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
13228 add %r11,%rsp
13229 mov %rdi,%rax
13230 mov %rsi,%rdx
13231+ pax_force_retaddr
13232 ret
13233 ENDPROC(salsa20_ivsetup)
13234diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13235index 2f202f4..d9164d6 100644
13236--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13237+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13238@@ -24,6 +24,7 @@
13239 */
13240
13241 #include <linux/linkage.h>
13242+#include <asm/alternative-asm.h>
13243 #include "glue_helper-asm-avx.S"
13244
13245 .file "serpent-avx-x86_64-asm_64.S"
13246@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
13247 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13248 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13249
13250+ pax_force_retaddr
13251 ret;
13252 ENDPROC(__serpent_enc_blk8_avx)
13253
13254@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
13255 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13256 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13257
13258+ pax_force_retaddr
13259 ret;
13260 ENDPROC(__serpent_dec_blk8_avx)
13261
13262@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
13263
13264 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13265
13266+ pax_force_retaddr
13267 ret;
13268 ENDPROC(serpent_ecb_enc_8way_avx)
13269
13270@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
13271
13272 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13273
13274+ pax_force_retaddr
13275 ret;
13276 ENDPROC(serpent_ecb_dec_8way_avx)
13277
13278@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
13279
13280 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13281
13282+ pax_force_retaddr
13283 ret;
13284 ENDPROC(serpent_cbc_dec_8way_avx)
13285
13286@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
13287
13288 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13289
13290+ pax_force_retaddr
13291 ret;
13292 ENDPROC(serpent_ctr_8way_avx)
13293
13294@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
13295 /* dst <= regs xor IVs(in dst) */
13296 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13297
13298+ pax_force_retaddr
13299 ret;
13300 ENDPROC(serpent_xts_enc_8way_avx)
13301
13302@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
13303 /* dst <= regs xor IVs(in dst) */
13304 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13305
13306+ pax_force_retaddr
13307 ret;
13308 ENDPROC(serpent_xts_dec_8way_avx)
13309diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
13310index b222085..abd483c 100644
13311--- a/arch/x86/crypto/serpent-avx2-asm_64.S
13312+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
13313@@ -15,6 +15,7 @@
13314 */
13315
13316 #include <linux/linkage.h>
13317+#include <asm/alternative-asm.h>
13318 #include "glue_helper-asm-avx2.S"
13319
13320 .file "serpent-avx2-asm_64.S"
13321@@ -610,6 +611,7 @@ __serpent_enc_blk16:
13322 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13323 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13324
13325+ pax_force_retaddr
13326 ret;
13327 ENDPROC(__serpent_enc_blk16)
13328
13329@@ -664,6 +666,7 @@ __serpent_dec_blk16:
13330 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13331 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13332
13333+ pax_force_retaddr
13334 ret;
13335 ENDPROC(__serpent_dec_blk16)
13336
13337@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
13338
13339 vzeroupper;
13340
13341+ pax_force_retaddr
13342 ret;
13343 ENDPROC(serpent_ecb_enc_16way)
13344
13345@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
13346
13347 vzeroupper;
13348
13349+ pax_force_retaddr
13350 ret;
13351 ENDPROC(serpent_ecb_dec_16way)
13352
13353@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
13354
13355 vzeroupper;
13356
13357+ pax_force_retaddr
13358 ret;
13359 ENDPROC(serpent_cbc_dec_16way)
13360
13361@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
13362
13363 vzeroupper;
13364
13365+ pax_force_retaddr
13366 ret;
13367 ENDPROC(serpent_ctr_16way)
13368
13369@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
13370
13371 vzeroupper;
13372
13373+ pax_force_retaddr
13374 ret;
13375 ENDPROC(serpent_xts_enc_16way)
13376
13377@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
13378
13379 vzeroupper;
13380
13381+ pax_force_retaddr
13382 ret;
13383 ENDPROC(serpent_xts_dec_16way)
13384diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13385index acc066c..1559cc4 100644
13386--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13387+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13388@@ -25,6 +25,7 @@
13389 */
13390
13391 #include <linux/linkage.h>
13392+#include <asm/alternative-asm.h>
13393
13394 .file "serpent-sse2-x86_64-asm_64.S"
13395 .text
13396@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
13397 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13398 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13399
13400+ pax_force_retaddr
13401 ret;
13402
13403 .L__enc_xor8:
13404 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13405 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13406
13407+ pax_force_retaddr
13408 ret;
13409 ENDPROC(__serpent_enc_blk_8way)
13410
13411@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
13412 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13413 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13414
13415+ pax_force_retaddr
13416 ret;
13417 ENDPROC(serpent_dec_blk_8way)
13418diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
13419index a410950..9dfe7ad 100644
13420--- a/arch/x86/crypto/sha1_ssse3_asm.S
13421+++ b/arch/x86/crypto/sha1_ssse3_asm.S
13422@@ -29,6 +29,7 @@
13423 */
13424
13425 #include <linux/linkage.h>
13426+#include <asm/alternative-asm.h>
13427
13428 #define CTX %rdi // arg1
13429 #define BUF %rsi // arg2
13430@@ -75,9 +76,9 @@
13431
13432 push %rbx
13433 push %rbp
13434- push %r12
13435+ push %r14
13436
13437- mov %rsp, %r12
13438+ mov %rsp, %r14
13439 sub $64, %rsp # allocate workspace
13440 and $~15, %rsp # align stack
13441
13442@@ -99,11 +100,12 @@
13443 xor %rax, %rax
13444 rep stosq
13445
13446- mov %r12, %rsp # deallocate workspace
13447+ mov %r14, %rsp # deallocate workspace
13448
13449- pop %r12
13450+ pop %r14
13451 pop %rbp
13452 pop %rbx
13453+ pax_force_retaddr
13454 ret
13455
13456 ENDPROC(\name)
13457diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
13458index 642f156..51a513c 100644
13459--- a/arch/x86/crypto/sha256-avx-asm.S
13460+++ b/arch/x86/crypto/sha256-avx-asm.S
13461@@ -49,6 +49,7 @@
13462
13463 #ifdef CONFIG_AS_AVX
13464 #include <linux/linkage.h>
13465+#include <asm/alternative-asm.h>
13466
13467 ## assume buffers not aligned
13468 #define VMOVDQ vmovdqu
13469@@ -460,6 +461,7 @@ done_hash:
13470 popq %r13
13471 popq %rbp
13472 popq %rbx
13473+ pax_force_retaddr
13474 ret
13475 ENDPROC(sha256_transform_avx)
13476
13477diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
13478index 9e86944..3795e6a 100644
13479--- a/arch/x86/crypto/sha256-avx2-asm.S
13480+++ b/arch/x86/crypto/sha256-avx2-asm.S
13481@@ -50,6 +50,7 @@
13482
13483 #ifdef CONFIG_AS_AVX2
13484 #include <linux/linkage.h>
13485+#include <asm/alternative-asm.h>
13486
13487 ## assume buffers not aligned
13488 #define VMOVDQ vmovdqu
13489@@ -720,6 +721,7 @@ done_hash:
13490 popq %r12
13491 popq %rbp
13492 popq %rbx
13493+ pax_force_retaddr
13494 ret
13495 ENDPROC(sha256_transform_rorx)
13496
13497diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
13498index f833b74..8c62a9e 100644
13499--- a/arch/x86/crypto/sha256-ssse3-asm.S
13500+++ b/arch/x86/crypto/sha256-ssse3-asm.S
13501@@ -47,6 +47,7 @@
13502 ########################################################################
13503
13504 #include <linux/linkage.h>
13505+#include <asm/alternative-asm.h>
13506
13507 ## assume buffers not aligned
13508 #define MOVDQ movdqu
13509@@ -471,6 +472,7 @@ done_hash:
13510 popq %rbp
13511 popq %rbx
13512
13513+ pax_force_retaddr
13514 ret
13515 ENDPROC(sha256_transform_ssse3)
13516
13517diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
13518index 974dde9..a823ff9 100644
13519--- a/arch/x86/crypto/sha512-avx-asm.S
13520+++ b/arch/x86/crypto/sha512-avx-asm.S
13521@@ -49,6 +49,7 @@
13522
13523 #ifdef CONFIG_AS_AVX
13524 #include <linux/linkage.h>
13525+#include <asm/alternative-asm.h>
13526
13527 .text
13528
13529@@ -364,6 +365,7 @@ updateblock:
13530 mov frame_RSPSAVE(%rsp), %rsp
13531
13532 nowork:
13533+ pax_force_retaddr
13534 ret
13535 ENDPROC(sha512_transform_avx)
13536
13537diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
13538index 568b961..ed20c37 100644
13539--- a/arch/x86/crypto/sha512-avx2-asm.S
13540+++ b/arch/x86/crypto/sha512-avx2-asm.S
13541@@ -51,6 +51,7 @@
13542
13543 #ifdef CONFIG_AS_AVX2
13544 #include <linux/linkage.h>
13545+#include <asm/alternative-asm.h>
13546
13547 .text
13548
13549@@ -678,6 +679,7 @@ done_hash:
13550
13551 # Restore Stack Pointer
13552 mov frame_RSPSAVE(%rsp), %rsp
13553+ pax_force_retaddr
13554 ret
13555 ENDPROC(sha512_transform_rorx)
13556
13557diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
13558index fb56855..6edd768 100644
13559--- a/arch/x86/crypto/sha512-ssse3-asm.S
13560+++ b/arch/x86/crypto/sha512-ssse3-asm.S
13561@@ -48,6 +48,7 @@
13562 ########################################################################
13563
13564 #include <linux/linkage.h>
13565+#include <asm/alternative-asm.h>
13566
13567 .text
13568
13569@@ -363,6 +364,7 @@ updateblock:
13570 mov frame_RSPSAVE(%rsp), %rsp
13571
13572 nowork:
13573+ pax_force_retaddr
13574 ret
13575 ENDPROC(sha512_transform_ssse3)
13576
13577diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13578index 0505813..b067311 100644
13579--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13580+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13581@@ -24,6 +24,7 @@
13582 */
13583
13584 #include <linux/linkage.h>
13585+#include <asm/alternative-asm.h>
13586 #include "glue_helper-asm-avx.S"
13587
13588 .file "twofish-avx-x86_64-asm_64.S"
13589@@ -284,6 +285,7 @@ __twofish_enc_blk8:
13590 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
13591 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
13592
13593+ pax_force_retaddr
13594 ret;
13595 ENDPROC(__twofish_enc_blk8)
13596
13597@@ -324,6 +326,7 @@ __twofish_dec_blk8:
13598 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
13599 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
13600
13601+ pax_force_retaddr
13602 ret;
13603 ENDPROC(__twofish_dec_blk8)
13604
13605@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
13606
13607 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13608
13609+ pax_force_retaddr
13610 ret;
13611 ENDPROC(twofish_ecb_enc_8way)
13612
13613@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
13614
13615 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13616
13617+ pax_force_retaddr
13618 ret;
13619 ENDPROC(twofish_ecb_dec_8way)
13620
13621@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
13622 * %rdx: src
13623 */
13624
13625- pushq %r12;
13626+ pushq %r14;
13627
13628 movq %rsi, %r11;
13629- movq %rdx, %r12;
13630+ movq %rdx, %r14;
13631
13632 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13633
13634 call __twofish_dec_blk8;
13635
13636- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13637+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13638
13639- popq %r12;
13640+ popq %r14;
13641
13642+ pax_force_retaddr
13643 ret;
13644 ENDPROC(twofish_cbc_dec_8way)
13645
13646@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
13647 * %rcx: iv (little endian, 128bit)
13648 */
13649
13650- pushq %r12;
13651+ pushq %r14;
13652
13653 movq %rsi, %r11;
13654- movq %rdx, %r12;
13655+ movq %rdx, %r14;
13656
13657 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13658 RD2, RX0, RX1, RY0);
13659
13660 call __twofish_enc_blk8;
13661
13662- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13663+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13664
13665- popq %r12;
13666+ popq %r14;
13667
13668+ pax_force_retaddr
13669 ret;
13670 ENDPROC(twofish_ctr_8way)
13671
13672@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
13673 /* dst <= regs xor IVs(in dst) */
13674 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13675
13676+ pax_force_retaddr
13677 ret;
13678 ENDPROC(twofish_xts_enc_8way)
13679
13680@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
13681 /* dst <= regs xor IVs(in dst) */
13682 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13683
13684+ pax_force_retaddr
13685 ret;
13686 ENDPROC(twofish_xts_dec_8way)
13687diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13688index 1c3b7ce..02f578d 100644
13689--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13690+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13691@@ -21,6 +21,7 @@
13692 */
13693
13694 #include <linux/linkage.h>
13695+#include <asm/alternative-asm.h>
13696
13697 .file "twofish-x86_64-asm-3way.S"
13698 .text
13699@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
13700 popq %r13;
13701 popq %r14;
13702 popq %r15;
13703+ pax_force_retaddr
13704 ret;
13705
13706 .L__enc_xor3:
13707@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
13708 popq %r13;
13709 popq %r14;
13710 popq %r15;
13711+ pax_force_retaddr
13712 ret;
13713 ENDPROC(__twofish_enc_blk_3way)
13714
13715@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
13716 popq %r13;
13717 popq %r14;
13718 popq %r15;
13719+ pax_force_retaddr
13720 ret;
13721 ENDPROC(twofish_dec_blk_3way)
13722diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
13723index a039d21..524b8b2 100644
13724--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
13725+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
13726@@ -22,6 +22,7 @@
13727
13728 #include <linux/linkage.h>
13729 #include <asm/asm-offsets.h>
13730+#include <asm/alternative-asm.h>
13731
13732 #define a_offset 0
13733 #define b_offset 4
13734@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
13735
13736 popq R1
13737 movq $1,%rax
13738+ pax_force_retaddr
13739 ret
13740 ENDPROC(twofish_enc_blk)
13741
13742@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
13743
13744 popq R1
13745 movq $1,%rax
13746+ pax_force_retaddr
13747 ret
13748 ENDPROC(twofish_dec_blk)
13749diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
13750index d21ff89..6da8e6e 100644
13751--- a/arch/x86/ia32/ia32_aout.c
13752+++ b/arch/x86/ia32/ia32_aout.c
13753@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
13754 unsigned long dump_start, dump_size;
13755 struct user32 dump;
13756
13757+ memset(&dump, 0, sizeof(dump));
13758+
13759 fs = get_fs();
13760 set_fs(KERNEL_DS);
13761 has_dumped = 1;
13762diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
13763index 2206757..85cbcfa 100644
13764--- a/arch/x86/ia32/ia32_signal.c
13765+++ b/arch/x86/ia32/ia32_signal.c
13766@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
13767 if (__get_user(set.sig[0], &frame->sc.oldmask)
13768 || (_COMPAT_NSIG_WORDS > 1
13769 && __copy_from_user((((char *) &set.sig) + 4),
13770- &frame->extramask,
13771+ frame->extramask,
13772 sizeof(frame->extramask))))
13773 goto badframe;
13774
13775@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
13776 sp -= frame_size;
13777 /* Align the stack pointer according to the i386 ABI,
13778 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
13779- sp = ((sp + 4) & -16ul) - 4;
13780+ sp = ((sp - 12) & -16ul) - 4;
13781 return (void __user *) sp;
13782 }
13783
13784@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
13785 restorer = VDSO32_SYMBOL(current->mm->context.vdso,
13786 sigreturn);
13787 else
13788- restorer = &frame->retcode;
13789+ restorer = frame->retcode;
13790 }
13791
13792 put_user_try {
13793@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
13794 * These are actually not used anymore, but left because some
13795 * gdb versions depend on them as a marker.
13796 */
13797- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13798+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13799 } put_user_catch(err);
13800
13801 if (err)
13802@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13803 0xb8,
13804 __NR_ia32_rt_sigreturn,
13805 0x80cd,
13806- 0,
13807+ 0
13808 };
13809
13810 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
13811@@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13812
13813 if (ksig->ka.sa.sa_flags & SA_RESTORER)
13814 restorer = ksig->ka.sa.sa_restorer;
13815+ else if (current->mm->context.vdso)
13816+ /* Return stub is in 32bit vsyscall page */
13817+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13818 else
13819- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
13820- rt_sigreturn);
13821+ restorer = frame->retcode;
13822 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
13823
13824 /*
13825 * Not actually used anymore, but left because some gdb
13826 * versions need it.
13827 */
13828- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13829+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13830 } put_user_catch(err);
13831
13832 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
13833diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
13834index 4299eb0..c0687a7 100644
13835--- a/arch/x86/ia32/ia32entry.S
13836+++ b/arch/x86/ia32/ia32entry.S
13837@@ -15,8 +15,10 @@
13838 #include <asm/irqflags.h>
13839 #include <asm/asm.h>
13840 #include <asm/smap.h>
13841+#include <asm/pgtable.h>
13842 #include <linux/linkage.h>
13843 #include <linux/err.h>
13844+#include <asm/alternative-asm.h>
13845
13846 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13847 #include <linux/elf-em.h>
13848@@ -62,12 +64,12 @@
13849 */
13850 .macro LOAD_ARGS32 offset, _r9=0
13851 .if \_r9
13852- movl \offset+16(%rsp),%r9d
13853+ movl \offset+R9(%rsp),%r9d
13854 .endif
13855- movl \offset+40(%rsp),%ecx
13856- movl \offset+48(%rsp),%edx
13857- movl \offset+56(%rsp),%esi
13858- movl \offset+64(%rsp),%edi
13859+ movl \offset+RCX(%rsp),%ecx
13860+ movl \offset+RDX(%rsp),%edx
13861+ movl \offset+RSI(%rsp),%esi
13862+ movl \offset+RDI(%rsp),%edi
13863 movl %eax,%eax /* zero extension */
13864 .endm
13865
13866@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
13867 ENDPROC(native_irq_enable_sysexit)
13868 #endif
13869
13870+ .macro pax_enter_kernel_user
13871+ pax_set_fptr_mask
13872+#ifdef CONFIG_PAX_MEMORY_UDEREF
13873+ call pax_enter_kernel_user
13874+#endif
13875+ .endm
13876+
13877+ .macro pax_exit_kernel_user
13878+#ifdef CONFIG_PAX_MEMORY_UDEREF
13879+ call pax_exit_kernel_user
13880+#endif
13881+#ifdef CONFIG_PAX_RANDKSTACK
13882+ pushq %rax
13883+ pushq %r11
13884+ call pax_randomize_kstack
13885+ popq %r11
13886+ popq %rax
13887+#endif
13888+ .endm
13889+
13890+ .macro pax_erase_kstack
13891+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13892+ call pax_erase_kstack
13893+#endif
13894+ .endm
13895+
13896 /*
13897 * 32bit SYSENTER instruction entry.
13898 *
13899@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
13900 CFI_REGISTER rsp,rbp
13901 SWAPGS_UNSAFE_STACK
13902 movq PER_CPU_VAR(kernel_stack), %rsp
13903- addq $(KERNEL_STACK_OFFSET),%rsp
13904- /*
13905- * No need to follow this irqs on/off section: the syscall
13906- * disabled irqs, here we enable it straight after entry:
13907- */
13908- ENABLE_INTERRUPTS(CLBR_NONE)
13909 movl %ebp,%ebp /* zero extension */
13910 pushq_cfi $__USER32_DS
13911 /*CFI_REL_OFFSET ss,0*/
13912@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
13913 CFI_REL_OFFSET rsp,0
13914 pushfq_cfi
13915 /*CFI_REL_OFFSET rflags,0*/
13916- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
13917- CFI_REGISTER rip,r10
13918+ orl $X86_EFLAGS_IF,(%rsp)
13919+ GET_THREAD_INFO(%r11)
13920+ movl TI_sysenter_return(%r11), %r11d
13921+ CFI_REGISTER rip,r11
13922 pushq_cfi $__USER32_CS
13923 /*CFI_REL_OFFSET cs,0*/
13924 movl %eax, %eax
13925- pushq_cfi %r10
13926+ pushq_cfi %r11
13927 CFI_REL_OFFSET rip,0
13928 pushq_cfi %rax
13929 cld
13930 SAVE_ARGS 0,1,0
13931+ pax_enter_kernel_user
13932+
13933+#ifdef CONFIG_PAX_RANDKSTACK
13934+ pax_erase_kstack
13935+#endif
13936+
13937+ /*
13938+ * No need to follow this irqs on/off section: the syscall
13939+ * disabled irqs, here we enable it straight after entry:
13940+ */
13941+ ENABLE_INTERRUPTS(CLBR_NONE)
13942 /* no need to do an access_ok check here because rbp has been
13943 32bit zero extended */
13944+
13945+#ifdef CONFIG_PAX_MEMORY_UDEREF
13946+ addq pax_user_shadow_base,%rbp
13947+ ASM_PAX_OPEN_USERLAND
13948+#endif
13949+
13950 ASM_STAC
13951 1: movl (%rbp),%ebp
13952 _ASM_EXTABLE(1b,ia32_badarg)
13953 ASM_CLAC
13954- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13955- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13956+
13957+#ifdef CONFIG_PAX_MEMORY_UDEREF
13958+ ASM_PAX_CLOSE_USERLAND
13959+#endif
13960+
13961+ GET_THREAD_INFO(%r11)
13962+ orl $TS_COMPAT,TI_status(%r11)
13963+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13964 CFI_REMEMBER_STATE
13965 jnz sysenter_tracesys
13966 cmpq $(IA32_NR_syscalls-1),%rax
13967@@ -162,15 +209,18 @@ sysenter_do_call:
13968 sysenter_dispatch:
13969 call *ia32_sys_call_table(,%rax,8)
13970 movq %rax,RAX-ARGOFFSET(%rsp)
13971+ GET_THREAD_INFO(%r11)
13972 DISABLE_INTERRUPTS(CLBR_NONE)
13973 TRACE_IRQS_OFF
13974- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13975+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13976 jnz sysexit_audit
13977 sysexit_from_sys_call:
13978- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13979+ pax_exit_kernel_user
13980+ pax_erase_kstack
13981+ andl $~TS_COMPAT,TI_status(%r11)
13982 /* clear IF, that popfq doesn't enable interrupts early */
13983- andl $~0x200,EFLAGS-R11(%rsp)
13984- movl RIP-R11(%rsp),%edx /* User %eip */
13985+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
13986+ movl RIP(%rsp),%edx /* User %eip */
13987 CFI_REGISTER rip,rdx
13988 RESTORE_ARGS 0,24,0,0,0,0
13989 xorq %r8,%r8
13990@@ -193,6 +243,9 @@ sysexit_from_sys_call:
13991 movl %eax,%esi /* 2nd arg: syscall number */
13992 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
13993 call __audit_syscall_entry
13994+
13995+ pax_erase_kstack
13996+
13997 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
13998 cmpq $(IA32_NR_syscalls-1),%rax
13999 ja ia32_badsys
14000@@ -204,7 +257,7 @@ sysexit_from_sys_call:
14001 .endm
14002
14003 .macro auditsys_exit exit
14004- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14005+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14006 jnz ia32_ret_from_sys_call
14007 TRACE_IRQS_ON
14008 ENABLE_INTERRUPTS(CLBR_NONE)
14009@@ -215,11 +268,12 @@ sysexit_from_sys_call:
14010 1: setbe %al /* 1 if error, 0 if not */
14011 movzbl %al,%edi /* zero-extend that into %edi */
14012 call __audit_syscall_exit
14013+ GET_THREAD_INFO(%r11)
14014 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14015 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14016 DISABLE_INTERRUPTS(CLBR_NONE)
14017 TRACE_IRQS_OFF
14018- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14019+ testl %edi,TI_flags(%r11)
14020 jz \exit
14021 CLEAR_RREGS -ARGOFFSET
14022 jmp int_with_check
14023@@ -237,7 +291,7 @@ sysexit_audit:
14024
14025 sysenter_tracesys:
14026 #ifdef CONFIG_AUDITSYSCALL
14027- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14028+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14029 jz sysenter_auditsys
14030 #endif
14031 SAVE_REST
14032@@ -249,6 +303,9 @@ sysenter_tracesys:
14033 RESTORE_REST
14034 cmpq $(IA32_NR_syscalls-1),%rax
14035 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14036+
14037+ pax_erase_kstack
14038+
14039 jmp sysenter_do_call
14040 CFI_ENDPROC
14041 ENDPROC(ia32_sysenter_target)
14042@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
14043 ENTRY(ia32_cstar_target)
14044 CFI_STARTPROC32 simple
14045 CFI_SIGNAL_FRAME
14046- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14047+ CFI_DEF_CFA rsp,0
14048 CFI_REGISTER rip,rcx
14049 /*CFI_REGISTER rflags,r11*/
14050 SWAPGS_UNSAFE_STACK
14051 movl %esp,%r8d
14052 CFI_REGISTER rsp,r8
14053 movq PER_CPU_VAR(kernel_stack),%rsp
14054+ SAVE_ARGS 8*6,0,0
14055+ pax_enter_kernel_user
14056+
14057+#ifdef CONFIG_PAX_RANDKSTACK
14058+ pax_erase_kstack
14059+#endif
14060+
14061 /*
14062 * No need to follow this irqs on/off section: the syscall
14063 * disabled irqs and here we enable it straight after entry:
14064 */
14065 ENABLE_INTERRUPTS(CLBR_NONE)
14066- SAVE_ARGS 8,0,0
14067 movl %eax,%eax /* zero extension */
14068 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14069 movq %rcx,RIP-ARGOFFSET(%rsp)
14070@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
14071 /* no need to do an access_ok check here because r8 has been
14072 32bit zero extended */
14073 /* hardware stack frame is complete now */
14074+
14075+#ifdef CONFIG_PAX_MEMORY_UDEREF
14076+ ASM_PAX_OPEN_USERLAND
14077+ movq pax_user_shadow_base,%r8
14078+ addq RSP-ARGOFFSET(%rsp),%r8
14079+#endif
14080+
14081 ASM_STAC
14082 1: movl (%r8),%r9d
14083 _ASM_EXTABLE(1b,ia32_badarg)
14084 ASM_CLAC
14085- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14086- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14087+
14088+#ifdef CONFIG_PAX_MEMORY_UDEREF
14089+ ASM_PAX_CLOSE_USERLAND
14090+#endif
14091+
14092+ GET_THREAD_INFO(%r11)
14093+ orl $TS_COMPAT,TI_status(%r11)
14094+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14095 CFI_REMEMBER_STATE
14096 jnz cstar_tracesys
14097 cmpq $IA32_NR_syscalls-1,%rax
14098@@ -319,13 +395,16 @@ cstar_do_call:
14099 cstar_dispatch:
14100 call *ia32_sys_call_table(,%rax,8)
14101 movq %rax,RAX-ARGOFFSET(%rsp)
14102+ GET_THREAD_INFO(%r11)
14103 DISABLE_INTERRUPTS(CLBR_NONE)
14104 TRACE_IRQS_OFF
14105- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14106+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14107 jnz sysretl_audit
14108 sysretl_from_sys_call:
14109- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14110- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14111+ pax_exit_kernel_user
14112+ pax_erase_kstack
14113+ andl $~TS_COMPAT,TI_status(%r11)
14114+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14115 movl RIP-ARGOFFSET(%rsp),%ecx
14116 CFI_REGISTER rip,rcx
14117 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14118@@ -352,7 +431,7 @@ sysretl_audit:
14119
14120 cstar_tracesys:
14121 #ifdef CONFIG_AUDITSYSCALL
14122- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14123+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14124 jz cstar_auditsys
14125 #endif
14126 xchgl %r9d,%ebp
14127@@ -366,11 +445,19 @@ cstar_tracesys:
14128 xchgl %ebp,%r9d
14129 cmpq $(IA32_NR_syscalls-1),%rax
14130 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14131+
14132+ pax_erase_kstack
14133+
14134 jmp cstar_do_call
14135 END(ia32_cstar_target)
14136
14137 ia32_badarg:
14138 ASM_CLAC
14139+
14140+#ifdef CONFIG_PAX_MEMORY_UDEREF
14141+ ASM_PAX_CLOSE_USERLAND
14142+#endif
14143+
14144 movq $-EFAULT,%rax
14145 jmp ia32_sysret
14146 CFI_ENDPROC
14147@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
14148 CFI_REL_OFFSET rip,RIP-RIP
14149 PARAVIRT_ADJUST_EXCEPTION_FRAME
14150 SWAPGS
14151- /*
14152- * No need to follow this irqs on/off section: the syscall
14153- * disabled irqs and here we enable it straight after entry:
14154- */
14155- ENABLE_INTERRUPTS(CLBR_NONE)
14156 movl %eax,%eax
14157 pushq_cfi %rax
14158 cld
14159 /* note the registers are not zero extended to the sf.
14160 this could be a problem. */
14161 SAVE_ARGS 0,1,0
14162- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14163- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14164+ pax_enter_kernel_user
14165+
14166+#ifdef CONFIG_PAX_RANDKSTACK
14167+ pax_erase_kstack
14168+#endif
14169+
14170+ /*
14171+ * No need to follow this irqs on/off section: the syscall
14172+ * disabled irqs and here we enable it straight after entry:
14173+ */
14174+ ENABLE_INTERRUPTS(CLBR_NONE)
14175+ GET_THREAD_INFO(%r11)
14176+ orl $TS_COMPAT,TI_status(%r11)
14177+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14178 jnz ia32_tracesys
14179 cmpq $(IA32_NR_syscalls-1),%rax
14180 ja ia32_badsys
14181@@ -442,6 +536,9 @@ ia32_tracesys:
14182 RESTORE_REST
14183 cmpq $(IA32_NR_syscalls-1),%rax
14184 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
14185+
14186+ pax_erase_kstack
14187+
14188 jmp ia32_do_call
14189 END(ia32_syscall)
14190
14191diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
14192index 8e0ceec..af13504 100644
14193--- a/arch/x86/ia32/sys_ia32.c
14194+++ b/arch/x86/ia32/sys_ia32.c
14195@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
14196 */
14197 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
14198 {
14199- typeof(ubuf->st_uid) uid = 0;
14200- typeof(ubuf->st_gid) gid = 0;
14201+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
14202+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
14203 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
14204 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
14205 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
14206diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
14207index 372231c..51b537d 100644
14208--- a/arch/x86/include/asm/alternative-asm.h
14209+++ b/arch/x86/include/asm/alternative-asm.h
14210@@ -18,6 +18,45 @@
14211 .endm
14212 #endif
14213
14214+#ifdef KERNEXEC_PLUGIN
14215+ .macro pax_force_retaddr_bts rip=0
14216+ btsq $63,\rip(%rsp)
14217+ .endm
14218+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14219+ .macro pax_force_retaddr rip=0, reload=0
14220+ btsq $63,\rip(%rsp)
14221+ .endm
14222+ .macro pax_force_fptr ptr
14223+ btsq $63,\ptr
14224+ .endm
14225+ .macro pax_set_fptr_mask
14226+ .endm
14227+#endif
14228+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
14229+ .macro pax_force_retaddr rip=0, reload=0
14230+ .if \reload
14231+ pax_set_fptr_mask
14232+ .endif
14233+ orq %r12,\rip(%rsp)
14234+ .endm
14235+ .macro pax_force_fptr ptr
14236+ orq %r12,\ptr
14237+ .endm
14238+ .macro pax_set_fptr_mask
14239+ movabs $0x8000000000000000,%r12
14240+ .endm
14241+#endif
14242+#else
14243+ .macro pax_force_retaddr rip=0, reload=0
14244+ .endm
14245+ .macro pax_force_fptr ptr
14246+ .endm
14247+ .macro pax_force_retaddr_bts rip=0
14248+ .endm
14249+ .macro pax_set_fptr_mask
14250+ .endm
14251+#endif
14252+
14253 .macro altinstruction_entry orig alt feature orig_len alt_len
14254 .long \orig - .
14255 .long \alt - .
14256diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
14257index 0a3f9c9..c9d081d 100644
14258--- a/arch/x86/include/asm/alternative.h
14259+++ b/arch/x86/include/asm/alternative.h
14260@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14261 ".pushsection .discard,\"aw\",@progbits\n" \
14262 DISCARD_ENTRY(1) \
14263 ".popsection\n" \
14264- ".pushsection .altinstr_replacement, \"ax\"\n" \
14265+ ".pushsection .altinstr_replacement, \"a\"\n" \
14266 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
14267 ".popsection"
14268
14269@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14270 DISCARD_ENTRY(1) \
14271 DISCARD_ENTRY(2) \
14272 ".popsection\n" \
14273- ".pushsection .altinstr_replacement, \"ax\"\n" \
14274+ ".pushsection .altinstr_replacement, \"a\"\n" \
14275 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
14276 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
14277 ".popsection"
14278diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
14279index 1d2091a..f5074c1 100644
14280--- a/arch/x86/include/asm/apic.h
14281+++ b/arch/x86/include/asm/apic.h
14282@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
14283
14284 #ifdef CONFIG_X86_LOCAL_APIC
14285
14286-extern unsigned int apic_verbosity;
14287+extern int apic_verbosity;
14288 extern int local_apic_timer_c2_ok;
14289
14290 extern int disable_apic;
14291diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
14292index 20370c6..a2eb9b0 100644
14293--- a/arch/x86/include/asm/apm.h
14294+++ b/arch/x86/include/asm/apm.h
14295@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
14296 __asm__ __volatile__(APM_DO_ZERO_SEGS
14297 "pushl %%edi\n\t"
14298 "pushl %%ebp\n\t"
14299- "lcall *%%cs:apm_bios_entry\n\t"
14300+ "lcall *%%ss:apm_bios_entry\n\t"
14301 "setc %%al\n\t"
14302 "popl %%ebp\n\t"
14303 "popl %%edi\n\t"
14304@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
14305 __asm__ __volatile__(APM_DO_ZERO_SEGS
14306 "pushl %%edi\n\t"
14307 "pushl %%ebp\n\t"
14308- "lcall *%%cs:apm_bios_entry\n\t"
14309+ "lcall *%%ss:apm_bios_entry\n\t"
14310 "setc %%bl\n\t"
14311 "popl %%ebp\n\t"
14312 "popl %%edi\n\t"
14313diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
14314index b17f4f4..9620151 100644
14315--- a/arch/x86/include/asm/atomic.h
14316+++ b/arch/x86/include/asm/atomic.h
14317@@ -23,7 +23,18 @@
14318 */
14319 static inline int atomic_read(const atomic_t *v)
14320 {
14321- return (*(volatile int *)&(v)->counter);
14322+ return (*(volatile const int *)&(v)->counter);
14323+}
14324+
14325+/**
14326+ * atomic_read_unchecked - read atomic variable
14327+ * @v: pointer of type atomic_unchecked_t
14328+ *
14329+ * Atomically reads the value of @v.
14330+ */
14331+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
14332+{
14333+ return (*(volatile const int *)&(v)->counter);
14334 }
14335
14336 /**
14337@@ -39,6 +50,18 @@ static inline void atomic_set(atomic_t *v, int i)
14338 }
14339
14340 /**
14341+ * atomic_set_unchecked - set atomic variable
14342+ * @v: pointer of type atomic_unchecked_t
14343+ * @i: required value
14344+ *
14345+ * Atomically sets the value of @v to @i.
14346+ */
14347+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
14348+{
14349+ v->counter = i;
14350+}
14351+
14352+/**
14353 * atomic_add - add integer to atomic variable
14354 * @i: integer value to add
14355 * @v: pointer of type atomic_t
14356@@ -47,7 +70,29 @@ static inline void atomic_set(atomic_t *v, int i)
14357 */
14358 static inline void atomic_add(int i, atomic_t *v)
14359 {
14360- asm volatile(LOCK_PREFIX "addl %1,%0"
14361+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
14362+
14363+#ifdef CONFIG_PAX_REFCOUNT
14364+ "jno 0f\n"
14365+ LOCK_PREFIX "subl %1,%0\n"
14366+ "int $4\n0:\n"
14367+ _ASM_EXTABLE(0b, 0b)
14368+#endif
14369+
14370+ : "+m" (v->counter)
14371+ : "ir" (i));
14372+}
14373+
14374+/**
14375+ * atomic_add_unchecked - add integer to atomic variable
14376+ * @i: integer value to add
14377+ * @v: pointer of type atomic_unchecked_t
14378+ *
14379+ * Atomically adds @i to @v.
14380+ */
14381+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
14382+{
14383+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
14384 : "+m" (v->counter)
14385 : "ir" (i));
14386 }
14387@@ -61,7 +106,29 @@ static inline void atomic_add(int i, atomic_t *v)
14388 */
14389 static inline void atomic_sub(int i, atomic_t *v)
14390 {
14391- asm volatile(LOCK_PREFIX "subl %1,%0"
14392+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
14393+
14394+#ifdef CONFIG_PAX_REFCOUNT
14395+ "jno 0f\n"
14396+ LOCK_PREFIX "addl %1,%0\n"
14397+ "int $4\n0:\n"
14398+ _ASM_EXTABLE(0b, 0b)
14399+#endif
14400+
14401+ : "+m" (v->counter)
14402+ : "ir" (i));
14403+}
14404+
14405+/**
14406+ * atomic_sub_unchecked - subtract integer from atomic variable
14407+ * @i: integer value to subtract
14408+ * @v: pointer of type atomic_unchecked_t
14409+ *
14410+ * Atomically subtracts @i from @v.
14411+ */
14412+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
14413+{
14414+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
14415 : "+m" (v->counter)
14416 : "ir" (i));
14417 }
14418@@ -77,7 +144,7 @@ static inline void atomic_sub(int i, atomic_t *v)
14419 */
14420 static inline int atomic_sub_and_test(int i, atomic_t *v)
14421 {
14422- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
14423+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
14424 }
14425
14426 /**
14427@@ -88,7 +155,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
14428 */
14429 static inline void atomic_inc(atomic_t *v)
14430 {
14431- asm volatile(LOCK_PREFIX "incl %0"
14432+ asm volatile(LOCK_PREFIX "incl %0\n"
14433+
14434+#ifdef CONFIG_PAX_REFCOUNT
14435+ "jno 0f\n"
14436+ LOCK_PREFIX "decl %0\n"
14437+ "int $4\n0:\n"
14438+ _ASM_EXTABLE(0b, 0b)
14439+#endif
14440+
14441+ : "+m" (v->counter));
14442+}
14443+
14444+/**
14445+ * atomic_inc_unchecked - increment atomic variable
14446+ * @v: pointer of type atomic_unchecked_t
14447+ *
14448+ * Atomically increments @v by 1.
14449+ */
14450+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
14451+{
14452+ asm volatile(LOCK_PREFIX "incl %0\n"
14453 : "+m" (v->counter));
14454 }
14455
14456@@ -100,7 +187,27 @@ static inline void atomic_inc(atomic_t *v)
14457 */
14458 static inline void atomic_dec(atomic_t *v)
14459 {
14460- asm volatile(LOCK_PREFIX "decl %0"
14461+ asm volatile(LOCK_PREFIX "decl %0\n"
14462+
14463+#ifdef CONFIG_PAX_REFCOUNT
14464+ "jno 0f\n"
14465+ LOCK_PREFIX "incl %0\n"
14466+ "int $4\n0:\n"
14467+ _ASM_EXTABLE(0b, 0b)
14468+#endif
14469+
14470+ : "+m" (v->counter));
14471+}
14472+
14473+/**
14474+ * atomic_dec_unchecked - decrement atomic variable
14475+ * @v: pointer of type atomic_unchecked_t
14476+ *
14477+ * Atomically decrements @v by 1.
14478+ */
14479+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
14480+{
14481+ asm volatile(LOCK_PREFIX "decl %0\n"
14482 : "+m" (v->counter));
14483 }
14484
14485@@ -114,7 +221,7 @@ static inline void atomic_dec(atomic_t *v)
14486 */
14487 static inline int atomic_dec_and_test(atomic_t *v)
14488 {
14489- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
14490+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
14491 }
14492
14493 /**
14494@@ -127,7 +234,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
14495 */
14496 static inline int atomic_inc_and_test(atomic_t *v)
14497 {
14498- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
14499+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
14500+}
14501+
14502+/**
14503+ * atomic_inc_and_test_unchecked - increment and test
14504+ * @v: pointer of type atomic_unchecked_t
14505+ *
14506+ * Atomically increments @v by 1
14507+ * and returns true if the result is zero, or false for all
14508+ * other cases.
14509+ */
14510+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
14511+{
14512+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
14513 }
14514
14515 /**
14516@@ -141,7 +261,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
14517 */
14518 static inline int atomic_add_negative(int i, atomic_t *v)
14519 {
14520- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
14521+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
14522 }
14523
14524 /**
14525@@ -153,6 +273,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
14526 */
14527 static inline int atomic_add_return(int i, atomic_t *v)
14528 {
14529+ return i + xadd_check_overflow(&v->counter, i);
14530+}
14531+
14532+/**
14533+ * atomic_add_return_unchecked - add integer and return
14534+ * @i: integer value to add
14535+ * @v: pointer of type atomic_unchecked_t
14536+ *
14537+ * Atomically adds @i to @v and returns @i + @v
14538+ */
14539+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
14540+{
14541 return i + xadd(&v->counter, i);
14542 }
14543
14544@@ -169,9 +301,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
14545 }
14546
14547 #define atomic_inc_return(v) (atomic_add_return(1, v))
14548+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
14549+{
14550+ return atomic_add_return_unchecked(1, v);
14551+}
14552 #define atomic_dec_return(v) (atomic_sub_return(1, v))
14553
14554-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
14555+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
14556+{
14557+ return cmpxchg(&v->counter, old, new);
14558+}
14559+
14560+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
14561 {
14562 return cmpxchg(&v->counter, old, new);
14563 }
14564@@ -181,6 +322,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
14565 return xchg(&v->counter, new);
14566 }
14567
14568+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
14569+{
14570+ return xchg(&v->counter, new);
14571+}
14572+
14573 /**
14574 * __atomic_add_unless - add unless the number is already a given value
14575 * @v: pointer of type atomic_t
14576@@ -190,14 +336,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
14577 * Atomically adds @a to @v, so long as @v was not already @u.
14578 * Returns the old value of @v.
14579 */
14580-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14581+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
14582 {
14583- int c, old;
14584+ int c, old, new;
14585 c = atomic_read(v);
14586 for (;;) {
14587- if (unlikely(c == (u)))
14588+ if (unlikely(c == u))
14589 break;
14590- old = atomic_cmpxchg((v), c, c + (a));
14591+
14592+ asm volatile("addl %2,%0\n"
14593+
14594+#ifdef CONFIG_PAX_REFCOUNT
14595+ "jno 0f\n"
14596+ "subl %2,%0\n"
14597+ "int $4\n0:\n"
14598+ _ASM_EXTABLE(0b, 0b)
14599+#endif
14600+
14601+ : "=r" (new)
14602+ : "0" (c), "ir" (a));
14603+
14604+ old = atomic_cmpxchg(v, c, new);
14605 if (likely(old == c))
14606 break;
14607 c = old;
14608@@ -206,6 +365,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14609 }
14610
14611 /**
14612+ * atomic_inc_not_zero_hint - increment if not null
14613+ * @v: pointer of type atomic_t
14614+ * @hint: probable value of the atomic before the increment
14615+ *
14616+ * This version of atomic_inc_not_zero() gives a hint of probable
14617+ * value of the atomic. This helps processor to not read the memory
14618+ * before doing the atomic read/modify/write cycle, lowering
14619+ * number of bus transactions on some arches.
14620+ *
14621+ * Returns: 0 if increment was not done, 1 otherwise.
14622+ */
14623+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
14624+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
14625+{
14626+ int val, c = hint, new;
14627+
14628+ /* sanity test, should be removed by compiler if hint is a constant */
14629+ if (!hint)
14630+ return __atomic_add_unless(v, 1, 0);
14631+
14632+ do {
14633+ asm volatile("incl %0\n"
14634+
14635+#ifdef CONFIG_PAX_REFCOUNT
14636+ "jno 0f\n"
14637+ "decl %0\n"
14638+ "int $4\n0:\n"
14639+ _ASM_EXTABLE(0b, 0b)
14640+#endif
14641+
14642+ : "=r" (new)
14643+ : "0" (c));
14644+
14645+ val = atomic_cmpxchg(v, c, new);
14646+ if (val == c)
14647+ return 1;
14648+ c = val;
14649+ } while (c);
14650+
14651+ return 0;
14652+}
14653+
14654+/**
14655 * atomic_inc_short - increment of a short integer
14656 * @v: pointer to type int
14657 *
14658@@ -234,14 +436,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
14659 #endif
14660
14661 /* These are x86-specific, used by some header files */
14662-#define atomic_clear_mask(mask, addr) \
14663- asm volatile(LOCK_PREFIX "andl %0,%1" \
14664- : : "r" (~(mask)), "m" (*(addr)) : "memory")
14665+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
14666+{
14667+ asm volatile(LOCK_PREFIX "andl %1,%0"
14668+ : "+m" (v->counter)
14669+ : "r" (~(mask))
14670+ : "memory");
14671+}
14672
14673-#define atomic_set_mask(mask, addr) \
14674- asm volatile(LOCK_PREFIX "orl %0,%1" \
14675- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
14676- : "memory")
14677+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14678+{
14679+ asm volatile(LOCK_PREFIX "andl %1,%0"
14680+ : "+m" (v->counter)
14681+ : "r" (~(mask))
14682+ : "memory");
14683+}
14684+
14685+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
14686+{
14687+ asm volatile(LOCK_PREFIX "orl %1,%0"
14688+ : "+m" (v->counter)
14689+ : "r" (mask)
14690+ : "memory");
14691+}
14692+
14693+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14694+{
14695+ asm volatile(LOCK_PREFIX "orl %1,%0"
14696+ : "+m" (v->counter)
14697+ : "r" (mask)
14698+ : "memory");
14699+}
14700
14701 /* Atomic operations are already serializing on x86 */
14702 #define smp_mb__before_atomic_dec() barrier()
14703diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
14704index b154de7..bf18a5a 100644
14705--- a/arch/x86/include/asm/atomic64_32.h
14706+++ b/arch/x86/include/asm/atomic64_32.h
14707@@ -12,6 +12,14 @@ typedef struct {
14708 u64 __aligned(8) counter;
14709 } atomic64_t;
14710
14711+#ifdef CONFIG_PAX_REFCOUNT
14712+typedef struct {
14713+ u64 __aligned(8) counter;
14714+} atomic64_unchecked_t;
14715+#else
14716+typedef atomic64_t atomic64_unchecked_t;
14717+#endif
14718+
14719 #define ATOMIC64_INIT(val) { (val) }
14720
14721 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
14722@@ -37,21 +45,31 @@ typedef struct {
14723 ATOMIC64_DECL_ONE(sym##_386)
14724
14725 ATOMIC64_DECL_ONE(add_386);
14726+ATOMIC64_DECL_ONE(add_unchecked_386);
14727 ATOMIC64_DECL_ONE(sub_386);
14728+ATOMIC64_DECL_ONE(sub_unchecked_386);
14729 ATOMIC64_DECL_ONE(inc_386);
14730+ATOMIC64_DECL_ONE(inc_unchecked_386);
14731 ATOMIC64_DECL_ONE(dec_386);
14732+ATOMIC64_DECL_ONE(dec_unchecked_386);
14733 #endif
14734
14735 #define alternative_atomic64(f, out, in...) \
14736 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
14737
14738 ATOMIC64_DECL(read);
14739+ATOMIC64_DECL(read_unchecked);
14740 ATOMIC64_DECL(set);
14741+ATOMIC64_DECL(set_unchecked);
14742 ATOMIC64_DECL(xchg);
14743 ATOMIC64_DECL(add_return);
14744+ATOMIC64_DECL(add_return_unchecked);
14745 ATOMIC64_DECL(sub_return);
14746+ATOMIC64_DECL(sub_return_unchecked);
14747 ATOMIC64_DECL(inc_return);
14748+ATOMIC64_DECL(inc_return_unchecked);
14749 ATOMIC64_DECL(dec_return);
14750+ATOMIC64_DECL(dec_return_unchecked);
14751 ATOMIC64_DECL(dec_if_positive);
14752 ATOMIC64_DECL(inc_not_zero);
14753 ATOMIC64_DECL(add_unless);
14754@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
14755 }
14756
14757 /**
14758+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
14759+ * @p: pointer to type atomic64_unchecked_t
14760+ * @o: expected value
14761+ * @n: new value
14762+ *
14763+ * Atomically sets @v to @n if it was equal to @o and returns
14764+ * the old value.
14765+ */
14766+
14767+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
14768+{
14769+ return cmpxchg64(&v->counter, o, n);
14770+}
14771+
14772+/**
14773 * atomic64_xchg - xchg atomic64 variable
14774 * @v: pointer to type atomic64_t
14775 * @n: value to assign
14776@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
14777 }
14778
14779 /**
14780+ * atomic64_set_unchecked - set atomic64 variable
14781+ * @v: pointer to type atomic64_unchecked_t
14782+ * @n: value to assign
14783+ *
14784+ * Atomically sets the value of @v to @n.
14785+ */
14786+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
14787+{
14788+ unsigned high = (unsigned)(i >> 32);
14789+ unsigned low = (unsigned)i;
14790+ alternative_atomic64(set, /* no output */,
14791+ "S" (v), "b" (low), "c" (high)
14792+ : "eax", "edx", "memory");
14793+}
14794+
14795+/**
14796 * atomic64_read - read atomic64 variable
14797 * @v: pointer to type atomic64_t
14798 *
14799@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
14800 }
14801
14802 /**
14803+ * atomic64_read_unchecked - read atomic64 variable
14804+ * @v: pointer to type atomic64_unchecked_t
14805+ *
14806+ * Atomically reads the value of @v and returns it.
14807+ */
14808+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
14809+{
14810+ long long r;
14811+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
14812+ return r;
14813+ }
14814+
14815+/**
14816 * atomic64_add_return - add and return
14817 * @i: integer value to add
14818 * @v: pointer to type atomic64_t
14819@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
14820 return i;
14821 }
14822
14823+/**
14824+ * atomic64_add_return_unchecked - add and return
14825+ * @i: integer value to add
14826+ * @v: pointer to type atomic64_unchecked_t
14827+ *
14828+ * Atomically adds @i to @v and returns @i + *@v
14829+ */
14830+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
14831+{
14832+ alternative_atomic64(add_return_unchecked,
14833+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14834+ ASM_NO_INPUT_CLOBBER("memory"));
14835+ return i;
14836+}
14837+
14838 /*
14839 * Other variants with different arithmetic operators:
14840 */
14841@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
14842 return a;
14843 }
14844
14845+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14846+{
14847+ long long a;
14848+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
14849+ "S" (v) : "memory", "ecx");
14850+ return a;
14851+}
14852+
14853 static inline long long atomic64_dec_return(atomic64_t *v)
14854 {
14855 long long a;
14856@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
14857 }
14858
14859 /**
14860+ * atomic64_add_unchecked - add integer to atomic64 variable
14861+ * @i: integer value to add
14862+ * @v: pointer to type atomic64_unchecked_t
14863+ *
14864+ * Atomically adds @i to @v.
14865+ */
14866+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
14867+{
14868+ __alternative_atomic64(add_unchecked, add_return_unchecked,
14869+ ASM_OUTPUT2("+A" (i), "+c" (v)),
14870+ ASM_NO_INPUT_CLOBBER("memory"));
14871+ return i;
14872+}
14873+
14874+/**
14875 * atomic64_sub - subtract the atomic64 variable
14876 * @i: integer value to subtract
14877 * @v: pointer to type atomic64_t
14878diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
14879index 46e9052..ae45136 100644
14880--- a/arch/x86/include/asm/atomic64_64.h
14881+++ b/arch/x86/include/asm/atomic64_64.h
14882@@ -18,7 +18,19 @@
14883 */
14884 static inline long atomic64_read(const atomic64_t *v)
14885 {
14886- return (*(volatile long *)&(v)->counter);
14887+ return (*(volatile const long *)&(v)->counter);
14888+}
14889+
14890+/**
14891+ * atomic64_read_unchecked - read atomic64 variable
14892+ * @v: pointer of type atomic64_unchecked_t
14893+ *
14894+ * Atomically reads the value of @v.
14895+ * Doesn't imply a read memory barrier.
14896+ */
14897+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
14898+{
14899+ return (*(volatile const long *)&(v)->counter);
14900 }
14901
14902 /**
14903@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
14904 }
14905
14906 /**
14907+ * atomic64_set_unchecked - set atomic64 variable
14908+ * @v: pointer to type atomic64_unchecked_t
14909+ * @i: required value
14910+ *
14911+ * Atomically sets the value of @v to @i.
14912+ */
14913+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
14914+{
14915+ v->counter = i;
14916+}
14917+
14918+/**
14919 * atomic64_add - add integer to atomic64 variable
14920 * @i: integer value to add
14921 * @v: pointer to type atomic64_t
14922@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
14923 */
14924 static inline void atomic64_add(long i, atomic64_t *v)
14925 {
14926+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
14927+
14928+#ifdef CONFIG_PAX_REFCOUNT
14929+ "jno 0f\n"
14930+ LOCK_PREFIX "subq %1,%0\n"
14931+ "int $4\n0:\n"
14932+ _ASM_EXTABLE(0b, 0b)
14933+#endif
14934+
14935+ : "=m" (v->counter)
14936+ : "er" (i), "m" (v->counter));
14937+}
14938+
14939+/**
14940+ * atomic64_add_unchecked - add integer to atomic64 variable
14941+ * @i: integer value to add
14942+ * @v: pointer to type atomic64_unchecked_t
14943+ *
14944+ * Atomically adds @i to @v.
14945+ */
14946+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
14947+{
14948 asm volatile(LOCK_PREFIX "addq %1,%0"
14949 : "=m" (v->counter)
14950 : "er" (i), "m" (v->counter));
14951@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
14952 */
14953 static inline void atomic64_sub(long i, atomic64_t *v)
14954 {
14955- asm volatile(LOCK_PREFIX "subq %1,%0"
14956+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14957+
14958+#ifdef CONFIG_PAX_REFCOUNT
14959+ "jno 0f\n"
14960+ LOCK_PREFIX "addq %1,%0\n"
14961+ "int $4\n0:\n"
14962+ _ASM_EXTABLE(0b, 0b)
14963+#endif
14964+
14965+ : "=m" (v->counter)
14966+ : "er" (i), "m" (v->counter));
14967+}
14968+
14969+/**
14970+ * atomic64_sub_unchecked - subtract the atomic64 variable
14971+ * @i: integer value to subtract
14972+ * @v: pointer to type atomic64_unchecked_t
14973+ *
14974+ * Atomically subtracts @i from @v.
14975+ */
14976+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
14977+{
14978+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
14979 : "=m" (v->counter)
14980 : "er" (i), "m" (v->counter));
14981 }
14982@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
14983 */
14984 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14985 {
14986- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
14987+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
14988 }
14989
14990 /**
14991@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14992 */
14993 static inline void atomic64_inc(atomic64_t *v)
14994 {
14995+ asm volatile(LOCK_PREFIX "incq %0\n"
14996+
14997+#ifdef CONFIG_PAX_REFCOUNT
14998+ "jno 0f\n"
14999+ LOCK_PREFIX "decq %0\n"
15000+ "int $4\n0:\n"
15001+ _ASM_EXTABLE(0b, 0b)
15002+#endif
15003+
15004+ : "=m" (v->counter)
15005+ : "m" (v->counter));
15006+}
15007+
15008+/**
15009+ * atomic64_inc_unchecked - increment atomic64 variable
15010+ * @v: pointer to type atomic64_unchecked_t
15011+ *
15012+ * Atomically increments @v by 1.
15013+ */
15014+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15015+{
15016 asm volatile(LOCK_PREFIX "incq %0"
15017 : "=m" (v->counter)
15018 : "m" (v->counter));
15019@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15020 */
15021 static inline void atomic64_dec(atomic64_t *v)
15022 {
15023- asm volatile(LOCK_PREFIX "decq %0"
15024+ asm volatile(LOCK_PREFIX "decq %0\n"
15025+
15026+#ifdef CONFIG_PAX_REFCOUNT
15027+ "jno 0f\n"
15028+ LOCK_PREFIX "incq %0\n"
15029+ "int $4\n0:\n"
15030+ _ASM_EXTABLE(0b, 0b)
15031+#endif
15032+
15033+ : "=m" (v->counter)
15034+ : "m" (v->counter));
15035+}
15036+
15037+/**
15038+ * atomic64_dec_unchecked - decrement atomic64 variable
15039+ * @v: pointer to type atomic64_t
15040+ *
15041+ * Atomically decrements @v by 1.
15042+ */
15043+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15044+{
15045+ asm volatile(LOCK_PREFIX "decq %0\n"
15046 : "=m" (v->counter)
15047 : "m" (v->counter));
15048 }
15049@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15050 */
15051 static inline int atomic64_dec_and_test(atomic64_t *v)
15052 {
15053- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15054+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15055 }
15056
15057 /**
15058@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15059 */
15060 static inline int atomic64_inc_and_test(atomic64_t *v)
15061 {
15062- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15063+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15064 }
15065
15066 /**
15067@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15068 */
15069 static inline int atomic64_add_negative(long i, atomic64_t *v)
15070 {
15071- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15072+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15073 }
15074
15075 /**
15076@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15077 */
15078 static inline long atomic64_add_return(long i, atomic64_t *v)
15079 {
15080+ return i + xadd_check_overflow(&v->counter, i);
15081+}
15082+
15083+/**
15084+ * atomic64_add_return_unchecked - add and return
15085+ * @i: integer value to add
15086+ * @v: pointer to type atomic64_unchecked_t
15087+ *
15088+ * Atomically adds @i to @v and returns @i + @v
15089+ */
15090+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15091+{
15092 return i + xadd(&v->counter, i);
15093 }
15094
15095@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15096 }
15097
15098 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15099+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15100+{
15101+ return atomic64_add_return_unchecked(1, v);
15102+}
15103 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15104
15105 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15106@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15107 return cmpxchg(&v->counter, old, new);
15108 }
15109
15110+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15111+{
15112+ return cmpxchg(&v->counter, old, new);
15113+}
15114+
15115 static inline long atomic64_xchg(atomic64_t *v, long new)
15116 {
15117 return xchg(&v->counter, new);
15118@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15119 */
15120 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15121 {
15122- long c, old;
15123+ long c, old, new;
15124 c = atomic64_read(v);
15125 for (;;) {
15126- if (unlikely(c == (u)))
15127+ if (unlikely(c == u))
15128 break;
15129- old = atomic64_cmpxchg((v), c, c + (a));
15130+
15131+ asm volatile("add %2,%0\n"
15132+
15133+#ifdef CONFIG_PAX_REFCOUNT
15134+ "jno 0f\n"
15135+ "sub %2,%0\n"
15136+ "int $4\n0:\n"
15137+ _ASM_EXTABLE(0b, 0b)
15138+#endif
15139+
15140+ : "=r" (new)
15141+ : "0" (c), "ir" (a));
15142+
15143+ old = atomic64_cmpxchg(v, c, new);
15144 if (likely(old == c))
15145 break;
15146 c = old;
15147 }
15148- return c != (u);
15149+ return c != u;
15150 }
15151
15152 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
15153diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
15154index 9fc1af7..fc71228 100644
15155--- a/arch/x86/include/asm/bitops.h
15156+++ b/arch/x86/include/asm/bitops.h
15157@@ -49,7 +49,7 @@
15158 * a mask operation on a byte.
15159 */
15160 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
15161-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
15162+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
15163 #define CONST_MASK(nr) (1 << ((nr) & 7))
15164
15165 /**
15166@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
15167 */
15168 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
15169 {
15170- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15171+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15172 }
15173
15174 /**
15175@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
15176 */
15177 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
15178 {
15179- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15180+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15181 }
15182
15183 /**
15184@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
15185 */
15186 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
15187 {
15188- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15189+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15190 }
15191
15192 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
15193@@ -345,7 +345,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
15194 *
15195 * Undefined if no bit exists, so code should check against 0 first.
15196 */
15197-static inline unsigned long __ffs(unsigned long word)
15198+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
15199 {
15200 asm("rep; bsf %1,%0"
15201 : "=r" (word)
15202@@ -359,7 +359,7 @@ static inline unsigned long __ffs(unsigned long word)
15203 *
15204 * Undefined if no zero exists, so code should check against ~0UL first.
15205 */
15206-static inline unsigned long ffz(unsigned long word)
15207+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
15208 {
15209 asm("rep; bsf %1,%0"
15210 : "=r" (word)
15211@@ -373,7 +373,7 @@ static inline unsigned long ffz(unsigned long word)
15212 *
15213 * Undefined if no set bit exists, so code should check against 0 first.
15214 */
15215-static inline unsigned long __fls(unsigned long word)
15216+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
15217 {
15218 asm("bsr %1,%0"
15219 : "=r" (word)
15220@@ -436,7 +436,7 @@ static inline int ffs(int x)
15221 * set bit if value is nonzero. The last (most significant) bit is
15222 * at position 32.
15223 */
15224-static inline int fls(int x)
15225+static inline int __intentional_overflow(-1) fls(int x)
15226 {
15227 int r;
15228
15229@@ -478,7 +478,7 @@ static inline int fls(int x)
15230 * at position 64.
15231 */
15232 #ifdef CONFIG_X86_64
15233-static __always_inline int fls64(__u64 x)
15234+static __always_inline long fls64(__u64 x)
15235 {
15236 int bitpos = -1;
15237 /*
15238diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
15239index 4fa687a..60f2d39 100644
15240--- a/arch/x86/include/asm/boot.h
15241+++ b/arch/x86/include/asm/boot.h
15242@@ -6,10 +6,15 @@
15243 #include <uapi/asm/boot.h>
15244
15245 /* Physical address where kernel should be loaded. */
15246-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15247+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15248 + (CONFIG_PHYSICAL_ALIGN - 1)) \
15249 & ~(CONFIG_PHYSICAL_ALIGN - 1))
15250
15251+#ifndef __ASSEMBLY__
15252+extern unsigned char __LOAD_PHYSICAL_ADDR[];
15253+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
15254+#endif
15255+
15256 /* Minimum kernel alignment, as a power of two */
15257 #ifdef CONFIG_X86_64
15258 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
15259diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
15260index 48f99f1..d78ebf9 100644
15261--- a/arch/x86/include/asm/cache.h
15262+++ b/arch/x86/include/asm/cache.h
15263@@ -5,12 +5,13 @@
15264
15265 /* L1 cache line size */
15266 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
15267-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15268+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
15269
15270 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
15271+#define __read_only __attribute__((__section__(".data..read_only")))
15272
15273 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
15274-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
15275+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
15276
15277 #ifdef CONFIG_X86_VSMP
15278 #ifdef CONFIG_SMP
15279diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
15280index 9863ee3..4a1f8e1 100644
15281--- a/arch/x86/include/asm/cacheflush.h
15282+++ b/arch/x86/include/asm/cacheflush.h
15283@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
15284 unsigned long pg_flags = pg->flags & _PGMT_MASK;
15285
15286 if (pg_flags == _PGMT_DEFAULT)
15287- return -1;
15288+ return ~0UL;
15289 else if (pg_flags == _PGMT_WC)
15290 return _PAGE_CACHE_WC;
15291 else if (pg_flags == _PGMT_UC_MINUS)
15292diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
15293index cb4c73b..c473c29 100644
15294--- a/arch/x86/include/asm/calling.h
15295+++ b/arch/x86/include/asm/calling.h
15296@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
15297 #define RSP 152
15298 #define SS 160
15299
15300-#define ARGOFFSET R11
15301-#define SWFRAME ORIG_RAX
15302+#define ARGOFFSET R15
15303
15304 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
15305- subq $9*8+\addskip, %rsp
15306- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
15307- movq_cfi rdi, 8*8
15308- movq_cfi rsi, 7*8
15309- movq_cfi rdx, 6*8
15310+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
15311+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
15312+ movq_cfi rdi, RDI
15313+ movq_cfi rsi, RSI
15314+ movq_cfi rdx, RDX
15315
15316 .if \save_rcx
15317- movq_cfi rcx, 5*8
15318+ movq_cfi rcx, RCX
15319 .endif
15320
15321- movq_cfi rax, 4*8
15322+ movq_cfi rax, RAX
15323
15324 .if \save_r891011
15325- movq_cfi r8, 3*8
15326- movq_cfi r9, 2*8
15327- movq_cfi r10, 1*8
15328- movq_cfi r11, 0*8
15329+ movq_cfi r8, R8
15330+ movq_cfi r9, R9
15331+ movq_cfi r10, R10
15332+ movq_cfi r11, R11
15333 .endif
15334
15335+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15336+ movq_cfi r12, R12
15337+#endif
15338+
15339 .endm
15340
15341-#define ARG_SKIP (9*8)
15342+#define ARG_SKIP ORIG_RAX
15343
15344 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
15345 rstor_r8910=1, rstor_rdx=1
15346+
15347+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15348+ movq_cfi_restore R12, r12
15349+#endif
15350+
15351 .if \rstor_r11
15352- movq_cfi_restore 0*8, r11
15353+ movq_cfi_restore R11, r11
15354 .endif
15355
15356 .if \rstor_r8910
15357- movq_cfi_restore 1*8, r10
15358- movq_cfi_restore 2*8, r9
15359- movq_cfi_restore 3*8, r8
15360+ movq_cfi_restore R10, r10
15361+ movq_cfi_restore R9, r9
15362+ movq_cfi_restore R8, r8
15363 .endif
15364
15365 .if \rstor_rax
15366- movq_cfi_restore 4*8, rax
15367+ movq_cfi_restore RAX, rax
15368 .endif
15369
15370 .if \rstor_rcx
15371- movq_cfi_restore 5*8, rcx
15372+ movq_cfi_restore RCX, rcx
15373 .endif
15374
15375 .if \rstor_rdx
15376- movq_cfi_restore 6*8, rdx
15377+ movq_cfi_restore RDX, rdx
15378 .endif
15379
15380- movq_cfi_restore 7*8, rsi
15381- movq_cfi_restore 8*8, rdi
15382+ movq_cfi_restore RSI, rsi
15383+ movq_cfi_restore RDI, rdi
15384
15385- .if ARG_SKIP+\addskip > 0
15386- addq $ARG_SKIP+\addskip, %rsp
15387- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
15388+ .if ORIG_RAX+\addskip > 0
15389+ addq $ORIG_RAX+\addskip, %rsp
15390+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
15391 .endif
15392 .endm
15393
15394- .macro LOAD_ARGS offset, skiprax=0
15395- movq \offset(%rsp), %r11
15396- movq \offset+8(%rsp), %r10
15397- movq \offset+16(%rsp), %r9
15398- movq \offset+24(%rsp), %r8
15399- movq \offset+40(%rsp), %rcx
15400- movq \offset+48(%rsp), %rdx
15401- movq \offset+56(%rsp), %rsi
15402- movq \offset+64(%rsp), %rdi
15403+ .macro LOAD_ARGS skiprax=0
15404+ movq R11(%rsp), %r11
15405+ movq R10(%rsp), %r10
15406+ movq R9(%rsp), %r9
15407+ movq R8(%rsp), %r8
15408+ movq RCX(%rsp), %rcx
15409+ movq RDX(%rsp), %rdx
15410+ movq RSI(%rsp), %rsi
15411+ movq RDI(%rsp), %rdi
15412 .if \skiprax
15413 .else
15414- movq \offset+72(%rsp), %rax
15415+ movq RAX(%rsp), %rax
15416 .endif
15417 .endm
15418
15419-#define REST_SKIP (6*8)
15420-
15421 .macro SAVE_REST
15422- subq $REST_SKIP, %rsp
15423- CFI_ADJUST_CFA_OFFSET REST_SKIP
15424- movq_cfi rbx, 5*8
15425- movq_cfi rbp, 4*8
15426- movq_cfi r12, 3*8
15427- movq_cfi r13, 2*8
15428- movq_cfi r14, 1*8
15429- movq_cfi r15, 0*8
15430+ movq_cfi rbx, RBX
15431+ movq_cfi rbp, RBP
15432+
15433+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15434+ movq_cfi r12, R12
15435+#endif
15436+
15437+ movq_cfi r13, R13
15438+ movq_cfi r14, R14
15439+ movq_cfi r15, R15
15440 .endm
15441
15442 .macro RESTORE_REST
15443- movq_cfi_restore 0*8, r15
15444- movq_cfi_restore 1*8, r14
15445- movq_cfi_restore 2*8, r13
15446- movq_cfi_restore 3*8, r12
15447- movq_cfi_restore 4*8, rbp
15448- movq_cfi_restore 5*8, rbx
15449- addq $REST_SKIP, %rsp
15450- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
15451+ movq_cfi_restore R15, r15
15452+ movq_cfi_restore R14, r14
15453+ movq_cfi_restore R13, r13
15454+
15455+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15456+ movq_cfi_restore R12, r12
15457+#endif
15458+
15459+ movq_cfi_restore RBP, rbp
15460+ movq_cfi_restore RBX, rbx
15461 .endm
15462
15463 .macro SAVE_ALL
15464diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
15465index f50de69..2b0a458 100644
15466--- a/arch/x86/include/asm/checksum_32.h
15467+++ b/arch/x86/include/asm/checksum_32.h
15468@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
15469 int len, __wsum sum,
15470 int *src_err_ptr, int *dst_err_ptr);
15471
15472+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
15473+ int len, __wsum sum,
15474+ int *src_err_ptr, int *dst_err_ptr);
15475+
15476+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
15477+ int len, __wsum sum,
15478+ int *src_err_ptr, int *dst_err_ptr);
15479+
15480 /*
15481 * Note: when you get a NULL pointer exception here this means someone
15482 * passed in an incorrect kernel address to one of these functions.
15483@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
15484
15485 might_sleep();
15486 stac();
15487- ret = csum_partial_copy_generic((__force void *)src, dst,
15488+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
15489 len, sum, err_ptr, NULL);
15490 clac();
15491
15492@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
15493 might_sleep();
15494 if (access_ok(VERIFY_WRITE, dst, len)) {
15495 stac();
15496- ret = csum_partial_copy_generic(src, (__force void *)dst,
15497+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
15498 len, sum, NULL, err_ptr);
15499 clac();
15500 return ret;
15501diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
15502index d47786a..ce1b05d 100644
15503--- a/arch/x86/include/asm/cmpxchg.h
15504+++ b/arch/x86/include/asm/cmpxchg.h
15505@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
15506 __compiletime_error("Bad argument size for cmpxchg");
15507 extern void __xadd_wrong_size(void)
15508 __compiletime_error("Bad argument size for xadd");
15509+extern void __xadd_check_overflow_wrong_size(void)
15510+ __compiletime_error("Bad argument size for xadd_check_overflow");
15511 extern void __add_wrong_size(void)
15512 __compiletime_error("Bad argument size for add");
15513+extern void __add_check_overflow_wrong_size(void)
15514+ __compiletime_error("Bad argument size for add_check_overflow");
15515
15516 /*
15517 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
15518@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
15519 __ret; \
15520 })
15521
15522+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
15523+ ({ \
15524+ __typeof__ (*(ptr)) __ret = (arg); \
15525+ switch (sizeof(*(ptr))) { \
15526+ case __X86_CASE_L: \
15527+ asm volatile (lock #op "l %0, %1\n" \
15528+ "jno 0f\n" \
15529+ "mov %0,%1\n" \
15530+ "int $4\n0:\n" \
15531+ _ASM_EXTABLE(0b, 0b) \
15532+ : "+r" (__ret), "+m" (*(ptr)) \
15533+ : : "memory", "cc"); \
15534+ break; \
15535+ case __X86_CASE_Q: \
15536+ asm volatile (lock #op "q %q0, %1\n" \
15537+ "jno 0f\n" \
15538+ "mov %0,%1\n" \
15539+ "int $4\n0:\n" \
15540+ _ASM_EXTABLE(0b, 0b) \
15541+ : "+r" (__ret), "+m" (*(ptr)) \
15542+ : : "memory", "cc"); \
15543+ break; \
15544+ default: \
15545+ __ ## op ## _check_overflow_wrong_size(); \
15546+ } \
15547+ __ret; \
15548+ })
15549+
15550 /*
15551 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
15552 * Since this is generally used to protect other memory information, we
15553@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
15554 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
15555 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
15556
15557+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
15558+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
15559+
15560 #define __add(ptr, inc, lock) \
15561 ({ \
15562 __typeof__ (*(ptr)) __ret = (inc); \
15563diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
15564index 59c6c40..5e0b22c 100644
15565--- a/arch/x86/include/asm/compat.h
15566+++ b/arch/x86/include/asm/compat.h
15567@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
15568 typedef u32 compat_uint_t;
15569 typedef u32 compat_ulong_t;
15570 typedef u64 __attribute__((aligned(4))) compat_u64;
15571-typedef u32 compat_uptr_t;
15572+typedef u32 __user compat_uptr_t;
15573
15574 struct compat_timespec {
15575 compat_time_t tv_sec;
15576diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
15577index 89270b4..f0abf8e 100644
15578--- a/arch/x86/include/asm/cpufeature.h
15579+++ b/arch/x86/include/asm/cpufeature.h
15580@@ -203,7 +203,7 @@
15581 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
15582 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
15583 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
15584-
15585+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
15586
15587 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
15588 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
15589@@ -211,7 +211,7 @@
15590 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
15591 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
15592 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
15593-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
15594+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
15595 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
15596 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
15597 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
15598@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
15599 #undef cpu_has_centaur_mcr
15600 #define cpu_has_centaur_mcr 0
15601
15602+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
15603 #endif /* CONFIG_X86_64 */
15604
15605 #if __GNUC__ >= 4
15606@@ -405,7 +406,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
15607
15608 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
15609 t_warn:
15610- warn_pre_alternatives();
15611+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
15612+ warn_pre_alternatives();
15613 return false;
15614 #endif
15615
15616@@ -425,7 +427,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
15617 ".section .discard,\"aw\",@progbits\n"
15618 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
15619 ".previous\n"
15620- ".section .altinstr_replacement,\"ax\"\n"
15621+ ".section .altinstr_replacement,\"a\"\n"
15622 "3: movb $1,%0\n"
15623 "4:\n"
15624 ".previous\n"
15625@@ -462,7 +464,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15626 " .byte 2b - 1b\n" /* src len */
15627 " .byte 4f - 3f\n" /* repl len */
15628 ".previous\n"
15629- ".section .altinstr_replacement,\"ax\"\n"
15630+ ".section .altinstr_replacement,\"a\"\n"
15631 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
15632 "4:\n"
15633 ".previous\n"
15634@@ -495,7 +497,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15635 ".section .discard,\"aw\",@progbits\n"
15636 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
15637 ".previous\n"
15638- ".section .altinstr_replacement,\"ax\"\n"
15639+ ".section .altinstr_replacement,\"a\"\n"
15640 "3: movb $0,%0\n"
15641 "4:\n"
15642 ".previous\n"
15643@@ -509,7 +511,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15644 ".section .discard,\"aw\",@progbits\n"
15645 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
15646 ".previous\n"
15647- ".section .altinstr_replacement,\"ax\"\n"
15648+ ".section .altinstr_replacement,\"a\"\n"
15649 "5: movb $1,%0\n"
15650 "6:\n"
15651 ".previous\n"
15652diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
15653index 50d033a..37deb26 100644
15654--- a/arch/x86/include/asm/desc.h
15655+++ b/arch/x86/include/asm/desc.h
15656@@ -4,6 +4,7 @@
15657 #include <asm/desc_defs.h>
15658 #include <asm/ldt.h>
15659 #include <asm/mmu.h>
15660+#include <asm/pgtable.h>
15661
15662 #include <linux/smp.h>
15663 #include <linux/percpu.h>
15664@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15665
15666 desc->type = (info->read_exec_only ^ 1) << 1;
15667 desc->type |= info->contents << 2;
15668+ desc->type |= info->seg_not_present ^ 1;
15669
15670 desc->s = 1;
15671 desc->dpl = 0x3;
15672@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15673 }
15674
15675 extern struct desc_ptr idt_descr;
15676-extern gate_desc idt_table[];
15677-extern struct desc_ptr debug_idt_descr;
15678-extern gate_desc debug_idt_table[];
15679-
15680-struct gdt_page {
15681- struct desc_struct gdt[GDT_ENTRIES];
15682-} __attribute__((aligned(PAGE_SIZE)));
15683-
15684-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
15685+extern gate_desc idt_table[IDT_ENTRIES];
15686+extern const struct desc_ptr debug_idt_descr;
15687+extern gate_desc debug_idt_table[IDT_ENTRIES];
15688
15689+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
15690 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
15691 {
15692- return per_cpu(gdt_page, cpu).gdt;
15693+ return cpu_gdt_table[cpu];
15694 }
15695
15696 #ifdef CONFIG_X86_64
15697@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
15698 unsigned long base, unsigned dpl, unsigned flags,
15699 unsigned short seg)
15700 {
15701- gate->a = (seg << 16) | (base & 0xffff);
15702- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
15703+ gate->gate.offset_low = base;
15704+ gate->gate.seg = seg;
15705+ gate->gate.reserved = 0;
15706+ gate->gate.type = type;
15707+ gate->gate.s = 0;
15708+ gate->gate.dpl = dpl;
15709+ gate->gate.p = 1;
15710+ gate->gate.offset_high = base >> 16;
15711 }
15712
15713 #endif
15714@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
15715
15716 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
15717 {
15718+ pax_open_kernel();
15719 memcpy(&idt[entry], gate, sizeof(*gate));
15720+ pax_close_kernel();
15721 }
15722
15723 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
15724 {
15725+ pax_open_kernel();
15726 memcpy(&ldt[entry], desc, 8);
15727+ pax_close_kernel();
15728 }
15729
15730 static inline void
15731@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
15732 default: size = sizeof(*gdt); break;
15733 }
15734
15735+ pax_open_kernel();
15736 memcpy(&gdt[entry], desc, size);
15737+ pax_close_kernel();
15738 }
15739
15740 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
15741@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
15742
15743 static inline void native_load_tr_desc(void)
15744 {
15745+ pax_open_kernel();
15746 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
15747+ pax_close_kernel();
15748 }
15749
15750 static inline void native_load_gdt(const struct desc_ptr *dtr)
15751@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
15752 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
15753 unsigned int i;
15754
15755+ pax_open_kernel();
15756 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
15757 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
15758+ pax_close_kernel();
15759 }
15760
15761 #define _LDT_empty(info) \
15762@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
15763 preempt_enable();
15764 }
15765
15766-static inline unsigned long get_desc_base(const struct desc_struct *desc)
15767+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
15768 {
15769 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
15770 }
15771@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
15772 }
15773
15774 #ifdef CONFIG_X86_64
15775-static inline void set_nmi_gate(int gate, void *addr)
15776+static inline void set_nmi_gate(int gate, const void *addr)
15777 {
15778 gate_desc s;
15779
15780@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
15781 #endif
15782
15783 #ifdef CONFIG_TRACING
15784-extern struct desc_ptr trace_idt_descr;
15785-extern gate_desc trace_idt_table[];
15786+extern const struct desc_ptr trace_idt_descr;
15787+extern gate_desc trace_idt_table[IDT_ENTRIES];
15788 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
15789 {
15790 write_idt_entry(trace_idt_table, entry, gate);
15791 }
15792
15793-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
15794+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
15795 unsigned dpl, unsigned ist, unsigned seg)
15796 {
15797 gate_desc s;
15798@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
15799 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
15800 #endif
15801
15802-static inline void _set_gate(int gate, unsigned type, void *addr,
15803+static inline void _set_gate(int gate, unsigned type, const void *addr,
15804 unsigned dpl, unsigned ist, unsigned seg)
15805 {
15806 gate_desc s;
15807@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
15808 #define set_intr_gate(n, addr) \
15809 do { \
15810 BUG_ON((unsigned)n > 0xFF); \
15811- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
15812+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
15813 __KERNEL_CS); \
15814- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
15815+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
15816 0, 0, __KERNEL_CS); \
15817 } while (0)
15818
15819@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
15820 /*
15821 * This routine sets up an interrupt gate at directory privilege level 3.
15822 */
15823-static inline void set_system_intr_gate(unsigned int n, void *addr)
15824+static inline void set_system_intr_gate(unsigned int n, const void *addr)
15825 {
15826 BUG_ON((unsigned)n > 0xFF);
15827 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
15828 }
15829
15830-static inline void set_system_trap_gate(unsigned int n, void *addr)
15831+static inline void set_system_trap_gate(unsigned int n, const void *addr)
15832 {
15833 BUG_ON((unsigned)n > 0xFF);
15834 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
15835 }
15836
15837-static inline void set_trap_gate(unsigned int n, void *addr)
15838+static inline void set_trap_gate(unsigned int n, const void *addr)
15839 {
15840 BUG_ON((unsigned)n > 0xFF);
15841 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
15842@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
15843 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
15844 {
15845 BUG_ON((unsigned)n > 0xFF);
15846- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
15847+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
15848 }
15849
15850-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
15851+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
15852 {
15853 BUG_ON((unsigned)n > 0xFF);
15854 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
15855 }
15856
15857-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
15858+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
15859 {
15860 BUG_ON((unsigned)n > 0xFF);
15861 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
15862@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
15863 else
15864 load_idt((const struct desc_ptr *)&idt_descr);
15865 }
15866+
15867+#ifdef CONFIG_X86_32
15868+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
15869+{
15870+ struct desc_struct d;
15871+
15872+ if (likely(limit))
15873+ limit = (limit - 1UL) >> PAGE_SHIFT;
15874+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
15875+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
15876+}
15877+#endif
15878+
15879 #endif /* _ASM_X86_DESC_H */
15880diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
15881index 278441f..b95a174 100644
15882--- a/arch/x86/include/asm/desc_defs.h
15883+++ b/arch/x86/include/asm/desc_defs.h
15884@@ -31,6 +31,12 @@ struct desc_struct {
15885 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
15886 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
15887 };
15888+ struct {
15889+ u16 offset_low;
15890+ u16 seg;
15891+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
15892+ unsigned offset_high: 16;
15893+ } gate;
15894 };
15895 } __attribute__((packed));
15896
15897diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
15898index ced283a..ffe04cc 100644
15899--- a/arch/x86/include/asm/div64.h
15900+++ b/arch/x86/include/asm/div64.h
15901@@ -39,7 +39,7 @@
15902 __mod; \
15903 })
15904
15905-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15906+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15907 {
15908 union {
15909 u64 v64;
15910diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
15911index 9c999c1..3860cb8 100644
15912--- a/arch/x86/include/asm/elf.h
15913+++ b/arch/x86/include/asm/elf.h
15914@@ -243,7 +243,25 @@ extern int force_personality32;
15915 the loader. We need to make sure that it is out of the way of the program
15916 that it will "exec", and that there is sufficient room for the brk. */
15917
15918+#ifdef CONFIG_PAX_SEGMEXEC
15919+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
15920+#else
15921 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
15922+#endif
15923+
15924+#ifdef CONFIG_PAX_ASLR
15925+#ifdef CONFIG_X86_32
15926+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
15927+
15928+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15929+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15930+#else
15931+#define PAX_ELF_ET_DYN_BASE 0x400000UL
15932+
15933+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15934+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15935+#endif
15936+#endif
15937
15938 /* This yields a mask that user programs can use to figure out what
15939 instruction set this CPU supports. This could be done in user space,
15940@@ -296,16 +314,12 @@ do { \
15941
15942 #define ARCH_DLINFO \
15943 do { \
15944- if (vdso_enabled) \
15945- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15946- (unsigned long)current->mm->context.vdso); \
15947+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15948 } while (0)
15949
15950 #define ARCH_DLINFO_X32 \
15951 do { \
15952- if (vdso_enabled) \
15953- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15954- (unsigned long)current->mm->context.vdso); \
15955+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15956 } while (0)
15957
15958 #define AT_SYSINFO 32
15959@@ -320,7 +334,7 @@ else \
15960
15961 #endif /* !CONFIG_X86_32 */
15962
15963-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
15964+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
15965
15966 #define VDSO_ENTRY \
15967 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
15968@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
15969 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
15970 #define compat_arch_setup_additional_pages syscall32_setup_pages
15971
15972-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
15973-#define arch_randomize_brk arch_randomize_brk
15974-
15975 /*
15976 * True on X86_32 or when emulating IA32 on X86_64
15977 */
15978diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
15979index 77a99ac..39ff7f5 100644
15980--- a/arch/x86/include/asm/emergency-restart.h
15981+++ b/arch/x86/include/asm/emergency-restart.h
15982@@ -1,6 +1,6 @@
15983 #ifndef _ASM_X86_EMERGENCY_RESTART_H
15984 #define _ASM_X86_EMERGENCY_RESTART_H
15985
15986-extern void machine_emergency_restart(void);
15987+extern void machine_emergency_restart(void) __noreturn;
15988
15989 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
15990diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
15991index d3d7469..677ef72 100644
15992--- a/arch/x86/include/asm/floppy.h
15993+++ b/arch/x86/include/asm/floppy.h
15994@@ -229,18 +229,18 @@ static struct fd_routine_l {
15995 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
15996 } fd_routine[] = {
15997 {
15998- request_dma,
15999- free_dma,
16000- get_dma_residue,
16001- dma_mem_alloc,
16002- hard_dma_setup
16003+ ._request_dma = request_dma,
16004+ ._free_dma = free_dma,
16005+ ._get_dma_residue = get_dma_residue,
16006+ ._dma_mem_alloc = dma_mem_alloc,
16007+ ._dma_setup = hard_dma_setup
16008 },
16009 {
16010- vdma_request_dma,
16011- vdma_nop,
16012- vdma_get_dma_residue,
16013- vdma_mem_alloc,
16014- vdma_dma_setup
16015+ ._request_dma = vdma_request_dma,
16016+ ._free_dma = vdma_nop,
16017+ ._get_dma_residue = vdma_get_dma_residue,
16018+ ._dma_mem_alloc = vdma_mem_alloc,
16019+ ._dma_setup = vdma_dma_setup
16020 }
16021 };
16022
16023diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16024index cea1c76..6c0d79b 100644
16025--- a/arch/x86/include/asm/fpu-internal.h
16026+++ b/arch/x86/include/asm/fpu-internal.h
16027@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16028 #define user_insn(insn, output, input...) \
16029 ({ \
16030 int err; \
16031+ pax_open_userland(); \
16032 asm volatile(ASM_STAC "\n" \
16033- "1:" #insn "\n\t" \
16034+ "1:" \
16035+ __copyuser_seg \
16036+ #insn "\n\t" \
16037 "2: " ASM_CLAC "\n" \
16038 ".section .fixup,\"ax\"\n" \
16039 "3: movl $-1,%[err]\n" \
16040@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16041 _ASM_EXTABLE(1b, 3b) \
16042 : [err] "=r" (err), output \
16043 : "0"(0), input); \
16044+ pax_close_userland(); \
16045 err; \
16046 })
16047
16048@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16049 "fnclex\n\t"
16050 "emms\n\t"
16051 "fildl %P[addr]" /* set F?P to defined value */
16052- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16053+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16054 }
16055
16056 return fpu_restore_checking(&tsk->thread.fpu);
16057diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16058index be27ba1..04a8801 100644
16059--- a/arch/x86/include/asm/futex.h
16060+++ b/arch/x86/include/asm/futex.h
16061@@ -12,6 +12,7 @@
16062 #include <asm/smap.h>
16063
16064 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16065+ typecheck(u32 __user *, uaddr); \
16066 asm volatile("\t" ASM_STAC "\n" \
16067 "1:\t" insn "\n" \
16068 "2:\t" ASM_CLAC "\n" \
16069@@ -20,15 +21,16 @@
16070 "\tjmp\t2b\n" \
16071 "\t.previous\n" \
16072 _ASM_EXTABLE(1b, 3b) \
16073- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16074+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16075 : "i" (-EFAULT), "0" (oparg), "1" (0))
16076
16077 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16078+ typecheck(u32 __user *, uaddr); \
16079 asm volatile("\t" ASM_STAC "\n" \
16080 "1:\tmovl %2, %0\n" \
16081 "\tmovl\t%0, %3\n" \
16082 "\t" insn "\n" \
16083- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16084+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16085 "\tjnz\t1b\n" \
16086 "3:\t" ASM_CLAC "\n" \
16087 "\t.section .fixup,\"ax\"\n" \
16088@@ -38,7 +40,7 @@
16089 _ASM_EXTABLE(1b, 4b) \
16090 _ASM_EXTABLE(2b, 4b) \
16091 : "=&a" (oldval), "=&r" (ret), \
16092- "+m" (*uaddr), "=&r" (tem) \
16093+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16094 : "r" (oparg), "i" (-EFAULT), "1" (0))
16095
16096 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16097@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16098
16099 pagefault_disable();
16100
16101+ pax_open_userland();
16102 switch (op) {
16103 case FUTEX_OP_SET:
16104- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16105+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16106 break;
16107 case FUTEX_OP_ADD:
16108- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16109+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16110 uaddr, oparg);
16111 break;
16112 case FUTEX_OP_OR:
16113@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16114 default:
16115 ret = -ENOSYS;
16116 }
16117+ pax_close_userland();
16118
16119 pagefault_enable();
16120
16121@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
16122 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
16123 return -EFAULT;
16124
16125+ pax_open_userland();
16126 asm volatile("\t" ASM_STAC "\n"
16127- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
16128+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
16129 "2:\t" ASM_CLAC "\n"
16130 "\t.section .fixup, \"ax\"\n"
16131 "3:\tmov %3, %0\n"
16132 "\tjmp 2b\n"
16133 "\t.previous\n"
16134 _ASM_EXTABLE(1b, 3b)
16135- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
16136+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
16137 : "i" (-EFAULT), "r" (newval), "1" (oldval)
16138 : "memory"
16139 );
16140+ pax_close_userland();
16141
16142 *uval = oldval;
16143 return ret;
16144diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16145index cba45d9..86344ba 100644
16146--- a/arch/x86/include/asm/hw_irq.h
16147+++ b/arch/x86/include/asm/hw_irq.h
16148@@ -165,8 +165,8 @@ extern void setup_ioapic_dest(void);
16149 extern void enable_IO_APIC(void);
16150
16151 /* Statistics */
16152-extern atomic_t irq_err_count;
16153-extern atomic_t irq_mis_count;
16154+extern atomic_unchecked_t irq_err_count;
16155+extern atomic_unchecked_t irq_mis_count;
16156
16157 /* EISA */
16158 extern void eisa_set_level_irq(unsigned int irq);
16159diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
16160index a203659..9889f1c 100644
16161--- a/arch/x86/include/asm/i8259.h
16162+++ b/arch/x86/include/asm/i8259.h
16163@@ -62,7 +62,7 @@ struct legacy_pic {
16164 void (*init)(int auto_eoi);
16165 int (*irq_pending)(unsigned int irq);
16166 void (*make_irq)(unsigned int irq);
16167-};
16168+} __do_const;
16169
16170 extern struct legacy_pic *legacy_pic;
16171 extern struct legacy_pic null_legacy_pic;
16172diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
16173index 34f69cb..6d95446 100644
16174--- a/arch/x86/include/asm/io.h
16175+++ b/arch/x86/include/asm/io.h
16176@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
16177 "m" (*(volatile type __force *)addr) barrier); }
16178
16179 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
16180-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
16181-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
16182+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
16183+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
16184
16185 build_mmio_read(__readb, "b", unsigned char, "=q", )
16186-build_mmio_read(__readw, "w", unsigned short, "=r", )
16187-build_mmio_read(__readl, "l", unsigned int, "=r", )
16188+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
16189+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
16190
16191 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
16192 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
16193@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
16194 return ioremap_nocache(offset, size);
16195 }
16196
16197-extern void iounmap(volatile void __iomem *addr);
16198+extern void iounmap(const volatile void __iomem *addr);
16199
16200 extern void set_iounmap_nonlazy(void);
16201
16202@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
16203
16204 #include <linux/vmalloc.h>
16205
16206+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
16207+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
16208+{
16209+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16210+}
16211+
16212+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
16213+{
16214+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16215+}
16216+
16217 /*
16218 * Convert a virtual cached pointer to an uncached pointer
16219 */
16220diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
16221index bba3cf8..06bc8da 100644
16222--- a/arch/x86/include/asm/irqflags.h
16223+++ b/arch/x86/include/asm/irqflags.h
16224@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
16225 sti; \
16226 sysexit
16227
16228+#define GET_CR0_INTO_RDI mov %cr0, %rdi
16229+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
16230+#define GET_CR3_INTO_RDI mov %cr3, %rdi
16231+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
16232+
16233 #else
16234 #define INTERRUPT_RETURN iret
16235 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
16236diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
16237index 9454c16..e4100e3 100644
16238--- a/arch/x86/include/asm/kprobes.h
16239+++ b/arch/x86/include/asm/kprobes.h
16240@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
16241 #define RELATIVEJUMP_SIZE 5
16242 #define RELATIVECALL_OPCODE 0xe8
16243 #define RELATIVE_ADDR_SIZE 4
16244-#define MAX_STACK_SIZE 64
16245-#define MIN_STACK_SIZE(ADDR) \
16246- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
16247- THREAD_SIZE - (unsigned long)(ADDR))) \
16248- ? (MAX_STACK_SIZE) \
16249- : (((unsigned long)current_thread_info()) + \
16250- THREAD_SIZE - (unsigned long)(ADDR)))
16251+#define MAX_STACK_SIZE 64UL
16252+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
16253
16254 #define flush_insn_slot(p) do { } while (0)
16255
16256diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
16257index 4ad6560..75c7bdd 100644
16258--- a/arch/x86/include/asm/local.h
16259+++ b/arch/x86/include/asm/local.h
16260@@ -10,33 +10,97 @@ typedef struct {
16261 atomic_long_t a;
16262 } local_t;
16263
16264+typedef struct {
16265+ atomic_long_unchecked_t a;
16266+} local_unchecked_t;
16267+
16268 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
16269
16270 #define local_read(l) atomic_long_read(&(l)->a)
16271+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
16272 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
16273+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
16274
16275 static inline void local_inc(local_t *l)
16276 {
16277- asm volatile(_ASM_INC "%0"
16278+ asm volatile(_ASM_INC "%0\n"
16279+
16280+#ifdef CONFIG_PAX_REFCOUNT
16281+ "jno 0f\n"
16282+ _ASM_DEC "%0\n"
16283+ "int $4\n0:\n"
16284+ _ASM_EXTABLE(0b, 0b)
16285+#endif
16286+
16287+ : "+m" (l->a.counter));
16288+}
16289+
16290+static inline void local_inc_unchecked(local_unchecked_t *l)
16291+{
16292+ asm volatile(_ASM_INC "%0\n"
16293 : "+m" (l->a.counter));
16294 }
16295
16296 static inline void local_dec(local_t *l)
16297 {
16298- asm volatile(_ASM_DEC "%0"
16299+ asm volatile(_ASM_DEC "%0\n"
16300+
16301+#ifdef CONFIG_PAX_REFCOUNT
16302+ "jno 0f\n"
16303+ _ASM_INC "%0\n"
16304+ "int $4\n0:\n"
16305+ _ASM_EXTABLE(0b, 0b)
16306+#endif
16307+
16308+ : "+m" (l->a.counter));
16309+}
16310+
16311+static inline void local_dec_unchecked(local_unchecked_t *l)
16312+{
16313+ asm volatile(_ASM_DEC "%0\n"
16314 : "+m" (l->a.counter));
16315 }
16316
16317 static inline void local_add(long i, local_t *l)
16318 {
16319- asm volatile(_ASM_ADD "%1,%0"
16320+ asm volatile(_ASM_ADD "%1,%0\n"
16321+
16322+#ifdef CONFIG_PAX_REFCOUNT
16323+ "jno 0f\n"
16324+ _ASM_SUB "%1,%0\n"
16325+ "int $4\n0:\n"
16326+ _ASM_EXTABLE(0b, 0b)
16327+#endif
16328+
16329+ : "+m" (l->a.counter)
16330+ : "ir" (i));
16331+}
16332+
16333+static inline void local_add_unchecked(long i, local_unchecked_t *l)
16334+{
16335+ asm volatile(_ASM_ADD "%1,%0\n"
16336 : "+m" (l->a.counter)
16337 : "ir" (i));
16338 }
16339
16340 static inline void local_sub(long i, local_t *l)
16341 {
16342- asm volatile(_ASM_SUB "%1,%0"
16343+ asm volatile(_ASM_SUB "%1,%0\n"
16344+
16345+#ifdef CONFIG_PAX_REFCOUNT
16346+ "jno 0f\n"
16347+ _ASM_ADD "%1,%0\n"
16348+ "int $4\n0:\n"
16349+ _ASM_EXTABLE(0b, 0b)
16350+#endif
16351+
16352+ : "+m" (l->a.counter)
16353+ : "ir" (i));
16354+}
16355+
16356+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
16357+{
16358+ asm volatile(_ASM_SUB "%1,%0\n"
16359 : "+m" (l->a.counter)
16360 : "ir" (i));
16361 }
16362@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
16363 */
16364 static inline int local_sub_and_test(long i, local_t *l)
16365 {
16366- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
16367+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
16368 }
16369
16370 /**
16371@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
16372 */
16373 static inline int local_dec_and_test(local_t *l)
16374 {
16375- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
16376+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
16377 }
16378
16379 /**
16380@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
16381 */
16382 static inline int local_inc_and_test(local_t *l)
16383 {
16384- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
16385+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
16386 }
16387
16388 /**
16389@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
16390 */
16391 static inline int local_add_negative(long i, local_t *l)
16392 {
16393- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
16394+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
16395 }
16396
16397 /**
16398@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
16399 static inline long local_add_return(long i, local_t *l)
16400 {
16401 long __i = i;
16402+ asm volatile(_ASM_XADD "%0, %1\n"
16403+
16404+#ifdef CONFIG_PAX_REFCOUNT
16405+ "jno 0f\n"
16406+ _ASM_MOV "%0,%1\n"
16407+ "int $4\n0:\n"
16408+ _ASM_EXTABLE(0b, 0b)
16409+#endif
16410+
16411+ : "+r" (i), "+m" (l->a.counter)
16412+ : : "memory");
16413+ return i + __i;
16414+}
16415+
16416+/**
16417+ * local_add_return_unchecked - add and return
16418+ * @i: integer value to add
16419+ * @l: pointer to type local_unchecked_t
16420+ *
16421+ * Atomically adds @i to @l and returns @i + @l
16422+ */
16423+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
16424+{
16425+ long __i = i;
16426 asm volatile(_ASM_XADD "%0, %1;"
16427 : "+r" (i), "+m" (l->a.counter)
16428 : : "memory");
16429@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
16430
16431 #define local_cmpxchg(l, o, n) \
16432 (cmpxchg_local(&((l)->a.counter), (o), (n)))
16433+#define local_cmpxchg_unchecked(l, o, n) \
16434+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
16435 /* Always has a lock prefix */
16436 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
16437
16438diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
16439new file mode 100644
16440index 0000000..2bfd3ba
16441--- /dev/null
16442+++ b/arch/x86/include/asm/mman.h
16443@@ -0,0 +1,15 @@
16444+#ifndef _X86_MMAN_H
16445+#define _X86_MMAN_H
16446+
16447+#include <uapi/asm/mman.h>
16448+
16449+#ifdef __KERNEL__
16450+#ifndef __ASSEMBLY__
16451+#ifdef CONFIG_X86_32
16452+#define arch_mmap_check i386_mmap_check
16453+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
16454+#endif
16455+#endif
16456+#endif
16457+
16458+#endif /* X86_MMAN_H */
16459diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
16460index 5f55e69..e20bfb1 100644
16461--- a/arch/x86/include/asm/mmu.h
16462+++ b/arch/x86/include/asm/mmu.h
16463@@ -9,7 +9,7 @@
16464 * we put the segment information here.
16465 */
16466 typedef struct {
16467- void *ldt;
16468+ struct desc_struct *ldt;
16469 int size;
16470
16471 #ifdef CONFIG_X86_64
16472@@ -18,7 +18,19 @@ typedef struct {
16473 #endif
16474
16475 struct mutex lock;
16476- void *vdso;
16477+ unsigned long vdso;
16478+
16479+#ifdef CONFIG_X86_32
16480+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16481+ unsigned long user_cs_base;
16482+ unsigned long user_cs_limit;
16483+
16484+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16485+ cpumask_t cpu_user_cs_mask;
16486+#endif
16487+
16488+#endif
16489+#endif
16490 } mm_context_t;
16491
16492 #ifdef CONFIG_SMP
16493diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
16494index be12c53..4d24039 100644
16495--- a/arch/x86/include/asm/mmu_context.h
16496+++ b/arch/x86/include/asm/mmu_context.h
16497@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
16498
16499 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
16500 {
16501+
16502+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16503+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
16504+ unsigned int i;
16505+ pgd_t *pgd;
16506+
16507+ pax_open_kernel();
16508+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
16509+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
16510+ set_pgd_batched(pgd+i, native_make_pgd(0));
16511+ pax_close_kernel();
16512+ }
16513+#endif
16514+
16515 #ifdef CONFIG_SMP
16516 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
16517 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
16518@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16519 struct task_struct *tsk)
16520 {
16521 unsigned cpu = smp_processor_id();
16522+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16523+ int tlbstate = TLBSTATE_OK;
16524+#endif
16525
16526 if (likely(prev != next)) {
16527 #ifdef CONFIG_SMP
16528+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16529+ tlbstate = this_cpu_read(cpu_tlbstate.state);
16530+#endif
16531 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
16532 this_cpu_write(cpu_tlbstate.active_mm, next);
16533 #endif
16534 cpumask_set_cpu(cpu, mm_cpumask(next));
16535
16536 /* Re-load page tables */
16537+#ifdef CONFIG_PAX_PER_CPU_PGD
16538+ pax_open_kernel();
16539+
16540+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16541+ if (static_cpu_has(X86_FEATURE_PCID))
16542+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
16543+ else
16544+#endif
16545+
16546+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
16547+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
16548+ pax_close_kernel();
16549+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
16550+
16551+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16552+ if (static_cpu_has(X86_FEATURE_PCID)) {
16553+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16554+ u64 descriptor[2];
16555+ descriptor[0] = PCID_USER;
16556+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16557+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
16558+ descriptor[0] = PCID_KERNEL;
16559+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16560+ }
16561+ } else {
16562+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16563+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
16564+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
16565+ else
16566+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16567+ }
16568+ } else
16569+#endif
16570+
16571+ load_cr3(get_cpu_pgd(cpu, kernel));
16572+#else
16573 load_cr3(next->pgd);
16574+#endif
16575
16576 /* Stop flush ipis for the previous mm */
16577 cpumask_clear_cpu(cpu, mm_cpumask(prev));
16578@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16579 /* Load the LDT, if the LDT is different: */
16580 if (unlikely(prev->context.ldt != next->context.ldt))
16581 load_LDT_nolock(&next->context);
16582+
16583+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16584+ if (!(__supported_pte_mask & _PAGE_NX)) {
16585+ smp_mb__before_clear_bit();
16586+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
16587+ smp_mb__after_clear_bit();
16588+ cpu_set(cpu, next->context.cpu_user_cs_mask);
16589+ }
16590+#endif
16591+
16592+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16593+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
16594+ prev->context.user_cs_limit != next->context.user_cs_limit))
16595+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16596+#ifdef CONFIG_SMP
16597+ else if (unlikely(tlbstate != TLBSTATE_OK))
16598+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16599+#endif
16600+#endif
16601+
16602 }
16603+ else {
16604+
16605+#ifdef CONFIG_PAX_PER_CPU_PGD
16606+ pax_open_kernel();
16607+
16608+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16609+ if (static_cpu_has(X86_FEATURE_PCID))
16610+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
16611+ else
16612+#endif
16613+
16614+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
16615+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
16616+ pax_close_kernel();
16617+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
16618+
16619+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16620+ if (static_cpu_has(X86_FEATURE_PCID)) {
16621+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
16622+ u64 descriptor[2];
16623+ descriptor[0] = PCID_USER;
16624+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16625+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
16626+ descriptor[0] = PCID_KERNEL;
16627+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16628+ }
16629+ } else {
16630+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16631+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
16632+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
16633+ else
16634+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16635+ }
16636+ } else
16637+#endif
16638+
16639+ load_cr3(get_cpu_pgd(cpu, kernel));
16640+#endif
16641+
16642 #ifdef CONFIG_SMP
16643- else {
16644 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
16645 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
16646
16647@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16648 * tlb flush IPI delivery. We must reload CR3
16649 * to make sure to use no freed page tables.
16650 */
16651+
16652+#ifndef CONFIG_PAX_PER_CPU_PGD
16653 load_cr3(next->pgd);
16654+#endif
16655+
16656 load_LDT_nolock(&next->context);
16657+
16658+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16659+ if (!(__supported_pte_mask & _PAGE_NX))
16660+ cpu_set(cpu, next->context.cpu_user_cs_mask);
16661+#endif
16662+
16663+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16664+#ifdef CONFIG_PAX_PAGEEXEC
16665+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
16666+#endif
16667+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16668+#endif
16669+
16670 }
16671+#endif
16672 }
16673-#endif
16674 }
16675
16676 #define activate_mm(prev, next) \
16677diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
16678index e3b7819..b257c64 100644
16679--- a/arch/x86/include/asm/module.h
16680+++ b/arch/x86/include/asm/module.h
16681@@ -5,6 +5,7 @@
16682
16683 #ifdef CONFIG_X86_64
16684 /* X86_64 does not define MODULE_PROC_FAMILY */
16685+#define MODULE_PROC_FAMILY ""
16686 #elif defined CONFIG_M486
16687 #define MODULE_PROC_FAMILY "486 "
16688 #elif defined CONFIG_M586
16689@@ -57,8 +58,20 @@
16690 #error unknown processor family
16691 #endif
16692
16693-#ifdef CONFIG_X86_32
16694-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
16695+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
16696+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
16697+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
16698+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
16699+#else
16700+#define MODULE_PAX_KERNEXEC ""
16701 #endif
16702
16703+#ifdef CONFIG_PAX_MEMORY_UDEREF
16704+#define MODULE_PAX_UDEREF "UDEREF "
16705+#else
16706+#define MODULE_PAX_UDEREF ""
16707+#endif
16708+
16709+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
16710+
16711 #endif /* _ASM_X86_MODULE_H */
16712diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
16713index 86f9301..b365cda 100644
16714--- a/arch/x86/include/asm/nmi.h
16715+++ b/arch/x86/include/asm/nmi.h
16716@@ -40,11 +40,11 @@ struct nmiaction {
16717 nmi_handler_t handler;
16718 unsigned long flags;
16719 const char *name;
16720-};
16721+} __do_const;
16722
16723 #define register_nmi_handler(t, fn, fg, n, init...) \
16724 ({ \
16725- static struct nmiaction init fn##_na = { \
16726+ static const struct nmiaction init fn##_na = { \
16727 .handler = (fn), \
16728 .name = (n), \
16729 .flags = (fg), \
16730@@ -52,7 +52,7 @@ struct nmiaction {
16731 __register_nmi_handler((t), &fn##_na); \
16732 })
16733
16734-int __register_nmi_handler(unsigned int, struct nmiaction *);
16735+int __register_nmi_handler(unsigned int, const struct nmiaction *);
16736
16737 void unregister_nmi_handler(unsigned int, const char *);
16738
16739diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
16740index c878924..21f4889 100644
16741--- a/arch/x86/include/asm/page.h
16742+++ b/arch/x86/include/asm/page.h
16743@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
16744 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
16745
16746 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
16747+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
16748
16749 #define __boot_va(x) __va(x)
16750 #define __boot_pa(x) __pa(x)
16751diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
16752index 0f1ddee..e2fc3d1 100644
16753--- a/arch/x86/include/asm/page_64.h
16754+++ b/arch/x86/include/asm/page_64.h
16755@@ -7,9 +7,9 @@
16756
16757 /* duplicated to the one in bootmem.h */
16758 extern unsigned long max_pfn;
16759-extern unsigned long phys_base;
16760+extern const unsigned long phys_base;
16761
16762-static inline unsigned long __phys_addr_nodebug(unsigned long x)
16763+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
16764 {
16765 unsigned long y = x - __START_KERNEL_map;
16766
16767diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
16768index 401f350..dee5d13 100644
16769--- a/arch/x86/include/asm/paravirt.h
16770+++ b/arch/x86/include/asm/paravirt.h
16771@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
16772 return (pmd_t) { ret };
16773 }
16774
16775-static inline pmdval_t pmd_val(pmd_t pmd)
16776+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
16777 {
16778 pmdval_t ret;
16779
16780@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
16781 val);
16782 }
16783
16784+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
16785+{
16786+ pgdval_t val = native_pgd_val(pgd);
16787+
16788+ if (sizeof(pgdval_t) > sizeof(long))
16789+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
16790+ val, (u64)val >> 32);
16791+ else
16792+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
16793+ val);
16794+}
16795+
16796 static inline void pgd_clear(pgd_t *pgdp)
16797 {
16798 set_pgd(pgdp, __pgd(0));
16799@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
16800 pv_mmu_ops.set_fixmap(idx, phys, flags);
16801 }
16802
16803+#ifdef CONFIG_PAX_KERNEXEC
16804+static inline unsigned long pax_open_kernel(void)
16805+{
16806+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
16807+}
16808+
16809+static inline unsigned long pax_close_kernel(void)
16810+{
16811+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
16812+}
16813+#else
16814+static inline unsigned long pax_open_kernel(void) { return 0; }
16815+static inline unsigned long pax_close_kernel(void) { return 0; }
16816+#endif
16817+
16818 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
16819
16820 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
16821@@ -906,7 +933,7 @@ extern void default_banner(void);
16822
16823 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
16824 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
16825-#define PARA_INDIRECT(addr) *%cs:addr
16826+#define PARA_INDIRECT(addr) *%ss:addr
16827 #endif
16828
16829 #define INTERRUPT_RETURN \
16830@@ -981,6 +1008,21 @@ extern void default_banner(void);
16831 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
16832 CLBR_NONE, \
16833 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
16834+
16835+#define GET_CR0_INTO_RDI \
16836+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
16837+ mov %rax,%rdi
16838+
16839+#define SET_RDI_INTO_CR0 \
16840+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16841+
16842+#define GET_CR3_INTO_RDI \
16843+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
16844+ mov %rax,%rdi
16845+
16846+#define SET_RDI_INTO_CR3 \
16847+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
16848+
16849 #endif /* CONFIG_X86_32 */
16850
16851 #endif /* __ASSEMBLY__ */
16852diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
16853index aab8f67..0fb0ee4 100644
16854--- a/arch/x86/include/asm/paravirt_types.h
16855+++ b/arch/x86/include/asm/paravirt_types.h
16856@@ -84,7 +84,7 @@ struct pv_init_ops {
16857 */
16858 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
16859 unsigned long addr, unsigned len);
16860-};
16861+} __no_const __no_randomize_layout;
16862
16863
16864 struct pv_lazy_ops {
16865@@ -92,13 +92,13 @@ struct pv_lazy_ops {
16866 void (*enter)(void);
16867 void (*leave)(void);
16868 void (*flush)(void);
16869-};
16870+} __no_randomize_layout;
16871
16872 struct pv_time_ops {
16873 unsigned long long (*sched_clock)(void);
16874 unsigned long long (*steal_clock)(int cpu);
16875 unsigned long (*get_tsc_khz)(void);
16876-};
16877+} __no_const __no_randomize_layout;
16878
16879 struct pv_cpu_ops {
16880 /* hooks for various privileged instructions */
16881@@ -192,7 +192,7 @@ struct pv_cpu_ops {
16882
16883 void (*start_context_switch)(struct task_struct *prev);
16884 void (*end_context_switch)(struct task_struct *next);
16885-};
16886+} __no_const __no_randomize_layout;
16887
16888 struct pv_irq_ops {
16889 /*
16890@@ -215,7 +215,7 @@ struct pv_irq_ops {
16891 #ifdef CONFIG_X86_64
16892 void (*adjust_exception_frame)(void);
16893 #endif
16894-};
16895+} __no_randomize_layout;
16896
16897 struct pv_apic_ops {
16898 #ifdef CONFIG_X86_LOCAL_APIC
16899@@ -223,7 +223,7 @@ struct pv_apic_ops {
16900 unsigned long start_eip,
16901 unsigned long start_esp);
16902 #endif
16903-};
16904+} __no_const __no_randomize_layout;
16905
16906 struct pv_mmu_ops {
16907 unsigned long (*read_cr2)(void);
16908@@ -313,6 +313,7 @@ struct pv_mmu_ops {
16909 struct paravirt_callee_save make_pud;
16910
16911 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
16912+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
16913 #endif /* PAGETABLE_LEVELS == 4 */
16914 #endif /* PAGETABLE_LEVELS >= 3 */
16915
16916@@ -324,7 +325,13 @@ struct pv_mmu_ops {
16917 an mfn. We can tell which is which from the index. */
16918 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
16919 phys_addr_t phys, pgprot_t flags);
16920-};
16921+
16922+#ifdef CONFIG_PAX_KERNEXEC
16923+ unsigned long (*pax_open_kernel)(void);
16924+ unsigned long (*pax_close_kernel)(void);
16925+#endif
16926+
16927+} __no_randomize_layout;
16928
16929 struct arch_spinlock;
16930 #ifdef CONFIG_SMP
16931@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
16932 struct pv_lock_ops {
16933 struct paravirt_callee_save lock_spinning;
16934 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
16935-};
16936+} __no_randomize_layout;
16937
16938 /* This contains all the paravirt structures: we get a convenient
16939 * number for each function using the offset which we use to indicate
16940- * what to patch. */
16941+ * what to patch.
16942+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
16943+ */
16944+
16945 struct paravirt_patch_template {
16946 struct pv_init_ops pv_init_ops;
16947 struct pv_time_ops pv_time_ops;
16948@@ -349,7 +359,7 @@ struct paravirt_patch_template {
16949 struct pv_apic_ops pv_apic_ops;
16950 struct pv_mmu_ops pv_mmu_ops;
16951 struct pv_lock_ops pv_lock_ops;
16952-};
16953+} __no_randomize_layout;
16954
16955 extern struct pv_info pv_info;
16956 extern struct pv_init_ops pv_init_ops;
16957diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
16958index c4412e9..90e88c5 100644
16959--- a/arch/x86/include/asm/pgalloc.h
16960+++ b/arch/x86/include/asm/pgalloc.h
16961@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
16962 pmd_t *pmd, pte_t *pte)
16963 {
16964 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16965+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
16966+}
16967+
16968+static inline void pmd_populate_user(struct mm_struct *mm,
16969+ pmd_t *pmd, pte_t *pte)
16970+{
16971+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16972 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
16973 }
16974
16975@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
16976
16977 #ifdef CONFIG_X86_PAE
16978 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
16979+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
16980+{
16981+ pud_populate(mm, pudp, pmd);
16982+}
16983 #else /* !CONFIG_X86_PAE */
16984 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16985 {
16986 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16987 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
16988 }
16989+
16990+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16991+{
16992+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16993+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
16994+}
16995 #endif /* CONFIG_X86_PAE */
16996
16997 #if PAGETABLE_LEVELS > 3
16998@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16999 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17000 }
17001
17002+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17003+{
17004+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17005+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17006+}
17007+
17008 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17009 {
17010 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
17011diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17012index 3bf2dd0..23d2a9f 100644
17013--- a/arch/x86/include/asm/pgtable-2level.h
17014+++ b/arch/x86/include/asm/pgtable-2level.h
17015@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17016
17017 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17018 {
17019+ pax_open_kernel();
17020 *pmdp = pmd;
17021+ pax_close_kernel();
17022 }
17023
17024 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17025diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17026index 81bb91b..9392125 100644
17027--- a/arch/x86/include/asm/pgtable-3level.h
17028+++ b/arch/x86/include/asm/pgtable-3level.h
17029@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17030
17031 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17032 {
17033+ pax_open_kernel();
17034 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17035+ pax_close_kernel();
17036 }
17037
17038 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17039 {
17040+ pax_open_kernel();
17041 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17042+ pax_close_kernel();
17043 }
17044
17045 /*
17046diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17047index bbc8b12..f228861 100644
17048--- a/arch/x86/include/asm/pgtable.h
17049+++ b/arch/x86/include/asm/pgtable.h
17050@@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17051
17052 #ifndef __PAGETABLE_PUD_FOLDED
17053 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17054+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17055 #define pgd_clear(pgd) native_pgd_clear(pgd)
17056 #endif
17057
17058@@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17059
17060 #define arch_end_context_switch(prev) do {} while(0)
17061
17062+#define pax_open_kernel() native_pax_open_kernel()
17063+#define pax_close_kernel() native_pax_close_kernel()
17064 #endif /* CONFIG_PARAVIRT */
17065
17066+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17067+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17068+
17069+#ifdef CONFIG_PAX_KERNEXEC
17070+static inline unsigned long native_pax_open_kernel(void)
17071+{
17072+ unsigned long cr0;
17073+
17074+ preempt_disable();
17075+ barrier();
17076+ cr0 = read_cr0() ^ X86_CR0_WP;
17077+ BUG_ON(cr0 & X86_CR0_WP);
17078+ write_cr0(cr0);
17079+ return cr0 ^ X86_CR0_WP;
17080+}
17081+
17082+static inline unsigned long native_pax_close_kernel(void)
17083+{
17084+ unsigned long cr0;
17085+
17086+ cr0 = read_cr0() ^ X86_CR0_WP;
17087+ BUG_ON(!(cr0 & X86_CR0_WP));
17088+ write_cr0(cr0);
17089+ barrier();
17090+ preempt_enable_no_resched();
17091+ return cr0 ^ X86_CR0_WP;
17092+}
17093+#else
17094+static inline unsigned long native_pax_open_kernel(void) { return 0; }
17095+static inline unsigned long native_pax_close_kernel(void) { return 0; }
17096+#endif
17097+
17098 /*
17099 * The following only work if pte_present() is true.
17100 * Undefined behaviour if not..
17101 */
17102+static inline int pte_user(pte_t pte)
17103+{
17104+ return pte_val(pte) & _PAGE_USER;
17105+}
17106+
17107 static inline int pte_dirty(pte_t pte)
17108 {
17109 return pte_flags(pte) & _PAGE_DIRTY;
17110@@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_t pud)
17111 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
17112 }
17113
17114+static inline unsigned long pgd_pfn(pgd_t pgd)
17115+{
17116+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
17117+}
17118+
17119 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
17120
17121 static inline int pmd_large(pmd_t pte)
17122@@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
17123 return pte_clear_flags(pte, _PAGE_RW);
17124 }
17125
17126+static inline pte_t pte_mkread(pte_t pte)
17127+{
17128+ return __pte(pte_val(pte) | _PAGE_USER);
17129+}
17130+
17131 static inline pte_t pte_mkexec(pte_t pte)
17132 {
17133- return pte_clear_flags(pte, _PAGE_NX);
17134+#ifdef CONFIG_X86_PAE
17135+ if (__supported_pte_mask & _PAGE_NX)
17136+ return pte_clear_flags(pte, _PAGE_NX);
17137+ else
17138+#endif
17139+ return pte_set_flags(pte, _PAGE_USER);
17140+}
17141+
17142+static inline pte_t pte_exprotect(pte_t pte)
17143+{
17144+#ifdef CONFIG_X86_PAE
17145+ if (__supported_pte_mask & _PAGE_NX)
17146+ return pte_set_flags(pte, _PAGE_NX);
17147+ else
17148+#endif
17149+ return pte_clear_flags(pte, _PAGE_USER);
17150 }
17151
17152 static inline pte_t pte_mkdirty(pte_t pte)
17153@@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
17154 #endif
17155
17156 #ifndef __ASSEMBLY__
17157+
17158+#ifdef CONFIG_PAX_PER_CPU_PGD
17159+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
17160+enum cpu_pgd_type {kernel = 0, user = 1};
17161+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
17162+{
17163+ return cpu_pgd[cpu][type];
17164+}
17165+#endif
17166+
17167 #include <linux/mm_types.h>
17168 #include <linux/mmdebug.h>
17169 #include <linux/log2.h>
17170@@ -570,7 +645,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
17171 * Currently stuck as a macro due to indirect forward reference to
17172 * linux/mmzone.h's __section_mem_map_addr() definition:
17173 */
17174-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
17175+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
17176
17177 /* Find an entry in the second-level page table.. */
17178 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
17179@@ -610,7 +685,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
17180 * Currently stuck as a macro due to indirect forward reference to
17181 * linux/mmzone.h's __section_mem_map_addr() definition:
17182 */
17183-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
17184+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
17185
17186 /* to find an entry in a page-table-directory. */
17187 static inline unsigned long pud_index(unsigned long address)
17188@@ -625,7 +700,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
17189
17190 static inline int pgd_bad(pgd_t pgd)
17191 {
17192- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
17193+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
17194 }
17195
17196 static inline int pgd_none(pgd_t pgd)
17197@@ -648,7 +723,12 @@ static inline int pgd_none(pgd_t pgd)
17198 * pgd_offset() returns a (pgd_t *)
17199 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
17200 */
17201-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
17202+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
17203+
17204+#ifdef CONFIG_PAX_PER_CPU_PGD
17205+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
17206+#endif
17207+
17208 /*
17209 * a shortcut which implies the use of the kernel's pgd, instead
17210 * of a process's
17211@@ -659,6 +739,23 @@ static inline int pgd_none(pgd_t pgd)
17212 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
17213 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
17214
17215+#ifdef CONFIG_X86_32
17216+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
17217+#else
17218+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
17219+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
17220+
17221+#ifdef CONFIG_PAX_MEMORY_UDEREF
17222+#ifdef __ASSEMBLY__
17223+#define pax_user_shadow_base pax_user_shadow_base(%rip)
17224+#else
17225+extern unsigned long pax_user_shadow_base;
17226+extern pgdval_t clone_pgd_mask;
17227+#endif
17228+#endif
17229+
17230+#endif
17231+
17232 #ifndef __ASSEMBLY__
17233
17234 extern int direct_gbpages;
17235@@ -825,11 +922,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
17236 * dst and src can be on the same page, but the range must not overlap,
17237 * and must not cross a page boundary.
17238 */
17239-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
17240+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
17241 {
17242- memcpy(dst, src, count * sizeof(pgd_t));
17243+ pax_open_kernel();
17244+ while (count--)
17245+ *dst++ = *src++;
17246+ pax_close_kernel();
17247 }
17248
17249+#ifdef CONFIG_PAX_PER_CPU_PGD
17250+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
17251+#endif
17252+
17253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17254+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
17255+#else
17256+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
17257+#endif
17258+
17259 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
17260 static inline int page_level_shift(enum pg_level level)
17261 {
17262diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
17263index 9ee3221..b979c6b 100644
17264--- a/arch/x86/include/asm/pgtable_32.h
17265+++ b/arch/x86/include/asm/pgtable_32.h
17266@@ -25,9 +25,6 @@
17267 struct mm_struct;
17268 struct vm_area_struct;
17269
17270-extern pgd_t swapper_pg_dir[1024];
17271-extern pgd_t initial_page_table[1024];
17272-
17273 static inline void pgtable_cache_init(void) { }
17274 static inline void check_pgt_cache(void) { }
17275 void paging_init(void);
17276@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17277 # include <asm/pgtable-2level.h>
17278 #endif
17279
17280+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
17281+extern pgd_t initial_page_table[PTRS_PER_PGD];
17282+#ifdef CONFIG_X86_PAE
17283+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
17284+#endif
17285+
17286 #if defined(CONFIG_HIGHPTE)
17287 #define pte_offset_map(dir, address) \
17288 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
17289@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17290 /* Clear a kernel PTE and flush it from the TLB */
17291 #define kpte_clear_flush(ptep, vaddr) \
17292 do { \
17293+ pax_open_kernel(); \
17294 pte_clear(&init_mm, (vaddr), (ptep)); \
17295+ pax_close_kernel(); \
17296 __flush_tlb_one((vaddr)); \
17297 } while (0)
17298
17299 #endif /* !__ASSEMBLY__ */
17300
17301+#define HAVE_ARCH_UNMAPPED_AREA
17302+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
17303+
17304 /*
17305 * kern_addr_valid() is (1) for FLATMEM and (0) for
17306 * SPARSEMEM and DISCONTIGMEM
17307diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
17308index ed5903b..c7fe163 100644
17309--- a/arch/x86/include/asm/pgtable_32_types.h
17310+++ b/arch/x86/include/asm/pgtable_32_types.h
17311@@ -8,7 +8,7 @@
17312 */
17313 #ifdef CONFIG_X86_PAE
17314 # include <asm/pgtable-3level_types.h>
17315-# define PMD_SIZE (1UL << PMD_SHIFT)
17316+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
17317 # define PMD_MASK (~(PMD_SIZE - 1))
17318 #else
17319 # include <asm/pgtable-2level_types.h>
17320@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
17321 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
17322 #endif
17323
17324+#ifdef CONFIG_PAX_KERNEXEC
17325+#ifndef __ASSEMBLY__
17326+extern unsigned char MODULES_EXEC_VADDR[];
17327+extern unsigned char MODULES_EXEC_END[];
17328+#endif
17329+#include <asm/boot.h>
17330+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
17331+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
17332+#else
17333+#define ktla_ktva(addr) (addr)
17334+#define ktva_ktla(addr) (addr)
17335+#endif
17336+
17337 #define MODULES_VADDR VMALLOC_START
17338 #define MODULES_END VMALLOC_END
17339 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
17340diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
17341index e22c1db..23a625a 100644
17342--- a/arch/x86/include/asm/pgtable_64.h
17343+++ b/arch/x86/include/asm/pgtable_64.h
17344@@ -16,10 +16,14 @@
17345
17346 extern pud_t level3_kernel_pgt[512];
17347 extern pud_t level3_ident_pgt[512];
17348+extern pud_t level3_vmalloc_start_pgt[512];
17349+extern pud_t level3_vmalloc_end_pgt[512];
17350+extern pud_t level3_vmemmap_pgt[512];
17351+extern pud_t level2_vmemmap_pgt[512];
17352 extern pmd_t level2_kernel_pgt[512];
17353 extern pmd_t level2_fixmap_pgt[512];
17354-extern pmd_t level2_ident_pgt[512];
17355-extern pgd_t init_level4_pgt[];
17356+extern pmd_t level2_ident_pgt[512*2];
17357+extern pgd_t init_level4_pgt[512];
17358
17359 #define swapper_pg_dir init_level4_pgt
17360
17361@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17362
17363 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17364 {
17365+ pax_open_kernel();
17366 *pmdp = pmd;
17367+ pax_close_kernel();
17368 }
17369
17370 static inline void native_pmd_clear(pmd_t *pmd)
17371@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
17372
17373 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17374 {
17375+ pax_open_kernel();
17376 *pudp = pud;
17377+ pax_close_kernel();
17378 }
17379
17380 static inline void native_pud_clear(pud_t *pud)
17381@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
17382
17383 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
17384 {
17385+ pax_open_kernel();
17386+ *pgdp = pgd;
17387+ pax_close_kernel();
17388+}
17389+
17390+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17391+{
17392 *pgdp = pgd;
17393 }
17394
17395diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
17396index 2d88344..4679fc3 100644
17397--- a/arch/x86/include/asm/pgtable_64_types.h
17398+++ b/arch/x86/include/asm/pgtable_64_types.h
17399@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
17400 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
17401 #define MODULES_END _AC(0xffffffffff000000, UL)
17402 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
17403+#define MODULES_EXEC_VADDR MODULES_VADDR
17404+#define MODULES_EXEC_END MODULES_END
17405+
17406+#define ktla_ktva(addr) (addr)
17407+#define ktva_ktla(addr) (addr)
17408
17409 #define EARLY_DYNAMIC_PAGE_TABLES 64
17410
17411diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
17412index 0ecac25..7a15e09 100644
17413--- a/arch/x86/include/asm/pgtable_types.h
17414+++ b/arch/x86/include/asm/pgtable_types.h
17415@@ -16,13 +16,12 @@
17416 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
17417 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
17418 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
17419-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
17420+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
17421 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
17422 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
17423 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
17424-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
17425-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
17426-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
17427+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
17428+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
17429 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
17430
17431 /* If _PAGE_BIT_PRESENT is clear, we use these: */
17432@@ -40,7 +39,6 @@
17433 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
17434 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
17435 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
17436-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
17437 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
17438 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
17439 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
17440@@ -87,8 +85,10 @@
17441
17442 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
17443 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
17444-#else
17445+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
17446 #define _PAGE_NX (_AT(pteval_t, 0))
17447+#else
17448+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
17449 #endif
17450
17451 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
17452@@ -146,6 +146,9 @@
17453 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
17454 _PAGE_ACCESSED)
17455
17456+#define PAGE_READONLY_NOEXEC PAGE_READONLY
17457+#define PAGE_SHARED_NOEXEC PAGE_SHARED
17458+
17459 #define __PAGE_KERNEL_EXEC \
17460 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
17461 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
17462@@ -156,7 +159,7 @@
17463 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
17464 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
17465 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
17466-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
17467+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
17468 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
17469 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
17470 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
17471@@ -218,8 +221,8 @@
17472 * bits are combined, this will alow user to access the high address mapped
17473 * VDSO in the presence of CONFIG_COMPAT_VDSO
17474 */
17475-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
17476-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
17477+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17478+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17479 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
17480 #endif
17481
17482@@ -257,7 +260,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
17483 {
17484 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
17485 }
17486+#endif
17487
17488+#if PAGETABLE_LEVELS == 3
17489+#include <asm-generic/pgtable-nopud.h>
17490+#endif
17491+
17492+#if PAGETABLE_LEVELS == 2
17493+#include <asm-generic/pgtable-nopmd.h>
17494+#endif
17495+
17496+#ifndef __ASSEMBLY__
17497 #if PAGETABLE_LEVELS > 3
17498 typedef struct { pudval_t pud; } pud_t;
17499
17500@@ -271,8 +284,6 @@ static inline pudval_t native_pud_val(pud_t pud)
17501 return pud.pud;
17502 }
17503 #else
17504-#include <asm-generic/pgtable-nopud.h>
17505-
17506 static inline pudval_t native_pud_val(pud_t pud)
17507 {
17508 return native_pgd_val(pud.pgd);
17509@@ -292,8 +303,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
17510 return pmd.pmd;
17511 }
17512 #else
17513-#include <asm-generic/pgtable-nopmd.h>
17514-
17515 static inline pmdval_t native_pmd_val(pmd_t pmd)
17516 {
17517 return native_pgd_val(pmd.pud.pgd);
17518@@ -333,7 +342,6 @@ typedef struct page *pgtable_t;
17519
17520 extern pteval_t __supported_pte_mask;
17521 extern void set_nx(void);
17522-extern int nx_enabled;
17523
17524 #define pgprot_writecombine pgprot_writecombine
17525 extern pgprot_t pgprot_writecombine(pgprot_t prot);
17526diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
17527index c8b0519..fd29e73 100644
17528--- a/arch/x86/include/asm/preempt.h
17529+++ b/arch/x86/include/asm/preempt.h
17530@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
17531 */
17532 static __always_inline bool __preempt_count_dec_and_test(void)
17533 {
17534- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
17535+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
17536 }
17537
17538 /*
17539diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
17540index 7b034a4..4fe3e3f 100644
17541--- a/arch/x86/include/asm/processor.h
17542+++ b/arch/x86/include/asm/processor.h
17543@@ -128,7 +128,7 @@ struct cpuinfo_x86 {
17544 /* Index into per_cpu list: */
17545 u16 cpu_index;
17546 u32 microcode;
17547-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
17548+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
17549
17550 #define X86_VENDOR_INTEL 0
17551 #define X86_VENDOR_CYRIX 1
17552@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
17553 : "memory");
17554 }
17555
17556+/* invpcid (%rdx),%rax */
17557+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
17558+
17559+#define INVPCID_SINGLE_ADDRESS 0UL
17560+#define INVPCID_SINGLE_CONTEXT 1UL
17561+#define INVPCID_ALL_GLOBAL 2UL
17562+#define INVPCID_ALL_MONGLOBAL 3UL
17563+
17564+#define PCID_KERNEL 0UL
17565+#define PCID_USER 1UL
17566+#define PCID_NOFLUSH (1UL << 63)
17567+
17568 static inline void load_cr3(pgd_t *pgdir)
17569 {
17570- write_cr3(__pa(pgdir));
17571+ write_cr3(__pa(pgdir) | PCID_KERNEL);
17572 }
17573
17574 #ifdef CONFIG_X86_32
17575@@ -283,7 +295,7 @@ struct tss_struct {
17576
17577 } ____cacheline_aligned;
17578
17579-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
17580+extern struct tss_struct init_tss[NR_CPUS];
17581
17582 /*
17583 * Save the original ist values for checking stack pointers during debugging
17584@@ -453,6 +465,7 @@ struct thread_struct {
17585 unsigned short ds;
17586 unsigned short fsindex;
17587 unsigned short gsindex;
17588+ unsigned short ss;
17589 #endif
17590 #ifdef CONFIG_X86_32
17591 unsigned long ip;
17592@@ -562,29 +575,8 @@ static inline void load_sp0(struct tss_struct *tss,
17593 extern unsigned long mmu_cr4_features;
17594 extern u32 *trampoline_cr4_features;
17595
17596-static inline void set_in_cr4(unsigned long mask)
17597-{
17598- unsigned long cr4;
17599-
17600- mmu_cr4_features |= mask;
17601- if (trampoline_cr4_features)
17602- *trampoline_cr4_features = mmu_cr4_features;
17603- cr4 = read_cr4();
17604- cr4 |= mask;
17605- write_cr4(cr4);
17606-}
17607-
17608-static inline void clear_in_cr4(unsigned long mask)
17609-{
17610- unsigned long cr4;
17611-
17612- mmu_cr4_features &= ~mask;
17613- if (trampoline_cr4_features)
17614- *trampoline_cr4_features = mmu_cr4_features;
17615- cr4 = read_cr4();
17616- cr4 &= ~mask;
17617- write_cr4(cr4);
17618-}
17619+extern void set_in_cr4(unsigned long mask);
17620+extern void clear_in_cr4(unsigned long mask);
17621
17622 typedef struct {
17623 unsigned long seg;
17624@@ -833,11 +825,18 @@ static inline void spin_lock_prefetch(const void *x)
17625 */
17626 #define TASK_SIZE PAGE_OFFSET
17627 #define TASK_SIZE_MAX TASK_SIZE
17628+
17629+#ifdef CONFIG_PAX_SEGMEXEC
17630+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
17631+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
17632+#else
17633 #define STACK_TOP TASK_SIZE
17634-#define STACK_TOP_MAX STACK_TOP
17635+#endif
17636+
17637+#define STACK_TOP_MAX TASK_SIZE
17638
17639 #define INIT_THREAD { \
17640- .sp0 = sizeof(init_stack) + (long)&init_stack, \
17641+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
17642 .vm86_info = NULL, \
17643 .sysenter_cs = __KERNEL_CS, \
17644 .io_bitmap_ptr = NULL, \
17645@@ -851,7 +850,7 @@ static inline void spin_lock_prefetch(const void *x)
17646 */
17647 #define INIT_TSS { \
17648 .x86_tss = { \
17649- .sp0 = sizeof(init_stack) + (long)&init_stack, \
17650+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
17651 .ss0 = __KERNEL_DS, \
17652 .ss1 = __KERNEL_CS, \
17653 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
17654@@ -862,11 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
17655 extern unsigned long thread_saved_pc(struct task_struct *tsk);
17656
17657 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
17658-#define KSTK_TOP(info) \
17659-({ \
17660- unsigned long *__ptr = (unsigned long *)(info); \
17661- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
17662-})
17663+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
17664
17665 /*
17666 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
17667@@ -881,7 +876,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17668 #define task_pt_regs(task) \
17669 ({ \
17670 struct pt_regs *__regs__; \
17671- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
17672+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
17673 __regs__ - 1; \
17674 })
17675
17676@@ -891,13 +886,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17677 /*
17678 * User space process size. 47bits minus one guard page.
17679 */
17680-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
17681+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
17682
17683 /* This decides where the kernel will search for a free chunk of vm
17684 * space during mmap's.
17685 */
17686 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
17687- 0xc0000000 : 0xFFFFe000)
17688+ 0xc0000000 : 0xFFFFf000)
17689
17690 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
17691 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
17692@@ -908,11 +903,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17693 #define STACK_TOP_MAX TASK_SIZE_MAX
17694
17695 #define INIT_THREAD { \
17696- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
17697+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
17698 }
17699
17700 #define INIT_TSS { \
17701- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
17702+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
17703 }
17704
17705 /*
17706@@ -940,6 +935,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
17707 */
17708 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
17709
17710+#ifdef CONFIG_PAX_SEGMEXEC
17711+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
17712+#endif
17713+
17714 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
17715
17716 /* Get/set a process' ability to use the timestamp counter instruction */
17717@@ -966,7 +965,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
17718 return 0;
17719 }
17720
17721-extern unsigned long arch_align_stack(unsigned long sp);
17722+#define arch_align_stack(x) ((x) & ~0xfUL)
17723 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
17724
17725 void default_idle(void);
17726@@ -976,6 +975,6 @@ bool xen_set_default_idle(void);
17727 #define xen_set_default_idle 0
17728 #endif
17729
17730-void stop_this_cpu(void *dummy);
17731+void stop_this_cpu(void *dummy) __noreturn;
17732 void df_debug(struct pt_regs *regs, long error_code);
17733 #endif /* _ASM_X86_PROCESSOR_H */
17734diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
17735index 942a086..6c26446 100644
17736--- a/arch/x86/include/asm/ptrace.h
17737+++ b/arch/x86/include/asm/ptrace.h
17738@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
17739 }
17740
17741 /*
17742- * user_mode_vm(regs) determines whether a register set came from user mode.
17743+ * user_mode(regs) determines whether a register set came from user mode.
17744 * This is true if V8086 mode was enabled OR if the register set was from
17745 * protected mode with RPL-3 CS value. This tricky test checks that with
17746 * one comparison. Many places in the kernel can bypass this full check
17747- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
17748+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
17749+ * be used.
17750 */
17751-static inline int user_mode(struct pt_regs *regs)
17752+static inline int user_mode_novm(struct pt_regs *regs)
17753 {
17754 #ifdef CONFIG_X86_32
17755 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
17756 #else
17757- return !!(regs->cs & 3);
17758+ return !!(regs->cs & SEGMENT_RPL_MASK);
17759 #endif
17760 }
17761
17762-static inline int user_mode_vm(struct pt_regs *regs)
17763+static inline int user_mode(struct pt_regs *regs)
17764 {
17765 #ifdef CONFIG_X86_32
17766 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
17767 USER_RPL;
17768 #else
17769- return user_mode(regs);
17770+ return user_mode_novm(regs);
17771 #endif
17772 }
17773
17774@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
17775 #ifdef CONFIG_X86_64
17776 static inline bool user_64bit_mode(struct pt_regs *regs)
17777 {
17778+ unsigned long cs = regs->cs & 0xffff;
17779 #ifndef CONFIG_PARAVIRT
17780 /*
17781 * On non-paravirt systems, this is the only long mode CPL 3
17782 * selector. We do not allow long mode selectors in the LDT.
17783 */
17784- return regs->cs == __USER_CS;
17785+ return cs == __USER_CS;
17786 #else
17787 /* Headers are too twisted for this to go in paravirt.h. */
17788- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
17789+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
17790 #endif
17791 }
17792
17793@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
17794 * Traps from the kernel do not save sp and ss.
17795 * Use the helper function to retrieve sp.
17796 */
17797- if (offset == offsetof(struct pt_regs, sp) &&
17798- regs->cs == __KERNEL_CS)
17799- return kernel_stack_pointer(regs);
17800+ if (offset == offsetof(struct pt_regs, sp)) {
17801+ unsigned long cs = regs->cs & 0xffff;
17802+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
17803+ return kernel_stack_pointer(regs);
17804+ }
17805 #endif
17806 return *(unsigned long *)((unsigned long)regs + offset);
17807 }
17808diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
17809index 9c6b890..5305f53 100644
17810--- a/arch/x86/include/asm/realmode.h
17811+++ b/arch/x86/include/asm/realmode.h
17812@@ -22,16 +22,14 @@ struct real_mode_header {
17813 #endif
17814 /* APM/BIOS reboot */
17815 u32 machine_real_restart_asm;
17816-#ifdef CONFIG_X86_64
17817 u32 machine_real_restart_seg;
17818-#endif
17819 };
17820
17821 /* This must match data at trampoline_32/64.S */
17822 struct trampoline_header {
17823 #ifdef CONFIG_X86_32
17824 u32 start;
17825- u16 gdt_pad;
17826+ u16 boot_cs;
17827 u16 gdt_limit;
17828 u32 gdt_base;
17829 #else
17830diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
17831index a82c4f1..ac45053 100644
17832--- a/arch/x86/include/asm/reboot.h
17833+++ b/arch/x86/include/asm/reboot.h
17834@@ -6,13 +6,13 @@
17835 struct pt_regs;
17836
17837 struct machine_ops {
17838- void (*restart)(char *cmd);
17839- void (*halt)(void);
17840- void (*power_off)(void);
17841+ void (* __noreturn restart)(char *cmd);
17842+ void (* __noreturn halt)(void);
17843+ void (* __noreturn power_off)(void);
17844 void (*shutdown)(void);
17845 void (*crash_shutdown)(struct pt_regs *);
17846- void (*emergency_restart)(void);
17847-};
17848+ void (* __noreturn emergency_restart)(void);
17849+} __no_const;
17850
17851 extern struct machine_ops machine_ops;
17852
17853diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
17854index 8f7866a..e442f20 100644
17855--- a/arch/x86/include/asm/rmwcc.h
17856+++ b/arch/x86/include/asm/rmwcc.h
17857@@ -3,7 +3,34 @@
17858
17859 #ifdef CC_HAVE_ASM_GOTO
17860
17861-#define __GEN_RMWcc(fullop, var, cc, ...) \
17862+#ifdef CONFIG_PAX_REFCOUNT
17863+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
17864+do { \
17865+ asm_volatile_goto (fullop \
17866+ ";jno 0f\n" \
17867+ fullantiop \
17868+ ";int $4\n0:\n" \
17869+ _ASM_EXTABLE(0b, 0b) \
17870+ ";j" cc " %l[cc_label]" \
17871+ : : "m" (var), ## __VA_ARGS__ \
17872+ : "memory" : cc_label); \
17873+ return 0; \
17874+cc_label: \
17875+ return 1; \
17876+} while (0)
17877+#else
17878+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
17879+do { \
17880+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
17881+ : : "m" (var), ## __VA_ARGS__ \
17882+ : "memory" : cc_label); \
17883+ return 0; \
17884+cc_label: \
17885+ return 1; \
17886+} while (0)
17887+#endif
17888+
17889+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
17890 do { \
17891 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
17892 : : "m" (var), ## __VA_ARGS__ \
17893@@ -13,15 +40,46 @@ cc_label: \
17894 return 1; \
17895 } while (0)
17896
17897-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
17898- __GEN_RMWcc(op " " arg0, var, cc)
17899+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
17900+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
17901
17902-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
17903- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
17904+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
17905+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
17906+
17907+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
17908+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
17909+
17910+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
17911+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
17912
17913 #else /* !CC_HAVE_ASM_GOTO */
17914
17915-#define __GEN_RMWcc(fullop, var, cc, ...) \
17916+#ifdef CONFIG_PAX_REFCOUNT
17917+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
17918+do { \
17919+ char c; \
17920+ asm volatile (fullop \
17921+ ";jno 0f\n" \
17922+ fullantiop \
17923+ ";int $4\n0:\n" \
17924+ _ASM_EXTABLE(0b, 0b) \
17925+ "; set" cc " %1" \
17926+ : "+m" (var), "=qm" (c) \
17927+ : __VA_ARGS__ : "memory"); \
17928+ return c != 0; \
17929+} while (0)
17930+#else
17931+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
17932+do { \
17933+ char c; \
17934+ asm volatile (fullop "; set" cc " %1" \
17935+ : "+m" (var), "=qm" (c) \
17936+ : __VA_ARGS__ : "memory"); \
17937+ return c != 0; \
17938+} while (0)
17939+#endif
17940+
17941+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
17942 do { \
17943 char c; \
17944 asm volatile (fullop "; set" cc " %1" \
17945@@ -30,11 +88,17 @@ do { \
17946 return c != 0; \
17947 } while (0)
17948
17949-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
17950- __GEN_RMWcc(op " " arg0, var, cc)
17951+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
17952+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
17953+
17954+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
17955+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
17956+
17957+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
17958+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
17959
17960-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
17961- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
17962+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
17963+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
17964
17965 #endif /* CC_HAVE_ASM_GOTO */
17966
17967diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
17968index cad82c9..2e5c5c1 100644
17969--- a/arch/x86/include/asm/rwsem.h
17970+++ b/arch/x86/include/asm/rwsem.h
17971@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
17972 {
17973 asm volatile("# beginning down_read\n\t"
17974 LOCK_PREFIX _ASM_INC "(%1)\n\t"
17975+
17976+#ifdef CONFIG_PAX_REFCOUNT
17977+ "jno 0f\n"
17978+ LOCK_PREFIX _ASM_DEC "(%1)\n"
17979+ "int $4\n0:\n"
17980+ _ASM_EXTABLE(0b, 0b)
17981+#endif
17982+
17983 /* adds 0x00000001 */
17984 " jns 1f\n"
17985 " call call_rwsem_down_read_failed\n"
17986@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
17987 "1:\n\t"
17988 " mov %1,%2\n\t"
17989 " add %3,%2\n\t"
17990+
17991+#ifdef CONFIG_PAX_REFCOUNT
17992+ "jno 0f\n"
17993+ "sub %3,%2\n"
17994+ "int $4\n0:\n"
17995+ _ASM_EXTABLE(0b, 0b)
17996+#endif
17997+
17998 " jle 2f\n\t"
17999 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18000 " jnz 1b\n\t"
18001@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18002 long tmp;
18003 asm volatile("# beginning down_write\n\t"
18004 LOCK_PREFIX " xadd %1,(%2)\n\t"
18005+
18006+#ifdef CONFIG_PAX_REFCOUNT
18007+ "jno 0f\n"
18008+ "mov %1,(%2)\n"
18009+ "int $4\n0:\n"
18010+ _ASM_EXTABLE(0b, 0b)
18011+#endif
18012+
18013 /* adds 0xffff0001, returns the old value */
18014 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18015 /* was the active mask 0 before? */
18016@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18017 long tmp;
18018 asm volatile("# beginning __up_read\n\t"
18019 LOCK_PREFIX " xadd %1,(%2)\n\t"
18020+
18021+#ifdef CONFIG_PAX_REFCOUNT
18022+ "jno 0f\n"
18023+ "mov %1,(%2)\n"
18024+ "int $4\n0:\n"
18025+ _ASM_EXTABLE(0b, 0b)
18026+#endif
18027+
18028 /* subtracts 1, returns the old value */
18029 " jns 1f\n\t"
18030 " call call_rwsem_wake\n" /* expects old value in %edx */
18031@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18032 long tmp;
18033 asm volatile("# beginning __up_write\n\t"
18034 LOCK_PREFIX " xadd %1,(%2)\n\t"
18035+
18036+#ifdef CONFIG_PAX_REFCOUNT
18037+ "jno 0f\n"
18038+ "mov %1,(%2)\n"
18039+ "int $4\n0:\n"
18040+ _ASM_EXTABLE(0b, 0b)
18041+#endif
18042+
18043 /* subtracts 0xffff0001, returns the old value */
18044 " jns 1f\n\t"
18045 " call call_rwsem_wake\n" /* expects old value in %edx */
18046@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18047 {
18048 asm volatile("# beginning __downgrade_write\n\t"
18049 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18050+
18051+#ifdef CONFIG_PAX_REFCOUNT
18052+ "jno 0f\n"
18053+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18054+ "int $4\n0:\n"
18055+ _ASM_EXTABLE(0b, 0b)
18056+#endif
18057+
18058 /*
18059 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18060 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18061@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18062 */
18063 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18064 {
18065- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18066+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18067+
18068+#ifdef CONFIG_PAX_REFCOUNT
18069+ "jno 0f\n"
18070+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18071+ "int $4\n0:\n"
18072+ _ASM_EXTABLE(0b, 0b)
18073+#endif
18074+
18075 : "+m" (sem->count)
18076 : "er" (delta));
18077 }
18078@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18079 */
18080 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18081 {
18082- return delta + xadd(&sem->count, delta);
18083+ return delta + xadd_check_overflow(&sem->count, delta);
18084 }
18085
18086 #endif /* __KERNEL__ */
18087diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18088index 6f1c3a8..7744f19 100644
18089--- a/arch/x86/include/asm/segment.h
18090+++ b/arch/x86/include/asm/segment.h
18091@@ -64,10 +64,15 @@
18092 * 26 - ESPFIX small SS
18093 * 27 - per-cpu [ offset to per-cpu data area ]
18094 * 28 - stack_canary-20 [ for stack protector ]
18095- * 29 - unused
18096- * 30 - unused
18097+ * 29 - PCI BIOS CS
18098+ * 30 - PCI BIOS DS
18099 * 31 - TSS for double fault handler
18100 */
18101+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18102+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18103+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18104+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18105+
18106 #define GDT_ENTRY_TLS_MIN 6
18107 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18108
18109@@ -79,6 +84,8 @@
18110
18111 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18112
18113+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18114+
18115 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18116
18117 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18118@@ -104,6 +111,12 @@
18119 #define __KERNEL_STACK_CANARY 0
18120 #endif
18121
18122+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
18123+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
18124+
18125+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
18126+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
18127+
18128 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
18129
18130 /*
18131@@ -141,7 +154,7 @@
18132 */
18133
18134 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
18135-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
18136+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
18137
18138
18139 #else
18140@@ -165,6 +178,8 @@
18141 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
18142 #define __USER32_DS __USER_DS
18143
18144+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
18145+
18146 #define GDT_ENTRY_TSS 8 /* needs two entries */
18147 #define GDT_ENTRY_LDT 10 /* needs two entries */
18148 #define GDT_ENTRY_TLS_MIN 12
18149@@ -173,6 +188,8 @@
18150 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
18151 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
18152
18153+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
18154+
18155 /* TLS indexes for 64bit - hardcoded in arch_prctl */
18156 #define FS_TLS 0
18157 #define GS_TLS 1
18158@@ -180,12 +197,14 @@
18159 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
18160 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
18161
18162-#define GDT_ENTRIES 16
18163+#define GDT_ENTRIES 17
18164
18165 #endif
18166
18167 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
18168+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
18169 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
18170+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
18171 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
18172 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
18173 #ifndef CONFIG_PARAVIRT
18174@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
18175 {
18176 unsigned long __limit;
18177 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
18178- return __limit + 1;
18179+ return __limit;
18180 }
18181
18182 #endif /* !__ASSEMBLY__ */
18183diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
18184index 8d3120f..352b440 100644
18185--- a/arch/x86/include/asm/smap.h
18186+++ b/arch/x86/include/asm/smap.h
18187@@ -25,11 +25,40 @@
18188
18189 #include <asm/alternative-asm.h>
18190
18191+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18192+#define ASM_PAX_OPEN_USERLAND \
18193+ 661: jmp 663f; \
18194+ .pushsection .altinstr_replacement, "a" ; \
18195+ 662: pushq %rax; nop; \
18196+ .popsection ; \
18197+ .pushsection .altinstructions, "a" ; \
18198+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18199+ .popsection ; \
18200+ call __pax_open_userland; \
18201+ popq %rax; \
18202+ 663:
18203+
18204+#define ASM_PAX_CLOSE_USERLAND \
18205+ 661: jmp 663f; \
18206+ .pushsection .altinstr_replacement, "a" ; \
18207+ 662: pushq %rax; nop; \
18208+ .popsection; \
18209+ .pushsection .altinstructions, "a" ; \
18210+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18211+ .popsection; \
18212+ call __pax_close_userland; \
18213+ popq %rax; \
18214+ 663:
18215+#else
18216+#define ASM_PAX_OPEN_USERLAND
18217+#define ASM_PAX_CLOSE_USERLAND
18218+#endif
18219+
18220 #ifdef CONFIG_X86_SMAP
18221
18222 #define ASM_CLAC \
18223 661: ASM_NOP3 ; \
18224- .pushsection .altinstr_replacement, "ax" ; \
18225+ .pushsection .altinstr_replacement, "a" ; \
18226 662: __ASM_CLAC ; \
18227 .popsection ; \
18228 .pushsection .altinstructions, "a" ; \
18229@@ -38,7 +67,7 @@
18230
18231 #define ASM_STAC \
18232 661: ASM_NOP3 ; \
18233- .pushsection .altinstr_replacement, "ax" ; \
18234+ .pushsection .altinstr_replacement, "a" ; \
18235 662: __ASM_STAC ; \
18236 .popsection ; \
18237 .pushsection .altinstructions, "a" ; \
18238@@ -56,6 +85,37 @@
18239
18240 #include <asm/alternative.h>
18241
18242+#define __HAVE_ARCH_PAX_OPEN_USERLAND
18243+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
18244+
18245+extern void __pax_open_userland(void);
18246+static __always_inline unsigned long pax_open_userland(void)
18247+{
18248+
18249+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18250+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
18251+ :
18252+ : [open] "i" (__pax_open_userland)
18253+ : "memory", "rax");
18254+#endif
18255+
18256+ return 0;
18257+}
18258+
18259+extern void __pax_close_userland(void);
18260+static __always_inline unsigned long pax_close_userland(void)
18261+{
18262+
18263+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18264+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
18265+ :
18266+ : [close] "i" (__pax_close_userland)
18267+ : "memory", "rax");
18268+#endif
18269+
18270+ return 0;
18271+}
18272+
18273 #ifdef CONFIG_X86_SMAP
18274
18275 static __always_inline void clac(void)
18276diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
18277index 4137890..03fa172 100644
18278--- a/arch/x86/include/asm/smp.h
18279+++ b/arch/x86/include/asm/smp.h
18280@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
18281 /* cpus sharing the last level cache: */
18282 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
18283 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
18284-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
18285+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
18286
18287 static inline struct cpumask *cpu_sibling_mask(int cpu)
18288 {
18289@@ -79,7 +79,7 @@ struct smp_ops {
18290
18291 void (*send_call_func_ipi)(const struct cpumask *mask);
18292 void (*send_call_func_single_ipi)(int cpu);
18293-};
18294+} __no_const;
18295
18296 /* Globals due to paravirt */
18297 extern void set_cpu_sibling_map(int cpu);
18298@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
18299 extern int safe_smp_processor_id(void);
18300
18301 #elif defined(CONFIG_X86_64_SMP)
18302-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18303-
18304-#define stack_smp_processor_id() \
18305-({ \
18306- struct thread_info *ti; \
18307- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
18308- ti->cpu; \
18309-})
18310+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18311+#define stack_smp_processor_id() raw_smp_processor_id()
18312 #define safe_smp_processor_id() smp_processor_id()
18313
18314 #endif
18315diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
18316index bf156de..1a782ab 100644
18317--- a/arch/x86/include/asm/spinlock.h
18318+++ b/arch/x86/include/asm/spinlock.h
18319@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
18320 static inline void arch_read_lock(arch_rwlock_t *rw)
18321 {
18322 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
18323+
18324+#ifdef CONFIG_PAX_REFCOUNT
18325+ "jno 0f\n"
18326+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
18327+ "int $4\n0:\n"
18328+ _ASM_EXTABLE(0b, 0b)
18329+#endif
18330+
18331 "jns 1f\n"
18332 "call __read_lock_failed\n\t"
18333 "1:\n"
18334@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
18335 static inline void arch_write_lock(arch_rwlock_t *rw)
18336 {
18337 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
18338+
18339+#ifdef CONFIG_PAX_REFCOUNT
18340+ "jno 0f\n"
18341+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
18342+ "int $4\n0:\n"
18343+ _ASM_EXTABLE(0b, 0b)
18344+#endif
18345+
18346 "jz 1f\n"
18347 "call __write_lock_failed\n\t"
18348 "1:\n"
18349@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
18350
18351 static inline void arch_read_unlock(arch_rwlock_t *rw)
18352 {
18353- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
18354+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
18355+
18356+#ifdef CONFIG_PAX_REFCOUNT
18357+ "jno 0f\n"
18358+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
18359+ "int $4\n0:\n"
18360+ _ASM_EXTABLE(0b, 0b)
18361+#endif
18362+
18363 :"+m" (rw->lock) : : "memory");
18364 }
18365
18366 static inline void arch_write_unlock(arch_rwlock_t *rw)
18367 {
18368- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
18369+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
18370+
18371+#ifdef CONFIG_PAX_REFCOUNT
18372+ "jno 0f\n"
18373+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
18374+ "int $4\n0:\n"
18375+ _ASM_EXTABLE(0b, 0b)
18376+#endif
18377+
18378 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
18379 }
18380
18381diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
18382index 6a99859..03cb807 100644
18383--- a/arch/x86/include/asm/stackprotector.h
18384+++ b/arch/x86/include/asm/stackprotector.h
18385@@ -47,7 +47,7 @@
18386 * head_32 for boot CPU and setup_per_cpu_areas() for others.
18387 */
18388 #define GDT_STACK_CANARY_INIT \
18389- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
18390+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
18391
18392 /*
18393 * Initialize the stackprotector canary value.
18394@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
18395
18396 static inline void load_stack_canary_segment(void)
18397 {
18398-#ifdef CONFIG_X86_32
18399+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18400 asm volatile ("mov %0, %%gs" : : "r" (0));
18401 #endif
18402 }
18403diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
18404index 70bbe39..4ae2bd4 100644
18405--- a/arch/x86/include/asm/stacktrace.h
18406+++ b/arch/x86/include/asm/stacktrace.h
18407@@ -11,28 +11,20 @@
18408
18409 extern int kstack_depth_to_print;
18410
18411-struct thread_info;
18412+struct task_struct;
18413 struct stacktrace_ops;
18414
18415-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
18416- unsigned long *stack,
18417- unsigned long bp,
18418- const struct stacktrace_ops *ops,
18419- void *data,
18420- unsigned long *end,
18421- int *graph);
18422+typedef unsigned long walk_stack_t(struct task_struct *task,
18423+ void *stack_start,
18424+ unsigned long *stack,
18425+ unsigned long bp,
18426+ const struct stacktrace_ops *ops,
18427+ void *data,
18428+ unsigned long *end,
18429+ int *graph);
18430
18431-extern unsigned long
18432-print_context_stack(struct thread_info *tinfo,
18433- unsigned long *stack, unsigned long bp,
18434- const struct stacktrace_ops *ops, void *data,
18435- unsigned long *end, int *graph);
18436-
18437-extern unsigned long
18438-print_context_stack_bp(struct thread_info *tinfo,
18439- unsigned long *stack, unsigned long bp,
18440- const struct stacktrace_ops *ops, void *data,
18441- unsigned long *end, int *graph);
18442+extern walk_stack_t print_context_stack;
18443+extern walk_stack_t print_context_stack_bp;
18444
18445 /* Generic stack tracer with callbacks */
18446
18447@@ -40,7 +32,7 @@ struct stacktrace_ops {
18448 void (*address)(void *data, unsigned long address, int reliable);
18449 /* On negative return stop dumping */
18450 int (*stack)(void *data, char *name);
18451- walk_stack_t walk_stack;
18452+ walk_stack_t *walk_stack;
18453 };
18454
18455 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
18456diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
18457index d7f3b3b..3cc39f1 100644
18458--- a/arch/x86/include/asm/switch_to.h
18459+++ b/arch/x86/include/asm/switch_to.h
18460@@ -108,7 +108,7 @@ do { \
18461 "call __switch_to\n\t" \
18462 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
18463 __switch_canary \
18464- "movq %P[thread_info](%%rsi),%%r8\n\t" \
18465+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
18466 "movq %%rax,%%rdi\n\t" \
18467 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
18468 "jnz ret_from_fork\n\t" \
18469@@ -119,7 +119,7 @@ do { \
18470 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
18471 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
18472 [_tif_fork] "i" (_TIF_FORK), \
18473- [thread_info] "i" (offsetof(struct task_struct, stack)), \
18474+ [thread_info] "m" (current_tinfo), \
18475 [current_task] "m" (current_task) \
18476 __switch_canary_iparam \
18477 : "memory", "cc" __EXTRA_CLOBBER)
18478diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
18479index 3ba3de4..6c113b2 100644
18480--- a/arch/x86/include/asm/thread_info.h
18481+++ b/arch/x86/include/asm/thread_info.h
18482@@ -10,6 +10,7 @@
18483 #include <linux/compiler.h>
18484 #include <asm/page.h>
18485 #include <asm/types.h>
18486+#include <asm/percpu.h>
18487
18488 /*
18489 * low level task data that entry.S needs immediate access to
18490@@ -23,7 +24,6 @@ struct exec_domain;
18491 #include <linux/atomic.h>
18492
18493 struct thread_info {
18494- struct task_struct *task; /* main task structure */
18495 struct exec_domain *exec_domain; /* execution domain */
18496 __u32 flags; /* low level flags */
18497 __u32 status; /* thread synchronous flags */
18498@@ -32,19 +32,13 @@ struct thread_info {
18499 mm_segment_t addr_limit;
18500 struct restart_block restart_block;
18501 void __user *sysenter_return;
18502-#ifdef CONFIG_X86_32
18503- unsigned long previous_esp; /* ESP of the previous stack in
18504- case of nested (IRQ) stacks
18505- */
18506- __u8 supervisor_stack[0];
18507-#endif
18508+ unsigned long lowest_stack;
18509 unsigned int sig_on_uaccess_error:1;
18510 unsigned int uaccess_err:1; /* uaccess failed */
18511 };
18512
18513-#define INIT_THREAD_INFO(tsk) \
18514+#define INIT_THREAD_INFO \
18515 { \
18516- .task = &tsk, \
18517 .exec_domain = &default_exec_domain, \
18518 .flags = 0, \
18519 .cpu = 0, \
18520@@ -55,7 +49,7 @@ struct thread_info {
18521 }, \
18522 }
18523
18524-#define init_thread_info (init_thread_union.thread_info)
18525+#define init_thread_info (init_thread_union.stack)
18526 #define init_stack (init_thread_union.stack)
18527
18528 #else /* !__ASSEMBLY__ */
18529@@ -95,6 +89,7 @@ struct thread_info {
18530 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
18531 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
18532 #define TIF_X32 30 /* 32-bit native x86-64 binary */
18533+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
18534
18535 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
18536 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
18537@@ -118,17 +113,18 @@ struct thread_info {
18538 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
18539 #define _TIF_ADDR32 (1 << TIF_ADDR32)
18540 #define _TIF_X32 (1 << TIF_X32)
18541+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
18542
18543 /* work to do in syscall_trace_enter() */
18544 #define _TIF_WORK_SYSCALL_ENTRY \
18545 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
18546 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
18547- _TIF_NOHZ)
18548+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
18549
18550 /* work to do in syscall_trace_leave() */
18551 #define _TIF_WORK_SYSCALL_EXIT \
18552 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
18553- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
18554+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
18555
18556 /* work to do on interrupt/exception return */
18557 #define _TIF_WORK_MASK \
18558@@ -139,7 +135,7 @@ struct thread_info {
18559 /* work to do on any return to user space */
18560 #define _TIF_ALLWORK_MASK \
18561 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
18562- _TIF_NOHZ)
18563+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
18564
18565 /* Only used for 64 bit */
18566 #define _TIF_DO_NOTIFY_MASK \
18567@@ -153,45 +149,40 @@ struct thread_info {
18568 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
18569 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
18570
18571-#ifdef CONFIG_X86_32
18572-
18573-#define STACK_WARN (THREAD_SIZE/8)
18574-/*
18575- * macros/functions for gaining access to the thread information structure
18576- *
18577- * preempt_count needs to be 1 initially, until the scheduler is functional.
18578- */
18579-#ifndef __ASSEMBLY__
18580-
18581-
18582-/* how to get the current stack pointer from C */
18583-register unsigned long current_stack_pointer asm("esp") __used;
18584-
18585-/* how to get the thread information struct from C */
18586-static inline struct thread_info *current_thread_info(void)
18587-{
18588- return (struct thread_info *)
18589- (current_stack_pointer & ~(THREAD_SIZE - 1));
18590-}
18591-
18592-#else /* !__ASSEMBLY__ */
18593-
18594+#ifdef __ASSEMBLY__
18595 /* how to get the thread information struct from ASM */
18596 #define GET_THREAD_INFO(reg) \
18597- movl $-THREAD_SIZE, reg; \
18598- andl %esp, reg
18599+ mov PER_CPU_VAR(current_tinfo), reg
18600
18601 /* use this one if reg already contains %esp */
18602-#define GET_THREAD_INFO_WITH_ESP(reg) \
18603- andl $-THREAD_SIZE, reg
18604+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
18605+#else
18606+/* how to get the thread information struct from C */
18607+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
18608+
18609+static __always_inline struct thread_info *current_thread_info(void)
18610+{
18611+ return this_cpu_read_stable(current_tinfo);
18612+}
18613+#endif
18614+
18615+#ifdef CONFIG_X86_32
18616+
18617+#define STACK_WARN (THREAD_SIZE/8)
18618+/*
18619+ * macros/functions for gaining access to the thread information structure
18620+ *
18621+ * preempt_count needs to be 1 initially, until the scheduler is functional.
18622+ */
18623+#ifndef __ASSEMBLY__
18624+
18625+/* how to get the current stack pointer from C */
18626+register unsigned long current_stack_pointer asm("esp") __used;
18627
18628 #endif
18629
18630 #else /* X86_32 */
18631
18632-#include <asm/percpu.h>
18633-#define KERNEL_STACK_OFFSET (5*8)
18634-
18635 /*
18636 * macros/functions for gaining access to the thread information structure
18637 * preempt_count needs to be 1 initially, until the scheduler is functional.
18638@@ -199,27 +190,8 @@ static inline struct thread_info *current_thread_info(void)
18639 #ifndef __ASSEMBLY__
18640 DECLARE_PER_CPU(unsigned long, kernel_stack);
18641
18642-static inline struct thread_info *current_thread_info(void)
18643-{
18644- struct thread_info *ti;
18645- ti = (void *)(this_cpu_read_stable(kernel_stack) +
18646- KERNEL_STACK_OFFSET - THREAD_SIZE);
18647- return ti;
18648-}
18649-
18650-#else /* !__ASSEMBLY__ */
18651-
18652-/* how to get the thread information struct from ASM */
18653-#define GET_THREAD_INFO(reg) \
18654- movq PER_CPU_VAR(kernel_stack),reg ; \
18655- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
18656-
18657-/*
18658- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
18659- * a certain register (to be used in assembler memory operands).
18660- */
18661-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
18662-
18663+/* how to get the current stack pointer from C */
18664+register unsigned long current_stack_pointer asm("rsp") __used;
18665 #endif
18666
18667 #endif /* !X86_32 */
18668@@ -278,5 +250,12 @@ static inline bool is_ia32_task(void)
18669 extern void arch_task_cache_init(void);
18670 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
18671 extern void arch_release_task_struct(struct task_struct *tsk);
18672+
18673+#define __HAVE_THREAD_FUNCTIONS
18674+#define task_thread_info(task) (&(task)->tinfo)
18675+#define task_stack_page(task) ((task)->stack)
18676+#define setup_thread_stack(p, org) do {} while (0)
18677+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
18678+
18679 #endif
18680 #endif /* _ASM_X86_THREAD_INFO_H */
18681diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
18682index e6d90ba..f81f114 100644
18683--- a/arch/x86/include/asm/tlbflush.h
18684+++ b/arch/x86/include/asm/tlbflush.h
18685@@ -17,18 +17,44 @@
18686
18687 static inline void __native_flush_tlb(void)
18688 {
18689+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18690+ u64 descriptor[2];
18691+
18692+ descriptor[0] = PCID_KERNEL;
18693+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
18694+ return;
18695+ }
18696+
18697+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18698+ if (static_cpu_has(X86_FEATURE_PCID)) {
18699+ unsigned int cpu = raw_get_cpu();
18700+
18701+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18702+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18703+ raw_put_cpu_no_resched();
18704+ return;
18705+ }
18706+#endif
18707+
18708 native_write_cr3(native_read_cr3());
18709 }
18710
18711 static inline void __native_flush_tlb_global_irq_disabled(void)
18712 {
18713- unsigned long cr4;
18714+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18715+ u64 descriptor[2];
18716
18717- cr4 = native_read_cr4();
18718- /* clear PGE */
18719- native_write_cr4(cr4 & ~X86_CR4_PGE);
18720- /* write old PGE again and flush TLBs */
18721- native_write_cr4(cr4);
18722+ descriptor[0] = PCID_KERNEL;
18723+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
18724+ } else {
18725+ unsigned long cr4;
18726+
18727+ cr4 = native_read_cr4();
18728+ /* clear PGE */
18729+ native_write_cr4(cr4 & ~X86_CR4_PGE);
18730+ /* write old PGE again and flush TLBs */
18731+ native_write_cr4(cr4);
18732+ }
18733 }
18734
18735 static inline void __native_flush_tlb_global(void)
18736@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
18737
18738 static inline void __native_flush_tlb_single(unsigned long addr)
18739 {
18740+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18741+ u64 descriptor[2];
18742+
18743+ descriptor[0] = PCID_KERNEL;
18744+ descriptor[1] = addr;
18745+
18746+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18747+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
18748+ if (addr < TASK_SIZE_MAX)
18749+ descriptor[1] += pax_user_shadow_base;
18750+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
18751+ }
18752+
18753+ descriptor[0] = PCID_USER;
18754+ descriptor[1] = addr;
18755+#endif
18756+
18757+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
18758+ return;
18759+ }
18760+
18761+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18762+ if (static_cpu_has(X86_FEATURE_PCID)) {
18763+ unsigned int cpu = raw_get_cpu();
18764+
18765+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
18766+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
18767+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18768+ raw_put_cpu_no_resched();
18769+
18770+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
18771+ addr += pax_user_shadow_base;
18772+ }
18773+#endif
18774+
18775 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
18776 }
18777
18778diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
18779index 8ec57c0..3ee58c9 100644
18780--- a/arch/x86/include/asm/uaccess.h
18781+++ b/arch/x86/include/asm/uaccess.h
18782@@ -7,6 +7,7 @@
18783 #include <linux/compiler.h>
18784 #include <linux/thread_info.h>
18785 #include <linux/string.h>
18786+#include <linux/spinlock.h>
18787 #include <asm/asm.h>
18788 #include <asm/page.h>
18789 #include <asm/smap.h>
18790@@ -29,7 +30,12 @@
18791
18792 #define get_ds() (KERNEL_DS)
18793 #define get_fs() (current_thread_info()->addr_limit)
18794+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18795+void __set_fs(mm_segment_t x);
18796+void set_fs(mm_segment_t x);
18797+#else
18798 #define set_fs(x) (current_thread_info()->addr_limit = (x))
18799+#endif
18800
18801 #define segment_eq(a, b) ((a).seg == (b).seg)
18802
18803@@ -77,8 +83,34 @@
18804 * checks that the pointer is in the user space range - after calling
18805 * this function, memory access functions may still return -EFAULT.
18806 */
18807-#define access_ok(type, addr, size) \
18808- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
18809+extern int _cond_resched(void);
18810+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
18811+#define access_ok(type, addr, size) \
18812+({ \
18813+ long __size = size; \
18814+ unsigned long __addr = (unsigned long)addr; \
18815+ unsigned long __addr_ao = __addr & PAGE_MASK; \
18816+ unsigned long __end_ao = __addr + __size - 1; \
18817+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
18818+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
18819+ while(__addr_ao <= __end_ao) { \
18820+ char __c_ao; \
18821+ __addr_ao += PAGE_SIZE; \
18822+ if (__size > PAGE_SIZE) \
18823+ _cond_resched(); \
18824+ if (__get_user(__c_ao, (char __user *)__addr)) \
18825+ break; \
18826+ if (type != VERIFY_WRITE) { \
18827+ __addr = __addr_ao; \
18828+ continue; \
18829+ } \
18830+ if (__put_user(__c_ao, (char __user *)__addr)) \
18831+ break; \
18832+ __addr = __addr_ao; \
18833+ } \
18834+ } \
18835+ __ret_ao; \
18836+})
18837
18838 /*
18839 * The exception table consists of pairs of addresses relative to the
18840@@ -168,10 +200,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18841 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
18842 __chk_user_ptr(ptr); \
18843 might_fault(); \
18844+ pax_open_userland(); \
18845 asm volatile("call __get_user_%P3" \
18846 : "=a" (__ret_gu), "=r" (__val_gu) \
18847 : "0" (ptr), "i" (sizeof(*(ptr)))); \
18848 (x) = (__typeof__(*(ptr))) __val_gu; \
18849+ pax_close_userland(); \
18850 __ret_gu; \
18851 })
18852
18853@@ -179,13 +213,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18854 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
18855 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
18856
18857-
18858+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18859+#define __copyuser_seg "gs;"
18860+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
18861+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
18862+#else
18863+#define __copyuser_seg
18864+#define __COPYUSER_SET_ES
18865+#define __COPYUSER_RESTORE_ES
18866+#endif
18867
18868 #ifdef CONFIG_X86_32
18869 #define __put_user_asm_u64(x, addr, err, errret) \
18870 asm volatile(ASM_STAC "\n" \
18871- "1: movl %%eax,0(%2)\n" \
18872- "2: movl %%edx,4(%2)\n" \
18873+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
18874+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
18875 "3: " ASM_CLAC "\n" \
18876 ".section .fixup,\"ax\"\n" \
18877 "4: movl %3,%0\n" \
18878@@ -198,8 +240,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18879
18880 #define __put_user_asm_ex_u64(x, addr) \
18881 asm volatile(ASM_STAC "\n" \
18882- "1: movl %%eax,0(%1)\n" \
18883- "2: movl %%edx,4(%1)\n" \
18884+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
18885+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
18886 "3: " ASM_CLAC "\n" \
18887 _ASM_EXTABLE_EX(1b, 2b) \
18888 _ASM_EXTABLE_EX(2b, 3b) \
18889@@ -249,7 +291,8 @@ extern void __put_user_8(void);
18890 __typeof__(*(ptr)) __pu_val; \
18891 __chk_user_ptr(ptr); \
18892 might_fault(); \
18893- __pu_val = x; \
18894+ __pu_val = (x); \
18895+ pax_open_userland(); \
18896 switch (sizeof(*(ptr))) { \
18897 case 1: \
18898 __put_user_x(1, __pu_val, ptr, __ret_pu); \
18899@@ -267,6 +310,7 @@ extern void __put_user_8(void);
18900 __put_user_x(X, __pu_val, ptr, __ret_pu); \
18901 break; \
18902 } \
18903+ pax_close_userland(); \
18904 __ret_pu; \
18905 })
18906
18907@@ -347,8 +391,10 @@ do { \
18908 } while (0)
18909
18910 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
18911+do { \
18912+ pax_open_userland(); \
18913 asm volatile(ASM_STAC "\n" \
18914- "1: mov"itype" %2,%"rtype"1\n" \
18915+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
18916 "2: " ASM_CLAC "\n" \
18917 ".section .fixup,\"ax\"\n" \
18918 "3: mov %3,%0\n" \
18919@@ -356,8 +402,10 @@ do { \
18920 " jmp 2b\n" \
18921 ".previous\n" \
18922 _ASM_EXTABLE(1b, 3b) \
18923- : "=r" (err), ltype(x) \
18924- : "m" (__m(addr)), "i" (errret), "0" (err))
18925+ : "=r" (err), ltype (x) \
18926+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
18927+ pax_close_userland(); \
18928+} while (0)
18929
18930 #define __get_user_size_ex(x, ptr, size) \
18931 do { \
18932@@ -381,7 +429,7 @@ do { \
18933 } while (0)
18934
18935 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
18936- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
18937+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
18938 "2:\n" \
18939 _ASM_EXTABLE_EX(1b, 2b) \
18940 : ltype(x) : "m" (__m(addr)))
18941@@ -398,13 +446,24 @@ do { \
18942 int __gu_err; \
18943 unsigned long __gu_val; \
18944 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
18945- (x) = (__force __typeof__(*(ptr)))__gu_val; \
18946+ (x) = (__typeof__(*(ptr)))__gu_val; \
18947 __gu_err; \
18948 })
18949
18950 /* FIXME: this hack is definitely wrong -AK */
18951 struct __large_struct { unsigned long buf[100]; };
18952-#define __m(x) (*(struct __large_struct __user *)(x))
18953+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18954+#define ____m(x) \
18955+({ \
18956+ unsigned long ____x = (unsigned long)(x); \
18957+ if (____x < pax_user_shadow_base) \
18958+ ____x += pax_user_shadow_base; \
18959+ (typeof(x))____x; \
18960+})
18961+#else
18962+#define ____m(x) (x)
18963+#endif
18964+#define __m(x) (*(struct __large_struct __user *)____m(x))
18965
18966 /*
18967 * Tell gcc we read from memory instead of writing: this is because
18968@@ -412,8 +471,10 @@ struct __large_struct { unsigned long buf[100]; };
18969 * aliasing issues.
18970 */
18971 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
18972+do { \
18973+ pax_open_userland(); \
18974 asm volatile(ASM_STAC "\n" \
18975- "1: mov"itype" %"rtype"1,%2\n" \
18976+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
18977 "2: " ASM_CLAC "\n" \
18978 ".section .fixup,\"ax\"\n" \
18979 "3: mov %3,%0\n" \
18980@@ -421,10 +482,12 @@ struct __large_struct { unsigned long buf[100]; };
18981 ".previous\n" \
18982 _ASM_EXTABLE(1b, 3b) \
18983 : "=r"(err) \
18984- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
18985+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
18986+ pax_close_userland(); \
18987+} while (0)
18988
18989 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
18990- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
18991+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
18992 "2:\n" \
18993 _ASM_EXTABLE_EX(1b, 2b) \
18994 : : ltype(x), "m" (__m(addr)))
18995@@ -434,11 +497,13 @@ struct __large_struct { unsigned long buf[100]; };
18996 */
18997 #define uaccess_try do { \
18998 current_thread_info()->uaccess_err = 0; \
18999+ pax_open_userland(); \
19000 stac(); \
19001 barrier();
19002
19003 #define uaccess_catch(err) \
19004 clac(); \
19005+ pax_close_userland(); \
19006 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19007 } while (0)
19008
19009@@ -463,8 +528,12 @@ struct __large_struct { unsigned long buf[100]; };
19010 * On error, the variable @x is set to zero.
19011 */
19012
19013+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19014+#define __get_user(x, ptr) get_user((x), (ptr))
19015+#else
19016 #define __get_user(x, ptr) \
19017 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19018+#endif
19019
19020 /**
19021 * __put_user: - Write a simple value into user space, with less checking.
19022@@ -486,8 +555,12 @@ struct __large_struct { unsigned long buf[100]; };
19023 * Returns zero on success, or -EFAULT on error.
19024 */
19025
19026+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19027+#define __put_user(x, ptr) put_user((x), (ptr))
19028+#else
19029 #define __put_user(x, ptr) \
19030 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19031+#endif
19032
19033 #define __get_user_unaligned __get_user
19034 #define __put_user_unaligned __put_user
19035@@ -505,7 +578,7 @@ struct __large_struct { unsigned long buf[100]; };
19036 #define get_user_ex(x, ptr) do { \
19037 unsigned long __gue_val; \
19038 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19039- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19040+ (x) = (__typeof__(*(ptr)))__gue_val; \
19041 } while (0)
19042
19043 #define put_user_try uaccess_try
19044@@ -536,17 +609,6 @@ extern struct movsl_mask {
19045
19046 #define ARCH_HAS_NOCACHE_UACCESS 1
19047
19048-#ifdef CONFIG_X86_32
19049-# include <asm/uaccess_32.h>
19050-#else
19051-# include <asm/uaccess_64.h>
19052-#endif
19053-
19054-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19055- unsigned n);
19056-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19057- unsigned n);
19058-
19059 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19060 # define copy_user_diag __compiletime_error
19061 #else
19062@@ -556,7 +618,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19063 extern void copy_user_diag("copy_from_user() buffer size is too small")
19064 copy_from_user_overflow(void);
19065 extern void copy_user_diag("copy_to_user() buffer size is too small")
19066-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19067+copy_to_user_overflow(void);
19068
19069 #undef copy_user_diag
19070
19071@@ -569,7 +631,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19072
19073 extern void
19074 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19075-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19076+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19077 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19078
19079 #else
19080@@ -584,10 +646,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19081
19082 #endif
19083
19084+#ifdef CONFIG_X86_32
19085+# include <asm/uaccess_32.h>
19086+#else
19087+# include <asm/uaccess_64.h>
19088+#endif
19089+
19090 static inline unsigned long __must_check
19091 copy_from_user(void *to, const void __user *from, unsigned long n)
19092 {
19093- int sz = __compiletime_object_size(to);
19094+ size_t sz = __compiletime_object_size(to);
19095
19096 might_fault();
19097
19098@@ -609,12 +677,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19099 * case, and do only runtime checking for non-constant sizes.
19100 */
19101
19102- if (likely(sz < 0 || sz >= n))
19103- n = _copy_from_user(to, from, n);
19104- else if(__builtin_constant_p(n))
19105- copy_from_user_overflow();
19106- else
19107- __copy_from_user_overflow(sz, n);
19108+ if (likely(sz != (size_t)-1 && sz < n)) {
19109+ if(__builtin_constant_p(n))
19110+ copy_from_user_overflow();
19111+ else
19112+ __copy_from_user_overflow(sz, n);
19113+ } if (access_ok(VERIFY_READ, from, n))
19114+ n = __copy_from_user(to, from, n);
19115+ else if ((long)n > 0)
19116+ memset(to, 0, n);
19117
19118 return n;
19119 }
19120@@ -622,17 +693,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19121 static inline unsigned long __must_check
19122 copy_to_user(void __user *to, const void *from, unsigned long n)
19123 {
19124- int sz = __compiletime_object_size(from);
19125+ size_t sz = __compiletime_object_size(from);
19126
19127 might_fault();
19128
19129 /* See the comment in copy_from_user() above. */
19130- if (likely(sz < 0 || sz >= n))
19131- n = _copy_to_user(to, from, n);
19132- else if(__builtin_constant_p(n))
19133- copy_to_user_overflow();
19134- else
19135- __copy_to_user_overflow(sz, n);
19136+ if (likely(sz != (size_t)-1 && sz < n)) {
19137+ if(__builtin_constant_p(n))
19138+ copy_to_user_overflow();
19139+ else
19140+ __copy_to_user_overflow(sz, n);
19141+ } else if (access_ok(VERIFY_WRITE, to, n))
19142+ n = __copy_to_user(to, from, n);
19143
19144 return n;
19145 }
19146diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19147index 3c03a5d..1071638 100644
19148--- a/arch/x86/include/asm/uaccess_32.h
19149+++ b/arch/x86/include/asm/uaccess_32.h
19150@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19151 static __always_inline unsigned long __must_check
19152 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
19153 {
19154+ if ((long)n < 0)
19155+ return n;
19156+
19157+ check_object_size(from, n, true);
19158+
19159 if (__builtin_constant_p(n)) {
19160 unsigned long ret;
19161
19162@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
19163 __copy_to_user(void __user *to, const void *from, unsigned long n)
19164 {
19165 might_fault();
19166+
19167 return __copy_to_user_inatomic(to, from, n);
19168 }
19169
19170 static __always_inline unsigned long
19171 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
19172 {
19173+ if ((long)n < 0)
19174+ return n;
19175+
19176 /* Avoid zeroing the tail if the copy fails..
19177 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
19178 * but as the zeroing behaviour is only significant when n is not
19179@@ -137,6 +146,12 @@ static __always_inline unsigned long
19180 __copy_from_user(void *to, const void __user *from, unsigned long n)
19181 {
19182 might_fault();
19183+
19184+ if ((long)n < 0)
19185+ return n;
19186+
19187+ check_object_size(to, n, false);
19188+
19189 if (__builtin_constant_p(n)) {
19190 unsigned long ret;
19191
19192@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
19193 const void __user *from, unsigned long n)
19194 {
19195 might_fault();
19196+
19197+ if ((long)n < 0)
19198+ return n;
19199+
19200 if (__builtin_constant_p(n)) {
19201 unsigned long ret;
19202
19203@@ -181,7 +200,10 @@ static __always_inline unsigned long
19204 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
19205 unsigned long n)
19206 {
19207- return __copy_from_user_ll_nocache_nozero(to, from, n);
19208+ if ((long)n < 0)
19209+ return n;
19210+
19211+ return __copy_from_user_ll_nocache_nozero(to, from, n);
19212 }
19213
19214 #endif /* _ASM_X86_UACCESS_32_H */
19215diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
19216index 190413d..8a80c2a 100644
19217--- a/arch/x86/include/asm/uaccess_64.h
19218+++ b/arch/x86/include/asm/uaccess_64.h
19219@@ -10,6 +10,9 @@
19220 #include <asm/alternative.h>
19221 #include <asm/cpufeature.h>
19222 #include <asm/page.h>
19223+#include <asm/pgtable.h>
19224+
19225+#define set_fs(x) (current_thread_info()->addr_limit = (x))
19226
19227 /*
19228 * Copy To/From Userspace
19229@@ -17,14 +20,14 @@
19230
19231 /* Handles exceptions in both to and from, but doesn't do access_ok */
19232 __must_check unsigned long
19233-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
19234+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
19235 __must_check unsigned long
19236-copy_user_generic_string(void *to, const void *from, unsigned len);
19237+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
19238 __must_check unsigned long
19239-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
19240+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
19241
19242 static __always_inline __must_check unsigned long
19243-copy_user_generic(void *to, const void *from, unsigned len)
19244+copy_user_generic(void *to, const void *from, unsigned long len)
19245 {
19246 unsigned ret;
19247
19248@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
19249 }
19250
19251 __must_check unsigned long
19252-copy_in_user(void __user *to, const void __user *from, unsigned len);
19253+copy_in_user(void __user *to, const void __user *from, unsigned long len);
19254
19255 static __always_inline __must_check
19256-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
19257+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
19258 {
19259- int ret = 0;
19260+ size_t sz = __compiletime_object_size(dst);
19261+ unsigned ret = 0;
19262+
19263+ if (size > INT_MAX)
19264+ return size;
19265+
19266+ check_object_size(dst, size, false);
19267+
19268+#ifdef CONFIG_PAX_MEMORY_UDEREF
19269+ if (!__access_ok(VERIFY_READ, src, size))
19270+ return size;
19271+#endif
19272+
19273+ if (unlikely(sz != (size_t)-1 && sz < size)) {
19274+ if(__builtin_constant_p(size))
19275+ copy_from_user_overflow();
19276+ else
19277+ __copy_from_user_overflow(sz, size);
19278+ return size;
19279+ }
19280
19281 if (!__builtin_constant_p(size))
19282- return copy_user_generic(dst, (__force void *)src, size);
19283+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19284 switch (size) {
19285- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
19286+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
19287 ret, "b", "b", "=q", 1);
19288 return ret;
19289- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
19290+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
19291 ret, "w", "w", "=r", 2);
19292 return ret;
19293- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
19294+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
19295 ret, "l", "k", "=r", 4);
19296 return ret;
19297- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
19298+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19299 ret, "q", "", "=r", 8);
19300 return ret;
19301 case 10:
19302- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19303+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19304 ret, "q", "", "=r", 10);
19305 if (unlikely(ret))
19306 return ret;
19307 __get_user_asm(*(u16 *)(8 + (char *)dst),
19308- (u16 __user *)(8 + (char __user *)src),
19309+ (const u16 __user *)(8 + (const char __user *)src),
19310 ret, "w", "w", "=r", 2);
19311 return ret;
19312 case 16:
19313- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19314+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19315 ret, "q", "", "=r", 16);
19316 if (unlikely(ret))
19317 return ret;
19318 __get_user_asm(*(u64 *)(8 + (char *)dst),
19319- (u64 __user *)(8 + (char __user *)src),
19320+ (const u64 __user *)(8 + (const char __user *)src),
19321 ret, "q", "", "=r", 8);
19322 return ret;
19323 default:
19324- return copy_user_generic(dst, (__force void *)src, size);
19325+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19326 }
19327 }
19328
19329 static __always_inline __must_check
19330-int __copy_from_user(void *dst, const void __user *src, unsigned size)
19331+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
19332 {
19333 might_fault();
19334 return __copy_from_user_nocheck(dst, src, size);
19335 }
19336
19337 static __always_inline __must_check
19338-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
19339+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
19340 {
19341- int ret = 0;
19342+ size_t sz = __compiletime_object_size(src);
19343+ unsigned ret = 0;
19344+
19345+ if (size > INT_MAX)
19346+ return size;
19347+
19348+ check_object_size(src, size, true);
19349+
19350+#ifdef CONFIG_PAX_MEMORY_UDEREF
19351+ if (!__access_ok(VERIFY_WRITE, dst, size))
19352+ return size;
19353+#endif
19354+
19355+ if (unlikely(sz != (size_t)-1 && sz < size)) {
19356+ if(__builtin_constant_p(size))
19357+ copy_to_user_overflow();
19358+ else
19359+ __copy_to_user_overflow(sz, size);
19360+ return size;
19361+ }
19362
19363 if (!__builtin_constant_p(size))
19364- return copy_user_generic((__force void *)dst, src, size);
19365+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19366 switch (size) {
19367- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
19368+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
19369 ret, "b", "b", "iq", 1);
19370 return ret;
19371- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
19372+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
19373 ret, "w", "w", "ir", 2);
19374 return ret;
19375- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
19376+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
19377 ret, "l", "k", "ir", 4);
19378 return ret;
19379- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
19380+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19381 ret, "q", "", "er", 8);
19382 return ret;
19383 case 10:
19384- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19385+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19386 ret, "q", "", "er", 10);
19387 if (unlikely(ret))
19388 return ret;
19389 asm("":::"memory");
19390- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
19391+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
19392 ret, "w", "w", "ir", 2);
19393 return ret;
19394 case 16:
19395- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19396+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19397 ret, "q", "", "er", 16);
19398 if (unlikely(ret))
19399 return ret;
19400 asm("":::"memory");
19401- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
19402+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
19403 ret, "q", "", "er", 8);
19404 return ret;
19405 default:
19406- return copy_user_generic((__force void *)dst, src, size);
19407+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19408 }
19409 }
19410
19411 static __always_inline __must_check
19412-int __copy_to_user(void __user *dst, const void *src, unsigned size)
19413+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
19414 {
19415 might_fault();
19416 return __copy_to_user_nocheck(dst, src, size);
19417 }
19418
19419 static __always_inline __must_check
19420-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19421+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19422 {
19423- int ret = 0;
19424+ unsigned ret = 0;
19425
19426 might_fault();
19427+
19428+ if (size > INT_MAX)
19429+ return size;
19430+
19431+#ifdef CONFIG_PAX_MEMORY_UDEREF
19432+ if (!__access_ok(VERIFY_READ, src, size))
19433+ return size;
19434+ if (!__access_ok(VERIFY_WRITE, dst, size))
19435+ return size;
19436+#endif
19437+
19438 if (!__builtin_constant_p(size))
19439- return copy_user_generic((__force void *)dst,
19440- (__force void *)src, size);
19441+ return copy_user_generic((__force_kernel void *)____m(dst),
19442+ (__force_kernel const void *)____m(src), size);
19443 switch (size) {
19444 case 1: {
19445 u8 tmp;
19446- __get_user_asm(tmp, (u8 __user *)src,
19447+ __get_user_asm(tmp, (const u8 __user *)src,
19448 ret, "b", "b", "=q", 1);
19449 if (likely(!ret))
19450 __put_user_asm(tmp, (u8 __user *)dst,
19451@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19452 }
19453 case 2: {
19454 u16 tmp;
19455- __get_user_asm(tmp, (u16 __user *)src,
19456+ __get_user_asm(tmp, (const u16 __user *)src,
19457 ret, "w", "w", "=r", 2);
19458 if (likely(!ret))
19459 __put_user_asm(tmp, (u16 __user *)dst,
19460@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19461
19462 case 4: {
19463 u32 tmp;
19464- __get_user_asm(tmp, (u32 __user *)src,
19465+ __get_user_asm(tmp, (const u32 __user *)src,
19466 ret, "l", "k", "=r", 4);
19467 if (likely(!ret))
19468 __put_user_asm(tmp, (u32 __user *)dst,
19469@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19470 }
19471 case 8: {
19472 u64 tmp;
19473- __get_user_asm(tmp, (u64 __user *)src,
19474+ __get_user_asm(tmp, (const u64 __user *)src,
19475 ret, "q", "", "=r", 8);
19476 if (likely(!ret))
19477 __put_user_asm(tmp, (u64 __user *)dst,
19478@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19479 return ret;
19480 }
19481 default:
19482- return copy_user_generic((__force void *)dst,
19483- (__force void *)src, size);
19484+ return copy_user_generic((__force_kernel void *)____m(dst),
19485+ (__force_kernel const void *)____m(src), size);
19486 }
19487 }
19488
19489-static __must_check __always_inline int
19490-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
19491+static __must_check __always_inline unsigned long
19492+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
19493 {
19494- return __copy_from_user_nocheck(dst, (__force const void *)src, size);
19495+ return __copy_from_user_nocheck(dst, src, size);
19496 }
19497
19498-static __must_check __always_inline int
19499-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
19500+static __must_check __always_inline unsigned long
19501+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
19502 {
19503- return __copy_to_user_nocheck((__force void *)dst, src, size);
19504+ return __copy_to_user_nocheck(dst, src, size);
19505 }
19506
19507-extern long __copy_user_nocache(void *dst, const void __user *src,
19508- unsigned size, int zerorest);
19509+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
19510+ unsigned long size, int zerorest);
19511
19512-static inline int
19513-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
19514+static inline unsigned long
19515+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
19516 {
19517 might_fault();
19518+
19519+ if (size > INT_MAX)
19520+ return size;
19521+
19522+#ifdef CONFIG_PAX_MEMORY_UDEREF
19523+ if (!__access_ok(VERIFY_READ, src, size))
19524+ return size;
19525+#endif
19526+
19527 return __copy_user_nocache(dst, src, size, 1);
19528 }
19529
19530-static inline int
19531+static inline unsigned long
19532 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
19533- unsigned size)
19534+ unsigned long size)
19535 {
19536+ if (size > INT_MAX)
19537+ return size;
19538+
19539+#ifdef CONFIG_PAX_MEMORY_UDEREF
19540+ if (!__access_ok(VERIFY_READ, src, size))
19541+ return size;
19542+#endif
19543+
19544 return __copy_user_nocache(dst, src, size, 0);
19545 }
19546
19547 unsigned long
19548-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
19549+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
19550
19551 #endif /* _ASM_X86_UACCESS_64_H */
19552diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
19553index 5b238981..77fdd78 100644
19554--- a/arch/x86/include/asm/word-at-a-time.h
19555+++ b/arch/x86/include/asm/word-at-a-time.h
19556@@ -11,7 +11,7 @@
19557 * and shift, for example.
19558 */
19559 struct word_at_a_time {
19560- const unsigned long one_bits, high_bits;
19561+ unsigned long one_bits, high_bits;
19562 };
19563
19564 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
19565diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
19566index 0f1be11..f7542bf 100644
19567--- a/arch/x86/include/asm/x86_init.h
19568+++ b/arch/x86/include/asm/x86_init.h
19569@@ -129,7 +129,7 @@ struct x86_init_ops {
19570 struct x86_init_timers timers;
19571 struct x86_init_iommu iommu;
19572 struct x86_init_pci pci;
19573-};
19574+} __no_const;
19575
19576 /**
19577 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
19578@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
19579 void (*setup_percpu_clockev)(void);
19580 void (*early_percpu_clock_init)(void);
19581 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
19582-};
19583+} __no_const;
19584
19585 struct timespec;
19586
19587@@ -168,7 +168,7 @@ struct x86_platform_ops {
19588 void (*save_sched_clock_state)(void);
19589 void (*restore_sched_clock_state)(void);
19590 void (*apic_post_init)(void);
19591-};
19592+} __no_const;
19593
19594 struct pci_dev;
19595 struct msi_msg;
19596@@ -185,7 +185,7 @@ struct x86_msi_ops {
19597 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
19598 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
19599 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
19600-};
19601+} __no_const;
19602
19603 struct IO_APIC_route_entry;
19604 struct io_apic_irq_attr;
19605@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
19606 unsigned int destination, int vector,
19607 struct io_apic_irq_attr *attr);
19608 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
19609-};
19610+} __no_const;
19611
19612 extern struct x86_init_ops x86_init;
19613 extern struct x86_cpuinit_ops x86_cpuinit;
19614diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
19615index b913915..4f5a581 100644
19616--- a/arch/x86/include/asm/xen/page.h
19617+++ b/arch/x86/include/asm/xen/page.h
19618@@ -56,7 +56,7 @@ extern int m2p_remove_override(struct page *page,
19619 extern struct page *m2p_find_override(unsigned long mfn);
19620 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
19621
19622-static inline unsigned long pfn_to_mfn(unsigned long pfn)
19623+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
19624 {
19625 unsigned long mfn;
19626
19627diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
19628index 0415cda..3b22adc 100644
19629--- a/arch/x86/include/asm/xsave.h
19630+++ b/arch/x86/include/asm/xsave.h
19631@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
19632 if (unlikely(err))
19633 return -EFAULT;
19634
19635+ pax_open_userland();
19636 __asm__ __volatile__(ASM_STAC "\n"
19637- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
19638+ "1:"
19639+ __copyuser_seg
19640+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
19641 "2: " ASM_CLAC "\n"
19642 ".section .fixup,\"ax\"\n"
19643 "3: movl $-1,%[err]\n"
19644@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
19645 : [err] "=r" (err)
19646 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
19647 : "memory");
19648+ pax_close_userland();
19649 return err;
19650 }
19651
19652 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
19653 {
19654 int err;
19655- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
19656+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
19657 u32 lmask = mask;
19658 u32 hmask = mask >> 32;
19659
19660+ pax_open_userland();
19661 __asm__ __volatile__(ASM_STAC "\n"
19662- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
19663+ "1:"
19664+ __copyuser_seg
19665+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
19666 "2: " ASM_CLAC "\n"
19667 ".section .fixup,\"ax\"\n"
19668 "3: movl $-1,%[err]\n"
19669@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
19670 : [err] "=r" (err)
19671 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
19672 : "memory"); /* memory required? */
19673+ pax_close_userland();
19674 return err;
19675 }
19676
19677diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
19678index bbae024..e1528f9 100644
19679--- a/arch/x86/include/uapi/asm/e820.h
19680+++ b/arch/x86/include/uapi/asm/e820.h
19681@@ -63,7 +63,7 @@ struct e820map {
19682 #define ISA_START_ADDRESS 0xa0000
19683 #define ISA_END_ADDRESS 0x100000
19684
19685-#define BIOS_BEGIN 0x000a0000
19686+#define BIOS_BEGIN 0x000c0000
19687 #define BIOS_END 0x00100000
19688
19689 #define BIOS_ROM_BASE 0xffe00000
19690diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
19691index 7b0a55a..ad115bf 100644
19692--- a/arch/x86/include/uapi/asm/ptrace-abi.h
19693+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
19694@@ -49,7 +49,6 @@
19695 #define EFLAGS 144
19696 #define RSP 152
19697 #define SS 160
19698-#define ARGOFFSET R11
19699 #endif /* __ASSEMBLY__ */
19700
19701 /* top of stack page */
19702diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
19703index 9b0a34e..fc7e553 100644
19704--- a/arch/x86/kernel/Makefile
19705+++ b/arch/x86/kernel/Makefile
19706@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
19707 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
19708 obj-$(CONFIG_IRQ_WORK) += irq_work.o
19709 obj-y += probe_roms.o
19710-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
19711+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
19712 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
19713 obj-y += syscall_$(BITS).o
19714 obj-$(CONFIG_X86_64) += vsyscall_64.o
19715diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
19716index 6c0b43b..e67bb31 100644
19717--- a/arch/x86/kernel/acpi/boot.c
19718+++ b/arch/x86/kernel/acpi/boot.c
19719@@ -1315,7 +1315,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
19720 * If your system is blacklisted here, but you find that acpi=force
19721 * works for you, please contact linux-acpi@vger.kernel.org
19722 */
19723-static struct dmi_system_id __initdata acpi_dmi_table[] = {
19724+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
19725 /*
19726 * Boxes that need ACPI disabled
19727 */
19728@@ -1390,7 +1390,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
19729 };
19730
19731 /* second table for DMI checks that should run after early-quirks */
19732-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
19733+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
19734 /*
19735 * HP laptops which use a DSDT reporting as HP/SB400/10000,
19736 * which includes some code which overrides all temperature
19737diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
19738index 3a2ae4c..9db31d6 100644
19739--- a/arch/x86/kernel/acpi/sleep.c
19740+++ b/arch/x86/kernel/acpi/sleep.c
19741@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
19742 #else /* CONFIG_64BIT */
19743 #ifdef CONFIG_SMP
19744 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
19745+
19746+ pax_open_kernel();
19747 early_gdt_descr.address =
19748 (unsigned long)get_cpu_gdt_table(smp_processor_id());
19749+ pax_close_kernel();
19750+
19751 initial_gs = per_cpu_offset(smp_processor_id());
19752 #endif
19753 initial_code = (unsigned long)wakeup_long64;
19754diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
19755index 665c6b7..eae4d56 100644
19756--- a/arch/x86/kernel/acpi/wakeup_32.S
19757+++ b/arch/x86/kernel/acpi/wakeup_32.S
19758@@ -29,13 +29,11 @@ wakeup_pmode_return:
19759 # and restore the stack ... but you need gdt for this to work
19760 movl saved_context_esp, %esp
19761
19762- movl %cs:saved_magic, %eax
19763- cmpl $0x12345678, %eax
19764+ cmpl $0x12345678, saved_magic
19765 jne bogus_magic
19766
19767 # jump to place where we left off
19768- movl saved_eip, %eax
19769- jmp *%eax
19770+ jmp *(saved_eip)
19771
19772 bogus_magic:
19773 jmp bogus_magic
19774diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
19775index df94598..f3b29bf 100644
19776--- a/arch/x86/kernel/alternative.c
19777+++ b/arch/x86/kernel/alternative.c
19778@@ -269,6 +269,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
19779 */
19780 for (a = start; a < end; a++) {
19781 instr = (u8 *)&a->instr_offset + a->instr_offset;
19782+
19783+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19784+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19785+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
19786+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19787+#endif
19788+
19789 replacement = (u8 *)&a->repl_offset + a->repl_offset;
19790 BUG_ON(a->replacementlen > a->instrlen);
19791 BUG_ON(a->instrlen > sizeof(insnbuf));
19792@@ -300,10 +307,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
19793 for (poff = start; poff < end; poff++) {
19794 u8 *ptr = (u8 *)poff + *poff;
19795
19796+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19797+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19798+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
19799+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19800+#endif
19801+
19802 if (!*poff || ptr < text || ptr >= text_end)
19803 continue;
19804 /* turn DS segment override prefix into lock prefix */
19805- if (*ptr == 0x3e)
19806+ if (*ktla_ktva(ptr) == 0x3e)
19807 text_poke(ptr, ((unsigned char []){0xf0}), 1);
19808 }
19809 mutex_unlock(&text_mutex);
19810@@ -318,10 +331,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
19811 for (poff = start; poff < end; poff++) {
19812 u8 *ptr = (u8 *)poff + *poff;
19813
19814+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19815+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19816+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
19817+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19818+#endif
19819+
19820 if (!*poff || ptr < text || ptr >= text_end)
19821 continue;
19822 /* turn lock prefix into DS segment override prefix */
19823- if (*ptr == 0xf0)
19824+ if (*ktla_ktva(ptr) == 0xf0)
19825 text_poke(ptr, ((unsigned char []){0x3E}), 1);
19826 }
19827 mutex_unlock(&text_mutex);
19828@@ -458,7 +477,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
19829
19830 BUG_ON(p->len > MAX_PATCH_LEN);
19831 /* prep the buffer with the original instructions */
19832- memcpy(insnbuf, p->instr, p->len);
19833+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
19834 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
19835 (unsigned long)p->instr, p->len);
19836
19837@@ -505,7 +524,7 @@ void __init alternative_instructions(void)
19838 if (!uniproc_patched || num_possible_cpus() == 1)
19839 free_init_pages("SMP alternatives",
19840 (unsigned long)__smp_locks,
19841- (unsigned long)__smp_locks_end);
19842+ PAGE_ALIGN((unsigned long)__smp_locks_end));
19843 #endif
19844
19845 apply_paravirt(__parainstructions, __parainstructions_end);
19846@@ -525,13 +544,17 @@ void __init alternative_instructions(void)
19847 * instructions. And on the local CPU you need to be protected again NMI or MCE
19848 * handlers seeing an inconsistent instruction while you patch.
19849 */
19850-void *__init_or_module text_poke_early(void *addr, const void *opcode,
19851+void *__kprobes text_poke_early(void *addr, const void *opcode,
19852 size_t len)
19853 {
19854 unsigned long flags;
19855 local_irq_save(flags);
19856- memcpy(addr, opcode, len);
19857+
19858+ pax_open_kernel();
19859+ memcpy(ktla_ktva(addr), opcode, len);
19860 sync_core();
19861+ pax_close_kernel();
19862+
19863 local_irq_restore(flags);
19864 /* Could also do a CLFLUSH here to speed up CPU recovery; but
19865 that causes hangs on some VIA CPUs. */
19866@@ -553,36 +576,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
19867 */
19868 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
19869 {
19870- unsigned long flags;
19871- char *vaddr;
19872+ unsigned char *vaddr = ktla_ktva(addr);
19873 struct page *pages[2];
19874- int i;
19875+ size_t i;
19876
19877 if (!core_kernel_text((unsigned long)addr)) {
19878- pages[0] = vmalloc_to_page(addr);
19879- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
19880+ pages[0] = vmalloc_to_page(vaddr);
19881+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
19882 } else {
19883- pages[0] = virt_to_page(addr);
19884+ pages[0] = virt_to_page(vaddr);
19885 WARN_ON(!PageReserved(pages[0]));
19886- pages[1] = virt_to_page(addr + PAGE_SIZE);
19887+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
19888 }
19889 BUG_ON(!pages[0]);
19890- local_irq_save(flags);
19891- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
19892- if (pages[1])
19893- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
19894- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
19895- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
19896- clear_fixmap(FIX_TEXT_POKE0);
19897- if (pages[1])
19898- clear_fixmap(FIX_TEXT_POKE1);
19899- local_flush_tlb();
19900- sync_core();
19901- /* Could also do a CLFLUSH here to speed up CPU recovery; but
19902- that causes hangs on some VIA CPUs. */
19903+ text_poke_early(addr, opcode, len);
19904 for (i = 0; i < len; i++)
19905- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
19906- local_irq_restore(flags);
19907+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
19908 return addr;
19909 }
19910
19911@@ -602,7 +611,7 @@ int poke_int3_handler(struct pt_regs *regs)
19912 if (likely(!bp_patching_in_progress))
19913 return 0;
19914
19915- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
19916+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
19917 return 0;
19918
19919 /* set up the specified breakpoint handler */
19920@@ -636,7 +645,7 @@ int poke_int3_handler(struct pt_regs *regs)
19921 */
19922 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
19923 {
19924- unsigned char int3 = 0xcc;
19925+ const unsigned char int3 = 0xcc;
19926
19927 bp_int3_handler = handler;
19928 bp_int3_addr = (u8 *)addr + sizeof(int3);
19929diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
19930index d278736..0b4af9a8 100644
19931--- a/arch/x86/kernel/apic/apic.c
19932+++ b/arch/x86/kernel/apic/apic.c
19933@@ -191,7 +191,7 @@ int first_system_vector = 0xfe;
19934 /*
19935 * Debug level, exported for io_apic.c
19936 */
19937-unsigned int apic_verbosity;
19938+int apic_verbosity;
19939
19940 int pic_mode;
19941
19942@@ -1986,7 +1986,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
19943 apic_write(APIC_ESR, 0);
19944 v1 = apic_read(APIC_ESR);
19945 ack_APIC_irq();
19946- atomic_inc(&irq_err_count);
19947+ atomic_inc_unchecked(&irq_err_count);
19948
19949 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
19950 smp_processor_id(), v0 , v1);
19951diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
19952index 00c77cf..2dc6a2d 100644
19953--- a/arch/x86/kernel/apic/apic_flat_64.c
19954+++ b/arch/x86/kernel/apic/apic_flat_64.c
19955@@ -157,7 +157,7 @@ static int flat_probe(void)
19956 return 1;
19957 }
19958
19959-static struct apic apic_flat = {
19960+static struct apic apic_flat __read_only = {
19961 .name = "flat",
19962 .probe = flat_probe,
19963 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
19964@@ -271,7 +271,7 @@ static int physflat_probe(void)
19965 return 0;
19966 }
19967
19968-static struct apic apic_physflat = {
19969+static struct apic apic_physflat __read_only = {
19970
19971 .name = "physical flat",
19972 .probe = physflat_probe,
19973diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
19974index e145f28..2752888 100644
19975--- a/arch/x86/kernel/apic/apic_noop.c
19976+++ b/arch/x86/kernel/apic/apic_noop.c
19977@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
19978 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
19979 }
19980
19981-struct apic apic_noop = {
19982+struct apic apic_noop __read_only = {
19983 .name = "noop",
19984 .probe = noop_probe,
19985 .acpi_madt_oem_check = NULL,
19986diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
19987index d50e364..543bee3 100644
19988--- a/arch/x86/kernel/apic/bigsmp_32.c
19989+++ b/arch/x86/kernel/apic/bigsmp_32.c
19990@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
19991 return dmi_bigsmp;
19992 }
19993
19994-static struct apic apic_bigsmp = {
19995+static struct apic apic_bigsmp __read_only = {
19996
19997 .name = "bigsmp",
19998 .probe = probe_bigsmp,
19999diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
20000index c552247..587a316 100644
20001--- a/arch/x86/kernel/apic/es7000_32.c
20002+++ b/arch/x86/kernel/apic/es7000_32.c
20003@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
20004 return ret && es7000_apic_is_cluster();
20005 }
20006
20007-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
20008-static struct apic __refdata apic_es7000_cluster = {
20009+static struct apic apic_es7000_cluster __read_only = {
20010
20011 .name = "es7000",
20012 .probe = probe_es7000,
20013@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
20014 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
20015 };
20016
20017-static struct apic __refdata apic_es7000 = {
20018+static struct apic apic_es7000 __read_only = {
20019
20020 .name = "es7000",
20021 .probe = probe_es7000,
20022diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20023index e63a5bd..c0babf8 100644
20024--- a/arch/x86/kernel/apic/io_apic.c
20025+++ b/arch/x86/kernel/apic/io_apic.c
20026@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
20027 }
20028 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
20029
20030-void lock_vector_lock(void)
20031+void lock_vector_lock(void) __acquires(vector_lock)
20032 {
20033 /* Used to the online set of cpus does not change
20034 * during assign_irq_vector.
20035@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
20036 raw_spin_lock(&vector_lock);
20037 }
20038
20039-void unlock_vector_lock(void)
20040+void unlock_vector_lock(void) __releases(vector_lock)
20041 {
20042 raw_spin_unlock(&vector_lock);
20043 }
20044@@ -2367,7 +2367,7 @@ static void ack_apic_edge(struct irq_data *data)
20045 ack_APIC_irq();
20046 }
20047
20048-atomic_t irq_mis_count;
20049+atomic_unchecked_t irq_mis_count;
20050
20051 #ifdef CONFIG_GENERIC_PENDING_IRQ
20052 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20053@@ -2508,7 +2508,7 @@ static void ack_apic_level(struct irq_data *data)
20054 * at the cpu.
20055 */
20056 if (!(v & (1 << (i & 0x1f)))) {
20057- atomic_inc(&irq_mis_count);
20058+ atomic_inc_unchecked(&irq_mis_count);
20059
20060 eoi_ioapic_irq(irq, cfg);
20061 }
20062diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
20063index 1e42e8f..daacf44 100644
20064--- a/arch/x86/kernel/apic/numaq_32.c
20065+++ b/arch/x86/kernel/apic/numaq_32.c
20066@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
20067 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
20068 }
20069
20070-/* Use __refdata to keep false positive warning calm. */
20071-static struct apic __refdata apic_numaq = {
20072+static struct apic apic_numaq __read_only = {
20073
20074 .name = "NUMAQ",
20075 .probe = probe_numaq,
20076diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20077index eb35ef9..f184a21 100644
20078--- a/arch/x86/kernel/apic/probe_32.c
20079+++ b/arch/x86/kernel/apic/probe_32.c
20080@@ -72,7 +72,7 @@ static int probe_default(void)
20081 return 1;
20082 }
20083
20084-static struct apic apic_default = {
20085+static struct apic apic_default __read_only = {
20086
20087 .name = "default",
20088 .probe = probe_default,
20089diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
20090index 77c95c0..434f8a4 100644
20091--- a/arch/x86/kernel/apic/summit_32.c
20092+++ b/arch/x86/kernel/apic/summit_32.c
20093@@ -486,7 +486,7 @@ void setup_summit(void)
20094 }
20095 #endif
20096
20097-static struct apic apic_summit = {
20098+static struct apic apic_summit __read_only = {
20099
20100 .name = "summit",
20101 .probe = probe_summit,
20102diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20103index 140e29d..d88bc95 100644
20104--- a/arch/x86/kernel/apic/x2apic_cluster.c
20105+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20106@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20107 return notifier_from_errno(err);
20108 }
20109
20110-static struct notifier_block __refdata x2apic_cpu_notifier = {
20111+static struct notifier_block x2apic_cpu_notifier = {
20112 .notifier_call = update_clusterinfo,
20113 };
20114
20115@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20116 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20117 }
20118
20119-static struct apic apic_x2apic_cluster = {
20120+static struct apic apic_x2apic_cluster __read_only = {
20121
20122 .name = "cluster x2apic",
20123 .probe = x2apic_cluster_probe,
20124diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20125index 562a76d..a003c0f 100644
20126--- a/arch/x86/kernel/apic/x2apic_phys.c
20127+++ b/arch/x86/kernel/apic/x2apic_phys.c
20128@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
20129 return apic == &apic_x2apic_phys;
20130 }
20131
20132-static struct apic apic_x2apic_phys = {
20133+static struct apic apic_x2apic_phys __read_only = {
20134
20135 .name = "physical x2apic",
20136 .probe = x2apic_phys_probe,
20137diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20138index ad0dc04..0d9cc56 100644
20139--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20140+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20141@@ -350,7 +350,7 @@ static int uv_probe(void)
20142 return apic == &apic_x2apic_uv_x;
20143 }
20144
20145-static struct apic __refdata apic_x2apic_uv_x = {
20146+static struct apic apic_x2apic_uv_x __read_only = {
20147
20148 .name = "UV large system",
20149 .probe = uv_probe,
20150diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20151index 3ab0343..814c4787 100644
20152--- a/arch/x86/kernel/apm_32.c
20153+++ b/arch/x86/kernel/apm_32.c
20154@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
20155 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20156 * even though they are called in protected mode.
20157 */
20158-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20159+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20160 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20161
20162 static const char driver_version[] = "1.16ac"; /* no spaces */
20163@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
20164 BUG_ON(cpu != 0);
20165 gdt = get_cpu_gdt_table(cpu);
20166 save_desc_40 = gdt[0x40 / 8];
20167+
20168+ pax_open_kernel();
20169 gdt[0x40 / 8] = bad_bios_desc;
20170+ pax_close_kernel();
20171
20172 apm_irq_save(flags);
20173 APM_DO_SAVE_SEGS;
20174@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
20175 &call->esi);
20176 APM_DO_RESTORE_SEGS;
20177 apm_irq_restore(flags);
20178+
20179+ pax_open_kernel();
20180 gdt[0x40 / 8] = save_desc_40;
20181+ pax_close_kernel();
20182+
20183 put_cpu();
20184
20185 return call->eax & 0xff;
20186@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
20187 BUG_ON(cpu != 0);
20188 gdt = get_cpu_gdt_table(cpu);
20189 save_desc_40 = gdt[0x40 / 8];
20190+
20191+ pax_open_kernel();
20192 gdt[0x40 / 8] = bad_bios_desc;
20193+ pax_close_kernel();
20194
20195 apm_irq_save(flags);
20196 APM_DO_SAVE_SEGS;
20197@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
20198 &call->eax);
20199 APM_DO_RESTORE_SEGS;
20200 apm_irq_restore(flags);
20201+
20202+ pax_open_kernel();
20203 gdt[0x40 / 8] = save_desc_40;
20204+ pax_close_kernel();
20205+
20206 put_cpu();
20207 return error;
20208 }
20209@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
20210 * code to that CPU.
20211 */
20212 gdt = get_cpu_gdt_table(0);
20213+
20214+ pax_open_kernel();
20215 set_desc_base(&gdt[APM_CS >> 3],
20216 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
20217 set_desc_base(&gdt[APM_CS_16 >> 3],
20218 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
20219 set_desc_base(&gdt[APM_DS >> 3],
20220 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
20221+ pax_close_kernel();
20222
20223 proc_create("apm", 0, NULL, &apm_file_ops);
20224
20225diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
20226index 9f6b934..cf5ffb3 100644
20227--- a/arch/x86/kernel/asm-offsets.c
20228+++ b/arch/x86/kernel/asm-offsets.c
20229@@ -32,6 +32,8 @@ void common(void) {
20230 OFFSET(TI_flags, thread_info, flags);
20231 OFFSET(TI_status, thread_info, status);
20232 OFFSET(TI_addr_limit, thread_info, addr_limit);
20233+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
20234+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
20235
20236 BLANK();
20237 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
20238@@ -52,8 +54,26 @@ void common(void) {
20239 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
20240 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
20241 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
20242+
20243+#ifdef CONFIG_PAX_KERNEXEC
20244+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
20245 #endif
20246
20247+#ifdef CONFIG_PAX_MEMORY_UDEREF
20248+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
20249+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
20250+#ifdef CONFIG_X86_64
20251+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
20252+#endif
20253+#endif
20254+
20255+#endif
20256+
20257+ BLANK();
20258+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
20259+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
20260+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
20261+
20262 #ifdef CONFIG_XEN
20263 BLANK();
20264 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
20265diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
20266index e7c798b..2b2019b 100644
20267--- a/arch/x86/kernel/asm-offsets_64.c
20268+++ b/arch/x86/kernel/asm-offsets_64.c
20269@@ -77,6 +77,7 @@ int main(void)
20270 BLANK();
20271 #undef ENTRY
20272
20273+ DEFINE(TSS_size, sizeof(struct tss_struct));
20274 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
20275 BLANK();
20276
20277diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
20278index 47b56a7..efc2bc6 100644
20279--- a/arch/x86/kernel/cpu/Makefile
20280+++ b/arch/x86/kernel/cpu/Makefile
20281@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
20282 CFLAGS_REMOVE_perf_event.o = -pg
20283 endif
20284
20285-# Make sure load_percpu_segment has no stackprotector
20286-nostackp := $(call cc-option, -fno-stack-protector)
20287-CFLAGS_common.o := $(nostackp)
20288-
20289 obj-y := intel_cacheinfo.o scattered.o topology.o
20290 obj-y += proc.o capflags.o powerflags.o common.o
20291 obj-y += rdrand.o
20292diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
20293index 59bfebc..d8f27bd 100644
20294--- a/arch/x86/kernel/cpu/amd.c
20295+++ b/arch/x86/kernel/cpu/amd.c
20296@@ -753,7 +753,7 @@ static void init_amd(struct cpuinfo_x86 *c)
20297 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
20298 {
20299 /* AMD errata T13 (order #21922) */
20300- if ((c->x86 == 6)) {
20301+ if (c->x86 == 6) {
20302 /* Duron Rev A0 */
20303 if (c->x86_model == 3 && c->x86_mask == 0)
20304 size = 64;
20305diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
20306index 6abc172..3b0df94 100644
20307--- a/arch/x86/kernel/cpu/common.c
20308+++ b/arch/x86/kernel/cpu/common.c
20309@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
20310
20311 static const struct cpu_dev *this_cpu = &default_cpu;
20312
20313-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
20314-#ifdef CONFIG_X86_64
20315- /*
20316- * We need valid kernel segments for data and code in long mode too
20317- * IRET will check the segment types kkeil 2000/10/28
20318- * Also sysret mandates a special GDT layout
20319- *
20320- * TLS descriptors are currently at a different place compared to i386.
20321- * Hopefully nobody expects them at a fixed place (Wine?)
20322- */
20323- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
20324- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
20325- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
20326- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
20327- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
20328- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
20329-#else
20330- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
20331- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20332- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
20333- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
20334- /*
20335- * Segments used for calling PnP BIOS have byte granularity.
20336- * They code segments and data segments have fixed 64k limits,
20337- * the transfer segment sizes are set at run time.
20338- */
20339- /* 32-bit code */
20340- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20341- /* 16-bit code */
20342- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20343- /* 16-bit data */
20344- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
20345- /* 16-bit data */
20346- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
20347- /* 16-bit data */
20348- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
20349- /*
20350- * The APM segments have byte granularity and their bases
20351- * are set at run time. All have 64k limits.
20352- */
20353- /* 32-bit code */
20354- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20355- /* 16-bit code */
20356- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20357- /* data */
20358- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
20359-
20360- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20361- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20362- GDT_STACK_CANARY_INIT
20363-#endif
20364-} };
20365-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
20366-
20367 static int __init x86_xsave_setup(char *s)
20368 {
20369 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
20370@@ -288,6 +234,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
20371 set_in_cr4(X86_CR4_SMAP);
20372 }
20373
20374+#ifdef CONFIG_X86_64
20375+static __init int setup_disable_pcid(char *arg)
20376+{
20377+ setup_clear_cpu_cap(X86_FEATURE_PCID);
20378+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
20379+
20380+#ifdef CONFIG_PAX_MEMORY_UDEREF
20381+ if (clone_pgd_mask != ~(pgdval_t)0UL)
20382+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20383+#endif
20384+
20385+ return 1;
20386+}
20387+__setup("nopcid", setup_disable_pcid);
20388+
20389+static void setup_pcid(struct cpuinfo_x86 *c)
20390+{
20391+ if (!cpu_has(c, X86_FEATURE_PCID)) {
20392+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
20393+
20394+#ifdef CONFIG_PAX_MEMORY_UDEREF
20395+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
20396+ pax_open_kernel();
20397+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20398+ pax_close_kernel();
20399+ printk("PAX: slow and weak UDEREF enabled\n");
20400+ } else
20401+ printk("PAX: UDEREF disabled\n");
20402+#endif
20403+
20404+ return;
20405+ }
20406+
20407+ printk("PAX: PCID detected\n");
20408+ set_in_cr4(X86_CR4_PCIDE);
20409+
20410+#ifdef CONFIG_PAX_MEMORY_UDEREF
20411+ pax_open_kernel();
20412+ clone_pgd_mask = ~(pgdval_t)0UL;
20413+ pax_close_kernel();
20414+ if (pax_user_shadow_base)
20415+ printk("PAX: weak UDEREF enabled\n");
20416+ else {
20417+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
20418+ printk("PAX: strong UDEREF enabled\n");
20419+ }
20420+#endif
20421+
20422+ if (cpu_has(c, X86_FEATURE_INVPCID))
20423+ printk("PAX: INVPCID detected\n");
20424+}
20425+#endif
20426+
20427 /*
20428 * Some CPU features depend on higher CPUID levels, which may not always
20429 * be available due to CPUID level capping or broken virtualization
20430@@ -388,7 +387,7 @@ void switch_to_new_gdt(int cpu)
20431 {
20432 struct desc_ptr gdt_descr;
20433
20434- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
20435+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20436 gdt_descr.size = GDT_SIZE - 1;
20437 load_gdt(&gdt_descr);
20438 /* Reload the per-cpu base */
20439@@ -877,6 +876,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20440 setup_smep(c);
20441 setup_smap(c);
20442
20443+#ifdef CONFIG_X86_64
20444+ setup_pcid(c);
20445+#endif
20446+
20447 /*
20448 * The vendor-specific functions might have changed features.
20449 * Now we do "generic changes."
20450@@ -885,6 +888,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20451 /* Filter out anything that depends on CPUID levels we don't have */
20452 filter_cpuid_features(c, true);
20453
20454+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
20455+ setup_clear_cpu_cap(X86_FEATURE_SEP);
20456+#endif
20457+
20458 /* If the model name is still unset, do table lookup. */
20459 if (!c->x86_model_id[0]) {
20460 const char *p;
20461@@ -1072,10 +1079,12 @@ static __init int setup_disablecpuid(char *arg)
20462 }
20463 __setup("clearcpuid=", setup_disablecpuid);
20464
20465+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
20466+EXPORT_PER_CPU_SYMBOL(current_tinfo);
20467+
20468 #ifdef CONFIG_X86_64
20469-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20470-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
20471- (unsigned long) debug_idt_table };
20472+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20473+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
20474
20475 DEFINE_PER_CPU_FIRST(union irq_stack_union,
20476 irq_stack_union) __aligned(PAGE_SIZE) __visible;
20477@@ -1089,7 +1098,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
20478 EXPORT_PER_CPU_SYMBOL(current_task);
20479
20480 DEFINE_PER_CPU(unsigned long, kernel_stack) =
20481- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
20482+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
20483 EXPORT_PER_CPU_SYMBOL(kernel_stack);
20484
20485 DEFINE_PER_CPU(char *, irq_stack_ptr) =
20486@@ -1239,7 +1248,7 @@ void cpu_init(void)
20487 load_ucode_ap();
20488
20489 cpu = stack_smp_processor_id();
20490- t = &per_cpu(init_tss, cpu);
20491+ t = init_tss + cpu;
20492 oist = &per_cpu(orig_ist, cpu);
20493
20494 #ifdef CONFIG_NUMA
20495@@ -1274,7 +1283,6 @@ void cpu_init(void)
20496 wrmsrl(MSR_KERNEL_GS_BASE, 0);
20497 barrier();
20498
20499- x86_configure_nx();
20500 enable_x2apic();
20501
20502 /*
20503@@ -1326,7 +1334,7 @@ void cpu_init(void)
20504 {
20505 int cpu = smp_processor_id();
20506 struct task_struct *curr = current;
20507- struct tss_struct *t = &per_cpu(init_tss, cpu);
20508+ struct tss_struct *t = init_tss + cpu;
20509 struct thread_struct *thread = &curr->thread;
20510
20511 show_ucode_info_early();
20512diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
20513index 0641113..06f5ba4 100644
20514--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
20515+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
20516@@ -1014,6 +1014,22 @@ static struct attribute *default_attrs[] = {
20517 };
20518
20519 #ifdef CONFIG_AMD_NB
20520+static struct attribute *default_attrs_amd_nb[] = {
20521+ &type.attr,
20522+ &level.attr,
20523+ &coherency_line_size.attr,
20524+ &physical_line_partition.attr,
20525+ &ways_of_associativity.attr,
20526+ &number_of_sets.attr,
20527+ &size.attr,
20528+ &shared_cpu_map.attr,
20529+ &shared_cpu_list.attr,
20530+ NULL,
20531+ NULL,
20532+ NULL,
20533+ NULL
20534+};
20535+
20536 static struct attribute **amd_l3_attrs(void)
20537 {
20538 static struct attribute **attrs;
20539@@ -1024,18 +1040,7 @@ static struct attribute **amd_l3_attrs(void)
20540
20541 n = ARRAY_SIZE(default_attrs);
20542
20543- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
20544- n += 2;
20545-
20546- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
20547- n += 1;
20548-
20549- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
20550- if (attrs == NULL)
20551- return attrs = default_attrs;
20552-
20553- for (n = 0; default_attrs[n]; n++)
20554- attrs[n] = default_attrs[n];
20555+ attrs = default_attrs_amd_nb;
20556
20557 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
20558 attrs[n++] = &cache_disable_0.attr;
20559@@ -1086,6 +1091,13 @@ static struct kobj_type ktype_cache = {
20560 .default_attrs = default_attrs,
20561 };
20562
20563+#ifdef CONFIG_AMD_NB
20564+static struct kobj_type ktype_cache_amd_nb = {
20565+ .sysfs_ops = &sysfs_ops,
20566+ .default_attrs = default_attrs_amd_nb,
20567+};
20568+#endif
20569+
20570 static struct kobj_type ktype_percpu_entry = {
20571 .sysfs_ops = &sysfs_ops,
20572 };
20573@@ -1151,20 +1163,26 @@ static int cache_add_dev(struct device *dev)
20574 return retval;
20575 }
20576
20577+#ifdef CONFIG_AMD_NB
20578+ amd_l3_attrs();
20579+#endif
20580+
20581 for (i = 0; i < num_cache_leaves; i++) {
20582+ struct kobj_type *ktype;
20583+
20584 this_object = INDEX_KOBJECT_PTR(cpu, i);
20585 this_object->cpu = cpu;
20586 this_object->index = i;
20587
20588 this_leaf = CPUID4_INFO_IDX(cpu, i);
20589
20590- ktype_cache.default_attrs = default_attrs;
20591+ ktype = &ktype_cache;
20592 #ifdef CONFIG_AMD_NB
20593 if (this_leaf->base.nb)
20594- ktype_cache.default_attrs = amd_l3_attrs();
20595+ ktype = &ktype_cache_amd_nb;
20596 #endif
20597 retval = kobject_init_and_add(&(this_object->kobj),
20598- &ktype_cache,
20599+ ktype,
20600 per_cpu(ici_cache_kobject, cpu),
20601 "index%1lu", i);
20602 if (unlikely(retval)) {
20603diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
20604index b3218cd..99a75de 100644
20605--- a/arch/x86/kernel/cpu/mcheck/mce.c
20606+++ b/arch/x86/kernel/cpu/mcheck/mce.c
20607@@ -45,6 +45,7 @@
20608 #include <asm/processor.h>
20609 #include <asm/mce.h>
20610 #include <asm/msr.h>
20611+#include <asm/local.h>
20612
20613 #include "mce-internal.h"
20614
20615@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
20616 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
20617 m->cs, m->ip);
20618
20619- if (m->cs == __KERNEL_CS)
20620+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
20621 print_symbol("{%s}", m->ip);
20622 pr_cont("\n");
20623 }
20624@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
20625
20626 #define PANIC_TIMEOUT 5 /* 5 seconds */
20627
20628-static atomic_t mce_paniced;
20629+static atomic_unchecked_t mce_paniced;
20630
20631 static int fake_panic;
20632-static atomic_t mce_fake_paniced;
20633+static atomic_unchecked_t mce_fake_paniced;
20634
20635 /* Panic in progress. Enable interrupts and wait for final IPI */
20636 static void wait_for_panic(void)
20637@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20638 /*
20639 * Make sure only one CPU runs in machine check panic
20640 */
20641- if (atomic_inc_return(&mce_paniced) > 1)
20642+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
20643 wait_for_panic();
20644 barrier();
20645
20646@@ -326,7 +327,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20647 console_verbose();
20648 } else {
20649 /* Don't log too much for fake panic */
20650- if (atomic_inc_return(&mce_fake_paniced) > 1)
20651+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
20652 return;
20653 }
20654 /* First print corrected ones that are still unlogged */
20655@@ -365,7 +366,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20656 if (!fake_panic) {
20657 if (panic_timeout == 0)
20658 panic_timeout = mca_cfg.panic_timeout;
20659- panic(msg);
20660+ panic("%s", msg);
20661 } else
20662 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
20663 }
20664@@ -695,7 +696,7 @@ static int mce_timed_out(u64 *t)
20665 * might have been modified by someone else.
20666 */
20667 rmb();
20668- if (atomic_read(&mce_paniced))
20669+ if (atomic_read_unchecked(&mce_paniced))
20670 wait_for_panic();
20671 if (!mca_cfg.monarch_timeout)
20672 goto out;
20673@@ -1666,7 +1667,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
20674 }
20675
20676 /* Call the installed machine check handler for this CPU setup. */
20677-void (*machine_check_vector)(struct pt_regs *, long error_code) =
20678+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
20679 unexpected_machine_check;
20680
20681 /*
20682@@ -1689,7 +1690,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
20683 return;
20684 }
20685
20686+ pax_open_kernel();
20687 machine_check_vector = do_machine_check;
20688+ pax_close_kernel();
20689
20690 __mcheck_cpu_init_generic();
20691 __mcheck_cpu_init_vendor(c);
20692@@ -1703,7 +1706,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
20693 */
20694
20695 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
20696-static int mce_chrdev_open_count; /* #times opened */
20697+static local_t mce_chrdev_open_count; /* #times opened */
20698 static int mce_chrdev_open_exclu; /* already open exclusive? */
20699
20700 static int mce_chrdev_open(struct inode *inode, struct file *file)
20701@@ -1711,7 +1714,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
20702 spin_lock(&mce_chrdev_state_lock);
20703
20704 if (mce_chrdev_open_exclu ||
20705- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
20706+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
20707 spin_unlock(&mce_chrdev_state_lock);
20708
20709 return -EBUSY;
20710@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
20711
20712 if (file->f_flags & O_EXCL)
20713 mce_chrdev_open_exclu = 1;
20714- mce_chrdev_open_count++;
20715+ local_inc(&mce_chrdev_open_count);
20716
20717 spin_unlock(&mce_chrdev_state_lock);
20718
20719@@ -1730,7 +1733,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
20720 {
20721 spin_lock(&mce_chrdev_state_lock);
20722
20723- mce_chrdev_open_count--;
20724+ local_dec(&mce_chrdev_open_count);
20725 mce_chrdev_open_exclu = 0;
20726
20727 spin_unlock(&mce_chrdev_state_lock);
20728@@ -2404,7 +2407,7 @@ static __init void mce_init_banks(void)
20729
20730 for (i = 0; i < mca_cfg.banks; i++) {
20731 struct mce_bank *b = &mce_banks[i];
20732- struct device_attribute *a = &b->attr;
20733+ device_attribute_no_const *a = &b->attr;
20734
20735 sysfs_attr_init(&a->attr);
20736 a->attr.name = b->attrname;
20737@@ -2472,7 +2475,7 @@ struct dentry *mce_get_debugfs_dir(void)
20738 static void mce_reset(void)
20739 {
20740 cpu_missing = 0;
20741- atomic_set(&mce_fake_paniced, 0);
20742+ atomic_set_unchecked(&mce_fake_paniced, 0);
20743 atomic_set(&mce_executing, 0);
20744 atomic_set(&mce_callin, 0);
20745 atomic_set(&global_nwo, 0);
20746diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
20747index 1c044b1..37a2a43 100644
20748--- a/arch/x86/kernel/cpu/mcheck/p5.c
20749+++ b/arch/x86/kernel/cpu/mcheck/p5.c
20750@@ -11,6 +11,7 @@
20751 #include <asm/processor.h>
20752 #include <asm/mce.h>
20753 #include <asm/msr.h>
20754+#include <asm/pgtable.h>
20755
20756 /* By default disabled */
20757 int mce_p5_enabled __read_mostly;
20758@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
20759 if (!cpu_has(c, X86_FEATURE_MCE))
20760 return;
20761
20762+ pax_open_kernel();
20763 machine_check_vector = pentium_machine_check;
20764+ pax_close_kernel();
20765 /* Make sure the vector pointer is visible before we enable MCEs: */
20766 wmb();
20767
20768diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
20769index e9a701a..35317d6 100644
20770--- a/arch/x86/kernel/cpu/mcheck/winchip.c
20771+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
20772@@ -10,6 +10,7 @@
20773 #include <asm/processor.h>
20774 #include <asm/mce.h>
20775 #include <asm/msr.h>
20776+#include <asm/pgtable.h>
20777
20778 /* Machine check handler for WinChip C6: */
20779 static void winchip_machine_check(struct pt_regs *regs, long error_code)
20780@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
20781 {
20782 u32 lo, hi;
20783
20784+ pax_open_kernel();
20785 machine_check_vector = winchip_machine_check;
20786+ pax_close_kernel();
20787 /* Make sure the vector pointer is visible before we enable MCEs: */
20788 wmb();
20789
20790diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
20791index f961de9..8a9d332 100644
20792--- a/arch/x86/kernel/cpu/mtrr/main.c
20793+++ b/arch/x86/kernel/cpu/mtrr/main.c
20794@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
20795 u64 size_or_mask, size_and_mask;
20796 static bool mtrr_aps_delayed_init;
20797
20798-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
20799+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
20800
20801 const struct mtrr_ops *mtrr_if;
20802
20803diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
20804index df5e41f..816c719 100644
20805--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
20806+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
20807@@ -25,7 +25,7 @@ struct mtrr_ops {
20808 int (*validate_add_page)(unsigned long base, unsigned long size,
20809 unsigned int type);
20810 int (*have_wrcomb)(void);
20811-};
20812+} __do_const;
20813
20814 extern int generic_get_free_region(unsigned long base, unsigned long size,
20815 int replace_reg);
20816diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
20817index 8e13293..9bfd68c 100644
20818--- a/arch/x86/kernel/cpu/perf_event.c
20819+++ b/arch/x86/kernel/cpu/perf_event.c
20820@@ -1348,7 +1348,7 @@ static void __init pmu_check_apic(void)
20821 pr_info("no hardware sampling interrupt available.\n");
20822 }
20823
20824-static struct attribute_group x86_pmu_format_group = {
20825+static attribute_group_no_const x86_pmu_format_group = {
20826 .name = "format",
20827 .attrs = NULL,
20828 };
20829@@ -1447,7 +1447,7 @@ static struct attribute *events_attr[] = {
20830 NULL,
20831 };
20832
20833-static struct attribute_group x86_pmu_events_group = {
20834+static attribute_group_no_const x86_pmu_events_group = {
20835 .name = "events",
20836 .attrs = events_attr,
20837 };
20838@@ -1958,7 +1958,7 @@ static unsigned long get_segment_base(unsigned int segment)
20839 if (idx > GDT_ENTRIES)
20840 return 0;
20841
20842- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
20843+ desc = get_cpu_gdt_table(smp_processor_id());
20844 }
20845
20846 return get_desc_base(desc + idx);
20847@@ -2048,7 +2048,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
20848 break;
20849
20850 perf_callchain_store(entry, frame.return_address);
20851- fp = frame.next_frame;
20852+ fp = (const void __force_user *)frame.next_frame;
20853 }
20854 }
20855
20856diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20857index 639d128..e92d7e5 100644
20858--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20859+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20860@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
20861 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
20862 {
20863 struct attribute **attrs;
20864- struct attribute_group *attr_group;
20865+ attribute_group_no_const *attr_group;
20866 int i = 0, j;
20867
20868 while (amd_iommu_v2_event_descs[i].attr.attr.name)
20869diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
20870index 0fa4f24..17990ed 100644
20871--- a/arch/x86/kernel/cpu/perf_event_intel.c
20872+++ b/arch/x86/kernel/cpu/perf_event_intel.c
20873@@ -2314,10 +2314,10 @@ __init int intel_pmu_init(void)
20874 * v2 and above have a perf capabilities MSR
20875 */
20876 if (version > 1) {
20877- u64 capabilities;
20878+ u64 capabilities = x86_pmu.intel_cap.capabilities;
20879
20880- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
20881- x86_pmu.intel_cap.capabilities = capabilities;
20882+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
20883+ x86_pmu.intel_cap.capabilities = capabilities;
20884 }
20885
20886 intel_ds_init();
20887diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20888index 29c2487..a5606fa 100644
20889--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20890+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20891@@ -3318,7 +3318,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
20892 static int __init uncore_type_init(struct intel_uncore_type *type)
20893 {
20894 struct intel_uncore_pmu *pmus;
20895- struct attribute_group *attr_group;
20896+ attribute_group_no_const *attr_group;
20897 struct attribute **attrs;
20898 int i, j;
20899
20900diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20901index a80ab71..4089da5 100644
20902--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20903+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20904@@ -498,7 +498,7 @@ struct intel_uncore_box {
20905 struct uncore_event_desc {
20906 struct kobj_attribute attr;
20907 const char *config;
20908-};
20909+} __do_const;
20910
20911 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
20912 { \
20913diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
20914index 7d9481c..99c7e4b 100644
20915--- a/arch/x86/kernel/cpuid.c
20916+++ b/arch/x86/kernel/cpuid.c
20917@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
20918 return notifier_from_errno(err);
20919 }
20920
20921-static struct notifier_block __refdata cpuid_class_cpu_notifier =
20922+static struct notifier_block cpuid_class_cpu_notifier =
20923 {
20924 .notifier_call = cpuid_class_cpu_callback,
20925 };
20926diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
20927index 18677a9..f67c45b 100644
20928--- a/arch/x86/kernel/crash.c
20929+++ b/arch/x86/kernel/crash.c
20930@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
20931 {
20932 #ifdef CONFIG_X86_32
20933 struct pt_regs fixed_regs;
20934-#endif
20935
20936-#ifdef CONFIG_X86_32
20937- if (!user_mode_vm(regs)) {
20938+ if (!user_mode(regs)) {
20939 crash_fixup_ss_esp(&fixed_regs, regs);
20940 regs = &fixed_regs;
20941 }
20942diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
20943index afa64ad..dce67dd 100644
20944--- a/arch/x86/kernel/crash_dump_64.c
20945+++ b/arch/x86/kernel/crash_dump_64.c
20946@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
20947 return -ENOMEM;
20948
20949 if (userbuf) {
20950- if (copy_to_user(buf, vaddr + offset, csize)) {
20951+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
20952 iounmap(vaddr);
20953 return -EFAULT;
20954 }
20955diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
20956index 5d3fe8d..02e1429 100644
20957--- a/arch/x86/kernel/doublefault.c
20958+++ b/arch/x86/kernel/doublefault.c
20959@@ -13,7 +13,7 @@
20960
20961 #define DOUBLEFAULT_STACKSIZE (1024)
20962 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
20963-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
20964+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
20965
20966 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
20967
20968@@ -23,7 +23,7 @@ static void doublefault_fn(void)
20969 unsigned long gdt, tss;
20970
20971 native_store_gdt(&gdt_desc);
20972- gdt = gdt_desc.address;
20973+ gdt = (unsigned long)gdt_desc.address;
20974
20975 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
20976
20977@@ -60,10 +60,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
20978 /* 0x2 bit is always set */
20979 .flags = X86_EFLAGS_SF | 0x2,
20980 .sp = STACK_START,
20981- .es = __USER_DS,
20982+ .es = __KERNEL_DS,
20983 .cs = __KERNEL_CS,
20984 .ss = __KERNEL_DS,
20985- .ds = __USER_DS,
20986+ .ds = __KERNEL_DS,
20987 .fs = __KERNEL_PERCPU,
20988
20989 .__cr3 = __pa_nodebug(swapper_pg_dir),
20990diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
20991index d9c12d3..7858b62 100644
20992--- a/arch/x86/kernel/dumpstack.c
20993+++ b/arch/x86/kernel/dumpstack.c
20994@@ -2,6 +2,9 @@
20995 * Copyright (C) 1991, 1992 Linus Torvalds
20996 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
20997 */
20998+#ifdef CONFIG_GRKERNSEC_HIDESYM
20999+#define __INCLUDED_BY_HIDESYM 1
21000+#endif
21001 #include <linux/kallsyms.h>
21002 #include <linux/kprobes.h>
21003 #include <linux/uaccess.h>
21004@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
21005 static void
21006 print_ftrace_graph_addr(unsigned long addr, void *data,
21007 const struct stacktrace_ops *ops,
21008- struct thread_info *tinfo, int *graph)
21009+ struct task_struct *task, int *graph)
21010 {
21011- struct task_struct *task;
21012 unsigned long ret_addr;
21013 int index;
21014
21015 if (addr != (unsigned long)return_to_handler)
21016 return;
21017
21018- task = tinfo->task;
21019 index = task->curr_ret_stack;
21020
21021 if (!task->ret_stack || index < *graph)
21022@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21023 static inline void
21024 print_ftrace_graph_addr(unsigned long addr, void *data,
21025 const struct stacktrace_ops *ops,
21026- struct thread_info *tinfo, int *graph)
21027+ struct task_struct *task, int *graph)
21028 { }
21029 #endif
21030
21031@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21032 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21033 */
21034
21035-static inline int valid_stack_ptr(struct thread_info *tinfo,
21036- void *p, unsigned int size, void *end)
21037+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21038 {
21039- void *t = tinfo;
21040 if (end) {
21041 if (p < end && p >= (end-THREAD_SIZE))
21042 return 1;
21043@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21044 }
21045
21046 unsigned long
21047-print_context_stack(struct thread_info *tinfo,
21048+print_context_stack(struct task_struct *task, void *stack_start,
21049 unsigned long *stack, unsigned long bp,
21050 const struct stacktrace_ops *ops, void *data,
21051 unsigned long *end, int *graph)
21052 {
21053 struct stack_frame *frame = (struct stack_frame *)bp;
21054
21055- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21056+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21057 unsigned long addr;
21058
21059 addr = *stack;
21060@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21061 } else {
21062 ops->address(data, addr, 0);
21063 }
21064- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21065+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21066 }
21067 stack++;
21068 }
21069@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21070 EXPORT_SYMBOL_GPL(print_context_stack);
21071
21072 unsigned long
21073-print_context_stack_bp(struct thread_info *tinfo,
21074+print_context_stack_bp(struct task_struct *task, void *stack_start,
21075 unsigned long *stack, unsigned long bp,
21076 const struct stacktrace_ops *ops, void *data,
21077 unsigned long *end, int *graph)
21078@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21079 struct stack_frame *frame = (struct stack_frame *)bp;
21080 unsigned long *ret_addr = &frame->return_address;
21081
21082- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21083+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21084 unsigned long addr = *ret_addr;
21085
21086 if (!__kernel_text_address(addr))
21087@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21088 ops->address(data, addr, 1);
21089 frame = frame->next_frame;
21090 ret_addr = &frame->return_address;
21091- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21092+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21093 }
21094
21095 return (unsigned long)frame;
21096@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21097 static void print_trace_address(void *data, unsigned long addr, int reliable)
21098 {
21099 touch_nmi_watchdog();
21100- printk(data);
21101+ printk("%s", (char *)data);
21102 printk_stack_address(addr, reliable);
21103 }
21104
21105@@ -224,6 +223,8 @@ unsigned __kprobes long oops_begin(void)
21106 }
21107 EXPORT_SYMBOL_GPL(oops_begin);
21108
21109+extern void gr_handle_kernel_exploit(void);
21110+
21111 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21112 {
21113 if (regs && kexec_should_crash(current))
21114@@ -245,7 +246,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21115 panic("Fatal exception in interrupt");
21116 if (panic_on_oops)
21117 panic("Fatal exception");
21118- do_exit(signr);
21119+
21120+ gr_handle_kernel_exploit();
21121+
21122+ do_group_exit(signr);
21123 }
21124
21125 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
21126@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
21127 print_modules();
21128 show_regs(regs);
21129 #ifdef CONFIG_X86_32
21130- if (user_mode_vm(regs)) {
21131+ if (user_mode(regs)) {
21132 sp = regs->sp;
21133 ss = regs->ss & 0xffff;
21134 } else {
21135@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
21136 unsigned long flags = oops_begin();
21137 int sig = SIGSEGV;
21138
21139- if (!user_mode_vm(regs))
21140+ if (!user_mode(regs))
21141 report_bug(regs->ip, regs);
21142
21143 if (__die(str, regs, err))
21144diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
21145index f2a1770..540657f 100644
21146--- a/arch/x86/kernel/dumpstack_32.c
21147+++ b/arch/x86/kernel/dumpstack_32.c
21148@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21149 bp = stack_frame(task, regs);
21150
21151 for (;;) {
21152- struct thread_info *context;
21153+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21154
21155- context = (struct thread_info *)
21156- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
21157- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
21158+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21159
21160- stack = (unsigned long *)context->previous_esp;
21161- if (!stack)
21162+ if (stack_start == task_stack_page(task))
21163 break;
21164+ stack = *(unsigned long **)stack_start;
21165 if (ops->stack(data, "IRQ") < 0)
21166 break;
21167 touch_nmi_watchdog();
21168@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
21169 int i;
21170
21171 show_regs_print_info(KERN_EMERG);
21172- __show_regs(regs, !user_mode_vm(regs));
21173+ __show_regs(regs, !user_mode(regs));
21174
21175 /*
21176 * When in-kernel, we also print out the stack and code at the
21177 * time of the fault..
21178 */
21179- if (!user_mode_vm(regs)) {
21180+ if (!user_mode(regs)) {
21181 unsigned int code_prologue = code_bytes * 43 / 64;
21182 unsigned int code_len = code_bytes;
21183 unsigned char c;
21184 u8 *ip;
21185+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
21186
21187 pr_emerg("Stack:\n");
21188 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
21189
21190 pr_emerg("Code:");
21191
21192- ip = (u8 *)regs->ip - code_prologue;
21193+ ip = (u8 *)regs->ip - code_prologue + cs_base;
21194 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
21195 /* try starting at IP */
21196- ip = (u8 *)regs->ip;
21197+ ip = (u8 *)regs->ip + cs_base;
21198 code_len = code_len - code_prologue + 1;
21199 }
21200 for (i = 0; i < code_len; i++, ip++) {
21201@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
21202 pr_cont(" Bad EIP value.");
21203 break;
21204 }
21205- if (ip == (u8 *)regs->ip)
21206+ if (ip == (u8 *)regs->ip + cs_base)
21207 pr_cont(" <%02x>", c);
21208 else
21209 pr_cont(" %02x", c);
21210@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
21211 {
21212 unsigned short ud2;
21213
21214+ ip = ktla_ktva(ip);
21215 if (ip < PAGE_OFFSET)
21216 return 0;
21217 if (probe_kernel_address((unsigned short *)ip, ud2))
21218@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
21219
21220 return ud2 == 0x0b0f;
21221 }
21222+
21223+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21224+void pax_check_alloca(unsigned long size)
21225+{
21226+ unsigned long sp = (unsigned long)&sp, stack_left;
21227+
21228+ /* all kernel stacks are of the same size */
21229+ stack_left = sp & (THREAD_SIZE - 1);
21230+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21231+}
21232+EXPORT_SYMBOL(pax_check_alloca);
21233+#endif
21234diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
21235index addb207..99635fa 100644
21236--- a/arch/x86/kernel/dumpstack_64.c
21237+++ b/arch/x86/kernel/dumpstack_64.c
21238@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21239 unsigned long *irq_stack_end =
21240 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
21241 unsigned used = 0;
21242- struct thread_info *tinfo;
21243 int graph = 0;
21244 unsigned long dummy;
21245+ void *stack_start;
21246
21247 if (!task)
21248 task = current;
21249@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21250 * current stack address. If the stacks consist of nested
21251 * exceptions
21252 */
21253- tinfo = task_thread_info(task);
21254 for (;;) {
21255 char *id;
21256 unsigned long *estack_end;
21257+
21258 estack_end = in_exception_stack(cpu, (unsigned long)stack,
21259 &used, &id);
21260
21261@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21262 if (ops->stack(data, id) < 0)
21263 break;
21264
21265- bp = ops->walk_stack(tinfo, stack, bp, ops,
21266+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
21267 data, estack_end, &graph);
21268 ops->stack(data, "<EOE>");
21269 /*
21270@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21271 * second-to-last pointer (index -2 to end) in the
21272 * exception stack:
21273 */
21274+ if ((u16)estack_end[-1] != __KERNEL_DS)
21275+ goto out;
21276 stack = (unsigned long *) estack_end[-2];
21277 continue;
21278 }
21279@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21280 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
21281 if (ops->stack(data, "IRQ") < 0)
21282 break;
21283- bp = ops->walk_stack(tinfo, stack, bp,
21284+ bp = ops->walk_stack(task, irq_stack, stack, bp,
21285 ops, data, irq_stack_end, &graph);
21286 /*
21287 * We link to the next stack (which would be
21288@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21289 /*
21290 * This handles the process stack:
21291 */
21292- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
21293+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21294+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21295+out:
21296 put_cpu();
21297 }
21298 EXPORT_SYMBOL(dump_trace);
21299@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
21300
21301 return ud2 == 0x0b0f;
21302 }
21303+
21304+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21305+void pax_check_alloca(unsigned long size)
21306+{
21307+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
21308+ unsigned cpu, used;
21309+ char *id;
21310+
21311+ /* check the process stack first */
21312+ stack_start = (unsigned long)task_stack_page(current);
21313+ stack_end = stack_start + THREAD_SIZE;
21314+ if (likely(stack_start <= sp && sp < stack_end)) {
21315+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
21316+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21317+ return;
21318+ }
21319+
21320+ cpu = get_cpu();
21321+
21322+ /* check the irq stacks */
21323+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
21324+ stack_start = stack_end - IRQ_STACK_SIZE;
21325+ if (stack_start <= sp && sp < stack_end) {
21326+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
21327+ put_cpu();
21328+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21329+ return;
21330+ }
21331+
21332+ /* check the exception stacks */
21333+ used = 0;
21334+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
21335+ stack_start = stack_end - EXCEPTION_STKSZ;
21336+ if (stack_end && stack_start <= sp && sp < stack_end) {
21337+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
21338+ put_cpu();
21339+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21340+ return;
21341+ }
21342+
21343+ put_cpu();
21344+
21345+ /* unknown stack */
21346+ BUG();
21347+}
21348+EXPORT_SYMBOL(pax_check_alloca);
21349+#endif
21350diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
21351index 174da5f..5e55606 100644
21352--- a/arch/x86/kernel/e820.c
21353+++ b/arch/x86/kernel/e820.c
21354@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
21355
21356 static void early_panic(char *msg)
21357 {
21358- early_printk(msg);
21359- panic(msg);
21360+ early_printk("%s", msg);
21361+ panic("%s", msg);
21362 }
21363
21364 static int userdef __initdata;
21365diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
21366index 01d1c18..8073693 100644
21367--- a/arch/x86/kernel/early_printk.c
21368+++ b/arch/x86/kernel/early_printk.c
21369@@ -7,6 +7,7 @@
21370 #include <linux/pci_regs.h>
21371 #include <linux/pci_ids.h>
21372 #include <linux/errno.h>
21373+#include <linux/sched.h>
21374 #include <asm/io.h>
21375 #include <asm/processor.h>
21376 #include <asm/fcntl.h>
21377diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
21378index a2a4f46..6cab058 100644
21379--- a/arch/x86/kernel/entry_32.S
21380+++ b/arch/x86/kernel/entry_32.S
21381@@ -177,13 +177,153 @@
21382 /*CFI_REL_OFFSET gs, PT_GS*/
21383 .endm
21384 .macro SET_KERNEL_GS reg
21385+
21386+#ifdef CONFIG_CC_STACKPROTECTOR
21387 movl $(__KERNEL_STACK_CANARY), \reg
21388+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
21389+ movl $(__USER_DS), \reg
21390+#else
21391+ xorl \reg, \reg
21392+#endif
21393+
21394 movl \reg, %gs
21395 .endm
21396
21397 #endif /* CONFIG_X86_32_LAZY_GS */
21398
21399-.macro SAVE_ALL
21400+.macro pax_enter_kernel
21401+#ifdef CONFIG_PAX_KERNEXEC
21402+ call pax_enter_kernel
21403+#endif
21404+.endm
21405+
21406+.macro pax_exit_kernel
21407+#ifdef CONFIG_PAX_KERNEXEC
21408+ call pax_exit_kernel
21409+#endif
21410+.endm
21411+
21412+#ifdef CONFIG_PAX_KERNEXEC
21413+ENTRY(pax_enter_kernel)
21414+#ifdef CONFIG_PARAVIRT
21415+ pushl %eax
21416+ pushl %ecx
21417+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
21418+ mov %eax, %esi
21419+#else
21420+ mov %cr0, %esi
21421+#endif
21422+ bts $16, %esi
21423+ jnc 1f
21424+ mov %cs, %esi
21425+ cmp $__KERNEL_CS, %esi
21426+ jz 3f
21427+ ljmp $__KERNEL_CS, $3f
21428+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
21429+2:
21430+#ifdef CONFIG_PARAVIRT
21431+ mov %esi, %eax
21432+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
21433+#else
21434+ mov %esi, %cr0
21435+#endif
21436+3:
21437+#ifdef CONFIG_PARAVIRT
21438+ popl %ecx
21439+ popl %eax
21440+#endif
21441+ ret
21442+ENDPROC(pax_enter_kernel)
21443+
21444+ENTRY(pax_exit_kernel)
21445+#ifdef CONFIG_PARAVIRT
21446+ pushl %eax
21447+ pushl %ecx
21448+#endif
21449+ mov %cs, %esi
21450+ cmp $__KERNEXEC_KERNEL_CS, %esi
21451+ jnz 2f
21452+#ifdef CONFIG_PARAVIRT
21453+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
21454+ mov %eax, %esi
21455+#else
21456+ mov %cr0, %esi
21457+#endif
21458+ btr $16, %esi
21459+ ljmp $__KERNEL_CS, $1f
21460+1:
21461+#ifdef CONFIG_PARAVIRT
21462+ mov %esi, %eax
21463+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
21464+#else
21465+ mov %esi, %cr0
21466+#endif
21467+2:
21468+#ifdef CONFIG_PARAVIRT
21469+ popl %ecx
21470+ popl %eax
21471+#endif
21472+ ret
21473+ENDPROC(pax_exit_kernel)
21474+#endif
21475+
21476+ .macro pax_erase_kstack
21477+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21478+ call pax_erase_kstack
21479+#endif
21480+ .endm
21481+
21482+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21483+/*
21484+ * ebp: thread_info
21485+ */
21486+ENTRY(pax_erase_kstack)
21487+ pushl %edi
21488+ pushl %ecx
21489+ pushl %eax
21490+
21491+ mov TI_lowest_stack(%ebp), %edi
21492+ mov $-0xBEEF, %eax
21493+ std
21494+
21495+1: mov %edi, %ecx
21496+ and $THREAD_SIZE_asm - 1, %ecx
21497+ shr $2, %ecx
21498+ repne scasl
21499+ jecxz 2f
21500+
21501+ cmp $2*16, %ecx
21502+ jc 2f
21503+
21504+ mov $2*16, %ecx
21505+ repe scasl
21506+ jecxz 2f
21507+ jne 1b
21508+
21509+2: cld
21510+ mov %esp, %ecx
21511+ sub %edi, %ecx
21512+
21513+ cmp $THREAD_SIZE_asm, %ecx
21514+ jb 3f
21515+ ud2
21516+3:
21517+
21518+ shr $2, %ecx
21519+ rep stosl
21520+
21521+ mov TI_task_thread_sp0(%ebp), %edi
21522+ sub $128, %edi
21523+ mov %edi, TI_lowest_stack(%ebp)
21524+
21525+ popl %eax
21526+ popl %ecx
21527+ popl %edi
21528+ ret
21529+ENDPROC(pax_erase_kstack)
21530+#endif
21531+
21532+.macro __SAVE_ALL _DS
21533 cld
21534 PUSH_GS
21535 pushl_cfi %fs
21536@@ -206,7 +346,7 @@
21537 CFI_REL_OFFSET ecx, 0
21538 pushl_cfi %ebx
21539 CFI_REL_OFFSET ebx, 0
21540- movl $(__USER_DS), %edx
21541+ movl $\_DS, %edx
21542 movl %edx, %ds
21543 movl %edx, %es
21544 movl $(__KERNEL_PERCPU), %edx
21545@@ -214,6 +354,15 @@
21546 SET_KERNEL_GS %edx
21547 .endm
21548
21549+.macro SAVE_ALL
21550+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21551+ __SAVE_ALL __KERNEL_DS
21552+ pax_enter_kernel
21553+#else
21554+ __SAVE_ALL __USER_DS
21555+#endif
21556+.endm
21557+
21558 .macro RESTORE_INT_REGS
21559 popl_cfi %ebx
21560 CFI_RESTORE ebx
21561@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
21562 popfl_cfi
21563 jmp syscall_exit
21564 CFI_ENDPROC
21565-END(ret_from_fork)
21566+ENDPROC(ret_from_fork)
21567
21568 ENTRY(ret_from_kernel_thread)
21569 CFI_STARTPROC
21570@@ -344,7 +493,15 @@ ret_from_intr:
21571 andl $SEGMENT_RPL_MASK, %eax
21572 #endif
21573 cmpl $USER_RPL, %eax
21574+
21575+#ifdef CONFIG_PAX_KERNEXEC
21576+ jae resume_userspace
21577+
21578+ pax_exit_kernel
21579+ jmp resume_kernel
21580+#else
21581 jb resume_kernel # not returning to v8086 or userspace
21582+#endif
21583
21584 ENTRY(resume_userspace)
21585 LOCKDEP_SYS_EXIT
21586@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
21587 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
21588 # int/exception return?
21589 jne work_pending
21590- jmp restore_all
21591-END(ret_from_exception)
21592+ jmp restore_all_pax
21593+ENDPROC(ret_from_exception)
21594
21595 #ifdef CONFIG_PREEMPT
21596 ENTRY(resume_kernel)
21597@@ -369,7 +526,7 @@ need_resched:
21598 jz restore_all
21599 call preempt_schedule_irq
21600 jmp need_resched
21601-END(resume_kernel)
21602+ENDPROC(resume_kernel)
21603 #endif
21604 CFI_ENDPROC
21605 /*
21606@@ -403,30 +560,45 @@ sysenter_past_esp:
21607 /*CFI_REL_OFFSET cs, 0*/
21608 /*
21609 * Push current_thread_info()->sysenter_return to the stack.
21610- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
21611- * pushed above; +8 corresponds to copy_thread's esp0 setting.
21612 */
21613- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
21614+ pushl_cfi $0
21615 CFI_REL_OFFSET eip, 0
21616
21617 pushl_cfi %eax
21618 SAVE_ALL
21619+ GET_THREAD_INFO(%ebp)
21620+ movl TI_sysenter_return(%ebp),%ebp
21621+ movl %ebp,PT_EIP(%esp)
21622 ENABLE_INTERRUPTS(CLBR_NONE)
21623
21624 /*
21625 * Load the potential sixth argument from user stack.
21626 * Careful about security.
21627 */
21628+ movl PT_OLDESP(%esp),%ebp
21629+
21630+#ifdef CONFIG_PAX_MEMORY_UDEREF
21631+ mov PT_OLDSS(%esp),%ds
21632+1: movl %ds:(%ebp),%ebp
21633+ push %ss
21634+ pop %ds
21635+#else
21636 cmpl $__PAGE_OFFSET-3,%ebp
21637 jae syscall_fault
21638 ASM_STAC
21639 1: movl (%ebp),%ebp
21640 ASM_CLAC
21641+#endif
21642+
21643 movl %ebp,PT_EBP(%esp)
21644 _ASM_EXTABLE(1b,syscall_fault)
21645
21646 GET_THREAD_INFO(%ebp)
21647
21648+#ifdef CONFIG_PAX_RANDKSTACK
21649+ pax_erase_kstack
21650+#endif
21651+
21652 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
21653 jnz sysenter_audit
21654 sysenter_do_call:
21655@@ -441,12 +613,24 @@ sysenter_do_call:
21656 testl $_TIF_ALLWORK_MASK, %ecx
21657 jne sysexit_audit
21658 sysenter_exit:
21659+
21660+#ifdef CONFIG_PAX_RANDKSTACK
21661+ pushl_cfi %eax
21662+ movl %esp, %eax
21663+ call pax_randomize_kstack
21664+ popl_cfi %eax
21665+#endif
21666+
21667+ pax_erase_kstack
21668+
21669 /* if something modifies registers it must also disable sysexit */
21670 movl PT_EIP(%esp), %edx
21671 movl PT_OLDESP(%esp), %ecx
21672 xorl %ebp,%ebp
21673 TRACE_IRQS_ON
21674 1: mov PT_FS(%esp), %fs
21675+2: mov PT_DS(%esp), %ds
21676+3: mov PT_ES(%esp), %es
21677 PTGS_TO_GS
21678 ENABLE_INTERRUPTS_SYSEXIT
21679
21680@@ -463,6 +647,9 @@ sysenter_audit:
21681 movl %eax,%edx /* 2nd arg: syscall number */
21682 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
21683 call __audit_syscall_entry
21684+
21685+ pax_erase_kstack
21686+
21687 pushl_cfi %ebx
21688 movl PT_EAX(%esp),%eax /* reload syscall number */
21689 jmp sysenter_do_call
21690@@ -488,10 +675,16 @@ sysexit_audit:
21691
21692 CFI_ENDPROC
21693 .pushsection .fixup,"ax"
21694-2: movl $0,PT_FS(%esp)
21695+4: movl $0,PT_FS(%esp)
21696+ jmp 1b
21697+5: movl $0,PT_DS(%esp)
21698+ jmp 1b
21699+6: movl $0,PT_ES(%esp)
21700 jmp 1b
21701 .popsection
21702- _ASM_EXTABLE(1b,2b)
21703+ _ASM_EXTABLE(1b,4b)
21704+ _ASM_EXTABLE(2b,5b)
21705+ _ASM_EXTABLE(3b,6b)
21706 PTGS_TO_GS_EX
21707 ENDPROC(ia32_sysenter_target)
21708
21709@@ -506,6 +699,11 @@ ENTRY(system_call)
21710 pushl_cfi %eax # save orig_eax
21711 SAVE_ALL
21712 GET_THREAD_INFO(%ebp)
21713+
21714+#ifdef CONFIG_PAX_RANDKSTACK
21715+ pax_erase_kstack
21716+#endif
21717+
21718 # system call tracing in operation / emulation
21719 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
21720 jnz syscall_trace_entry
21721@@ -524,6 +722,15 @@ syscall_exit:
21722 testl $_TIF_ALLWORK_MASK, %ecx # current->work
21723 jne syscall_exit_work
21724
21725+restore_all_pax:
21726+
21727+#ifdef CONFIG_PAX_RANDKSTACK
21728+ movl %esp, %eax
21729+ call pax_randomize_kstack
21730+#endif
21731+
21732+ pax_erase_kstack
21733+
21734 restore_all:
21735 TRACE_IRQS_IRET
21736 restore_all_notrace:
21737@@ -580,14 +787,34 @@ ldt_ss:
21738 * compensating for the offset by changing to the ESPFIX segment with
21739 * a base address that matches for the difference.
21740 */
21741-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
21742+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
21743 mov %esp, %edx /* load kernel esp */
21744 mov PT_OLDESP(%esp), %eax /* load userspace esp */
21745 mov %dx, %ax /* eax: new kernel esp */
21746 sub %eax, %edx /* offset (low word is 0) */
21747+#ifdef CONFIG_SMP
21748+ movl PER_CPU_VAR(cpu_number), %ebx
21749+ shll $PAGE_SHIFT_asm, %ebx
21750+ addl $cpu_gdt_table, %ebx
21751+#else
21752+ movl $cpu_gdt_table, %ebx
21753+#endif
21754 shr $16, %edx
21755- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
21756- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
21757+
21758+#ifdef CONFIG_PAX_KERNEXEC
21759+ mov %cr0, %esi
21760+ btr $16, %esi
21761+ mov %esi, %cr0
21762+#endif
21763+
21764+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
21765+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
21766+
21767+#ifdef CONFIG_PAX_KERNEXEC
21768+ bts $16, %esi
21769+ mov %esi, %cr0
21770+#endif
21771+
21772 pushl_cfi $__ESPFIX_SS
21773 pushl_cfi %eax /* new kernel esp */
21774 /* Disable interrupts, but do not irqtrace this section: we
21775@@ -616,20 +843,18 @@ work_resched:
21776 movl TI_flags(%ebp), %ecx
21777 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
21778 # than syscall tracing?
21779- jz restore_all
21780+ jz restore_all_pax
21781 testb $_TIF_NEED_RESCHED, %cl
21782 jnz work_resched
21783
21784 work_notifysig: # deal with pending signals and
21785 # notify-resume requests
21786+ movl %esp, %eax
21787 #ifdef CONFIG_VM86
21788 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
21789- movl %esp, %eax
21790 jne work_notifysig_v86 # returning to kernel-space or
21791 # vm86-space
21792 1:
21793-#else
21794- movl %esp, %eax
21795 #endif
21796 TRACE_IRQS_ON
21797 ENABLE_INTERRUPTS(CLBR_NONE)
21798@@ -650,7 +875,7 @@ work_notifysig_v86:
21799 movl %eax, %esp
21800 jmp 1b
21801 #endif
21802-END(work_pending)
21803+ENDPROC(work_pending)
21804
21805 # perform syscall exit tracing
21806 ALIGN
21807@@ -658,11 +883,14 @@ syscall_trace_entry:
21808 movl $-ENOSYS,PT_EAX(%esp)
21809 movl %esp, %eax
21810 call syscall_trace_enter
21811+
21812+ pax_erase_kstack
21813+
21814 /* What it returned is what we'll actually use. */
21815 cmpl $(NR_syscalls), %eax
21816 jnae syscall_call
21817 jmp syscall_exit
21818-END(syscall_trace_entry)
21819+ENDPROC(syscall_trace_entry)
21820
21821 # perform syscall exit tracing
21822 ALIGN
21823@@ -675,21 +903,25 @@ syscall_exit_work:
21824 movl %esp, %eax
21825 call syscall_trace_leave
21826 jmp resume_userspace
21827-END(syscall_exit_work)
21828+ENDPROC(syscall_exit_work)
21829 CFI_ENDPROC
21830
21831 RING0_INT_FRAME # can't unwind into user space anyway
21832 syscall_fault:
21833+#ifdef CONFIG_PAX_MEMORY_UDEREF
21834+ push %ss
21835+ pop %ds
21836+#endif
21837 ASM_CLAC
21838 GET_THREAD_INFO(%ebp)
21839 movl $-EFAULT,PT_EAX(%esp)
21840 jmp resume_userspace
21841-END(syscall_fault)
21842+ENDPROC(syscall_fault)
21843
21844 syscall_badsys:
21845 movl $-ENOSYS,PT_EAX(%esp)
21846 jmp resume_userspace
21847-END(syscall_badsys)
21848+ENDPROC(syscall_badsys)
21849 CFI_ENDPROC
21850 /*
21851 * End of kprobes section
21852@@ -705,8 +937,15 @@ END(syscall_badsys)
21853 * normal stack and adjusts ESP with the matching offset.
21854 */
21855 /* fixup the stack */
21856- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
21857- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
21858+#ifdef CONFIG_SMP
21859+ movl PER_CPU_VAR(cpu_number), %ebx
21860+ shll $PAGE_SHIFT_asm, %ebx
21861+ addl $cpu_gdt_table, %ebx
21862+#else
21863+ movl $cpu_gdt_table, %ebx
21864+#endif
21865+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
21866+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
21867 shl $16, %eax
21868 addl %esp, %eax /* the adjusted stack pointer */
21869 pushl_cfi $__KERNEL_DS
21870@@ -759,7 +998,7 @@ vector=vector+1
21871 .endr
21872 2: jmp common_interrupt
21873 .endr
21874-END(irq_entries_start)
21875+ENDPROC(irq_entries_start)
21876
21877 .previous
21878 END(interrupt)
21879@@ -820,7 +1059,7 @@ ENTRY(coprocessor_error)
21880 pushl_cfi $do_coprocessor_error
21881 jmp error_code
21882 CFI_ENDPROC
21883-END(coprocessor_error)
21884+ENDPROC(coprocessor_error)
21885
21886 ENTRY(simd_coprocessor_error)
21887 RING0_INT_FRAME
21888@@ -833,7 +1072,7 @@ ENTRY(simd_coprocessor_error)
21889 .section .altinstructions,"a"
21890 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
21891 .previous
21892-.section .altinstr_replacement,"ax"
21893+.section .altinstr_replacement,"a"
21894 663: pushl $do_simd_coprocessor_error
21895 664:
21896 .previous
21897@@ -842,7 +1081,7 @@ ENTRY(simd_coprocessor_error)
21898 #endif
21899 jmp error_code
21900 CFI_ENDPROC
21901-END(simd_coprocessor_error)
21902+ENDPROC(simd_coprocessor_error)
21903
21904 ENTRY(device_not_available)
21905 RING0_INT_FRAME
21906@@ -851,18 +1090,18 @@ ENTRY(device_not_available)
21907 pushl_cfi $do_device_not_available
21908 jmp error_code
21909 CFI_ENDPROC
21910-END(device_not_available)
21911+ENDPROC(device_not_available)
21912
21913 #ifdef CONFIG_PARAVIRT
21914 ENTRY(native_iret)
21915 iret
21916 _ASM_EXTABLE(native_iret, iret_exc)
21917-END(native_iret)
21918+ENDPROC(native_iret)
21919
21920 ENTRY(native_irq_enable_sysexit)
21921 sti
21922 sysexit
21923-END(native_irq_enable_sysexit)
21924+ENDPROC(native_irq_enable_sysexit)
21925 #endif
21926
21927 ENTRY(overflow)
21928@@ -872,7 +1111,7 @@ ENTRY(overflow)
21929 pushl_cfi $do_overflow
21930 jmp error_code
21931 CFI_ENDPROC
21932-END(overflow)
21933+ENDPROC(overflow)
21934
21935 ENTRY(bounds)
21936 RING0_INT_FRAME
21937@@ -881,7 +1120,7 @@ ENTRY(bounds)
21938 pushl_cfi $do_bounds
21939 jmp error_code
21940 CFI_ENDPROC
21941-END(bounds)
21942+ENDPROC(bounds)
21943
21944 ENTRY(invalid_op)
21945 RING0_INT_FRAME
21946@@ -890,7 +1129,7 @@ ENTRY(invalid_op)
21947 pushl_cfi $do_invalid_op
21948 jmp error_code
21949 CFI_ENDPROC
21950-END(invalid_op)
21951+ENDPROC(invalid_op)
21952
21953 ENTRY(coprocessor_segment_overrun)
21954 RING0_INT_FRAME
21955@@ -899,7 +1138,7 @@ ENTRY(coprocessor_segment_overrun)
21956 pushl_cfi $do_coprocessor_segment_overrun
21957 jmp error_code
21958 CFI_ENDPROC
21959-END(coprocessor_segment_overrun)
21960+ENDPROC(coprocessor_segment_overrun)
21961
21962 ENTRY(invalid_TSS)
21963 RING0_EC_FRAME
21964@@ -907,7 +1146,7 @@ ENTRY(invalid_TSS)
21965 pushl_cfi $do_invalid_TSS
21966 jmp error_code
21967 CFI_ENDPROC
21968-END(invalid_TSS)
21969+ENDPROC(invalid_TSS)
21970
21971 ENTRY(segment_not_present)
21972 RING0_EC_FRAME
21973@@ -915,7 +1154,7 @@ ENTRY(segment_not_present)
21974 pushl_cfi $do_segment_not_present
21975 jmp error_code
21976 CFI_ENDPROC
21977-END(segment_not_present)
21978+ENDPROC(segment_not_present)
21979
21980 ENTRY(stack_segment)
21981 RING0_EC_FRAME
21982@@ -923,7 +1162,7 @@ ENTRY(stack_segment)
21983 pushl_cfi $do_stack_segment
21984 jmp error_code
21985 CFI_ENDPROC
21986-END(stack_segment)
21987+ENDPROC(stack_segment)
21988
21989 ENTRY(alignment_check)
21990 RING0_EC_FRAME
21991@@ -931,7 +1170,7 @@ ENTRY(alignment_check)
21992 pushl_cfi $do_alignment_check
21993 jmp error_code
21994 CFI_ENDPROC
21995-END(alignment_check)
21996+ENDPROC(alignment_check)
21997
21998 ENTRY(divide_error)
21999 RING0_INT_FRAME
22000@@ -940,7 +1179,7 @@ ENTRY(divide_error)
22001 pushl_cfi $do_divide_error
22002 jmp error_code
22003 CFI_ENDPROC
22004-END(divide_error)
22005+ENDPROC(divide_error)
22006
22007 #ifdef CONFIG_X86_MCE
22008 ENTRY(machine_check)
22009@@ -950,7 +1189,7 @@ ENTRY(machine_check)
22010 pushl_cfi machine_check_vector
22011 jmp error_code
22012 CFI_ENDPROC
22013-END(machine_check)
22014+ENDPROC(machine_check)
22015 #endif
22016
22017 ENTRY(spurious_interrupt_bug)
22018@@ -960,7 +1199,7 @@ ENTRY(spurious_interrupt_bug)
22019 pushl_cfi $do_spurious_interrupt_bug
22020 jmp error_code
22021 CFI_ENDPROC
22022-END(spurious_interrupt_bug)
22023+ENDPROC(spurious_interrupt_bug)
22024 /*
22025 * End of kprobes section
22026 */
22027@@ -1070,7 +1309,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22028
22029 ENTRY(mcount)
22030 ret
22031-END(mcount)
22032+ENDPROC(mcount)
22033
22034 ENTRY(ftrace_caller)
22035 cmpl $0, function_trace_stop
22036@@ -1103,7 +1342,7 @@ ftrace_graph_call:
22037 .globl ftrace_stub
22038 ftrace_stub:
22039 ret
22040-END(ftrace_caller)
22041+ENDPROC(ftrace_caller)
22042
22043 ENTRY(ftrace_regs_caller)
22044 pushf /* push flags before compare (in cs location) */
22045@@ -1207,7 +1446,7 @@ trace:
22046 popl %ecx
22047 popl %eax
22048 jmp ftrace_stub
22049-END(mcount)
22050+ENDPROC(mcount)
22051 #endif /* CONFIG_DYNAMIC_FTRACE */
22052 #endif /* CONFIG_FUNCTION_TRACER */
22053
22054@@ -1225,7 +1464,7 @@ ENTRY(ftrace_graph_caller)
22055 popl %ecx
22056 popl %eax
22057 ret
22058-END(ftrace_graph_caller)
22059+ENDPROC(ftrace_graph_caller)
22060
22061 .globl return_to_handler
22062 return_to_handler:
22063@@ -1291,15 +1530,18 @@ error_code:
22064 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22065 REG_TO_PTGS %ecx
22066 SET_KERNEL_GS %ecx
22067- movl $(__USER_DS), %ecx
22068+ movl $(__KERNEL_DS), %ecx
22069 movl %ecx, %ds
22070 movl %ecx, %es
22071+
22072+ pax_enter_kernel
22073+
22074 TRACE_IRQS_OFF
22075 movl %esp,%eax # pt_regs pointer
22076 call *%edi
22077 jmp ret_from_exception
22078 CFI_ENDPROC
22079-END(page_fault)
22080+ENDPROC(page_fault)
22081
22082 /*
22083 * Debug traps and NMI can happen at the one SYSENTER instruction
22084@@ -1342,7 +1584,7 @@ debug_stack_correct:
22085 call do_debug
22086 jmp ret_from_exception
22087 CFI_ENDPROC
22088-END(debug)
22089+ENDPROC(debug)
22090
22091 /*
22092 * NMI is doubly nasty. It can happen _while_ we're handling
22093@@ -1380,6 +1622,9 @@ nmi_stack_correct:
22094 xorl %edx,%edx # zero error code
22095 movl %esp,%eax # pt_regs pointer
22096 call do_nmi
22097+
22098+ pax_exit_kernel
22099+
22100 jmp restore_all_notrace
22101 CFI_ENDPROC
22102
22103@@ -1416,12 +1661,15 @@ nmi_espfix_stack:
22104 FIXUP_ESPFIX_STACK # %eax == %esp
22105 xorl %edx,%edx # zero error code
22106 call do_nmi
22107+
22108+ pax_exit_kernel
22109+
22110 RESTORE_REGS
22111 lss 12+4(%esp), %esp # back to espfix stack
22112 CFI_ADJUST_CFA_OFFSET -24
22113 jmp irq_return
22114 CFI_ENDPROC
22115-END(nmi)
22116+ENDPROC(nmi)
22117
22118 ENTRY(int3)
22119 RING0_INT_FRAME
22120@@ -1434,14 +1682,14 @@ ENTRY(int3)
22121 call do_int3
22122 jmp ret_from_exception
22123 CFI_ENDPROC
22124-END(int3)
22125+ENDPROC(int3)
22126
22127 ENTRY(general_protection)
22128 RING0_EC_FRAME
22129 pushl_cfi $do_general_protection
22130 jmp error_code
22131 CFI_ENDPROC
22132-END(general_protection)
22133+ENDPROC(general_protection)
22134
22135 #ifdef CONFIG_KVM_GUEST
22136 ENTRY(async_page_fault)
22137@@ -1450,7 +1698,7 @@ ENTRY(async_page_fault)
22138 pushl_cfi $do_async_page_fault
22139 jmp error_code
22140 CFI_ENDPROC
22141-END(async_page_fault)
22142+ENDPROC(async_page_fault)
22143 #endif
22144
22145 /*
22146diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
22147index 1e96c36..3ff710a 100644
22148--- a/arch/x86/kernel/entry_64.S
22149+++ b/arch/x86/kernel/entry_64.S
22150@@ -59,6 +59,8 @@
22151 #include <asm/context_tracking.h>
22152 #include <asm/smap.h>
22153 #include <linux/err.h>
22154+#include <asm/pgtable.h>
22155+#include <asm/alternative-asm.h>
22156
22157 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
22158 #include <linux/elf-em.h>
22159@@ -80,8 +82,9 @@
22160 #ifdef CONFIG_DYNAMIC_FTRACE
22161
22162 ENTRY(function_hook)
22163+ pax_force_retaddr
22164 retq
22165-END(function_hook)
22166+ENDPROC(function_hook)
22167
22168 /* skip is set if stack has been adjusted */
22169 .macro ftrace_caller_setup skip=0
22170@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
22171 #endif
22172
22173 GLOBAL(ftrace_stub)
22174+ pax_force_retaddr
22175 retq
22176-END(ftrace_caller)
22177+ENDPROC(ftrace_caller)
22178
22179 ENTRY(ftrace_regs_caller)
22180 /* Save the current flags before compare (in SS location)*/
22181@@ -191,7 +195,7 @@ ftrace_restore_flags:
22182 popfq
22183 jmp ftrace_stub
22184
22185-END(ftrace_regs_caller)
22186+ENDPROC(ftrace_regs_caller)
22187
22188
22189 #else /* ! CONFIG_DYNAMIC_FTRACE */
22190@@ -212,6 +216,7 @@ ENTRY(function_hook)
22191 #endif
22192
22193 GLOBAL(ftrace_stub)
22194+ pax_force_retaddr
22195 retq
22196
22197 trace:
22198@@ -225,12 +230,13 @@ trace:
22199 #endif
22200 subq $MCOUNT_INSN_SIZE, %rdi
22201
22202+ pax_force_fptr ftrace_trace_function
22203 call *ftrace_trace_function
22204
22205 MCOUNT_RESTORE_FRAME
22206
22207 jmp ftrace_stub
22208-END(function_hook)
22209+ENDPROC(function_hook)
22210 #endif /* CONFIG_DYNAMIC_FTRACE */
22211 #endif /* CONFIG_FUNCTION_TRACER */
22212
22213@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
22214
22215 MCOUNT_RESTORE_FRAME
22216
22217+ pax_force_retaddr
22218 retq
22219-END(ftrace_graph_caller)
22220+ENDPROC(ftrace_graph_caller)
22221
22222 GLOBAL(return_to_handler)
22223 subq $24, %rsp
22224@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
22225 movq 8(%rsp), %rdx
22226 movq (%rsp), %rax
22227 addq $24, %rsp
22228+ pax_force_fptr %rdi
22229 jmp *%rdi
22230+ENDPROC(return_to_handler)
22231 #endif
22232
22233
22234@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
22235 ENDPROC(native_usergs_sysret64)
22236 #endif /* CONFIG_PARAVIRT */
22237
22238+ .macro ljmpq sel, off
22239+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
22240+ .byte 0x48; ljmp *1234f(%rip)
22241+ .pushsection .rodata
22242+ .align 16
22243+ 1234: .quad \off; .word \sel
22244+ .popsection
22245+#else
22246+ pushq $\sel
22247+ pushq $\off
22248+ lretq
22249+#endif
22250+ .endm
22251+
22252+ .macro pax_enter_kernel
22253+ pax_set_fptr_mask
22254+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22255+ call pax_enter_kernel
22256+#endif
22257+ .endm
22258+
22259+ .macro pax_exit_kernel
22260+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22261+ call pax_exit_kernel
22262+#endif
22263+
22264+ .endm
22265+
22266+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22267+ENTRY(pax_enter_kernel)
22268+ pushq %rdi
22269+
22270+#ifdef CONFIG_PARAVIRT
22271+ PV_SAVE_REGS(CLBR_RDI)
22272+#endif
22273+
22274+#ifdef CONFIG_PAX_KERNEXEC
22275+ GET_CR0_INTO_RDI
22276+ bts $16,%rdi
22277+ jnc 3f
22278+ mov %cs,%edi
22279+ cmp $__KERNEL_CS,%edi
22280+ jnz 2f
22281+1:
22282+#endif
22283+
22284+#ifdef CONFIG_PAX_MEMORY_UDEREF
22285+ 661: jmp 111f
22286+ .pushsection .altinstr_replacement, "a"
22287+ 662: ASM_NOP2
22288+ .popsection
22289+ .pushsection .altinstructions, "a"
22290+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22291+ .popsection
22292+ GET_CR3_INTO_RDI
22293+ cmp $0,%dil
22294+ jnz 112f
22295+ mov $__KERNEL_DS,%edi
22296+ mov %edi,%ss
22297+ jmp 111f
22298+112: cmp $1,%dil
22299+ jz 113f
22300+ ud2
22301+113: sub $4097,%rdi
22302+ bts $63,%rdi
22303+ SET_RDI_INTO_CR3
22304+ mov $__UDEREF_KERNEL_DS,%edi
22305+ mov %edi,%ss
22306+111:
22307+#endif
22308+
22309+#ifdef CONFIG_PARAVIRT
22310+ PV_RESTORE_REGS(CLBR_RDI)
22311+#endif
22312+
22313+ popq %rdi
22314+ pax_force_retaddr
22315+ retq
22316+
22317+#ifdef CONFIG_PAX_KERNEXEC
22318+2: ljmpq __KERNEL_CS,1b
22319+3: ljmpq __KERNEXEC_KERNEL_CS,4f
22320+4: SET_RDI_INTO_CR0
22321+ jmp 1b
22322+#endif
22323+ENDPROC(pax_enter_kernel)
22324+
22325+ENTRY(pax_exit_kernel)
22326+ pushq %rdi
22327+
22328+#ifdef CONFIG_PARAVIRT
22329+ PV_SAVE_REGS(CLBR_RDI)
22330+#endif
22331+
22332+#ifdef CONFIG_PAX_KERNEXEC
22333+ mov %cs,%rdi
22334+ cmp $__KERNEXEC_KERNEL_CS,%edi
22335+ jz 2f
22336+ GET_CR0_INTO_RDI
22337+ bts $16,%rdi
22338+ jnc 4f
22339+1:
22340+#endif
22341+
22342+#ifdef CONFIG_PAX_MEMORY_UDEREF
22343+ 661: jmp 111f
22344+ .pushsection .altinstr_replacement, "a"
22345+ 662: ASM_NOP2
22346+ .popsection
22347+ .pushsection .altinstructions, "a"
22348+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22349+ .popsection
22350+ mov %ss,%edi
22351+ cmp $__UDEREF_KERNEL_DS,%edi
22352+ jnz 111f
22353+ GET_CR3_INTO_RDI
22354+ cmp $0,%dil
22355+ jz 112f
22356+ ud2
22357+112: add $4097,%rdi
22358+ bts $63,%rdi
22359+ SET_RDI_INTO_CR3
22360+ mov $__KERNEL_DS,%edi
22361+ mov %edi,%ss
22362+111:
22363+#endif
22364+
22365+#ifdef CONFIG_PARAVIRT
22366+ PV_RESTORE_REGS(CLBR_RDI);
22367+#endif
22368+
22369+ popq %rdi
22370+ pax_force_retaddr
22371+ retq
22372+
22373+#ifdef CONFIG_PAX_KERNEXEC
22374+2: GET_CR0_INTO_RDI
22375+ btr $16,%rdi
22376+ jnc 4f
22377+ ljmpq __KERNEL_CS,3f
22378+3: SET_RDI_INTO_CR0
22379+ jmp 1b
22380+4: ud2
22381+ jmp 4b
22382+#endif
22383+ENDPROC(pax_exit_kernel)
22384+#endif
22385+
22386+ .macro pax_enter_kernel_user
22387+ pax_set_fptr_mask
22388+#ifdef CONFIG_PAX_MEMORY_UDEREF
22389+ call pax_enter_kernel_user
22390+#endif
22391+ .endm
22392+
22393+ .macro pax_exit_kernel_user
22394+#ifdef CONFIG_PAX_MEMORY_UDEREF
22395+ call pax_exit_kernel_user
22396+#endif
22397+#ifdef CONFIG_PAX_RANDKSTACK
22398+ pushq %rax
22399+ pushq %r11
22400+ call pax_randomize_kstack
22401+ popq %r11
22402+ popq %rax
22403+#endif
22404+ .endm
22405+
22406+#ifdef CONFIG_PAX_MEMORY_UDEREF
22407+ENTRY(pax_enter_kernel_user)
22408+ pushq %rdi
22409+ pushq %rbx
22410+
22411+#ifdef CONFIG_PARAVIRT
22412+ PV_SAVE_REGS(CLBR_RDI)
22413+#endif
22414+
22415+ 661: jmp 111f
22416+ .pushsection .altinstr_replacement, "a"
22417+ 662: ASM_NOP2
22418+ .popsection
22419+ .pushsection .altinstructions, "a"
22420+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22421+ .popsection
22422+ GET_CR3_INTO_RDI
22423+ cmp $1,%dil
22424+ jnz 4f
22425+ sub $4097,%rdi
22426+ bts $63,%rdi
22427+ SET_RDI_INTO_CR3
22428+ jmp 3f
22429+111:
22430+
22431+ GET_CR3_INTO_RDI
22432+ mov %rdi,%rbx
22433+ add $__START_KERNEL_map,%rbx
22434+ sub phys_base(%rip),%rbx
22435+
22436+#ifdef CONFIG_PARAVIRT
22437+ cmpl $0, pv_info+PARAVIRT_enabled
22438+ jz 1f
22439+ pushq %rdi
22440+ i = 0
22441+ .rept USER_PGD_PTRS
22442+ mov i*8(%rbx),%rsi
22443+ mov $0,%sil
22444+ lea i*8(%rbx),%rdi
22445+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22446+ i = i + 1
22447+ .endr
22448+ popq %rdi
22449+ jmp 2f
22450+1:
22451+#endif
22452+
22453+ i = 0
22454+ .rept USER_PGD_PTRS
22455+ movb $0,i*8(%rbx)
22456+ i = i + 1
22457+ .endr
22458+
22459+2: SET_RDI_INTO_CR3
22460+
22461+#ifdef CONFIG_PAX_KERNEXEC
22462+ GET_CR0_INTO_RDI
22463+ bts $16,%rdi
22464+ SET_RDI_INTO_CR0
22465+#endif
22466+
22467+3:
22468+
22469+#ifdef CONFIG_PARAVIRT
22470+ PV_RESTORE_REGS(CLBR_RDI)
22471+#endif
22472+
22473+ popq %rbx
22474+ popq %rdi
22475+ pax_force_retaddr
22476+ retq
22477+4: ud2
22478+ENDPROC(pax_enter_kernel_user)
22479+
22480+ENTRY(pax_exit_kernel_user)
22481+ pushq %rdi
22482+ pushq %rbx
22483+
22484+#ifdef CONFIG_PARAVIRT
22485+ PV_SAVE_REGS(CLBR_RDI)
22486+#endif
22487+
22488+ GET_CR3_INTO_RDI
22489+ 661: jmp 1f
22490+ .pushsection .altinstr_replacement, "a"
22491+ 662: ASM_NOP2
22492+ .popsection
22493+ .pushsection .altinstructions, "a"
22494+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22495+ .popsection
22496+ cmp $0,%dil
22497+ jnz 3f
22498+ add $4097,%rdi
22499+ bts $63,%rdi
22500+ SET_RDI_INTO_CR3
22501+ jmp 2f
22502+1:
22503+
22504+ mov %rdi,%rbx
22505+
22506+#ifdef CONFIG_PAX_KERNEXEC
22507+ GET_CR0_INTO_RDI
22508+ btr $16,%rdi
22509+ jnc 3f
22510+ SET_RDI_INTO_CR0
22511+#endif
22512+
22513+ add $__START_KERNEL_map,%rbx
22514+ sub phys_base(%rip),%rbx
22515+
22516+#ifdef CONFIG_PARAVIRT
22517+ cmpl $0, pv_info+PARAVIRT_enabled
22518+ jz 1f
22519+ i = 0
22520+ .rept USER_PGD_PTRS
22521+ mov i*8(%rbx),%rsi
22522+ mov $0x67,%sil
22523+ lea i*8(%rbx),%rdi
22524+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22525+ i = i + 1
22526+ .endr
22527+ jmp 2f
22528+1:
22529+#endif
22530+
22531+ i = 0
22532+ .rept USER_PGD_PTRS
22533+ movb $0x67,i*8(%rbx)
22534+ i = i + 1
22535+ .endr
22536+2:
22537+
22538+#ifdef CONFIG_PARAVIRT
22539+ PV_RESTORE_REGS(CLBR_RDI)
22540+#endif
22541+
22542+ popq %rbx
22543+ popq %rdi
22544+ pax_force_retaddr
22545+ retq
22546+3: ud2
22547+ENDPROC(pax_exit_kernel_user)
22548+#endif
22549+
22550+ .macro pax_enter_kernel_nmi
22551+ pax_set_fptr_mask
22552+
22553+#ifdef CONFIG_PAX_KERNEXEC
22554+ GET_CR0_INTO_RDI
22555+ bts $16,%rdi
22556+ jc 110f
22557+ SET_RDI_INTO_CR0
22558+ or $2,%ebx
22559+110:
22560+#endif
22561+
22562+#ifdef CONFIG_PAX_MEMORY_UDEREF
22563+ 661: jmp 111f
22564+ .pushsection .altinstr_replacement, "a"
22565+ 662: ASM_NOP2
22566+ .popsection
22567+ .pushsection .altinstructions, "a"
22568+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22569+ .popsection
22570+ GET_CR3_INTO_RDI
22571+ cmp $0,%dil
22572+ jz 111f
22573+ sub $4097,%rdi
22574+ or $4,%ebx
22575+ bts $63,%rdi
22576+ SET_RDI_INTO_CR3
22577+ mov $__UDEREF_KERNEL_DS,%edi
22578+ mov %edi,%ss
22579+111:
22580+#endif
22581+ .endm
22582+
22583+ .macro pax_exit_kernel_nmi
22584+#ifdef CONFIG_PAX_KERNEXEC
22585+ btr $1,%ebx
22586+ jnc 110f
22587+ GET_CR0_INTO_RDI
22588+ btr $16,%rdi
22589+ SET_RDI_INTO_CR0
22590+110:
22591+#endif
22592+
22593+#ifdef CONFIG_PAX_MEMORY_UDEREF
22594+ btr $2,%ebx
22595+ jnc 111f
22596+ GET_CR3_INTO_RDI
22597+ add $4097,%rdi
22598+ bts $63,%rdi
22599+ SET_RDI_INTO_CR3
22600+ mov $__KERNEL_DS,%edi
22601+ mov %edi,%ss
22602+111:
22603+#endif
22604+ .endm
22605+
22606+ .macro pax_erase_kstack
22607+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22608+ call pax_erase_kstack
22609+#endif
22610+ .endm
22611+
22612+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22613+ENTRY(pax_erase_kstack)
22614+ pushq %rdi
22615+ pushq %rcx
22616+ pushq %rax
22617+ pushq %r11
22618+
22619+ GET_THREAD_INFO(%r11)
22620+ mov TI_lowest_stack(%r11), %rdi
22621+ mov $-0xBEEF, %rax
22622+ std
22623+
22624+1: mov %edi, %ecx
22625+ and $THREAD_SIZE_asm - 1, %ecx
22626+ shr $3, %ecx
22627+ repne scasq
22628+ jecxz 2f
22629+
22630+ cmp $2*8, %ecx
22631+ jc 2f
22632+
22633+ mov $2*8, %ecx
22634+ repe scasq
22635+ jecxz 2f
22636+ jne 1b
22637+
22638+2: cld
22639+ mov %esp, %ecx
22640+ sub %edi, %ecx
22641+
22642+ cmp $THREAD_SIZE_asm, %rcx
22643+ jb 3f
22644+ ud2
22645+3:
22646+
22647+ shr $3, %ecx
22648+ rep stosq
22649+
22650+ mov TI_task_thread_sp0(%r11), %rdi
22651+ sub $256, %rdi
22652+ mov %rdi, TI_lowest_stack(%r11)
22653+
22654+ popq %r11
22655+ popq %rax
22656+ popq %rcx
22657+ popq %rdi
22658+ pax_force_retaddr
22659+ ret
22660+ENDPROC(pax_erase_kstack)
22661+#endif
22662
22663 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
22664 #ifdef CONFIG_TRACE_IRQFLAGS
22665@@ -320,7 +753,7 @@ ENDPROC(native_usergs_sysret64)
22666 .endm
22667
22668 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
22669- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
22670+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
22671 jnc 1f
22672 TRACE_IRQS_ON_DEBUG
22673 1:
22674@@ -358,27 +791,6 @@ ENDPROC(native_usergs_sysret64)
22675 movq \tmp,R11+\offset(%rsp)
22676 .endm
22677
22678- .macro FAKE_STACK_FRAME child_rip
22679- /* push in order ss, rsp, eflags, cs, rip */
22680- xorl %eax, %eax
22681- pushq_cfi $__KERNEL_DS /* ss */
22682- /*CFI_REL_OFFSET ss,0*/
22683- pushq_cfi %rax /* rsp */
22684- CFI_REL_OFFSET rsp,0
22685- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
22686- /*CFI_REL_OFFSET rflags,0*/
22687- pushq_cfi $__KERNEL_CS /* cs */
22688- /*CFI_REL_OFFSET cs,0*/
22689- pushq_cfi \child_rip /* rip */
22690- CFI_REL_OFFSET rip,0
22691- pushq_cfi %rax /* orig rax */
22692- .endm
22693-
22694- .macro UNFAKE_STACK_FRAME
22695- addq $8*6, %rsp
22696- CFI_ADJUST_CFA_OFFSET -(6*8)
22697- .endm
22698-
22699 /*
22700 * initial frame state for interrupts (and exceptions without error code)
22701 */
22702@@ -445,25 +857,26 @@ ENDPROC(native_usergs_sysret64)
22703 /* save partial stack frame */
22704 .macro SAVE_ARGS_IRQ
22705 cld
22706- /* start from rbp in pt_regs and jump over */
22707- movq_cfi rdi, (RDI-RBP)
22708- movq_cfi rsi, (RSI-RBP)
22709- movq_cfi rdx, (RDX-RBP)
22710- movq_cfi rcx, (RCX-RBP)
22711- movq_cfi rax, (RAX-RBP)
22712- movq_cfi r8, (R8-RBP)
22713- movq_cfi r9, (R9-RBP)
22714- movq_cfi r10, (R10-RBP)
22715- movq_cfi r11, (R11-RBP)
22716+ /* start from r15 in pt_regs and jump over */
22717+ movq_cfi rdi, RDI
22718+ movq_cfi rsi, RSI
22719+ movq_cfi rdx, RDX
22720+ movq_cfi rcx, RCX
22721+ movq_cfi rax, RAX
22722+ movq_cfi r8, R8
22723+ movq_cfi r9, R9
22724+ movq_cfi r10, R10
22725+ movq_cfi r11, R11
22726+ movq_cfi r12, R12
22727
22728 /* Save rbp so that we can unwind from get_irq_regs() */
22729- movq_cfi rbp, 0
22730+ movq_cfi rbp, RBP
22731
22732 /* Save previous stack value */
22733 movq %rsp, %rsi
22734
22735- leaq -RBP(%rsp),%rdi /* arg1 for handler */
22736- testl $3, CS-RBP(%rsi)
22737+ movq %rsp,%rdi /* arg1 for handler */
22738+ testb $3, CS(%rsi)
22739 je 1f
22740 SWAPGS
22741 /*
22742@@ -483,6 +896,18 @@ ENDPROC(native_usergs_sysret64)
22743 0x06 /* DW_OP_deref */, \
22744 0x08 /* DW_OP_const1u */, SS+8-RBP, \
22745 0x22 /* DW_OP_plus */
22746+
22747+#ifdef CONFIG_PAX_MEMORY_UDEREF
22748+ testb $3, CS(%rdi)
22749+ jnz 1f
22750+ pax_enter_kernel
22751+ jmp 2f
22752+1: pax_enter_kernel_user
22753+2:
22754+#else
22755+ pax_enter_kernel
22756+#endif
22757+
22758 /* We entered an interrupt context - irqs are off: */
22759 TRACE_IRQS_OFF
22760 .endm
22761@@ -514,9 +939,52 @@ ENTRY(save_paranoid)
22762 js 1f /* negative -> in kernel */
22763 SWAPGS
22764 xorl %ebx,%ebx
22765-1: ret
22766+1:
22767+#ifdef CONFIG_PAX_MEMORY_UDEREF
22768+ testb $3, CS+8(%rsp)
22769+ jnz 1f
22770+ pax_enter_kernel
22771+ jmp 2f
22772+1: pax_enter_kernel_user
22773+2:
22774+#else
22775+ pax_enter_kernel
22776+#endif
22777+ pax_force_retaddr
22778+ ret
22779 CFI_ENDPROC
22780-END(save_paranoid)
22781+ENDPROC(save_paranoid)
22782+
22783+ENTRY(save_paranoid_nmi)
22784+ XCPT_FRAME 1 RDI+8
22785+ cld
22786+ movq_cfi rdi, RDI+8
22787+ movq_cfi rsi, RSI+8
22788+ movq_cfi rdx, RDX+8
22789+ movq_cfi rcx, RCX+8
22790+ movq_cfi rax, RAX+8
22791+ movq_cfi r8, R8+8
22792+ movq_cfi r9, R9+8
22793+ movq_cfi r10, R10+8
22794+ movq_cfi r11, R11+8
22795+ movq_cfi rbx, RBX+8
22796+ movq_cfi rbp, RBP+8
22797+ movq_cfi r12, R12+8
22798+ movq_cfi r13, R13+8
22799+ movq_cfi r14, R14+8
22800+ movq_cfi r15, R15+8
22801+ movl $1,%ebx
22802+ movl $MSR_GS_BASE,%ecx
22803+ rdmsr
22804+ testl %edx,%edx
22805+ js 1f /* negative -> in kernel */
22806+ SWAPGS
22807+ xorl %ebx,%ebx
22808+1: pax_enter_kernel_nmi
22809+ pax_force_retaddr
22810+ ret
22811+ CFI_ENDPROC
22812+ENDPROC(save_paranoid_nmi)
22813 .popsection
22814
22815 /*
22816@@ -538,7 +1006,7 @@ ENTRY(ret_from_fork)
22817
22818 RESTORE_REST
22819
22820- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
22821+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
22822 jz 1f
22823
22824 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
22825@@ -548,15 +1016,13 @@ ENTRY(ret_from_fork)
22826 jmp ret_from_sys_call # go to the SYSRET fastpath
22827
22828 1:
22829- subq $REST_SKIP, %rsp # leave space for volatiles
22830- CFI_ADJUST_CFA_OFFSET REST_SKIP
22831 movq %rbp, %rdi
22832 call *%rbx
22833 movl $0, RAX(%rsp)
22834 RESTORE_REST
22835 jmp int_ret_from_sys_call
22836 CFI_ENDPROC
22837-END(ret_from_fork)
22838+ENDPROC(ret_from_fork)
22839
22840 /*
22841 * System call entry. Up to 6 arguments in registers are supported.
22842@@ -593,7 +1059,7 @@ END(ret_from_fork)
22843 ENTRY(system_call)
22844 CFI_STARTPROC simple
22845 CFI_SIGNAL_FRAME
22846- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
22847+ CFI_DEF_CFA rsp,0
22848 CFI_REGISTER rip,rcx
22849 /*CFI_REGISTER rflags,r11*/
22850 SWAPGS_UNSAFE_STACK
22851@@ -606,16 +1072,23 @@ GLOBAL(system_call_after_swapgs)
22852
22853 movq %rsp,PER_CPU_VAR(old_rsp)
22854 movq PER_CPU_VAR(kernel_stack),%rsp
22855+ SAVE_ARGS 8*6,0
22856+ pax_enter_kernel_user
22857+
22858+#ifdef CONFIG_PAX_RANDKSTACK
22859+ pax_erase_kstack
22860+#endif
22861+
22862 /*
22863 * No need to follow this irqs off/on section - it's straight
22864 * and short:
22865 */
22866 ENABLE_INTERRUPTS(CLBR_NONE)
22867- SAVE_ARGS 8,0
22868 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
22869 movq %rcx,RIP-ARGOFFSET(%rsp)
22870 CFI_REL_OFFSET rip,RIP-ARGOFFSET
22871- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
22872+ GET_THREAD_INFO(%rcx)
22873+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
22874 jnz tracesys
22875 system_call_fastpath:
22876 #if __SYSCALL_MASK == ~0
22877@@ -639,10 +1112,13 @@ sysret_check:
22878 LOCKDEP_SYS_EXIT
22879 DISABLE_INTERRUPTS(CLBR_NONE)
22880 TRACE_IRQS_OFF
22881- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
22882+ GET_THREAD_INFO(%rcx)
22883+ movl TI_flags(%rcx),%edx
22884 andl %edi,%edx
22885 jnz sysret_careful
22886 CFI_REMEMBER_STATE
22887+ pax_exit_kernel_user
22888+ pax_erase_kstack
22889 /*
22890 * sysretq will re-enable interrupts:
22891 */
22892@@ -701,6 +1177,9 @@ auditsys:
22893 movq %rax,%rsi /* 2nd arg: syscall number */
22894 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
22895 call __audit_syscall_entry
22896+
22897+ pax_erase_kstack
22898+
22899 LOAD_ARGS 0 /* reload call-clobbered registers */
22900 jmp system_call_fastpath
22901
22902@@ -722,7 +1201,7 @@ sysret_audit:
22903 /* Do syscall tracing */
22904 tracesys:
22905 #ifdef CONFIG_AUDITSYSCALL
22906- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
22907+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
22908 jz auditsys
22909 #endif
22910 SAVE_REST
22911@@ -730,12 +1209,15 @@ tracesys:
22912 FIXUP_TOP_OF_STACK %rdi
22913 movq %rsp,%rdi
22914 call syscall_trace_enter
22915+
22916+ pax_erase_kstack
22917+
22918 /*
22919 * Reload arg registers from stack in case ptrace changed them.
22920 * We don't reload %rax because syscall_trace_enter() returned
22921 * the value it wants us to use in the table lookup.
22922 */
22923- LOAD_ARGS ARGOFFSET, 1
22924+ LOAD_ARGS 1
22925 RESTORE_REST
22926 #if __SYSCALL_MASK == ~0
22927 cmpq $__NR_syscall_max,%rax
22928@@ -765,7 +1247,9 @@ GLOBAL(int_with_check)
22929 andl %edi,%edx
22930 jnz int_careful
22931 andl $~TS_COMPAT,TI_status(%rcx)
22932- jmp retint_swapgs
22933+ pax_exit_kernel_user
22934+ pax_erase_kstack
22935+ jmp retint_swapgs_pax
22936
22937 /* Either reschedule or signal or syscall exit tracking needed. */
22938 /* First do a reschedule test. */
22939@@ -811,7 +1295,7 @@ int_restore_rest:
22940 TRACE_IRQS_OFF
22941 jmp int_with_check
22942 CFI_ENDPROC
22943-END(system_call)
22944+ENDPROC(system_call)
22945
22946 .macro FORK_LIKE func
22947 ENTRY(stub_\func)
22948@@ -824,9 +1308,10 @@ ENTRY(stub_\func)
22949 DEFAULT_FRAME 0 8 /* offset 8: return address */
22950 call sys_\func
22951 RESTORE_TOP_OF_STACK %r11, 8
22952- ret $REST_SKIP /* pop extended registers */
22953+ pax_force_retaddr
22954+ ret
22955 CFI_ENDPROC
22956-END(stub_\func)
22957+ENDPROC(stub_\func)
22958 .endm
22959
22960 .macro FIXED_FRAME label,func
22961@@ -836,9 +1321,10 @@ ENTRY(\label)
22962 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
22963 call \func
22964 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
22965+ pax_force_retaddr
22966 ret
22967 CFI_ENDPROC
22968-END(\label)
22969+ENDPROC(\label)
22970 .endm
22971
22972 FORK_LIKE clone
22973@@ -846,19 +1332,6 @@ END(\label)
22974 FORK_LIKE vfork
22975 FIXED_FRAME stub_iopl, sys_iopl
22976
22977-ENTRY(ptregscall_common)
22978- DEFAULT_FRAME 1 8 /* offset 8: return address */
22979- RESTORE_TOP_OF_STACK %r11, 8
22980- movq_cfi_restore R15+8, r15
22981- movq_cfi_restore R14+8, r14
22982- movq_cfi_restore R13+8, r13
22983- movq_cfi_restore R12+8, r12
22984- movq_cfi_restore RBP+8, rbp
22985- movq_cfi_restore RBX+8, rbx
22986- ret $REST_SKIP /* pop extended registers */
22987- CFI_ENDPROC
22988-END(ptregscall_common)
22989-
22990 ENTRY(stub_execve)
22991 CFI_STARTPROC
22992 addq $8, %rsp
22993@@ -870,7 +1343,7 @@ ENTRY(stub_execve)
22994 RESTORE_REST
22995 jmp int_ret_from_sys_call
22996 CFI_ENDPROC
22997-END(stub_execve)
22998+ENDPROC(stub_execve)
22999
23000 /*
23001 * sigreturn is special because it needs to restore all registers on return.
23002@@ -887,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
23003 RESTORE_REST
23004 jmp int_ret_from_sys_call
23005 CFI_ENDPROC
23006-END(stub_rt_sigreturn)
23007+ENDPROC(stub_rt_sigreturn)
23008
23009 #ifdef CONFIG_X86_X32_ABI
23010 ENTRY(stub_x32_rt_sigreturn)
23011@@ -901,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
23012 RESTORE_REST
23013 jmp int_ret_from_sys_call
23014 CFI_ENDPROC
23015-END(stub_x32_rt_sigreturn)
23016+ENDPROC(stub_x32_rt_sigreturn)
23017
23018 ENTRY(stub_x32_execve)
23019 CFI_STARTPROC
23020@@ -915,7 +1388,7 @@ ENTRY(stub_x32_execve)
23021 RESTORE_REST
23022 jmp int_ret_from_sys_call
23023 CFI_ENDPROC
23024-END(stub_x32_execve)
23025+ENDPROC(stub_x32_execve)
23026
23027 #endif
23028
23029@@ -952,7 +1425,7 @@ vector=vector+1
23030 2: jmp common_interrupt
23031 .endr
23032 CFI_ENDPROC
23033-END(irq_entries_start)
23034+ENDPROC(irq_entries_start)
23035
23036 .previous
23037 END(interrupt)
23038@@ -969,8 +1442,8 @@ END(interrupt)
23039 /* 0(%rsp): ~(interrupt number) */
23040 .macro interrupt func
23041 /* reserve pt_regs for scratch regs and rbp */
23042- subq $ORIG_RAX-RBP, %rsp
23043- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23044+ subq $ORIG_RAX, %rsp
23045+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23046 SAVE_ARGS_IRQ
23047 call \func
23048 .endm
23049@@ -997,14 +1470,14 @@ ret_from_intr:
23050
23051 /* Restore saved previous stack */
23052 popq %rsi
23053- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23054- leaq ARGOFFSET-RBP(%rsi), %rsp
23055+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23056+ movq %rsi, %rsp
23057 CFI_DEF_CFA_REGISTER rsp
23058- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23059+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23060
23061 exit_intr:
23062 GET_THREAD_INFO(%rcx)
23063- testl $3,CS-ARGOFFSET(%rsp)
23064+ testb $3,CS-ARGOFFSET(%rsp)
23065 je retint_kernel
23066
23067 /* Interrupt came from user space */
23068@@ -1026,12 +1499,16 @@ retint_swapgs: /* return to user-space */
23069 * The iretq could re-enable interrupts:
23070 */
23071 DISABLE_INTERRUPTS(CLBR_ANY)
23072+ pax_exit_kernel_user
23073+retint_swapgs_pax:
23074 TRACE_IRQS_IRETQ
23075 SWAPGS
23076 jmp restore_args
23077
23078 retint_restore_args: /* return to kernel space */
23079 DISABLE_INTERRUPTS(CLBR_ANY)
23080+ pax_exit_kernel
23081+ pax_force_retaddr (RIP-ARGOFFSET)
23082 /*
23083 * The iretq could re-enable interrupts:
23084 */
23085@@ -1112,7 +1589,7 @@ ENTRY(retint_kernel)
23086 #endif
23087
23088 CFI_ENDPROC
23089-END(common_interrupt)
23090+ENDPROC(common_interrupt)
23091 /*
23092 * End of kprobes section
23093 */
23094@@ -1130,7 +1607,7 @@ ENTRY(\sym)
23095 interrupt \do_sym
23096 jmp ret_from_intr
23097 CFI_ENDPROC
23098-END(\sym)
23099+ENDPROC(\sym)
23100 .endm
23101
23102 #ifdef CONFIG_TRACING
23103@@ -1218,7 +1695,7 @@ ENTRY(\sym)
23104 call \do_sym
23105 jmp error_exit /* %ebx: no swapgs flag */
23106 CFI_ENDPROC
23107-END(\sym)
23108+ENDPROC(\sym)
23109 .endm
23110
23111 .macro paranoidzeroentry sym do_sym
23112@@ -1236,10 +1713,10 @@ ENTRY(\sym)
23113 call \do_sym
23114 jmp paranoid_exit /* %ebx: no swapgs flag */
23115 CFI_ENDPROC
23116-END(\sym)
23117+ENDPROC(\sym)
23118 .endm
23119
23120-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23121+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23122 .macro paranoidzeroentry_ist sym do_sym ist
23123 ENTRY(\sym)
23124 INTR_FRAME
23125@@ -1252,12 +1729,18 @@ ENTRY(\sym)
23126 TRACE_IRQS_OFF_DEBUG
23127 movq %rsp,%rdi /* pt_regs pointer */
23128 xorl %esi,%esi /* no error code */
23129+#ifdef CONFIG_SMP
23130+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23131+ lea init_tss(%r13), %r13
23132+#else
23133+ lea init_tss(%rip), %r13
23134+#endif
23135 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23136 call \do_sym
23137 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23138 jmp paranoid_exit /* %ebx: no swapgs flag */
23139 CFI_ENDPROC
23140-END(\sym)
23141+ENDPROC(\sym)
23142 .endm
23143
23144 .macro errorentry sym do_sym
23145@@ -1275,7 +1758,7 @@ ENTRY(\sym)
23146 call \do_sym
23147 jmp error_exit /* %ebx: no swapgs flag */
23148 CFI_ENDPROC
23149-END(\sym)
23150+ENDPROC(\sym)
23151 .endm
23152
23153 #ifdef CONFIG_TRACING
23154@@ -1306,7 +1789,7 @@ ENTRY(\sym)
23155 call \do_sym
23156 jmp paranoid_exit /* %ebx: no swapgs flag */
23157 CFI_ENDPROC
23158-END(\sym)
23159+ENDPROC(\sym)
23160 .endm
23161
23162 zeroentry divide_error do_divide_error
23163@@ -1336,9 +1819,10 @@ gs_change:
23164 2: mfence /* workaround */
23165 SWAPGS
23166 popfq_cfi
23167+ pax_force_retaddr
23168 ret
23169 CFI_ENDPROC
23170-END(native_load_gs_index)
23171+ENDPROC(native_load_gs_index)
23172
23173 _ASM_EXTABLE(gs_change,bad_gs)
23174 .section .fixup,"ax"
23175@@ -1366,9 +1850,10 @@ ENTRY(do_softirq_own_stack)
23176 CFI_DEF_CFA_REGISTER rsp
23177 CFI_ADJUST_CFA_OFFSET -8
23178 decl PER_CPU_VAR(irq_count)
23179+ pax_force_retaddr
23180 ret
23181 CFI_ENDPROC
23182-END(do_softirq_own_stack)
23183+ENDPROC(do_softirq_own_stack)
23184
23185 #ifdef CONFIG_XEN
23186 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
23187@@ -1406,7 +1891,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
23188 decl PER_CPU_VAR(irq_count)
23189 jmp error_exit
23190 CFI_ENDPROC
23191-END(xen_do_hypervisor_callback)
23192+ENDPROC(xen_do_hypervisor_callback)
23193
23194 /*
23195 * Hypervisor uses this for application faults while it executes.
23196@@ -1465,7 +1950,7 @@ ENTRY(xen_failsafe_callback)
23197 SAVE_ALL
23198 jmp error_exit
23199 CFI_ENDPROC
23200-END(xen_failsafe_callback)
23201+ENDPROC(xen_failsafe_callback)
23202
23203 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
23204 xen_hvm_callback_vector xen_evtchn_do_upcall
23205@@ -1517,18 +2002,33 @@ ENTRY(paranoid_exit)
23206 DEFAULT_FRAME
23207 DISABLE_INTERRUPTS(CLBR_NONE)
23208 TRACE_IRQS_OFF_DEBUG
23209- testl %ebx,%ebx /* swapgs needed? */
23210+ testl $1,%ebx /* swapgs needed? */
23211 jnz paranoid_restore
23212- testl $3,CS(%rsp)
23213+ testb $3,CS(%rsp)
23214 jnz paranoid_userspace
23215+#ifdef CONFIG_PAX_MEMORY_UDEREF
23216+ pax_exit_kernel
23217+ TRACE_IRQS_IRETQ 0
23218+ SWAPGS_UNSAFE_STACK
23219+ RESTORE_ALL 8
23220+ pax_force_retaddr_bts
23221+ jmp irq_return
23222+#endif
23223 paranoid_swapgs:
23224+#ifdef CONFIG_PAX_MEMORY_UDEREF
23225+ pax_exit_kernel_user
23226+#else
23227+ pax_exit_kernel
23228+#endif
23229 TRACE_IRQS_IRETQ 0
23230 SWAPGS_UNSAFE_STACK
23231 RESTORE_ALL 8
23232 jmp irq_return
23233 paranoid_restore:
23234+ pax_exit_kernel
23235 TRACE_IRQS_IRETQ_DEBUG 0
23236 RESTORE_ALL 8
23237+ pax_force_retaddr_bts
23238 jmp irq_return
23239 paranoid_userspace:
23240 GET_THREAD_INFO(%rcx)
23241@@ -1557,7 +2057,7 @@ paranoid_schedule:
23242 TRACE_IRQS_OFF
23243 jmp paranoid_userspace
23244 CFI_ENDPROC
23245-END(paranoid_exit)
23246+ENDPROC(paranoid_exit)
23247
23248 /*
23249 * Exception entry point. This expects an error code/orig_rax on the stack.
23250@@ -1584,12 +2084,23 @@ ENTRY(error_entry)
23251 movq_cfi r14, R14+8
23252 movq_cfi r15, R15+8
23253 xorl %ebx,%ebx
23254- testl $3,CS+8(%rsp)
23255+ testb $3,CS+8(%rsp)
23256 je error_kernelspace
23257 error_swapgs:
23258 SWAPGS
23259 error_sti:
23260+#ifdef CONFIG_PAX_MEMORY_UDEREF
23261+ testb $3, CS+8(%rsp)
23262+ jnz 1f
23263+ pax_enter_kernel
23264+ jmp 2f
23265+1: pax_enter_kernel_user
23266+2:
23267+#else
23268+ pax_enter_kernel
23269+#endif
23270 TRACE_IRQS_OFF
23271+ pax_force_retaddr
23272 ret
23273
23274 /*
23275@@ -1616,7 +2127,7 @@ bstep_iret:
23276 movq %rcx,RIP+8(%rsp)
23277 jmp error_swapgs
23278 CFI_ENDPROC
23279-END(error_entry)
23280+ENDPROC(error_entry)
23281
23282
23283 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
23284@@ -1627,7 +2138,7 @@ ENTRY(error_exit)
23285 DISABLE_INTERRUPTS(CLBR_NONE)
23286 TRACE_IRQS_OFF
23287 GET_THREAD_INFO(%rcx)
23288- testl %eax,%eax
23289+ testl $1,%eax
23290 jne retint_kernel
23291 LOCKDEP_SYS_EXIT_IRQ
23292 movl TI_flags(%rcx),%edx
23293@@ -1636,7 +2147,7 @@ ENTRY(error_exit)
23294 jnz retint_careful
23295 jmp retint_swapgs
23296 CFI_ENDPROC
23297-END(error_exit)
23298+ENDPROC(error_exit)
23299
23300 /*
23301 * Test if a given stack is an NMI stack or not.
23302@@ -1694,9 +2205,11 @@ ENTRY(nmi)
23303 * If %cs was not the kernel segment, then the NMI triggered in user
23304 * space, which means it is definitely not nested.
23305 */
23306+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
23307+ je 1f
23308 cmpl $__KERNEL_CS, 16(%rsp)
23309 jne first_nmi
23310-
23311+1:
23312 /*
23313 * Check the special variable on the stack to see if NMIs are
23314 * executing.
23315@@ -1730,8 +2243,7 @@ nested_nmi:
23316
23317 1:
23318 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
23319- leaq -1*8(%rsp), %rdx
23320- movq %rdx, %rsp
23321+ subq $8, %rsp
23322 CFI_ADJUST_CFA_OFFSET 1*8
23323 leaq -10*8(%rsp), %rdx
23324 pushq_cfi $__KERNEL_DS
23325@@ -1749,6 +2261,7 @@ nested_nmi_out:
23326 CFI_RESTORE rdx
23327
23328 /* No need to check faults here */
23329+# pax_force_retaddr_bts
23330 INTERRUPT_RETURN
23331
23332 CFI_RESTORE_STATE
23333@@ -1845,13 +2358,13 @@ end_repeat_nmi:
23334 subq $ORIG_RAX-R15, %rsp
23335 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
23336 /*
23337- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
23338+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
23339 * as we should not be calling schedule in NMI context.
23340 * Even with normal interrupts enabled. An NMI should not be
23341 * setting NEED_RESCHED or anything that normal interrupts and
23342 * exceptions might do.
23343 */
23344- call save_paranoid
23345+ call save_paranoid_nmi
23346 DEFAULT_FRAME 0
23347
23348 /*
23349@@ -1861,9 +2374,9 @@ end_repeat_nmi:
23350 * NMI itself takes a page fault, the page fault that was preempted
23351 * will read the information from the NMI page fault and not the
23352 * origin fault. Save it off and restore it if it changes.
23353- * Use the r12 callee-saved register.
23354+ * Use the r13 callee-saved register.
23355 */
23356- movq %cr2, %r12
23357+ movq %cr2, %r13
23358
23359 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
23360 movq %rsp,%rdi
23361@@ -1872,31 +2385,36 @@ end_repeat_nmi:
23362
23363 /* Did the NMI take a page fault? Restore cr2 if it did */
23364 movq %cr2, %rcx
23365- cmpq %rcx, %r12
23366+ cmpq %rcx, %r13
23367 je 1f
23368- movq %r12, %cr2
23369+ movq %r13, %cr2
23370 1:
23371
23372- testl %ebx,%ebx /* swapgs needed? */
23373+ testl $1,%ebx /* swapgs needed? */
23374 jnz nmi_restore
23375 nmi_swapgs:
23376 SWAPGS_UNSAFE_STACK
23377 nmi_restore:
23378+ pax_exit_kernel_nmi
23379 /* Pop the extra iret frame at once */
23380 RESTORE_ALL 6*8
23381+ testb $3, 8(%rsp)
23382+ jnz 1f
23383+ pax_force_retaddr_bts
23384+1:
23385
23386 /* Clear the NMI executing stack variable */
23387 movq $0, 5*8(%rsp)
23388 jmp irq_return
23389 CFI_ENDPROC
23390-END(nmi)
23391+ENDPROC(nmi)
23392
23393 ENTRY(ignore_sysret)
23394 CFI_STARTPROC
23395 mov $-ENOSYS,%eax
23396 sysret
23397 CFI_ENDPROC
23398-END(ignore_sysret)
23399+ENDPROC(ignore_sysret)
23400
23401 /*
23402 * End of kprobes section
23403diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
23404index d4bdd25..912664c 100644
23405--- a/arch/x86/kernel/ftrace.c
23406+++ b/arch/x86/kernel/ftrace.c
23407@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
23408 {
23409 unsigned char replaced[MCOUNT_INSN_SIZE];
23410
23411+ ip = ktla_ktva(ip);
23412+
23413 /*
23414 * Note: Due to modules and __init, code can
23415 * disappear and change, we need to protect against faulting
23416@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
23417 unsigned char old[MCOUNT_INSN_SIZE], *new;
23418 int ret;
23419
23420- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
23421+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
23422 new = ftrace_call_replace(ip, (unsigned long)func);
23423
23424 /* See comment above by declaration of modifying_ftrace_code */
23425@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
23426 /* Also update the regs callback function */
23427 if (!ret) {
23428 ip = (unsigned long)(&ftrace_regs_call);
23429- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
23430+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
23431 new = ftrace_call_replace(ip, (unsigned long)func);
23432 ret = ftrace_modify_code(ip, old, new);
23433 }
23434@@ -291,7 +293,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
23435 * kernel identity mapping to modify code.
23436 */
23437 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
23438- ip = (unsigned long)__va(__pa_symbol(ip));
23439+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
23440
23441 return probe_kernel_write((void *)ip, val, size);
23442 }
23443@@ -301,7 +303,7 @@ static int add_break(unsigned long ip, const char *old)
23444 unsigned char replaced[MCOUNT_INSN_SIZE];
23445 unsigned char brk = BREAKPOINT_INSTRUCTION;
23446
23447- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
23448+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
23449 return -EFAULT;
23450
23451 /* Make sure it is what we expect it to be */
23452@@ -649,7 +651,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
23453 return ret;
23454
23455 fail_update:
23456- probe_kernel_write((void *)ip, &old_code[0], 1);
23457+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
23458 goto out;
23459 }
23460
23461@@ -682,6 +684,8 @@ static int ftrace_mod_jmp(unsigned long ip,
23462 {
23463 unsigned char code[MCOUNT_INSN_SIZE];
23464
23465+ ip = ktla_ktva(ip);
23466+
23467 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
23468 return -EFAULT;
23469
23470diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
23471index 85126cc..1bbce17 100644
23472--- a/arch/x86/kernel/head64.c
23473+++ b/arch/x86/kernel/head64.c
23474@@ -67,12 +67,12 @@ again:
23475 pgd = *pgd_p;
23476
23477 /*
23478- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
23479- * critical -- __PAGE_OFFSET would point us back into the dynamic
23480+ * The use of __early_va rather than __va here is critical:
23481+ * __va would point us back into the dynamic
23482 * range and we might end up looping forever...
23483 */
23484 if (pgd)
23485- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23486+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
23487 else {
23488 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
23489 reset_early_page_tables();
23490@@ -82,13 +82,13 @@ again:
23491 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
23492 for (i = 0; i < PTRS_PER_PUD; i++)
23493 pud_p[i] = 0;
23494- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
23495+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
23496 }
23497 pud_p += pud_index(address);
23498 pud = *pud_p;
23499
23500 if (pud)
23501- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23502+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
23503 else {
23504 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
23505 reset_early_page_tables();
23506@@ -98,7 +98,7 @@ again:
23507 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
23508 for (i = 0; i < PTRS_PER_PMD; i++)
23509 pmd_p[i] = 0;
23510- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
23511+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
23512 }
23513 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
23514 pmd_p[pmd_index(address)] = pmd;
23515@@ -175,7 +175,6 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
23516 if (console_loglevel == 10)
23517 early_printk("Kernel alive\n");
23518
23519- clear_page(init_level4_pgt);
23520 /* set init_level4_pgt kernel high mapping*/
23521 init_level4_pgt[511] = early_level4_pgt[511];
23522
23523diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
23524index 81ba276..30c5411 100644
23525--- a/arch/x86/kernel/head_32.S
23526+++ b/arch/x86/kernel/head_32.S
23527@@ -26,6 +26,12 @@
23528 /* Physical address */
23529 #define pa(X) ((X) - __PAGE_OFFSET)
23530
23531+#ifdef CONFIG_PAX_KERNEXEC
23532+#define ta(X) (X)
23533+#else
23534+#define ta(X) ((X) - __PAGE_OFFSET)
23535+#endif
23536+
23537 /*
23538 * References to members of the new_cpu_data structure.
23539 */
23540@@ -55,11 +61,7 @@
23541 * and small than max_low_pfn, otherwise will waste some page table entries
23542 */
23543
23544-#if PTRS_PER_PMD > 1
23545-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
23546-#else
23547-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
23548-#endif
23549+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
23550
23551 /* Number of possible pages in the lowmem region */
23552 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
23553@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
23554 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
23555
23556 /*
23557+ * Real beginning of normal "text" segment
23558+ */
23559+ENTRY(stext)
23560+ENTRY(_stext)
23561+
23562+/*
23563 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
23564 * %esi points to the real-mode code as a 32-bit pointer.
23565 * CS and DS must be 4 GB flat segments, but we don't depend on
23566@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
23567 * can.
23568 */
23569 __HEAD
23570+
23571+#ifdef CONFIG_PAX_KERNEXEC
23572+ jmp startup_32
23573+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
23574+.fill PAGE_SIZE-5,1,0xcc
23575+#endif
23576+
23577 ENTRY(startup_32)
23578 movl pa(stack_start),%ecx
23579
23580@@ -106,6 +121,59 @@ ENTRY(startup_32)
23581 2:
23582 leal -__PAGE_OFFSET(%ecx),%esp
23583
23584+#ifdef CONFIG_SMP
23585+ movl $pa(cpu_gdt_table),%edi
23586+ movl $__per_cpu_load,%eax
23587+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
23588+ rorl $16,%eax
23589+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
23590+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
23591+ movl $__per_cpu_end - 1,%eax
23592+ subl $__per_cpu_start,%eax
23593+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
23594+#endif
23595+
23596+#ifdef CONFIG_PAX_MEMORY_UDEREF
23597+ movl $NR_CPUS,%ecx
23598+ movl $pa(cpu_gdt_table),%edi
23599+1:
23600+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
23601+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
23602+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
23603+ addl $PAGE_SIZE_asm,%edi
23604+ loop 1b
23605+#endif
23606+
23607+#ifdef CONFIG_PAX_KERNEXEC
23608+ movl $pa(boot_gdt),%edi
23609+ movl $__LOAD_PHYSICAL_ADDR,%eax
23610+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
23611+ rorl $16,%eax
23612+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
23613+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
23614+ rorl $16,%eax
23615+
23616+ ljmp $(__BOOT_CS),$1f
23617+1:
23618+
23619+ movl $NR_CPUS,%ecx
23620+ movl $pa(cpu_gdt_table),%edi
23621+ addl $__PAGE_OFFSET,%eax
23622+1:
23623+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
23624+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
23625+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
23626+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
23627+ rorl $16,%eax
23628+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
23629+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
23630+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
23631+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
23632+ rorl $16,%eax
23633+ addl $PAGE_SIZE_asm,%edi
23634+ loop 1b
23635+#endif
23636+
23637 /*
23638 * Clear BSS first so that there are no surprises...
23639 */
23640@@ -201,8 +269,11 @@ ENTRY(startup_32)
23641 movl %eax, pa(max_pfn_mapped)
23642
23643 /* Do early initialization of the fixmap area */
23644- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
23645- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
23646+#ifdef CONFIG_COMPAT_VDSO
23647+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
23648+#else
23649+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
23650+#endif
23651 #else /* Not PAE */
23652
23653 page_pde_offset = (__PAGE_OFFSET >> 20);
23654@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
23655 movl %eax, pa(max_pfn_mapped)
23656
23657 /* Do early initialization of the fixmap area */
23658- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
23659- movl %eax,pa(initial_page_table+0xffc)
23660+#ifdef CONFIG_COMPAT_VDSO
23661+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
23662+#else
23663+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
23664+#endif
23665 #endif
23666
23667 #ifdef CONFIG_PARAVIRT
23668@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
23669 cmpl $num_subarch_entries, %eax
23670 jae bad_subarch
23671
23672- movl pa(subarch_entries)(,%eax,4), %eax
23673- subl $__PAGE_OFFSET, %eax
23674- jmp *%eax
23675+ jmp *pa(subarch_entries)(,%eax,4)
23676
23677 bad_subarch:
23678 WEAK(lguest_entry)
23679@@ -261,10 +333,10 @@ WEAK(xen_entry)
23680 __INITDATA
23681
23682 subarch_entries:
23683- .long default_entry /* normal x86/PC */
23684- .long lguest_entry /* lguest hypervisor */
23685- .long xen_entry /* Xen hypervisor */
23686- .long default_entry /* Moorestown MID */
23687+ .long ta(default_entry) /* normal x86/PC */
23688+ .long ta(lguest_entry) /* lguest hypervisor */
23689+ .long ta(xen_entry) /* Xen hypervisor */
23690+ .long ta(default_entry) /* Moorestown MID */
23691 num_subarch_entries = (. - subarch_entries) / 4
23692 .previous
23693 #else
23694@@ -354,6 +426,7 @@ default_entry:
23695 movl pa(mmu_cr4_features),%eax
23696 movl %eax,%cr4
23697
23698+#ifdef CONFIG_X86_PAE
23699 testb $X86_CR4_PAE, %al # check if PAE is enabled
23700 jz enable_paging
23701
23702@@ -382,6 +455,9 @@ default_entry:
23703 /* Make changes effective */
23704 wrmsr
23705
23706+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
23707+#endif
23708+
23709 enable_paging:
23710
23711 /*
23712@@ -449,14 +525,20 @@ is486:
23713 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
23714 movl %eax,%ss # after changing gdt.
23715
23716- movl $(__USER_DS),%eax # DS/ES contains default USER segment
23717+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
23718 movl %eax,%ds
23719 movl %eax,%es
23720
23721 movl $(__KERNEL_PERCPU), %eax
23722 movl %eax,%fs # set this cpu's percpu
23723
23724+#ifdef CONFIG_CC_STACKPROTECTOR
23725 movl $(__KERNEL_STACK_CANARY),%eax
23726+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
23727+ movl $(__USER_DS),%eax
23728+#else
23729+ xorl %eax,%eax
23730+#endif
23731 movl %eax,%gs
23732
23733 xorl %eax,%eax # Clear LDT
23734@@ -512,8 +594,11 @@ setup_once:
23735 * relocation. Manually set base address in stack canary
23736 * segment descriptor.
23737 */
23738- movl $gdt_page,%eax
23739+ movl $cpu_gdt_table,%eax
23740 movl $stack_canary,%ecx
23741+#ifdef CONFIG_SMP
23742+ addl $__per_cpu_load,%ecx
23743+#endif
23744 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
23745 shrl $16, %ecx
23746 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
23747@@ -544,7 +629,7 @@ ENDPROC(early_idt_handlers)
23748 /* This is global to keep gas from relaxing the jumps */
23749 ENTRY(early_idt_handler)
23750 cld
23751- cmpl $2,%ss:early_recursion_flag
23752+ cmpl $1,%ss:early_recursion_flag
23753 je hlt_loop
23754 incl %ss:early_recursion_flag
23755
23756@@ -582,8 +667,8 @@ ENTRY(early_idt_handler)
23757 pushl (20+6*4)(%esp) /* trapno */
23758 pushl $fault_msg
23759 call printk
23760-#endif
23761 call dump_stack
23762+#endif
23763 hlt_loop:
23764 hlt
23765 jmp hlt_loop
23766@@ -602,8 +687,11 @@ ENDPROC(early_idt_handler)
23767 /* This is the default interrupt "handler" :-) */
23768 ALIGN
23769 ignore_int:
23770- cld
23771 #ifdef CONFIG_PRINTK
23772+ cmpl $2,%ss:early_recursion_flag
23773+ je hlt_loop
23774+ incl %ss:early_recursion_flag
23775+ cld
23776 pushl %eax
23777 pushl %ecx
23778 pushl %edx
23779@@ -612,9 +700,6 @@ ignore_int:
23780 movl $(__KERNEL_DS),%eax
23781 movl %eax,%ds
23782 movl %eax,%es
23783- cmpl $2,early_recursion_flag
23784- je hlt_loop
23785- incl early_recursion_flag
23786 pushl 16(%esp)
23787 pushl 24(%esp)
23788 pushl 32(%esp)
23789@@ -648,29 +733,34 @@ ENTRY(setup_once_ref)
23790 /*
23791 * BSS section
23792 */
23793-__PAGE_ALIGNED_BSS
23794- .align PAGE_SIZE
23795 #ifdef CONFIG_X86_PAE
23796+.section .initial_pg_pmd,"a",@progbits
23797 initial_pg_pmd:
23798 .fill 1024*KPMDS,4,0
23799 #else
23800+.section .initial_page_table,"a",@progbits
23801 ENTRY(initial_page_table)
23802 .fill 1024,4,0
23803 #endif
23804+.section .initial_pg_fixmap,"a",@progbits
23805 initial_pg_fixmap:
23806 .fill 1024,4,0
23807+.section .empty_zero_page,"a",@progbits
23808 ENTRY(empty_zero_page)
23809 .fill 4096,1,0
23810+.section .swapper_pg_dir,"a",@progbits
23811 ENTRY(swapper_pg_dir)
23812+#ifdef CONFIG_X86_PAE
23813+ .fill 4,8,0
23814+#else
23815 .fill 1024,4,0
23816+#endif
23817
23818 /*
23819 * This starts the data section.
23820 */
23821 #ifdef CONFIG_X86_PAE
23822-__PAGE_ALIGNED_DATA
23823- /* Page-aligned for the benefit of paravirt? */
23824- .align PAGE_SIZE
23825+.section .initial_page_table,"a",@progbits
23826 ENTRY(initial_page_table)
23827 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
23828 # if KPMDS == 3
23829@@ -689,12 +779,20 @@ ENTRY(initial_page_table)
23830 # error "Kernel PMDs should be 1, 2 or 3"
23831 # endif
23832 .align PAGE_SIZE /* needs to be page-sized too */
23833+
23834+#ifdef CONFIG_PAX_PER_CPU_PGD
23835+ENTRY(cpu_pgd)
23836+ .rept 2*NR_CPUS
23837+ .fill 4,8,0
23838+ .endr
23839+#endif
23840+
23841 #endif
23842
23843 .data
23844 .balign 4
23845 ENTRY(stack_start)
23846- .long init_thread_union+THREAD_SIZE
23847+ .long init_thread_union+THREAD_SIZE-8
23848
23849 __INITRODATA
23850 int_msg:
23851@@ -722,7 +820,7 @@ fault_msg:
23852 * segment size, and 32-bit linear address value:
23853 */
23854
23855- .data
23856+.section .rodata,"a",@progbits
23857 .globl boot_gdt_descr
23858 .globl idt_descr
23859
23860@@ -731,7 +829,7 @@ fault_msg:
23861 .word 0 # 32 bit align gdt_desc.address
23862 boot_gdt_descr:
23863 .word __BOOT_DS+7
23864- .long boot_gdt - __PAGE_OFFSET
23865+ .long pa(boot_gdt)
23866
23867 .word 0 # 32-bit align idt_desc.address
23868 idt_descr:
23869@@ -742,7 +840,7 @@ idt_descr:
23870 .word 0 # 32 bit align gdt_desc.address
23871 ENTRY(early_gdt_descr)
23872 .word GDT_ENTRIES*8-1
23873- .long gdt_page /* Overwritten for secondary CPUs */
23874+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
23875
23876 /*
23877 * The boot_gdt must mirror the equivalent in setup.S and is
23878@@ -751,5 +849,65 @@ ENTRY(early_gdt_descr)
23879 .align L1_CACHE_BYTES
23880 ENTRY(boot_gdt)
23881 .fill GDT_ENTRY_BOOT_CS,8,0
23882- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
23883- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
23884+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
23885+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
23886+
23887+ .align PAGE_SIZE_asm
23888+ENTRY(cpu_gdt_table)
23889+ .rept NR_CPUS
23890+ .quad 0x0000000000000000 /* NULL descriptor */
23891+ .quad 0x0000000000000000 /* 0x0b reserved */
23892+ .quad 0x0000000000000000 /* 0x13 reserved */
23893+ .quad 0x0000000000000000 /* 0x1b reserved */
23894+
23895+#ifdef CONFIG_PAX_KERNEXEC
23896+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
23897+#else
23898+ .quad 0x0000000000000000 /* 0x20 unused */
23899+#endif
23900+
23901+ .quad 0x0000000000000000 /* 0x28 unused */
23902+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
23903+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
23904+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
23905+ .quad 0x0000000000000000 /* 0x4b reserved */
23906+ .quad 0x0000000000000000 /* 0x53 reserved */
23907+ .quad 0x0000000000000000 /* 0x5b reserved */
23908+
23909+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
23910+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
23911+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
23912+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
23913+
23914+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
23915+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
23916+
23917+ /*
23918+ * Segments used for calling PnP BIOS have byte granularity.
23919+ * The code segments and data segments have fixed 64k limits,
23920+ * the transfer segment sizes are set at run time.
23921+ */
23922+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
23923+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
23924+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
23925+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
23926+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
23927+
23928+ /*
23929+ * The APM segments have byte granularity and their bases
23930+ * are set at run time. All have 64k limits.
23931+ */
23932+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
23933+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
23934+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
23935+
23936+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
23937+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
23938+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
23939+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
23940+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
23941+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
23942+
23943+ /* Be sure this is zeroed to avoid false validations in Xen */
23944+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
23945+ .endr
23946diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
23947index e1aabdb..fee4fee 100644
23948--- a/arch/x86/kernel/head_64.S
23949+++ b/arch/x86/kernel/head_64.S
23950@@ -20,6 +20,8 @@
23951 #include <asm/processor-flags.h>
23952 #include <asm/percpu.h>
23953 #include <asm/nops.h>
23954+#include <asm/cpufeature.h>
23955+#include <asm/alternative-asm.h>
23956
23957 #ifdef CONFIG_PARAVIRT
23958 #include <asm/asm-offsets.h>
23959@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
23960 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
23961 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
23962 L3_START_KERNEL = pud_index(__START_KERNEL_map)
23963+L4_VMALLOC_START = pgd_index(VMALLOC_START)
23964+L3_VMALLOC_START = pud_index(VMALLOC_START)
23965+L4_VMALLOC_END = pgd_index(VMALLOC_END)
23966+L3_VMALLOC_END = pud_index(VMALLOC_END)
23967+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
23968+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
23969
23970 .text
23971 __HEAD
23972@@ -89,11 +97,24 @@ startup_64:
23973 * Fixup the physical addresses in the page table
23974 */
23975 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
23976+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
23977+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
23978+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
23979+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
23980+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
23981
23982- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
23983- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
23984+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
23985+#ifndef CONFIG_XEN
23986+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
23987+#endif
23988+
23989+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
23990+
23991+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
23992+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
23993
23994 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
23995+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
23996
23997 /*
23998 * Set up the identity mapping for the switchover. These
23999@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
24000 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24001 1:
24002
24003- /* Enable PAE mode and PGE */
24004- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24005+ /* Enable PAE mode and PSE/PGE */
24006+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24007 movq %rcx, %cr4
24008
24009 /* Setup early boot stage 4 level pagetables. */
24010@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
24011 movl $MSR_EFER, %ecx
24012 rdmsr
24013 btsl $_EFER_SCE, %eax /* Enable System Call */
24014- btl $20,%edi /* No Execute supported? */
24015+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24016 jnc 1f
24017 btsl $_EFER_NX, %eax
24018 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24019+#ifndef CONFIG_EFI
24020+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24021+#endif
24022+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24023+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24024+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24025+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24026+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24027+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24028 1: wrmsr /* Make changes effective */
24029
24030 /* Setup cr0 */
24031@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
24032 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24033 * address given in m16:64.
24034 */
24035+ pax_set_fptr_mask
24036 movq initial_code(%rip),%rax
24037 pushq $0 # fake return address to stop unwinder
24038 pushq $__KERNEL_CS # set correct cs
24039@@ -388,7 +419,7 @@ ENTRY(early_idt_handler)
24040 call dump_stack
24041 #ifdef CONFIG_KALLSYMS
24042 leaq early_idt_ripmsg(%rip),%rdi
24043- movq 40(%rsp),%rsi # %rip again
24044+ movq 88(%rsp),%rsi # %rip again
24045 call __print_symbol
24046 #endif
24047 #endif /* EARLY_PRINTK */
24048@@ -416,6 +447,7 @@ ENDPROC(early_idt_handler)
24049 early_recursion_flag:
24050 .long 0
24051
24052+ .section .rodata,"a",@progbits
24053 #ifdef CONFIG_EARLY_PRINTK
24054 early_idt_msg:
24055 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24056@@ -443,29 +475,52 @@ NEXT_PAGE(early_level4_pgt)
24057 NEXT_PAGE(early_dynamic_pgts)
24058 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24059
24060- .data
24061+ .section .rodata,"a",@progbits
24062
24063-#ifndef CONFIG_XEN
24064 NEXT_PAGE(init_level4_pgt)
24065- .fill 512,8,0
24066-#else
24067-NEXT_PAGE(init_level4_pgt)
24068- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24069 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24070 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24071+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24072+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24073+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24074+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24075+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24076+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24077 .org init_level4_pgt + L4_START_KERNEL*8, 0
24078 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24079 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24080
24081+#ifdef CONFIG_PAX_PER_CPU_PGD
24082+NEXT_PAGE(cpu_pgd)
24083+ .rept 2*NR_CPUS
24084+ .fill 512,8,0
24085+ .endr
24086+#endif
24087+
24088 NEXT_PAGE(level3_ident_pgt)
24089 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24090+#ifdef CONFIG_XEN
24091 .fill 511, 8, 0
24092+#else
24093+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24094+ .fill 510,8,0
24095+#endif
24096+
24097+NEXT_PAGE(level3_vmalloc_start_pgt)
24098+ .fill 512,8,0
24099+
24100+NEXT_PAGE(level3_vmalloc_end_pgt)
24101+ .fill 512,8,0
24102+
24103+NEXT_PAGE(level3_vmemmap_pgt)
24104+ .fill L3_VMEMMAP_START,8,0
24105+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24106+
24107 NEXT_PAGE(level2_ident_pgt)
24108- /* Since I easily can, map the first 1G.
24109+ /* Since I easily can, map the first 2G.
24110 * Don't set NX because code runs from these pages.
24111 */
24112- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24113-#endif
24114+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24115
24116 NEXT_PAGE(level3_kernel_pgt)
24117 .fill L3_START_KERNEL,8,0
24118@@ -473,6 +528,9 @@ NEXT_PAGE(level3_kernel_pgt)
24119 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24120 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24121
24122+NEXT_PAGE(level2_vmemmap_pgt)
24123+ .fill 512,8,0
24124+
24125 NEXT_PAGE(level2_kernel_pgt)
24126 /*
24127 * 512 MB kernel mapping. We spend a full page on this pagetable
24128@@ -490,28 +548,64 @@ NEXT_PAGE(level2_kernel_pgt)
24129 NEXT_PAGE(level2_fixmap_pgt)
24130 .fill 506,8,0
24131 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24132- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24133- .fill 5,8,0
24134+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24135+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24136+ .fill 4,8,0
24137
24138 NEXT_PAGE(level1_fixmap_pgt)
24139 .fill 512,8,0
24140
24141+NEXT_PAGE(level1_vsyscall_pgt)
24142+ .fill 512,8,0
24143+
24144 #undef PMDS
24145
24146- .data
24147+ .align PAGE_SIZE
24148+ENTRY(cpu_gdt_table)
24149+ .rept NR_CPUS
24150+ .quad 0x0000000000000000 /* NULL descriptor */
24151+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24152+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24153+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24154+ .quad 0x00cffb000000ffff /* __USER32_CS */
24155+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24156+ .quad 0x00affb000000ffff /* __USER_CS */
24157+
24158+#ifdef CONFIG_PAX_KERNEXEC
24159+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24160+#else
24161+ .quad 0x0 /* unused */
24162+#endif
24163+
24164+ .quad 0,0 /* TSS */
24165+ .quad 0,0 /* LDT */
24166+ .quad 0,0,0 /* three TLS descriptors */
24167+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
24168+ /* asm/segment.h:GDT_ENTRIES must match this */
24169+
24170+#ifdef CONFIG_PAX_MEMORY_UDEREF
24171+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
24172+#else
24173+ .quad 0x0 /* unused */
24174+#endif
24175+
24176+ /* zero the remaining page */
24177+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
24178+ .endr
24179+
24180 .align 16
24181 .globl early_gdt_descr
24182 early_gdt_descr:
24183 .word GDT_ENTRIES*8-1
24184 early_gdt_descr_base:
24185- .quad INIT_PER_CPU_VAR(gdt_page)
24186+ .quad cpu_gdt_table
24187
24188 ENTRY(phys_base)
24189 /* This must match the first entry in level2_kernel_pgt */
24190 .quad 0x0000000000000000
24191
24192 #include "../../x86/xen/xen-head.S"
24193-
24194- __PAGE_ALIGNED_BSS
24195+
24196+ .section .rodata,"a",@progbits
24197 NEXT_PAGE(empty_zero_page)
24198 .skip PAGE_SIZE
24199diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
24200index 05fd74f..c3548b1 100644
24201--- a/arch/x86/kernel/i386_ksyms_32.c
24202+++ b/arch/x86/kernel/i386_ksyms_32.c
24203@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
24204 EXPORT_SYMBOL(cmpxchg8b_emu);
24205 #endif
24206
24207+EXPORT_SYMBOL_GPL(cpu_gdt_table);
24208+
24209 /* Networking helper routines. */
24210 EXPORT_SYMBOL(csum_partial_copy_generic);
24211+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
24212+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
24213
24214 EXPORT_SYMBOL(__get_user_1);
24215 EXPORT_SYMBOL(__get_user_2);
24216@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
24217 EXPORT_SYMBOL(___preempt_schedule_context);
24218 #endif
24219 #endif
24220+
24221+#ifdef CONFIG_PAX_KERNEXEC
24222+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
24223+#endif
24224+
24225+#ifdef CONFIG_PAX_PER_CPU_PGD
24226+EXPORT_SYMBOL(cpu_pgd);
24227+#endif
24228diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
24229index e8368c6..9c1a712 100644
24230--- a/arch/x86/kernel/i387.c
24231+++ b/arch/x86/kernel/i387.c
24232@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
24233 static inline bool interrupted_user_mode(void)
24234 {
24235 struct pt_regs *regs = get_irq_regs();
24236- return regs && user_mode_vm(regs);
24237+ return regs && user_mode(regs);
24238 }
24239
24240 /*
24241diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
24242index 2e977b5..5f2c273 100644
24243--- a/arch/x86/kernel/i8259.c
24244+++ b/arch/x86/kernel/i8259.c
24245@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
24246 static void make_8259A_irq(unsigned int irq)
24247 {
24248 disable_irq_nosync(irq);
24249- io_apic_irqs &= ~(1<<irq);
24250+ io_apic_irqs &= ~(1UL<<irq);
24251 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
24252 i8259A_chip.name);
24253 enable_irq(irq);
24254@@ -209,7 +209,7 @@ spurious_8259A_irq:
24255 "spurious 8259A interrupt: IRQ%d.\n", irq);
24256 spurious_irq_mask |= irqmask;
24257 }
24258- atomic_inc(&irq_err_count);
24259+ atomic_inc_unchecked(&irq_err_count);
24260 /*
24261 * Theoretically we do not have to handle this IRQ,
24262 * but in Linux this does not cause problems and is
24263@@ -332,14 +332,16 @@ static void init_8259A(int auto_eoi)
24264 /* (slave's support for AEOI in flat mode is to be investigated) */
24265 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
24266
24267+ pax_open_kernel();
24268 if (auto_eoi)
24269 /*
24270 * In AEOI mode we just have to mask the interrupt
24271 * when acking.
24272 */
24273- i8259A_chip.irq_mask_ack = disable_8259A_irq;
24274+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
24275 else
24276- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24277+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24278+ pax_close_kernel();
24279
24280 udelay(100); /* wait for 8259A to initialize */
24281
24282diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
24283index a979b5b..1d6db75 100644
24284--- a/arch/x86/kernel/io_delay.c
24285+++ b/arch/x86/kernel/io_delay.c
24286@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
24287 * Quirk table for systems that misbehave (lock up, etc.) if port
24288 * 0x80 is used:
24289 */
24290-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
24291+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
24292 {
24293 .callback = dmi_io_delay_0xed_port,
24294 .ident = "Compaq Presario V6000",
24295diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
24296index 4ddaf66..49d5c18 100644
24297--- a/arch/x86/kernel/ioport.c
24298+++ b/arch/x86/kernel/ioport.c
24299@@ -6,6 +6,7 @@
24300 #include <linux/sched.h>
24301 #include <linux/kernel.h>
24302 #include <linux/capability.h>
24303+#include <linux/security.h>
24304 #include <linux/errno.h>
24305 #include <linux/types.h>
24306 #include <linux/ioport.h>
24307@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24308 return -EINVAL;
24309 if (turn_on && !capable(CAP_SYS_RAWIO))
24310 return -EPERM;
24311+#ifdef CONFIG_GRKERNSEC_IO
24312+ if (turn_on && grsec_disable_privio) {
24313+ gr_handle_ioperm();
24314+ return -ENODEV;
24315+ }
24316+#endif
24317
24318 /*
24319 * If it's the first ioperm() call in this thread's lifetime, set the
24320@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24321 * because the ->io_bitmap_max value must match the bitmap
24322 * contents:
24323 */
24324- tss = &per_cpu(init_tss, get_cpu());
24325+ tss = init_tss + get_cpu();
24326
24327 if (turn_on)
24328 bitmap_clear(t->io_bitmap_ptr, from, num);
24329@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
24330 if (level > old) {
24331 if (!capable(CAP_SYS_RAWIO))
24332 return -EPERM;
24333+#ifdef CONFIG_GRKERNSEC_IO
24334+ if (grsec_disable_privio) {
24335+ gr_handle_iopl();
24336+ return -ENODEV;
24337+ }
24338+#endif
24339 }
24340 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
24341 t->iopl = level << 12;
24342diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
24343index 22d0687..e07b2a5 100644
24344--- a/arch/x86/kernel/irq.c
24345+++ b/arch/x86/kernel/irq.c
24346@@ -21,7 +21,7 @@
24347 #define CREATE_TRACE_POINTS
24348 #include <asm/trace/irq_vectors.h>
24349
24350-atomic_t irq_err_count;
24351+atomic_unchecked_t irq_err_count;
24352
24353 /* Function pointer for generic interrupt vector handling */
24354 void (*x86_platform_ipi_callback)(void) = NULL;
24355@@ -125,9 +125,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
24356 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
24357 seq_printf(p, " Machine check polls\n");
24358 #endif
24359- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
24360+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
24361 #if defined(CONFIG_X86_IO_APIC)
24362- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
24363+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
24364 #endif
24365 return 0;
24366 }
24367@@ -167,7 +167,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
24368
24369 u64 arch_irq_stat(void)
24370 {
24371- u64 sum = atomic_read(&irq_err_count);
24372+ u64 sum = atomic_read_unchecked(&irq_err_count);
24373 return sum;
24374 }
24375
24376diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
24377index d7fcbed..1f747f7 100644
24378--- a/arch/x86/kernel/irq_32.c
24379+++ b/arch/x86/kernel/irq_32.c
24380@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
24381 __asm__ __volatile__("andl %%esp,%0" :
24382 "=r" (sp) : "0" (THREAD_SIZE - 1));
24383
24384- return sp < (sizeof(struct thread_info) + STACK_WARN);
24385+ return sp < STACK_WARN;
24386 }
24387
24388 static void print_stack_overflow(void)
24389@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
24390 * per-CPU IRQ handling contexts (thread information and stack)
24391 */
24392 union irq_ctx {
24393- struct thread_info tinfo;
24394- u32 stack[THREAD_SIZE/sizeof(u32)];
24395+ unsigned long previous_esp;
24396+ u32 stack[THREAD_SIZE/sizeof(u32)];
24397 } __attribute__((aligned(THREAD_SIZE)));
24398
24399 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
24400@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
24401 static inline int
24402 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24403 {
24404- union irq_ctx *curctx, *irqctx;
24405+ union irq_ctx *irqctx;
24406 u32 *isp, arg1, arg2;
24407
24408- curctx = (union irq_ctx *) current_thread_info();
24409 irqctx = __this_cpu_read(hardirq_ctx);
24410
24411 /*
24412@@ -92,13 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24413 * handler) we can't do that and just have to keep using the
24414 * current stack (which is the irq stack already after all)
24415 */
24416- if (unlikely(curctx == irqctx))
24417+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
24418 return 0;
24419
24420 /* build the stack frame on the IRQ stack */
24421- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24422- irqctx->tinfo.task = curctx->tinfo.task;
24423- irqctx->tinfo.previous_esp = current_stack_pointer;
24424+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24425+ irqctx->previous_esp = current_stack_pointer;
24426+
24427+#ifdef CONFIG_PAX_MEMORY_UDEREF
24428+ __set_fs(MAKE_MM_SEG(0));
24429+#endif
24430
24431 if (unlikely(overflow))
24432 call_on_stack(print_stack_overflow, isp);
24433@@ -110,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24434 : "0" (irq), "1" (desc), "2" (isp),
24435 "D" (desc->handle_irq)
24436 : "memory", "cc", "ecx");
24437+
24438+#ifdef CONFIG_PAX_MEMORY_UDEREF
24439+ __set_fs(current_thread_info()->addr_limit);
24440+#endif
24441+
24442 return 1;
24443 }
24444
24445@@ -118,48 +125,34 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24446 */
24447 void irq_ctx_init(int cpu)
24448 {
24449- union irq_ctx *irqctx;
24450-
24451 if (per_cpu(hardirq_ctx, cpu))
24452 return;
24453
24454- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24455- THREADINFO_GFP,
24456- THREAD_SIZE_ORDER));
24457- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24458- irqctx->tinfo.cpu = cpu;
24459- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24460-
24461- per_cpu(hardirq_ctx, cpu) = irqctx;
24462-
24463- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24464- THREADINFO_GFP,
24465- THREAD_SIZE_ORDER));
24466- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24467- irqctx->tinfo.cpu = cpu;
24468- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24469-
24470- per_cpu(softirq_ctx, cpu) = irqctx;
24471-
24472- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
24473- cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
24474+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24475+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24476 }
24477
24478 void do_softirq_own_stack(void)
24479 {
24480- struct thread_info *curctx;
24481 union irq_ctx *irqctx;
24482 u32 *isp;
24483
24484- curctx = current_thread_info();
24485 irqctx = __this_cpu_read(softirq_ctx);
24486- irqctx->tinfo.task = curctx->task;
24487- irqctx->tinfo.previous_esp = current_stack_pointer;
24488+ irqctx->previous_esp = current_stack_pointer;
24489
24490 /* build the stack frame on the softirq stack */
24491- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24492+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24493+
24494+#ifdef CONFIG_PAX_MEMORY_UDEREF
24495+ __set_fs(MAKE_MM_SEG(0));
24496+#endif
24497
24498 call_on_stack(__do_softirq, isp);
24499+
24500+#ifdef CONFIG_PAX_MEMORY_UDEREF
24501+ __set_fs(current_thread_info()->addr_limit);
24502+#endif
24503+
24504 }
24505
24506 bool handle_irq(unsigned irq, struct pt_regs *regs)
24507@@ -173,7 +166,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
24508 if (unlikely(!desc))
24509 return false;
24510
24511- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
24512+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
24513 if (unlikely(overflow))
24514 print_stack_overflow();
24515 desc->handle_irq(irq, desc);
24516diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
24517index 4d1c746..232961d 100644
24518--- a/arch/x86/kernel/irq_64.c
24519+++ b/arch/x86/kernel/irq_64.c
24520@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
24521 u64 estack_top, estack_bottom;
24522 u64 curbase = (u64)task_stack_page(current);
24523
24524- if (user_mode_vm(regs))
24525+ if (user_mode(regs))
24526 return;
24527
24528 if (regs->sp >= curbase + sizeof(struct thread_info) +
24529diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
24530index 26d5a55..a01160a 100644
24531--- a/arch/x86/kernel/jump_label.c
24532+++ b/arch/x86/kernel/jump_label.c
24533@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
24534 * Jump label is enabled for the first time.
24535 * So we expect a default_nop...
24536 */
24537- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
24538+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
24539 != 0))
24540 bug_at((void *)entry->code, __LINE__);
24541 } else {
24542@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
24543 * ...otherwise expect an ideal_nop. Otherwise
24544 * something went horribly wrong.
24545 */
24546- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
24547+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
24548 != 0))
24549 bug_at((void *)entry->code, __LINE__);
24550 }
24551@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
24552 * are converting the default nop to the ideal nop.
24553 */
24554 if (init) {
24555- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
24556+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
24557 bug_at((void *)entry->code, __LINE__);
24558 } else {
24559 code.jump = 0xe9;
24560 code.offset = entry->target -
24561 (entry->code + JUMP_LABEL_NOP_SIZE);
24562- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
24563+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
24564 bug_at((void *)entry->code, __LINE__);
24565 }
24566 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
24567diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
24568index 836f832..a8bda67 100644
24569--- a/arch/x86/kernel/kgdb.c
24570+++ b/arch/x86/kernel/kgdb.c
24571@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
24572 #ifdef CONFIG_X86_32
24573 switch (regno) {
24574 case GDB_SS:
24575- if (!user_mode_vm(regs))
24576+ if (!user_mode(regs))
24577 *(unsigned long *)mem = __KERNEL_DS;
24578 break;
24579 case GDB_SP:
24580- if (!user_mode_vm(regs))
24581+ if (!user_mode(regs))
24582 *(unsigned long *)mem = kernel_stack_pointer(regs);
24583 break;
24584 case GDB_GS:
24585@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
24586 bp->attr.bp_addr = breakinfo[breakno].addr;
24587 bp->attr.bp_len = breakinfo[breakno].len;
24588 bp->attr.bp_type = breakinfo[breakno].type;
24589- info->address = breakinfo[breakno].addr;
24590+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
24591+ info->address = ktla_ktva(breakinfo[breakno].addr);
24592+ else
24593+ info->address = breakinfo[breakno].addr;
24594 info->len = breakinfo[breakno].len;
24595 info->type = breakinfo[breakno].type;
24596 val = arch_install_hw_breakpoint(bp);
24597@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
24598 case 'k':
24599 /* clear the trace bit */
24600 linux_regs->flags &= ~X86_EFLAGS_TF;
24601- atomic_set(&kgdb_cpu_doing_single_step, -1);
24602+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
24603
24604 /* set the trace bit if we're stepping */
24605 if (remcomInBuffer[0] == 's') {
24606 linux_regs->flags |= X86_EFLAGS_TF;
24607- atomic_set(&kgdb_cpu_doing_single_step,
24608+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
24609 raw_smp_processor_id());
24610 }
24611
24612@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
24613
24614 switch (cmd) {
24615 case DIE_DEBUG:
24616- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
24617+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
24618 if (user_mode(regs))
24619 return single_step_cont(regs, args);
24620 break;
24621@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
24622 #endif /* CONFIG_DEBUG_RODATA */
24623
24624 bpt->type = BP_BREAKPOINT;
24625- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
24626+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
24627 BREAK_INSTR_SIZE);
24628 if (err)
24629 return err;
24630- err = probe_kernel_write((char *)bpt->bpt_addr,
24631+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
24632 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
24633 #ifdef CONFIG_DEBUG_RODATA
24634 if (!err)
24635@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
24636 return -EBUSY;
24637 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
24638 BREAK_INSTR_SIZE);
24639- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
24640+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
24641 if (err)
24642 return err;
24643 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
24644@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
24645 if (mutex_is_locked(&text_mutex))
24646 goto knl_write;
24647 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
24648- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
24649+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
24650 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
24651 goto knl_write;
24652 return err;
24653 knl_write:
24654 #endif /* CONFIG_DEBUG_RODATA */
24655- return probe_kernel_write((char *)bpt->bpt_addr,
24656+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
24657 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
24658 }
24659
24660diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
24661index 79a3f96..6ba030a 100644
24662--- a/arch/x86/kernel/kprobes/core.c
24663+++ b/arch/x86/kernel/kprobes/core.c
24664@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
24665 s32 raddr;
24666 } __packed *insn;
24667
24668- insn = (struct __arch_relative_insn *)from;
24669+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
24670+
24671+ pax_open_kernel();
24672 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
24673 insn->op = op;
24674+ pax_close_kernel();
24675 }
24676
24677 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
24678@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
24679 kprobe_opcode_t opcode;
24680 kprobe_opcode_t *orig_opcodes = opcodes;
24681
24682- if (search_exception_tables((unsigned long)opcodes))
24683+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
24684 return 0; /* Page fault may occur on this address. */
24685
24686 retry:
24687@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
24688 * for the first byte, we can recover the original instruction
24689 * from it and kp->opcode.
24690 */
24691- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
24692+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
24693 buf[0] = kp->opcode;
24694- return (unsigned long)buf;
24695+ return ktva_ktla((unsigned long)buf);
24696 }
24697
24698 /*
24699@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
24700 /* Another subsystem puts a breakpoint, failed to recover */
24701 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
24702 return 0;
24703+ pax_open_kernel();
24704 memcpy(dest, insn.kaddr, insn.length);
24705+ pax_close_kernel();
24706
24707 #ifdef CONFIG_X86_64
24708 if (insn_rip_relative(&insn)) {
24709@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
24710 return 0;
24711 }
24712 disp = (u8 *) dest + insn_offset_displacement(&insn);
24713+ pax_open_kernel();
24714 *(s32 *) disp = (s32) newdisp;
24715+ pax_close_kernel();
24716 }
24717 #endif
24718 return insn.length;
24719@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
24720 * nor set current_kprobe, because it doesn't use single
24721 * stepping.
24722 */
24723- regs->ip = (unsigned long)p->ainsn.insn;
24724+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
24725 preempt_enable_no_resched();
24726 return;
24727 }
24728@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
24729 regs->flags &= ~X86_EFLAGS_IF;
24730 /* single step inline if the instruction is an int3 */
24731 if (p->opcode == BREAKPOINT_INSTRUCTION)
24732- regs->ip = (unsigned long)p->addr;
24733+ regs->ip = ktla_ktva((unsigned long)p->addr);
24734 else
24735- regs->ip = (unsigned long)p->ainsn.insn;
24736+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
24737 }
24738
24739 /*
24740@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
24741 setup_singlestep(p, regs, kcb, 0);
24742 return 1;
24743 }
24744- } else if (*addr != BREAKPOINT_INSTRUCTION) {
24745+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
24746 /*
24747 * The breakpoint instruction was removed right
24748 * after we hit it. Another cpu has removed
24749@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
24750 " movq %rax, 152(%rsp)\n"
24751 RESTORE_REGS_STRING
24752 " popfq\n"
24753+#ifdef KERNEXEC_PLUGIN
24754+ " btsq $63,(%rsp)\n"
24755+#endif
24756 #else
24757 " pushf\n"
24758 SAVE_REGS_STRING
24759@@ -779,7 +789,7 @@ static void __kprobes
24760 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
24761 {
24762 unsigned long *tos = stack_addr(regs);
24763- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
24764+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
24765 unsigned long orig_ip = (unsigned long)p->addr;
24766 kprobe_opcode_t *insn = p->ainsn.insn;
24767
24768@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
24769 struct die_args *args = data;
24770 int ret = NOTIFY_DONE;
24771
24772- if (args->regs && user_mode_vm(args->regs))
24773+ if (args->regs && user_mode(args->regs))
24774 return ret;
24775
24776 switch (val) {
24777diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
24778index 898160b..758cde8 100644
24779--- a/arch/x86/kernel/kprobes/opt.c
24780+++ b/arch/x86/kernel/kprobes/opt.c
24781@@ -79,6 +79,7 @@ found:
24782 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
24783 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
24784 {
24785+ pax_open_kernel();
24786 #ifdef CONFIG_X86_64
24787 *addr++ = 0x48;
24788 *addr++ = 0xbf;
24789@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
24790 *addr++ = 0xb8;
24791 #endif
24792 *(unsigned long *)addr = val;
24793+ pax_close_kernel();
24794 }
24795
24796 asm (
24797@@ -335,7 +337,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
24798 * Verify if the address gap is in 2GB range, because this uses
24799 * a relative jump.
24800 */
24801- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
24802+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
24803 if (abs(rel) > 0x7fffffff)
24804 return -ERANGE;
24805
24806@@ -350,16 +352,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
24807 op->optinsn.size = ret;
24808
24809 /* Copy arch-dep-instance from template */
24810- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
24811+ pax_open_kernel();
24812+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
24813+ pax_close_kernel();
24814
24815 /* Set probe information */
24816 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
24817
24818 /* Set probe function call */
24819- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
24820+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
24821
24822 /* Set returning jmp instruction at the tail of out-of-line buffer */
24823- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
24824+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
24825 (u8 *)op->kp.addr + op->optinsn.size);
24826
24827 flush_icache_range((unsigned long) buf,
24828@@ -384,7 +388,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
24829 WARN_ON(kprobe_disabled(&op->kp));
24830
24831 /* Backup instructions which will be replaced by jump address */
24832- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
24833+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
24834 RELATIVE_ADDR_SIZE);
24835
24836 insn_buf[0] = RELATIVEJUMP_OPCODE;
24837@@ -433,7 +437,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
24838 /* This kprobe is really able to run optimized path. */
24839 op = container_of(p, struct optimized_kprobe, kp);
24840 /* Detour through copied instructions */
24841- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
24842+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
24843 if (!reenter)
24844 reset_current_kprobe();
24845 preempt_enable_no_resched();
24846diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
24847index ebc9873..1b9724b 100644
24848--- a/arch/x86/kernel/ldt.c
24849+++ b/arch/x86/kernel/ldt.c
24850@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
24851 if (reload) {
24852 #ifdef CONFIG_SMP
24853 preempt_disable();
24854- load_LDT(pc);
24855+ load_LDT_nolock(pc);
24856 if (!cpumask_equal(mm_cpumask(current->mm),
24857 cpumask_of(smp_processor_id())))
24858 smp_call_function(flush_ldt, current->mm, 1);
24859 preempt_enable();
24860 #else
24861- load_LDT(pc);
24862+ load_LDT_nolock(pc);
24863 #endif
24864 }
24865 if (oldsize) {
24866@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
24867 return err;
24868
24869 for (i = 0; i < old->size; i++)
24870- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
24871+ write_ldt_entry(new->ldt, i, old->ldt + i);
24872 return 0;
24873 }
24874
24875@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
24876 retval = copy_ldt(&mm->context, &old_mm->context);
24877 mutex_unlock(&old_mm->context.lock);
24878 }
24879+
24880+ if (tsk == current) {
24881+ mm->context.vdso = 0;
24882+
24883+#ifdef CONFIG_X86_32
24884+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24885+ mm->context.user_cs_base = 0UL;
24886+ mm->context.user_cs_limit = ~0UL;
24887+
24888+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
24889+ cpus_clear(mm->context.cpu_user_cs_mask);
24890+#endif
24891+
24892+#endif
24893+#endif
24894+
24895+ }
24896+
24897 return retval;
24898 }
24899
24900@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
24901 }
24902 }
24903
24904+#ifdef CONFIG_PAX_SEGMEXEC
24905+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
24906+ error = -EINVAL;
24907+ goto out_unlock;
24908+ }
24909+#endif
24910+
24911 fill_ldt(&ldt, &ldt_info);
24912 if (oldmode)
24913 ldt.avl = 0;
24914diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
24915index 5b19e4d..6476a76 100644
24916--- a/arch/x86/kernel/machine_kexec_32.c
24917+++ b/arch/x86/kernel/machine_kexec_32.c
24918@@ -26,7 +26,7 @@
24919 #include <asm/cacheflush.h>
24920 #include <asm/debugreg.h>
24921
24922-static void set_idt(void *newidt, __u16 limit)
24923+static void set_idt(struct desc_struct *newidt, __u16 limit)
24924 {
24925 struct desc_ptr curidt;
24926
24927@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
24928 }
24929
24930
24931-static void set_gdt(void *newgdt, __u16 limit)
24932+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
24933 {
24934 struct desc_ptr curgdt;
24935
24936@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
24937 }
24938
24939 control_page = page_address(image->control_code_page);
24940- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
24941+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
24942
24943 relocate_kernel_ptr = control_page;
24944 page_list[PA_CONTROL_PAGE] = __pa(control_page);
24945diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
24946index 15c9876..0a43909 100644
24947--- a/arch/x86/kernel/microcode_core.c
24948+++ b/arch/x86/kernel/microcode_core.c
24949@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
24950 return NOTIFY_OK;
24951 }
24952
24953-static struct notifier_block __refdata mc_cpu_notifier = {
24954+static struct notifier_block mc_cpu_notifier = {
24955 .notifier_call = mc_cpu_callback,
24956 };
24957
24958diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
24959index 5fb2ceb..3ae90bb 100644
24960--- a/arch/x86/kernel/microcode_intel.c
24961+++ b/arch/x86/kernel/microcode_intel.c
24962@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
24963
24964 static int get_ucode_user(void *to, const void *from, size_t n)
24965 {
24966- return copy_from_user(to, from, n);
24967+ return copy_from_user(to, (const void __force_user *)from, n);
24968 }
24969
24970 static enum ucode_state
24971 request_microcode_user(int cpu, const void __user *buf, size_t size)
24972 {
24973- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
24974+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
24975 }
24976
24977 static void microcode_fini_cpu(int cpu)
24978diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
24979index 18be189..4a9fe40 100644
24980--- a/arch/x86/kernel/module.c
24981+++ b/arch/x86/kernel/module.c
24982@@ -43,15 +43,60 @@ do { \
24983 } while (0)
24984 #endif
24985
24986-void *module_alloc(unsigned long size)
24987+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
24988 {
24989- if (PAGE_ALIGN(size) > MODULES_LEN)
24990+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
24991 return NULL;
24992 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
24993- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
24994+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
24995 NUMA_NO_NODE, __builtin_return_address(0));
24996 }
24997
24998+void *module_alloc(unsigned long size)
24999+{
25000+
25001+#ifdef CONFIG_PAX_KERNEXEC
25002+ return __module_alloc(size, PAGE_KERNEL);
25003+#else
25004+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25005+#endif
25006+
25007+}
25008+
25009+#ifdef CONFIG_PAX_KERNEXEC
25010+#ifdef CONFIG_X86_32
25011+void *module_alloc_exec(unsigned long size)
25012+{
25013+ struct vm_struct *area;
25014+
25015+ if (size == 0)
25016+ return NULL;
25017+
25018+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25019+ return area ? area->addr : NULL;
25020+}
25021+EXPORT_SYMBOL(module_alloc_exec);
25022+
25023+void module_free_exec(struct module *mod, void *module_region)
25024+{
25025+ vunmap(module_region);
25026+}
25027+EXPORT_SYMBOL(module_free_exec);
25028+#else
25029+void module_free_exec(struct module *mod, void *module_region)
25030+{
25031+ module_free(mod, module_region);
25032+}
25033+EXPORT_SYMBOL(module_free_exec);
25034+
25035+void *module_alloc_exec(unsigned long size)
25036+{
25037+ return __module_alloc(size, PAGE_KERNEL_RX);
25038+}
25039+EXPORT_SYMBOL(module_alloc_exec);
25040+#endif
25041+#endif
25042+
25043 #ifdef CONFIG_X86_32
25044 int apply_relocate(Elf32_Shdr *sechdrs,
25045 const char *strtab,
25046@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25047 unsigned int i;
25048 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
25049 Elf32_Sym *sym;
25050- uint32_t *location;
25051+ uint32_t *plocation, location;
25052
25053 DEBUGP("Applying relocate section %u to %u\n",
25054 relsec, sechdrs[relsec].sh_info);
25055 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
25056 /* This is where to make the change */
25057- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
25058- + rel[i].r_offset;
25059+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
25060+ location = (uint32_t)plocation;
25061+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
25062+ plocation = ktla_ktva((void *)plocation);
25063 /* This is the symbol it is referring to. Note that all
25064 undefined symbols have been resolved. */
25065 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
25066@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25067 switch (ELF32_R_TYPE(rel[i].r_info)) {
25068 case R_386_32:
25069 /* We add the value into the location given */
25070- *location += sym->st_value;
25071+ pax_open_kernel();
25072+ *plocation += sym->st_value;
25073+ pax_close_kernel();
25074 break;
25075 case R_386_PC32:
25076 /* Add the value, subtract its position */
25077- *location += sym->st_value - (uint32_t)location;
25078+ pax_open_kernel();
25079+ *plocation += sym->st_value - location;
25080+ pax_close_kernel();
25081 break;
25082 default:
25083 pr_err("%s: Unknown relocation: %u\n",
25084@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
25085 case R_X86_64_NONE:
25086 break;
25087 case R_X86_64_64:
25088+ pax_open_kernel();
25089 *(u64 *)loc = val;
25090+ pax_close_kernel();
25091 break;
25092 case R_X86_64_32:
25093+ pax_open_kernel();
25094 *(u32 *)loc = val;
25095+ pax_close_kernel();
25096 if (val != *(u32 *)loc)
25097 goto overflow;
25098 break;
25099 case R_X86_64_32S:
25100+ pax_open_kernel();
25101 *(s32 *)loc = val;
25102+ pax_close_kernel();
25103 if ((s64)val != *(s32 *)loc)
25104 goto overflow;
25105 break;
25106 case R_X86_64_PC32:
25107 val -= (u64)loc;
25108+ pax_open_kernel();
25109 *(u32 *)loc = val;
25110+ pax_close_kernel();
25111+
25112 #if 0
25113 if ((s64)val != *(s32 *)loc)
25114 goto overflow;
25115diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
25116index 05266b5..3432443 100644
25117--- a/arch/x86/kernel/msr.c
25118+++ b/arch/x86/kernel/msr.c
25119@@ -37,6 +37,7 @@
25120 #include <linux/notifier.h>
25121 #include <linux/uaccess.h>
25122 #include <linux/gfp.h>
25123+#include <linux/grsecurity.h>
25124
25125 #include <asm/processor.h>
25126 #include <asm/msr.h>
25127@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
25128 int err = 0;
25129 ssize_t bytes = 0;
25130
25131+#ifdef CONFIG_GRKERNSEC_KMEM
25132+ gr_handle_msr_write();
25133+ return -EPERM;
25134+#endif
25135+
25136 if (count % 8)
25137 return -EINVAL; /* Invalid chunk size */
25138
25139@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
25140 err = -EBADF;
25141 break;
25142 }
25143+#ifdef CONFIG_GRKERNSEC_KMEM
25144+ gr_handle_msr_write();
25145+ return -EPERM;
25146+#endif
25147 if (copy_from_user(&regs, uregs, sizeof regs)) {
25148 err = -EFAULT;
25149 break;
25150@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
25151 return notifier_from_errno(err);
25152 }
25153
25154-static struct notifier_block __refdata msr_class_cpu_notifier = {
25155+static struct notifier_block msr_class_cpu_notifier = {
25156 .notifier_call = msr_class_cpu_callback,
25157 };
25158
25159diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
25160index 6fcb49c..5b3f4ff 100644
25161--- a/arch/x86/kernel/nmi.c
25162+++ b/arch/x86/kernel/nmi.c
25163@@ -138,7 +138,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
25164 return handled;
25165 }
25166
25167-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25168+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
25169 {
25170 struct nmi_desc *desc = nmi_to_desc(type);
25171 unsigned long flags;
25172@@ -162,9 +162,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25173 * event confuses some handlers (kdump uses this flag)
25174 */
25175 if (action->flags & NMI_FLAG_FIRST)
25176- list_add_rcu(&action->list, &desc->head);
25177+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
25178 else
25179- list_add_tail_rcu(&action->list, &desc->head);
25180+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
25181
25182 spin_unlock_irqrestore(&desc->lock, flags);
25183 return 0;
25184@@ -187,7 +187,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
25185 if (!strcmp(n->name, name)) {
25186 WARN(in_nmi(),
25187 "Trying to free NMI (%s) from NMI context!\n", n->name);
25188- list_del_rcu(&n->list);
25189+ pax_list_del_rcu((struct list_head *)&n->list);
25190 break;
25191 }
25192 }
25193@@ -512,6 +512,17 @@ static inline void nmi_nesting_postprocess(void)
25194 dotraplinkage notrace __kprobes void
25195 do_nmi(struct pt_regs *regs, long error_code)
25196 {
25197+
25198+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25199+ if (!user_mode(regs)) {
25200+ unsigned long cs = regs->cs & 0xFFFF;
25201+ unsigned long ip = ktva_ktla(regs->ip);
25202+
25203+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
25204+ regs->ip = ip;
25205+ }
25206+#endif
25207+
25208 nmi_nesting_preprocess(regs);
25209
25210 nmi_enter();
25211diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
25212index 6d9582e..f746287 100644
25213--- a/arch/x86/kernel/nmi_selftest.c
25214+++ b/arch/x86/kernel/nmi_selftest.c
25215@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
25216 {
25217 /* trap all the unknown NMIs we may generate */
25218 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
25219- __initdata);
25220+ __initconst);
25221 }
25222
25223 static void __init cleanup_nmi_testsuite(void)
25224@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
25225 unsigned long timeout;
25226
25227 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
25228- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
25229+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
25230 nmi_fail = FAILURE;
25231 return;
25232 }
25233diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
25234index bbb6c73..24a58ef 100644
25235--- a/arch/x86/kernel/paravirt-spinlocks.c
25236+++ b/arch/x86/kernel/paravirt-spinlocks.c
25237@@ -8,7 +8,7 @@
25238
25239 #include <asm/paravirt.h>
25240
25241-struct pv_lock_ops pv_lock_ops = {
25242+struct pv_lock_ops pv_lock_ops __read_only = {
25243 #ifdef CONFIG_SMP
25244 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
25245 .unlock_kick = paravirt_nop,
25246diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
25247index 1b10af8..0b58cbc 100644
25248--- a/arch/x86/kernel/paravirt.c
25249+++ b/arch/x86/kernel/paravirt.c
25250@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
25251 {
25252 return x;
25253 }
25254+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25255+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
25256+#endif
25257
25258 void __init default_banner(void)
25259 {
25260@@ -142,15 +145,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
25261 if (opfunc == NULL)
25262 /* If there's no function, patch it with a ud2a (BUG) */
25263 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
25264- else if (opfunc == _paravirt_nop)
25265+ else if (opfunc == (void *)_paravirt_nop)
25266 /* If the operation is a nop, then nop the callsite */
25267 ret = paravirt_patch_nop();
25268
25269 /* identity functions just return their single argument */
25270- else if (opfunc == _paravirt_ident_32)
25271+ else if (opfunc == (void *)_paravirt_ident_32)
25272 ret = paravirt_patch_ident_32(insnbuf, len);
25273- else if (opfunc == _paravirt_ident_64)
25274+ else if (opfunc == (void *)_paravirt_ident_64)
25275 ret = paravirt_patch_ident_64(insnbuf, len);
25276+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25277+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
25278+ ret = paravirt_patch_ident_64(insnbuf, len);
25279+#endif
25280
25281 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
25282 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
25283@@ -175,7 +182,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
25284 if (insn_len > len || start == NULL)
25285 insn_len = len;
25286 else
25287- memcpy(insnbuf, start, insn_len);
25288+ memcpy(insnbuf, ktla_ktva(start), insn_len);
25289
25290 return insn_len;
25291 }
25292@@ -299,7 +306,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
25293 return this_cpu_read(paravirt_lazy_mode);
25294 }
25295
25296-struct pv_info pv_info = {
25297+struct pv_info pv_info __read_only = {
25298 .name = "bare hardware",
25299 .paravirt_enabled = 0,
25300 .kernel_rpl = 0,
25301@@ -310,16 +317,16 @@ struct pv_info pv_info = {
25302 #endif
25303 };
25304
25305-struct pv_init_ops pv_init_ops = {
25306+struct pv_init_ops pv_init_ops __read_only = {
25307 .patch = native_patch,
25308 };
25309
25310-struct pv_time_ops pv_time_ops = {
25311+struct pv_time_ops pv_time_ops __read_only = {
25312 .sched_clock = native_sched_clock,
25313 .steal_clock = native_steal_clock,
25314 };
25315
25316-__visible struct pv_irq_ops pv_irq_ops = {
25317+__visible struct pv_irq_ops pv_irq_ops __read_only = {
25318 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
25319 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
25320 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
25321@@ -331,7 +338,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
25322 #endif
25323 };
25324
25325-__visible struct pv_cpu_ops pv_cpu_ops = {
25326+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
25327 .cpuid = native_cpuid,
25328 .get_debugreg = native_get_debugreg,
25329 .set_debugreg = native_set_debugreg,
25330@@ -389,21 +396,26 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
25331 .end_context_switch = paravirt_nop,
25332 };
25333
25334-struct pv_apic_ops pv_apic_ops = {
25335+struct pv_apic_ops pv_apic_ops __read_only= {
25336 #ifdef CONFIG_X86_LOCAL_APIC
25337 .startup_ipi_hook = paravirt_nop,
25338 #endif
25339 };
25340
25341-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
25342+#ifdef CONFIG_X86_32
25343+#ifdef CONFIG_X86_PAE
25344+/* 64-bit pagetable entries */
25345+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
25346+#else
25347 /* 32-bit pagetable entries */
25348 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
25349+#endif
25350 #else
25351 /* 64-bit pagetable entries */
25352 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
25353 #endif
25354
25355-struct pv_mmu_ops pv_mmu_ops = {
25356+struct pv_mmu_ops pv_mmu_ops __read_only = {
25357
25358 .read_cr2 = native_read_cr2,
25359 .write_cr2 = native_write_cr2,
25360@@ -453,6 +465,7 @@ struct pv_mmu_ops pv_mmu_ops = {
25361 .make_pud = PTE_IDENT,
25362
25363 .set_pgd = native_set_pgd,
25364+ .set_pgd_batched = native_set_pgd_batched,
25365 #endif
25366 #endif /* PAGETABLE_LEVELS >= 3 */
25367
25368@@ -473,6 +486,12 @@ struct pv_mmu_ops pv_mmu_ops = {
25369 },
25370
25371 .set_fixmap = native_set_fixmap,
25372+
25373+#ifdef CONFIG_PAX_KERNEXEC
25374+ .pax_open_kernel = native_pax_open_kernel,
25375+ .pax_close_kernel = native_pax_close_kernel,
25376+#endif
25377+
25378 };
25379
25380 EXPORT_SYMBOL_GPL(pv_time_ops);
25381diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
25382index 299d493..2ccb0ee 100644
25383--- a/arch/x86/kernel/pci-calgary_64.c
25384+++ b/arch/x86/kernel/pci-calgary_64.c
25385@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
25386 tce_space = be64_to_cpu(readq(target));
25387 tce_space = tce_space & TAR_SW_BITS;
25388
25389- tce_space = tce_space & (~specified_table_size);
25390+ tce_space = tce_space & (~(unsigned long)specified_table_size);
25391 info->tce_space = (u64 *)__va(tce_space);
25392 }
25393 }
25394diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
25395index 35ccf75..7a15747 100644
25396--- a/arch/x86/kernel/pci-iommu_table.c
25397+++ b/arch/x86/kernel/pci-iommu_table.c
25398@@ -2,7 +2,7 @@
25399 #include <asm/iommu_table.h>
25400 #include <linux/string.h>
25401 #include <linux/kallsyms.h>
25402-
25403+#include <linux/sched.h>
25404
25405 #define DEBUG 1
25406
25407diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
25408index 6c483ba..d10ce2f 100644
25409--- a/arch/x86/kernel/pci-swiotlb.c
25410+++ b/arch/x86/kernel/pci-swiotlb.c
25411@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
25412 void *vaddr, dma_addr_t dma_addr,
25413 struct dma_attrs *attrs)
25414 {
25415- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
25416+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
25417 }
25418
25419 static struct dma_map_ops swiotlb_dma_ops = {
25420diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
25421index ca7f0d5..8996469 100644
25422--- a/arch/x86/kernel/preempt.S
25423+++ b/arch/x86/kernel/preempt.S
25424@@ -3,12 +3,14 @@
25425 #include <asm/dwarf2.h>
25426 #include <asm/asm.h>
25427 #include <asm/calling.h>
25428+#include <asm/alternative-asm.h>
25429
25430 ENTRY(___preempt_schedule)
25431 CFI_STARTPROC
25432 SAVE_ALL
25433 call preempt_schedule
25434 RESTORE_ALL
25435+ pax_force_retaddr
25436 ret
25437 CFI_ENDPROC
25438
25439@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
25440 SAVE_ALL
25441 call preempt_schedule_context
25442 RESTORE_ALL
25443+ pax_force_retaddr
25444 ret
25445 CFI_ENDPROC
25446
25447diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
25448index 3fb8d95..254dc51 100644
25449--- a/arch/x86/kernel/process.c
25450+++ b/arch/x86/kernel/process.c
25451@@ -36,7 +36,8 @@
25452 * section. Since TSS's are completely CPU-local, we want them
25453 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
25454 */
25455-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
25456+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
25457+EXPORT_SYMBOL(init_tss);
25458
25459 #ifdef CONFIG_X86_64
25460 static DEFINE_PER_CPU(unsigned char, is_idle);
25461@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
25462 task_xstate_cachep =
25463 kmem_cache_create("task_xstate", xstate_size,
25464 __alignof__(union thread_xstate),
25465- SLAB_PANIC | SLAB_NOTRACK, NULL);
25466+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
25467 }
25468
25469 /*
25470@@ -105,7 +106,7 @@ void exit_thread(void)
25471 unsigned long *bp = t->io_bitmap_ptr;
25472
25473 if (bp) {
25474- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
25475+ struct tss_struct *tss = init_tss + get_cpu();
25476
25477 t->io_bitmap_ptr = NULL;
25478 clear_thread_flag(TIF_IO_BITMAP);
25479@@ -125,6 +126,9 @@ void flush_thread(void)
25480 {
25481 struct task_struct *tsk = current;
25482
25483+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
25484+ loadsegment(gs, 0);
25485+#endif
25486 flush_ptrace_hw_breakpoint(tsk);
25487 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
25488 drop_init_fpu(tsk);
25489@@ -271,7 +275,7 @@ static void __exit_idle(void)
25490 void exit_idle(void)
25491 {
25492 /* idle loop has pid 0 */
25493- if (current->pid)
25494+ if (task_pid_nr(current))
25495 return;
25496 __exit_idle();
25497 }
25498@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
25499 return ret;
25500 }
25501 #endif
25502-void stop_this_cpu(void *dummy)
25503+__noreturn void stop_this_cpu(void *dummy)
25504 {
25505 local_irq_disable();
25506 /*
25507@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
25508 }
25509 early_param("idle", idle_setup);
25510
25511-unsigned long arch_align_stack(unsigned long sp)
25512+#ifdef CONFIG_PAX_RANDKSTACK
25513+void pax_randomize_kstack(struct pt_regs *regs)
25514 {
25515- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
25516- sp -= get_random_int() % 8192;
25517- return sp & ~0xf;
25518-}
25519+ struct thread_struct *thread = &current->thread;
25520+ unsigned long time;
25521
25522-unsigned long arch_randomize_brk(struct mm_struct *mm)
25523-{
25524- unsigned long range_end = mm->brk + 0x02000000;
25525- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
25526-}
25527+ if (!randomize_va_space)
25528+ return;
25529+
25530+ if (v8086_mode(regs))
25531+ return;
25532
25533+ rdtscl(time);
25534+
25535+ /* P4 seems to return a 0 LSB, ignore it */
25536+#ifdef CONFIG_MPENTIUM4
25537+ time &= 0x3EUL;
25538+ time <<= 2;
25539+#elif defined(CONFIG_X86_64)
25540+ time &= 0xFUL;
25541+ time <<= 4;
25542+#else
25543+ time &= 0x1FUL;
25544+ time <<= 3;
25545+#endif
25546+
25547+ thread->sp0 ^= time;
25548+ load_sp0(init_tss + smp_processor_id(), thread);
25549+
25550+#ifdef CONFIG_X86_64
25551+ this_cpu_write(kernel_stack, thread->sp0);
25552+#endif
25553+}
25554+#endif
25555diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
25556index 6f1236c..fd448d4 100644
25557--- a/arch/x86/kernel/process_32.c
25558+++ b/arch/x86/kernel/process_32.c
25559@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
25560 unsigned long thread_saved_pc(struct task_struct *tsk)
25561 {
25562 return ((unsigned long *)tsk->thread.sp)[3];
25563+//XXX return tsk->thread.eip;
25564 }
25565
25566 void __show_regs(struct pt_regs *regs, int all)
25567@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
25568 unsigned long sp;
25569 unsigned short ss, gs;
25570
25571- if (user_mode_vm(regs)) {
25572+ if (user_mode(regs)) {
25573 sp = regs->sp;
25574 ss = regs->ss & 0xffff;
25575- gs = get_user_gs(regs);
25576 } else {
25577 sp = kernel_stack_pointer(regs);
25578 savesegment(ss, ss);
25579- savesegment(gs, gs);
25580 }
25581+ gs = get_user_gs(regs);
25582
25583 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
25584 (u16)regs->cs, regs->ip, regs->flags,
25585- smp_processor_id());
25586+ raw_smp_processor_id());
25587 print_symbol("EIP is at %s\n", regs->ip);
25588
25589 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
25590@@ -133,20 +133,21 @@ void release_thread(struct task_struct *dead_task)
25591 int copy_thread(unsigned long clone_flags, unsigned long sp,
25592 unsigned long arg, struct task_struct *p)
25593 {
25594- struct pt_regs *childregs = task_pt_regs(p);
25595+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
25596 struct task_struct *tsk;
25597 int err;
25598
25599 p->thread.sp = (unsigned long) childregs;
25600 p->thread.sp0 = (unsigned long) (childregs+1);
25601+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
25602
25603 if (unlikely(p->flags & PF_KTHREAD)) {
25604 /* kernel thread */
25605 memset(childregs, 0, sizeof(struct pt_regs));
25606 p->thread.ip = (unsigned long) ret_from_kernel_thread;
25607- task_user_gs(p) = __KERNEL_STACK_CANARY;
25608- childregs->ds = __USER_DS;
25609- childregs->es = __USER_DS;
25610+ savesegment(gs, childregs->gs);
25611+ childregs->ds = __KERNEL_DS;
25612+ childregs->es = __KERNEL_DS;
25613 childregs->fs = __KERNEL_PERCPU;
25614 childregs->bx = sp; /* function */
25615 childregs->bp = arg;
25616@@ -253,7 +254,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25617 struct thread_struct *prev = &prev_p->thread,
25618 *next = &next_p->thread;
25619 int cpu = smp_processor_id();
25620- struct tss_struct *tss = &per_cpu(init_tss, cpu);
25621+ struct tss_struct *tss = init_tss + cpu;
25622 fpu_switch_t fpu;
25623
25624 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
25625@@ -277,6 +278,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25626 */
25627 lazy_save_gs(prev->gs);
25628
25629+#ifdef CONFIG_PAX_MEMORY_UDEREF
25630+ __set_fs(task_thread_info(next_p)->addr_limit);
25631+#endif
25632+
25633 /*
25634 * Load the per-thread Thread-Local Storage descriptor.
25635 */
25636@@ -315,6 +320,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25637 */
25638 arch_end_context_switch(next_p);
25639
25640+ this_cpu_write(current_task, next_p);
25641+ this_cpu_write(current_tinfo, &next_p->tinfo);
25642+
25643 /*
25644 * Restore %gs if needed (which is common)
25645 */
25646@@ -323,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25647
25648 switch_fpu_finish(next_p, fpu);
25649
25650- this_cpu_write(current_task, next_p);
25651-
25652 return prev_p;
25653 }
25654
25655@@ -354,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
25656 } while (count++ < 16);
25657 return 0;
25658 }
25659-
25660diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
25661index 9c0280f..5bbb1c0 100644
25662--- a/arch/x86/kernel/process_64.c
25663+++ b/arch/x86/kernel/process_64.c
25664@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
25665 struct pt_regs *childregs;
25666 struct task_struct *me = current;
25667
25668- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
25669+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
25670 childregs = task_pt_regs(p);
25671 p->thread.sp = (unsigned long) childregs;
25672 p->thread.usersp = me->thread.usersp;
25673+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
25674 set_tsk_thread_flag(p, TIF_FORK);
25675 p->thread.fpu_counter = 0;
25676 p->thread.io_bitmap_ptr = NULL;
25677@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
25678 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
25679 savesegment(es, p->thread.es);
25680 savesegment(ds, p->thread.ds);
25681+ savesegment(ss, p->thread.ss);
25682+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
25683 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
25684
25685 if (unlikely(p->flags & PF_KTHREAD)) {
25686@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25687 struct thread_struct *prev = &prev_p->thread;
25688 struct thread_struct *next = &next_p->thread;
25689 int cpu = smp_processor_id();
25690- struct tss_struct *tss = &per_cpu(init_tss, cpu);
25691+ struct tss_struct *tss = init_tss + cpu;
25692 unsigned fsindex, gsindex;
25693 fpu_switch_t fpu;
25694
25695@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25696 if (unlikely(next->ds | prev->ds))
25697 loadsegment(ds, next->ds);
25698
25699+ savesegment(ss, prev->ss);
25700+ if (unlikely(next->ss != prev->ss))
25701+ loadsegment(ss, next->ss);
25702
25703 /* We must save %fs and %gs before load_TLS() because
25704 * %fs and %gs may be cleared by load_TLS().
25705@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25706 prev->usersp = this_cpu_read(old_rsp);
25707 this_cpu_write(old_rsp, next->usersp);
25708 this_cpu_write(current_task, next_p);
25709+ this_cpu_write(current_tinfo, &next_p->tinfo);
25710
25711 /*
25712 * If it were not for PREEMPT_ACTIVE we could guarantee that the
25713@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25714 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
25715 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
25716
25717- this_cpu_write(kernel_stack,
25718- (unsigned long)task_stack_page(next_p) +
25719- THREAD_SIZE - KERNEL_STACK_OFFSET);
25720+ this_cpu_write(kernel_stack, next->sp0);
25721
25722 /*
25723 * Now maybe reload the debug registers and handle I/O bitmaps
25724@@ -442,12 +447,11 @@ unsigned long get_wchan(struct task_struct *p)
25725 if (!p || p == current || p->state == TASK_RUNNING)
25726 return 0;
25727 stack = (unsigned long)task_stack_page(p);
25728- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
25729+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
25730 return 0;
25731 fp = *(u64 *)(p->thread.sp);
25732 do {
25733- if (fp < (unsigned long)stack ||
25734- fp >= (unsigned long)stack+THREAD_SIZE)
25735+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
25736 return 0;
25737 ip = *(u64 *)(fp+8);
25738 if (!in_sched_functions(ip))
25739diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
25740index 7461f50..1334029 100644
25741--- a/arch/x86/kernel/ptrace.c
25742+++ b/arch/x86/kernel/ptrace.c
25743@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
25744 {
25745 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
25746 unsigned long sp = (unsigned long)&regs->sp;
25747- struct thread_info *tinfo;
25748
25749- if (context == (sp & ~(THREAD_SIZE - 1)))
25750+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
25751 return sp;
25752
25753- tinfo = (struct thread_info *)context;
25754- if (tinfo->previous_esp)
25755- return tinfo->previous_esp;
25756+ sp = *(unsigned long *)context;
25757+ if (sp)
25758+ return sp;
25759
25760 return (unsigned long)regs;
25761 }
25762@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
25763 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
25764 {
25765 int i;
25766- int dr7 = 0;
25767+ unsigned long dr7 = 0;
25768 struct arch_hw_breakpoint *info;
25769
25770 for (i = 0; i < HBP_NUM; i++) {
25771@@ -822,7 +821,7 @@ long arch_ptrace(struct task_struct *child, long request,
25772 unsigned long addr, unsigned long data)
25773 {
25774 int ret;
25775- unsigned long __user *datap = (unsigned long __user *)data;
25776+ unsigned long __user *datap = (__force unsigned long __user *)data;
25777
25778 switch (request) {
25779 /* read the word at location addr in the USER area. */
25780@@ -907,14 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
25781 if ((int) addr < 0)
25782 return -EIO;
25783 ret = do_get_thread_area(child, addr,
25784- (struct user_desc __user *)data);
25785+ (__force struct user_desc __user *) data);
25786 break;
25787
25788 case PTRACE_SET_THREAD_AREA:
25789 if ((int) addr < 0)
25790 return -EIO;
25791 ret = do_set_thread_area(child, addr,
25792- (struct user_desc __user *)data, 0);
25793+ (__force struct user_desc __user *) data, 0);
25794 break;
25795 #endif
25796
25797@@ -1292,7 +1291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
25798
25799 #ifdef CONFIG_X86_64
25800
25801-static struct user_regset x86_64_regsets[] __read_mostly = {
25802+static user_regset_no_const x86_64_regsets[] __read_only = {
25803 [REGSET_GENERAL] = {
25804 .core_note_type = NT_PRSTATUS,
25805 .n = sizeof(struct user_regs_struct) / sizeof(long),
25806@@ -1333,7 +1332,7 @@ static const struct user_regset_view user_x86_64_view = {
25807 #endif /* CONFIG_X86_64 */
25808
25809 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
25810-static struct user_regset x86_32_regsets[] __read_mostly = {
25811+static user_regset_no_const x86_32_regsets[] __read_only = {
25812 [REGSET_GENERAL] = {
25813 .core_note_type = NT_PRSTATUS,
25814 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
25815@@ -1386,7 +1385,7 @@ static const struct user_regset_view user_x86_32_view = {
25816 */
25817 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
25818
25819-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
25820+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
25821 {
25822 #ifdef CONFIG_X86_64
25823 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
25824@@ -1421,7 +1420,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
25825 memset(info, 0, sizeof(*info));
25826 info->si_signo = SIGTRAP;
25827 info->si_code = si_code;
25828- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
25829+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
25830 }
25831
25832 void user_single_step_siginfo(struct task_struct *tsk,
25833@@ -1450,6 +1449,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
25834 # define IS_IA32 0
25835 #endif
25836
25837+#ifdef CONFIG_GRKERNSEC_SETXID
25838+extern void gr_delayed_cred_worker(void);
25839+#endif
25840+
25841 /*
25842 * We must return the syscall number to actually look up in the table.
25843 * This can be -1L to skip running any syscall at all.
25844@@ -1460,6 +1463,11 @@ long syscall_trace_enter(struct pt_regs *regs)
25845
25846 user_exit();
25847
25848+#ifdef CONFIG_GRKERNSEC_SETXID
25849+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
25850+ gr_delayed_cred_worker();
25851+#endif
25852+
25853 /*
25854 * If we stepped into a sysenter/syscall insn, it trapped in
25855 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
25856@@ -1515,6 +1523,11 @@ void syscall_trace_leave(struct pt_regs *regs)
25857 */
25858 user_exit();
25859
25860+#ifdef CONFIG_GRKERNSEC_SETXID
25861+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
25862+ gr_delayed_cred_worker();
25863+#endif
25864+
25865 audit_syscall_exit(regs);
25866
25867 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
25868diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
25869index 2f355d2..e75ed0a 100644
25870--- a/arch/x86/kernel/pvclock.c
25871+++ b/arch/x86/kernel/pvclock.c
25872@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
25873 reset_hung_task_detector();
25874 }
25875
25876-static atomic64_t last_value = ATOMIC64_INIT(0);
25877+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
25878
25879 void pvclock_resume(void)
25880 {
25881- atomic64_set(&last_value, 0);
25882+ atomic64_set_unchecked(&last_value, 0);
25883 }
25884
25885 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
25886@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
25887 * updating at the same time, and one of them could be slightly behind,
25888 * making the assumption that last_value always go forward fail to hold.
25889 */
25890- last = atomic64_read(&last_value);
25891+ last = atomic64_read_unchecked(&last_value);
25892 do {
25893 if (ret < last)
25894 return last;
25895- last = atomic64_cmpxchg(&last_value, last, ret);
25896+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
25897 } while (unlikely(last != ret));
25898
25899 return ret;
25900diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
25901index c752cb4..866c432 100644
25902--- a/arch/x86/kernel/reboot.c
25903+++ b/arch/x86/kernel/reboot.c
25904@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
25905
25906 void __noreturn machine_real_restart(unsigned int type)
25907 {
25908+
25909+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
25910+ struct desc_struct *gdt;
25911+#endif
25912+
25913 local_irq_disable();
25914
25915 /*
25916@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
25917
25918 /* Jump to the identity-mapped low memory code */
25919 #ifdef CONFIG_X86_32
25920- asm volatile("jmpl *%0" : :
25921+
25922+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
25923+ gdt = get_cpu_gdt_table(smp_processor_id());
25924+ pax_open_kernel();
25925+#ifdef CONFIG_PAX_MEMORY_UDEREF
25926+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
25927+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
25928+ loadsegment(ds, __KERNEL_DS);
25929+ loadsegment(es, __KERNEL_DS);
25930+ loadsegment(ss, __KERNEL_DS);
25931+#endif
25932+#ifdef CONFIG_PAX_KERNEXEC
25933+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
25934+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
25935+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
25936+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
25937+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
25938+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
25939+#endif
25940+ pax_close_kernel();
25941+#endif
25942+
25943+ asm volatile("ljmpl *%0" : :
25944 "rm" (real_mode_header->machine_real_restart_asm),
25945 "a" (type));
25946 #else
25947@@ -470,7 +497,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
25948 * try to force a triple fault and then cycle between hitting the keyboard
25949 * controller and doing that
25950 */
25951-static void native_machine_emergency_restart(void)
25952+static void __noreturn native_machine_emergency_restart(void)
25953 {
25954 int i;
25955 int attempt = 0;
25956@@ -593,13 +620,13 @@ void native_machine_shutdown(void)
25957 #endif
25958 }
25959
25960-static void __machine_emergency_restart(int emergency)
25961+static void __noreturn __machine_emergency_restart(int emergency)
25962 {
25963 reboot_emergency = emergency;
25964 machine_ops.emergency_restart();
25965 }
25966
25967-static void native_machine_restart(char *__unused)
25968+static void __noreturn native_machine_restart(char *__unused)
25969 {
25970 pr_notice("machine restart\n");
25971
25972@@ -608,7 +635,7 @@ static void native_machine_restart(char *__unused)
25973 __machine_emergency_restart(0);
25974 }
25975
25976-static void native_machine_halt(void)
25977+static void __noreturn native_machine_halt(void)
25978 {
25979 /* Stop other cpus and apics */
25980 machine_shutdown();
25981@@ -618,7 +645,7 @@ static void native_machine_halt(void)
25982 stop_this_cpu(NULL);
25983 }
25984
25985-static void native_machine_power_off(void)
25986+static void __noreturn native_machine_power_off(void)
25987 {
25988 if (pm_power_off) {
25989 if (!reboot_force)
25990@@ -627,9 +654,10 @@ static void native_machine_power_off(void)
25991 }
25992 /* A fallback in case there is no PM info available */
25993 tboot_shutdown(TB_SHUTDOWN_HALT);
25994+ unreachable();
25995 }
25996
25997-struct machine_ops machine_ops = {
25998+struct machine_ops machine_ops __read_only = {
25999 .power_off = native_machine_power_off,
26000 .shutdown = native_machine_shutdown,
26001 .emergency_restart = native_machine_emergency_restart,
26002diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
26003index c8e41e9..64049ef 100644
26004--- a/arch/x86/kernel/reboot_fixups_32.c
26005+++ b/arch/x86/kernel/reboot_fixups_32.c
26006@@ -57,7 +57,7 @@ struct device_fixup {
26007 unsigned int vendor;
26008 unsigned int device;
26009 void (*reboot_fixup)(struct pci_dev *);
26010-};
26011+} __do_const;
26012
26013 /*
26014 * PCI ids solely used for fixups_table go here
26015diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
26016index 3fd2c69..16ef367 100644
26017--- a/arch/x86/kernel/relocate_kernel_64.S
26018+++ b/arch/x86/kernel/relocate_kernel_64.S
26019@@ -11,6 +11,7 @@
26020 #include <asm/kexec.h>
26021 #include <asm/processor-flags.h>
26022 #include <asm/pgtable_types.h>
26023+#include <asm/alternative-asm.h>
26024
26025 /*
26026 * Must be relocatable PIC code callable as a C function
26027@@ -96,8 +97,7 @@ relocate_kernel:
26028
26029 /* jump to identity mapped page */
26030 addq $(identity_mapped - relocate_kernel), %r8
26031- pushq %r8
26032- ret
26033+ jmp *%r8
26034
26035 identity_mapped:
26036 /* set return address to 0 if not preserving context */
26037@@ -167,6 +167,7 @@ identity_mapped:
26038 xorl %r14d, %r14d
26039 xorl %r15d, %r15d
26040
26041+ pax_force_retaddr 0, 1
26042 ret
26043
26044 1:
26045diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
26046index cb233bc..23b4879 100644
26047--- a/arch/x86/kernel/setup.c
26048+++ b/arch/x86/kernel/setup.c
26049@@ -110,6 +110,7 @@
26050 #include <asm/mce.h>
26051 #include <asm/alternative.h>
26052 #include <asm/prom.h>
26053+#include <asm/boot.h>
26054
26055 /*
26056 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
26057@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
26058 #endif
26059
26060
26061-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
26062-__visible unsigned long mmu_cr4_features;
26063+#ifdef CONFIG_X86_64
26064+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
26065+#elif defined(CONFIG_X86_PAE)
26066+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
26067 #else
26068-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
26069+__visible unsigned long mmu_cr4_features __read_only;
26070 #endif
26071
26072+void set_in_cr4(unsigned long mask)
26073+{
26074+ unsigned long cr4 = read_cr4();
26075+
26076+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
26077+ return;
26078+
26079+ pax_open_kernel();
26080+ mmu_cr4_features |= mask;
26081+ pax_close_kernel();
26082+
26083+ if (trampoline_cr4_features)
26084+ *trampoline_cr4_features = mmu_cr4_features;
26085+ cr4 |= mask;
26086+ write_cr4(cr4);
26087+}
26088+EXPORT_SYMBOL(set_in_cr4);
26089+
26090+void clear_in_cr4(unsigned long mask)
26091+{
26092+ unsigned long cr4 = read_cr4();
26093+
26094+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
26095+ return;
26096+
26097+ pax_open_kernel();
26098+ mmu_cr4_features &= ~mask;
26099+ pax_close_kernel();
26100+
26101+ if (trampoline_cr4_features)
26102+ *trampoline_cr4_features = mmu_cr4_features;
26103+ cr4 &= ~mask;
26104+ write_cr4(cr4);
26105+}
26106+EXPORT_SYMBOL(clear_in_cr4);
26107+
26108 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
26109 int bootloader_type, bootloader_version;
26110
26111@@ -768,7 +807,7 @@ static void __init trim_bios_range(void)
26112 * area (640->1Mb) as ram even though it is not.
26113 * take them out.
26114 */
26115- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
26116+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
26117
26118 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
26119 }
26120@@ -776,7 +815,7 @@ static void __init trim_bios_range(void)
26121 /* called before trim_bios_range() to spare extra sanitize */
26122 static void __init e820_add_kernel_range(void)
26123 {
26124- u64 start = __pa_symbol(_text);
26125+ u64 start = __pa_symbol(ktla_ktva(_text));
26126 u64 size = __pa_symbol(_end) - start;
26127
26128 /*
26129@@ -838,8 +877,12 @@ static void __init trim_low_memory_range(void)
26130
26131 void __init setup_arch(char **cmdline_p)
26132 {
26133+#ifdef CONFIG_X86_32
26134+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
26135+#else
26136 memblock_reserve(__pa_symbol(_text),
26137 (unsigned long)__bss_stop - (unsigned long)_text);
26138+#endif
26139
26140 early_reserve_initrd();
26141
26142@@ -931,14 +974,14 @@ void __init setup_arch(char **cmdline_p)
26143
26144 if (!boot_params.hdr.root_flags)
26145 root_mountflags &= ~MS_RDONLY;
26146- init_mm.start_code = (unsigned long) _text;
26147- init_mm.end_code = (unsigned long) _etext;
26148+ init_mm.start_code = ktla_ktva((unsigned long) _text);
26149+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
26150 init_mm.end_data = (unsigned long) _edata;
26151 init_mm.brk = _brk_end;
26152
26153- code_resource.start = __pa_symbol(_text);
26154- code_resource.end = __pa_symbol(_etext)-1;
26155- data_resource.start = __pa_symbol(_etext);
26156+ code_resource.start = __pa_symbol(ktla_ktva(_text));
26157+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
26158+ data_resource.start = __pa_symbol(_sdata);
26159 data_resource.end = __pa_symbol(_edata)-1;
26160 bss_resource.start = __pa_symbol(__bss_start);
26161 bss_resource.end = __pa_symbol(__bss_stop)-1;
26162diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
26163index 5cdff03..80fa283 100644
26164--- a/arch/x86/kernel/setup_percpu.c
26165+++ b/arch/x86/kernel/setup_percpu.c
26166@@ -21,19 +21,17 @@
26167 #include <asm/cpu.h>
26168 #include <asm/stackprotector.h>
26169
26170-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
26171+#ifdef CONFIG_SMP
26172+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
26173 EXPORT_PER_CPU_SYMBOL(cpu_number);
26174+#endif
26175
26176-#ifdef CONFIG_X86_64
26177 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
26178-#else
26179-#define BOOT_PERCPU_OFFSET 0
26180-#endif
26181
26182 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
26183 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
26184
26185-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
26186+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
26187 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
26188 };
26189 EXPORT_SYMBOL(__per_cpu_offset);
26190@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
26191 {
26192 #ifdef CONFIG_NEED_MULTIPLE_NODES
26193 pg_data_t *last = NULL;
26194- unsigned int cpu;
26195+ int cpu;
26196
26197 for_each_possible_cpu(cpu) {
26198 int node = early_cpu_to_node(cpu);
26199@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
26200 {
26201 #ifdef CONFIG_X86_32
26202 struct desc_struct gdt;
26203+ unsigned long base = per_cpu_offset(cpu);
26204
26205- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
26206- 0x2 | DESCTYPE_S, 0x8);
26207- gdt.s = 1;
26208+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
26209+ 0x83 | DESCTYPE_S, 0xC);
26210 write_gdt_entry(get_cpu_gdt_table(cpu),
26211 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
26212 #endif
26213@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
26214 /* alrighty, percpu areas up and running */
26215 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
26216 for_each_possible_cpu(cpu) {
26217+#ifdef CONFIG_CC_STACKPROTECTOR
26218+#ifdef CONFIG_X86_32
26219+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
26220+#endif
26221+#endif
26222 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
26223 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
26224 per_cpu(cpu_number, cpu) = cpu;
26225@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
26226 */
26227 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
26228 #endif
26229+#ifdef CONFIG_CC_STACKPROTECTOR
26230+#ifdef CONFIG_X86_32
26231+ if (!cpu)
26232+ per_cpu(stack_canary.canary, cpu) = canary;
26233+#endif
26234+#endif
26235 /*
26236 * Up to this point, the boot CPU has been using .init.data
26237 * area. Reload any changed state for the boot CPU.
26238diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
26239index 9e5de68..16c53cb 100644
26240--- a/arch/x86/kernel/signal.c
26241+++ b/arch/x86/kernel/signal.c
26242@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
26243 * Align the stack pointer according to the i386 ABI,
26244 * i.e. so that on function entry ((sp + 4) & 15) == 0.
26245 */
26246- sp = ((sp + 4) & -16ul) - 4;
26247+ sp = ((sp - 12) & -16ul) - 4;
26248 #else /* !CONFIG_X86_32 */
26249 sp = round_down(sp, 16) - 8;
26250 #endif
26251@@ -298,9 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26252 }
26253
26254 if (current->mm->context.vdso)
26255- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26256+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26257 else
26258- restorer = &frame->retcode;
26259+ restorer = (void __user *)&frame->retcode;
26260 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26261 restorer = ksig->ka.sa.sa_restorer;
26262
26263@@ -314,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26264 * reasons and because gdb uses it as a signature to notice
26265 * signal handler stack frames.
26266 */
26267- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
26268+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
26269
26270 if (err)
26271 return -EFAULT;
26272@@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26273 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
26274
26275 /* Set up to return from userspace. */
26276- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26277+ if (current->mm->context.vdso)
26278+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26279+ else
26280+ restorer = (void __user *)&frame->retcode;
26281 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26282 restorer = ksig->ka.sa.sa_restorer;
26283 put_user_ex(restorer, &frame->pretcode);
26284@@ -373,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26285 * reasons and because gdb uses it as a signature to notice
26286 * signal handler stack frames.
26287 */
26288- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
26289+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
26290 } put_user_catch(err);
26291
26292 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
26293@@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26294 {
26295 int usig = signr_convert(ksig->sig);
26296 sigset_t *set = sigmask_to_save();
26297- compat_sigset_t *cset = (compat_sigset_t *) set;
26298+ sigset_t sigcopy;
26299+ compat_sigset_t *cset;
26300+
26301+ sigcopy = *set;
26302+
26303+ cset = (compat_sigset_t *) &sigcopy;
26304
26305 /* Set up the stack frame */
26306 if (is_ia32_frame()) {
26307@@ -620,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26308 } else if (is_x32_frame()) {
26309 return x32_setup_rt_frame(ksig, cset, regs);
26310 } else {
26311- return __setup_rt_frame(ksig->sig, ksig, set, regs);
26312+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
26313 }
26314 }
26315
26316diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
26317index 7c3a5a6..f0a8961 100644
26318--- a/arch/x86/kernel/smp.c
26319+++ b/arch/x86/kernel/smp.c
26320@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
26321
26322 __setup("nonmi_ipi", nonmi_ipi_setup);
26323
26324-struct smp_ops smp_ops = {
26325+struct smp_ops smp_ops __read_only = {
26326 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
26327 .smp_prepare_cpus = native_smp_prepare_cpus,
26328 .smp_cpus_done = native_smp_cpus_done,
26329diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
26330index 85dc05a..1241266 100644
26331--- a/arch/x86/kernel/smpboot.c
26332+++ b/arch/x86/kernel/smpboot.c
26333@@ -229,14 +229,18 @@ static void notrace start_secondary(void *unused)
26334
26335 enable_start_cpu0 = 0;
26336
26337-#ifdef CONFIG_X86_32
26338- /* switch away from the initial page table */
26339- load_cr3(swapper_pg_dir);
26340- __flush_tlb_all();
26341-#endif
26342-
26343 /* otherwise gcc will move up smp_processor_id before the cpu_init */
26344 barrier();
26345+
26346+ /* switch away from the initial page table */
26347+#ifdef CONFIG_PAX_PER_CPU_PGD
26348+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
26349+ __flush_tlb_all();
26350+#elif defined(CONFIG_X86_32)
26351+ load_cr3(swapper_pg_dir);
26352+ __flush_tlb_all();
26353+#endif
26354+
26355 /*
26356 * Check TSC synchronization with the BP:
26357 */
26358@@ -751,6 +755,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26359 idle->thread.sp = (unsigned long) (((struct pt_regs *)
26360 (THREAD_SIZE + task_stack_page(idle))) - 1);
26361 per_cpu(current_task, cpu) = idle;
26362+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
26363
26364 #ifdef CONFIG_X86_32
26365 /* Stack for startup_32 can be just as for start_secondary onwards */
26366@@ -758,11 +763,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26367 #else
26368 clear_tsk_thread_flag(idle, TIF_FORK);
26369 initial_gs = per_cpu_offset(cpu);
26370- per_cpu(kernel_stack, cpu) =
26371- (unsigned long)task_stack_page(idle) -
26372- KERNEL_STACK_OFFSET + THREAD_SIZE;
26373+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26374 #endif
26375+
26376+ pax_open_kernel();
26377 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
26378+ pax_close_kernel();
26379+
26380 initial_code = (unsigned long)start_secondary;
26381 stack_start = idle->thread.sp;
26382
26383@@ -911,6 +918,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
26384 /* the FPU context is blank, nobody can own it */
26385 __cpu_disable_lazy_restore(cpu);
26386
26387+#ifdef CONFIG_PAX_PER_CPU_PGD
26388+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
26389+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26390+ KERNEL_PGD_PTRS);
26391+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
26392+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26393+ KERNEL_PGD_PTRS);
26394+#endif
26395+
26396 err = do_boot_cpu(apicid, cpu, tidle);
26397 if (err) {
26398 pr_debug("do_boot_cpu failed %d\n", err);
26399diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
26400index 9b4d51d..5d28b58 100644
26401--- a/arch/x86/kernel/step.c
26402+++ b/arch/x86/kernel/step.c
26403@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26404 struct desc_struct *desc;
26405 unsigned long base;
26406
26407- seg &= ~7UL;
26408+ seg >>= 3;
26409
26410 mutex_lock(&child->mm->context.lock);
26411- if (unlikely((seg >> 3) >= child->mm->context.size))
26412+ if (unlikely(seg >= child->mm->context.size))
26413 addr = -1L; /* bogus selector, access would fault */
26414 else {
26415 desc = child->mm->context.ldt + seg;
26416@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26417 addr += base;
26418 }
26419 mutex_unlock(&child->mm->context.lock);
26420- }
26421+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
26422+ addr = ktla_ktva(addr);
26423
26424 return addr;
26425 }
26426@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
26427 unsigned char opcode[15];
26428 unsigned long addr = convert_ip_to_linear(child, regs);
26429
26430+ if (addr == -EINVAL)
26431+ return 0;
26432+
26433 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
26434 for (i = 0; i < copied; i++) {
26435 switch (opcode[i]) {
26436diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
26437new file mode 100644
26438index 0000000..5877189
26439--- /dev/null
26440+++ b/arch/x86/kernel/sys_i386_32.c
26441@@ -0,0 +1,189 @@
26442+/*
26443+ * This file contains various random system calls that
26444+ * have a non-standard calling sequence on the Linux/i386
26445+ * platform.
26446+ */
26447+
26448+#include <linux/errno.h>
26449+#include <linux/sched.h>
26450+#include <linux/mm.h>
26451+#include <linux/fs.h>
26452+#include <linux/smp.h>
26453+#include <linux/sem.h>
26454+#include <linux/msg.h>
26455+#include <linux/shm.h>
26456+#include <linux/stat.h>
26457+#include <linux/syscalls.h>
26458+#include <linux/mman.h>
26459+#include <linux/file.h>
26460+#include <linux/utsname.h>
26461+#include <linux/ipc.h>
26462+#include <linux/elf.h>
26463+
26464+#include <linux/uaccess.h>
26465+#include <linux/unistd.h>
26466+
26467+#include <asm/syscalls.h>
26468+
26469+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
26470+{
26471+ unsigned long pax_task_size = TASK_SIZE;
26472+
26473+#ifdef CONFIG_PAX_SEGMEXEC
26474+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
26475+ pax_task_size = SEGMEXEC_TASK_SIZE;
26476+#endif
26477+
26478+ if (flags & MAP_FIXED)
26479+ if (len > pax_task_size || addr > pax_task_size - len)
26480+ return -EINVAL;
26481+
26482+ return 0;
26483+}
26484+
26485+/*
26486+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
26487+ */
26488+static unsigned long get_align_mask(void)
26489+{
26490+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
26491+ return 0;
26492+
26493+ if (!(current->flags & PF_RANDOMIZE))
26494+ return 0;
26495+
26496+ return va_align.mask;
26497+}
26498+
26499+unsigned long
26500+arch_get_unmapped_area(struct file *filp, unsigned long addr,
26501+ unsigned long len, unsigned long pgoff, unsigned long flags)
26502+{
26503+ struct mm_struct *mm = current->mm;
26504+ struct vm_area_struct *vma;
26505+ unsigned long pax_task_size = TASK_SIZE;
26506+ struct vm_unmapped_area_info info;
26507+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26508+
26509+#ifdef CONFIG_PAX_SEGMEXEC
26510+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26511+ pax_task_size = SEGMEXEC_TASK_SIZE;
26512+#endif
26513+
26514+ pax_task_size -= PAGE_SIZE;
26515+
26516+ if (len > pax_task_size)
26517+ return -ENOMEM;
26518+
26519+ if (flags & MAP_FIXED)
26520+ return addr;
26521+
26522+#ifdef CONFIG_PAX_RANDMMAP
26523+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26524+#endif
26525+
26526+ if (addr) {
26527+ addr = PAGE_ALIGN(addr);
26528+ if (pax_task_size - len >= addr) {
26529+ vma = find_vma(mm, addr);
26530+ if (check_heap_stack_gap(vma, addr, len, offset))
26531+ return addr;
26532+ }
26533+ }
26534+
26535+ info.flags = 0;
26536+ info.length = len;
26537+ info.align_mask = filp ? get_align_mask() : 0;
26538+ info.align_offset = pgoff << PAGE_SHIFT;
26539+ info.threadstack_offset = offset;
26540+
26541+#ifdef CONFIG_PAX_PAGEEXEC
26542+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
26543+ info.low_limit = 0x00110000UL;
26544+ info.high_limit = mm->start_code;
26545+
26546+#ifdef CONFIG_PAX_RANDMMAP
26547+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26548+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
26549+#endif
26550+
26551+ if (info.low_limit < info.high_limit) {
26552+ addr = vm_unmapped_area(&info);
26553+ if (!IS_ERR_VALUE(addr))
26554+ return addr;
26555+ }
26556+ } else
26557+#endif
26558+
26559+ info.low_limit = mm->mmap_base;
26560+ info.high_limit = pax_task_size;
26561+
26562+ return vm_unmapped_area(&info);
26563+}
26564+
26565+unsigned long
26566+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26567+ const unsigned long len, const unsigned long pgoff,
26568+ const unsigned long flags)
26569+{
26570+ struct vm_area_struct *vma;
26571+ struct mm_struct *mm = current->mm;
26572+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
26573+ struct vm_unmapped_area_info info;
26574+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26575+
26576+#ifdef CONFIG_PAX_SEGMEXEC
26577+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26578+ pax_task_size = SEGMEXEC_TASK_SIZE;
26579+#endif
26580+
26581+ pax_task_size -= PAGE_SIZE;
26582+
26583+ /* requested length too big for entire address space */
26584+ if (len > pax_task_size)
26585+ return -ENOMEM;
26586+
26587+ if (flags & MAP_FIXED)
26588+ return addr;
26589+
26590+#ifdef CONFIG_PAX_PAGEEXEC
26591+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
26592+ goto bottomup;
26593+#endif
26594+
26595+#ifdef CONFIG_PAX_RANDMMAP
26596+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26597+#endif
26598+
26599+ /* requesting a specific address */
26600+ if (addr) {
26601+ addr = PAGE_ALIGN(addr);
26602+ if (pax_task_size - len >= addr) {
26603+ vma = find_vma(mm, addr);
26604+ if (check_heap_stack_gap(vma, addr, len, offset))
26605+ return addr;
26606+ }
26607+ }
26608+
26609+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
26610+ info.length = len;
26611+ info.low_limit = PAGE_SIZE;
26612+ info.high_limit = mm->mmap_base;
26613+ info.align_mask = filp ? get_align_mask() : 0;
26614+ info.align_offset = pgoff << PAGE_SHIFT;
26615+ info.threadstack_offset = offset;
26616+
26617+ addr = vm_unmapped_area(&info);
26618+ if (!(addr & ~PAGE_MASK))
26619+ return addr;
26620+ VM_BUG_ON(addr != -ENOMEM);
26621+
26622+bottomup:
26623+ /*
26624+ * A failed mmap() very likely causes application failure,
26625+ * so fall back to the bottom-up function here. This scenario
26626+ * can happen with large stack limits and large mmap()
26627+ * allocations.
26628+ */
26629+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
26630+}
26631diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
26632index 30277e2..5664a29 100644
26633--- a/arch/x86/kernel/sys_x86_64.c
26634+++ b/arch/x86/kernel/sys_x86_64.c
26635@@ -81,8 +81,8 @@ out:
26636 return error;
26637 }
26638
26639-static void find_start_end(unsigned long flags, unsigned long *begin,
26640- unsigned long *end)
26641+static void find_start_end(struct mm_struct *mm, unsigned long flags,
26642+ unsigned long *begin, unsigned long *end)
26643 {
26644 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
26645 unsigned long new_begin;
26646@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
26647 *begin = new_begin;
26648 }
26649 } else {
26650- *begin = current->mm->mmap_legacy_base;
26651+ *begin = mm->mmap_legacy_base;
26652 *end = TASK_SIZE;
26653 }
26654 }
26655@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26656 struct vm_area_struct *vma;
26657 struct vm_unmapped_area_info info;
26658 unsigned long begin, end;
26659+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26660
26661 if (flags & MAP_FIXED)
26662 return addr;
26663
26664- find_start_end(flags, &begin, &end);
26665+ find_start_end(mm, flags, &begin, &end);
26666
26667 if (len > end)
26668 return -ENOMEM;
26669
26670+#ifdef CONFIG_PAX_RANDMMAP
26671+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26672+#endif
26673+
26674 if (addr) {
26675 addr = PAGE_ALIGN(addr);
26676 vma = find_vma(mm, addr);
26677- if (end - len >= addr &&
26678- (!vma || addr + len <= vma->vm_start))
26679+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26680 return addr;
26681 }
26682
26683@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26684 info.high_limit = end;
26685 info.align_mask = filp ? get_align_mask() : 0;
26686 info.align_offset = pgoff << PAGE_SHIFT;
26687+ info.threadstack_offset = offset;
26688 return vm_unmapped_area(&info);
26689 }
26690
26691@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26692 struct mm_struct *mm = current->mm;
26693 unsigned long addr = addr0;
26694 struct vm_unmapped_area_info info;
26695+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26696
26697 /* requested length too big for entire address space */
26698 if (len > TASK_SIZE)
26699@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26700 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
26701 goto bottomup;
26702
26703+#ifdef CONFIG_PAX_RANDMMAP
26704+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26705+#endif
26706+
26707 /* requesting a specific address */
26708 if (addr) {
26709 addr = PAGE_ALIGN(addr);
26710 vma = find_vma(mm, addr);
26711- if (TASK_SIZE - len >= addr &&
26712- (!vma || addr + len <= vma->vm_start))
26713+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26714 return addr;
26715 }
26716
26717@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26718 info.high_limit = mm->mmap_base;
26719 info.align_mask = filp ? get_align_mask() : 0;
26720 info.align_offset = pgoff << PAGE_SHIFT;
26721+ info.threadstack_offset = offset;
26722 addr = vm_unmapped_area(&info);
26723 if (!(addr & ~PAGE_MASK))
26724 return addr;
26725diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
26726index 91a4496..bb87552 100644
26727--- a/arch/x86/kernel/tboot.c
26728+++ b/arch/x86/kernel/tboot.c
26729@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
26730
26731 void tboot_shutdown(u32 shutdown_type)
26732 {
26733- void (*shutdown)(void);
26734+ void (* __noreturn shutdown)(void);
26735
26736 if (!tboot_enabled())
26737 return;
26738@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
26739
26740 switch_to_tboot_pt();
26741
26742- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
26743+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
26744 shutdown();
26745
26746 /* should not reach here */
26747@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
26748 return -ENODEV;
26749 }
26750
26751-static atomic_t ap_wfs_count;
26752+static atomic_unchecked_t ap_wfs_count;
26753
26754 static int tboot_wait_for_aps(int num_aps)
26755 {
26756@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
26757 {
26758 switch (action) {
26759 case CPU_DYING:
26760- atomic_inc(&ap_wfs_count);
26761+ atomic_inc_unchecked(&ap_wfs_count);
26762 if (num_online_cpus() == 1)
26763- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
26764+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
26765 return NOTIFY_BAD;
26766 break;
26767 }
26768@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
26769
26770 tboot_create_trampoline();
26771
26772- atomic_set(&ap_wfs_count, 0);
26773+ atomic_set_unchecked(&ap_wfs_count, 0);
26774 register_hotcpu_notifier(&tboot_cpu_notifier);
26775
26776 #ifdef CONFIG_DEBUG_FS
26777diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
26778index 24d3c91..d06b473 100644
26779--- a/arch/x86/kernel/time.c
26780+++ b/arch/x86/kernel/time.c
26781@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
26782 {
26783 unsigned long pc = instruction_pointer(regs);
26784
26785- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
26786+ if (!user_mode(regs) && in_lock_functions(pc)) {
26787 #ifdef CONFIG_FRAME_POINTER
26788- return *(unsigned long *)(regs->bp + sizeof(long));
26789+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
26790 #else
26791 unsigned long *sp =
26792 (unsigned long *)kernel_stack_pointer(regs);
26793@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
26794 * or above a saved flags. Eflags has bits 22-31 zero,
26795 * kernel addresses don't.
26796 */
26797+
26798+#ifdef CONFIG_PAX_KERNEXEC
26799+ return ktla_ktva(sp[0]);
26800+#else
26801 if (sp[0] >> 22)
26802 return sp[0];
26803 if (sp[1] >> 22)
26804 return sp[1];
26805 #endif
26806+
26807+#endif
26808 }
26809 return pc;
26810 }
26811diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
26812index f7fec09..9991981 100644
26813--- a/arch/x86/kernel/tls.c
26814+++ b/arch/x86/kernel/tls.c
26815@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
26816 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
26817 return -EINVAL;
26818
26819+#ifdef CONFIG_PAX_SEGMEXEC
26820+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
26821+ return -EINVAL;
26822+#endif
26823+
26824 set_tls_desc(p, idx, &info, 1);
26825
26826 return 0;
26827@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
26828
26829 if (kbuf)
26830 info = kbuf;
26831- else if (__copy_from_user(infobuf, ubuf, count))
26832+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
26833 return -EFAULT;
26834 else
26835 info = infobuf;
26836diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
26837index 1c113db..287b42e 100644
26838--- a/arch/x86/kernel/tracepoint.c
26839+++ b/arch/x86/kernel/tracepoint.c
26840@@ -9,11 +9,11 @@
26841 #include <linux/atomic.h>
26842
26843 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
26844-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
26845+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
26846 (unsigned long) trace_idt_table };
26847
26848 /* No need to be aligned, but done to keep all IDTs defined the same way. */
26849-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
26850+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
26851
26852 static int trace_irq_vector_refcount;
26853 static DEFINE_MUTEX(irq_vector_mutex);
26854diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
26855index b857ed8..51ae4cb 100644
26856--- a/arch/x86/kernel/traps.c
26857+++ b/arch/x86/kernel/traps.c
26858@@ -66,7 +66,7 @@
26859 #include <asm/proto.h>
26860
26861 /* No need to be aligned, but done to keep all IDTs defined the same way. */
26862-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
26863+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
26864 #else
26865 #include <asm/processor-flags.h>
26866 #include <asm/setup.h>
26867@@ -75,7 +75,7 @@ asmlinkage int system_call(void);
26868 #endif
26869
26870 /* Must be page-aligned because the real IDT is used in a fixmap. */
26871-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
26872+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
26873
26874 DECLARE_BITMAP(used_vectors, NR_VECTORS);
26875 EXPORT_SYMBOL_GPL(used_vectors);
26876@@ -107,11 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
26877 }
26878
26879 static int __kprobes
26880-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26881+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
26882 struct pt_regs *regs, long error_code)
26883 {
26884 #ifdef CONFIG_X86_32
26885- if (regs->flags & X86_VM_MASK) {
26886+ if (v8086_mode(regs)) {
26887 /*
26888 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
26889 * On nmi (interrupt 2), do_trap should not be called.
26890@@ -124,12 +124,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26891 return -1;
26892 }
26893 #endif
26894- if (!user_mode(regs)) {
26895+ if (!user_mode_novm(regs)) {
26896 if (!fixup_exception(regs)) {
26897 tsk->thread.error_code = error_code;
26898 tsk->thread.trap_nr = trapnr;
26899+
26900+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26901+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
26902+ str = "PAX: suspicious stack segment fault";
26903+#endif
26904+
26905 die(str, regs, error_code);
26906 }
26907+
26908+#ifdef CONFIG_PAX_REFCOUNT
26909+ if (trapnr == 4)
26910+ pax_report_refcount_overflow(regs);
26911+#endif
26912+
26913 return 0;
26914 }
26915
26916@@ -137,7 +149,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26917 }
26918
26919 static void __kprobes
26920-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
26921+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
26922 long error_code, siginfo_t *info)
26923 {
26924 struct task_struct *tsk = current;
26925@@ -161,7 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
26926 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
26927 printk_ratelimit()) {
26928 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
26929- tsk->comm, tsk->pid, str,
26930+ tsk->comm, task_pid_nr(tsk), str,
26931 regs->ip, regs->sp, error_code);
26932 print_vma_addr(" in ", regs->ip);
26933 pr_cont("\n");
26934@@ -277,7 +289,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
26935 conditional_sti(regs);
26936
26937 #ifdef CONFIG_X86_32
26938- if (regs->flags & X86_VM_MASK) {
26939+ if (v8086_mode(regs)) {
26940 local_irq_enable();
26941 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
26942 goto exit;
26943@@ -285,18 +297,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
26944 #endif
26945
26946 tsk = current;
26947- if (!user_mode(regs)) {
26948+ if (!user_mode_novm(regs)) {
26949 if (fixup_exception(regs))
26950 goto exit;
26951
26952 tsk->thread.error_code = error_code;
26953 tsk->thread.trap_nr = X86_TRAP_GP;
26954 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
26955- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
26956+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
26957+
26958+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26959+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
26960+ die("PAX: suspicious general protection fault", regs, error_code);
26961+ else
26962+#endif
26963+
26964 die("general protection fault", regs, error_code);
26965+ }
26966 goto exit;
26967 }
26968
26969+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
26970+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
26971+ struct mm_struct *mm = tsk->mm;
26972+ unsigned long limit;
26973+
26974+ down_write(&mm->mmap_sem);
26975+ limit = mm->context.user_cs_limit;
26976+ if (limit < TASK_SIZE) {
26977+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
26978+ up_write(&mm->mmap_sem);
26979+ return;
26980+ }
26981+ up_write(&mm->mmap_sem);
26982+ }
26983+#endif
26984+
26985 tsk->thread.error_code = error_code;
26986 tsk->thread.trap_nr = X86_TRAP_GP;
26987
26988@@ -457,7 +493,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
26989 /* It's safe to allow irq's after DR6 has been saved */
26990 preempt_conditional_sti(regs);
26991
26992- if (regs->flags & X86_VM_MASK) {
26993+ if (v8086_mode(regs)) {
26994 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
26995 X86_TRAP_DB);
26996 preempt_conditional_cli(regs);
26997@@ -472,7 +508,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
26998 * We already checked v86 mode above, so we can check for kernel mode
26999 * by just checking the CPL of CS.
27000 */
27001- if ((dr6 & DR_STEP) && !user_mode(regs)) {
27002+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
27003 tsk->thread.debugreg6 &= ~DR_STEP;
27004 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
27005 regs->flags &= ~X86_EFLAGS_TF;
27006@@ -504,7 +540,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
27007 return;
27008 conditional_sti(regs);
27009
27010- if (!user_mode_vm(regs))
27011+ if (!user_mode(regs))
27012 {
27013 if (!fixup_exception(regs)) {
27014 task->thread.error_code = error_code;
27015diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
27016index 2ed8459..7cf329f 100644
27017--- a/arch/x86/kernel/uprobes.c
27018+++ b/arch/x86/kernel/uprobes.c
27019@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
27020 int ret = NOTIFY_DONE;
27021
27022 /* We are only interested in userspace traps */
27023- if (regs && !user_mode_vm(regs))
27024+ if (regs && !user_mode(regs))
27025 return NOTIFY_DONE;
27026
27027 switch (val) {
27028@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
27029
27030 if (ncopied != rasize) {
27031 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
27032- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
27033+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
27034
27035 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
27036 }
27037diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
27038index b9242ba..50c5edd 100644
27039--- a/arch/x86/kernel/verify_cpu.S
27040+++ b/arch/x86/kernel/verify_cpu.S
27041@@ -20,6 +20,7 @@
27042 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
27043 * arch/x86/kernel/trampoline_64.S: secondary processor verification
27044 * arch/x86/kernel/head_32.S: processor startup
27045+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
27046 *
27047 * verify_cpu, returns the status of longmode and SSE in register %eax.
27048 * 0: Success 1: Failure
27049diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
27050index e8edcf5..27f9344 100644
27051--- a/arch/x86/kernel/vm86_32.c
27052+++ b/arch/x86/kernel/vm86_32.c
27053@@ -44,6 +44,7 @@
27054 #include <linux/ptrace.h>
27055 #include <linux/audit.h>
27056 #include <linux/stddef.h>
27057+#include <linux/grsecurity.h>
27058
27059 #include <asm/uaccess.h>
27060 #include <asm/io.h>
27061@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
27062 do_exit(SIGSEGV);
27063 }
27064
27065- tss = &per_cpu(init_tss, get_cpu());
27066+ tss = init_tss + get_cpu();
27067 current->thread.sp0 = current->thread.saved_sp0;
27068 current->thread.sysenter_cs = __KERNEL_CS;
27069 load_sp0(tss, &current->thread);
27070@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
27071
27072 if (tsk->thread.saved_sp0)
27073 return -EPERM;
27074+
27075+#ifdef CONFIG_GRKERNSEC_VM86
27076+ if (!capable(CAP_SYS_RAWIO)) {
27077+ gr_handle_vm86();
27078+ return -EPERM;
27079+ }
27080+#endif
27081+
27082 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
27083 offsetof(struct kernel_vm86_struct, vm86plus) -
27084 sizeof(info.regs));
27085@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
27086 int tmp;
27087 struct vm86plus_struct __user *v86;
27088
27089+#ifdef CONFIG_GRKERNSEC_VM86
27090+ if (!capable(CAP_SYS_RAWIO)) {
27091+ gr_handle_vm86();
27092+ return -EPERM;
27093+ }
27094+#endif
27095+
27096 tsk = current;
27097 switch (cmd) {
27098 case VM86_REQUEST_IRQ:
27099@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
27100 tsk->thread.saved_fs = info->regs32->fs;
27101 tsk->thread.saved_gs = get_user_gs(info->regs32);
27102
27103- tss = &per_cpu(init_tss, get_cpu());
27104+ tss = init_tss + get_cpu();
27105 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
27106 if (cpu_has_sep)
27107 tsk->thread.sysenter_cs = 0;
27108@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
27109 goto cannot_handle;
27110 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
27111 goto cannot_handle;
27112- intr_ptr = (unsigned long __user *) (i << 2);
27113+ intr_ptr = (__force unsigned long __user *) (i << 2);
27114 if (get_user(segoffs, intr_ptr))
27115 goto cannot_handle;
27116 if ((segoffs >> 16) == BIOSSEG)
27117diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
27118index da6b35a..977e9cf 100644
27119--- a/arch/x86/kernel/vmlinux.lds.S
27120+++ b/arch/x86/kernel/vmlinux.lds.S
27121@@ -26,6 +26,13 @@
27122 #include <asm/page_types.h>
27123 #include <asm/cache.h>
27124 #include <asm/boot.h>
27125+#include <asm/segment.h>
27126+
27127+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27128+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
27129+#else
27130+#define __KERNEL_TEXT_OFFSET 0
27131+#endif
27132
27133 #undef i386 /* in case the preprocessor is a 32bit one */
27134
27135@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
27136
27137 PHDRS {
27138 text PT_LOAD FLAGS(5); /* R_E */
27139+#ifdef CONFIG_X86_32
27140+ module PT_LOAD FLAGS(5); /* R_E */
27141+#endif
27142+#ifdef CONFIG_XEN
27143+ rodata PT_LOAD FLAGS(5); /* R_E */
27144+#else
27145+ rodata PT_LOAD FLAGS(4); /* R__ */
27146+#endif
27147 data PT_LOAD FLAGS(6); /* RW_ */
27148-#ifdef CONFIG_X86_64
27149+ init.begin PT_LOAD FLAGS(6); /* RW_ */
27150 #ifdef CONFIG_SMP
27151 percpu PT_LOAD FLAGS(6); /* RW_ */
27152 #endif
27153+ text.init PT_LOAD FLAGS(5); /* R_E */
27154+ text.exit PT_LOAD FLAGS(5); /* R_E */
27155 init PT_LOAD FLAGS(7); /* RWE */
27156-#endif
27157 note PT_NOTE FLAGS(0); /* ___ */
27158 }
27159
27160 SECTIONS
27161 {
27162 #ifdef CONFIG_X86_32
27163- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
27164- phys_startup_32 = startup_32 - LOAD_OFFSET;
27165+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
27166 #else
27167- . = __START_KERNEL;
27168- phys_startup_64 = startup_64 - LOAD_OFFSET;
27169+ . = __START_KERNEL;
27170 #endif
27171
27172 /* Text and read-only data */
27173- .text : AT(ADDR(.text) - LOAD_OFFSET) {
27174- _text = .;
27175+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27176 /* bootstrapping code */
27177+#ifdef CONFIG_X86_32
27178+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27179+#else
27180+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27181+#endif
27182+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27183+ _text = .;
27184 HEAD_TEXT
27185 . = ALIGN(8);
27186 _stext = .;
27187@@ -104,13 +124,47 @@ SECTIONS
27188 IRQENTRY_TEXT
27189 *(.fixup)
27190 *(.gnu.warning)
27191- /* End of text section */
27192- _etext = .;
27193 } :text = 0x9090
27194
27195- NOTES :text :note
27196+ . += __KERNEL_TEXT_OFFSET;
27197
27198- EXCEPTION_TABLE(16) :text = 0x9090
27199+#ifdef CONFIG_X86_32
27200+ . = ALIGN(PAGE_SIZE);
27201+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
27202+
27203+#ifdef CONFIG_PAX_KERNEXEC
27204+ MODULES_EXEC_VADDR = .;
27205+ BYTE(0)
27206+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
27207+ . = ALIGN(HPAGE_SIZE) - 1;
27208+ MODULES_EXEC_END = .;
27209+#endif
27210+
27211+ } :module
27212+#endif
27213+
27214+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
27215+ /* End of text section */
27216+ BYTE(0)
27217+ _etext = . - __KERNEL_TEXT_OFFSET;
27218+ }
27219+
27220+#ifdef CONFIG_X86_32
27221+ . = ALIGN(PAGE_SIZE);
27222+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
27223+ . = ALIGN(PAGE_SIZE);
27224+ *(.empty_zero_page)
27225+ *(.initial_pg_fixmap)
27226+ *(.initial_pg_pmd)
27227+ *(.initial_page_table)
27228+ *(.swapper_pg_dir)
27229+ } :rodata
27230+#endif
27231+
27232+ . = ALIGN(PAGE_SIZE);
27233+ NOTES :rodata :note
27234+
27235+ EXCEPTION_TABLE(16) :rodata
27236
27237 #if defined(CONFIG_DEBUG_RODATA)
27238 /* .text should occupy whole number of pages */
27239@@ -122,16 +176,20 @@ SECTIONS
27240
27241 /* Data */
27242 .data : AT(ADDR(.data) - LOAD_OFFSET) {
27243+
27244+#ifdef CONFIG_PAX_KERNEXEC
27245+ . = ALIGN(HPAGE_SIZE);
27246+#else
27247+ . = ALIGN(PAGE_SIZE);
27248+#endif
27249+
27250 /* Start of data section */
27251 _sdata = .;
27252
27253 /* init_task */
27254 INIT_TASK_DATA(THREAD_SIZE)
27255
27256-#ifdef CONFIG_X86_32
27257- /* 32 bit has nosave before _edata */
27258 NOSAVE_DATA
27259-#endif
27260
27261 PAGE_ALIGNED_DATA(PAGE_SIZE)
27262
27263@@ -172,12 +230,19 @@ SECTIONS
27264 #endif /* CONFIG_X86_64 */
27265
27266 /* Init code and data - will be freed after init */
27267- . = ALIGN(PAGE_SIZE);
27268 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
27269+ BYTE(0)
27270+
27271+#ifdef CONFIG_PAX_KERNEXEC
27272+ . = ALIGN(HPAGE_SIZE);
27273+#else
27274+ . = ALIGN(PAGE_SIZE);
27275+#endif
27276+
27277 __init_begin = .; /* paired with __init_end */
27278- }
27279+ } :init.begin
27280
27281-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
27282+#ifdef CONFIG_SMP
27283 /*
27284 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
27285 * output PHDR, so the next output section - .init.text - should
27286@@ -186,12 +251,27 @@ SECTIONS
27287 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
27288 #endif
27289
27290- INIT_TEXT_SECTION(PAGE_SIZE)
27291-#ifdef CONFIG_X86_64
27292- :init
27293-#endif
27294+ . = ALIGN(PAGE_SIZE);
27295+ init_begin = .;
27296+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
27297+ VMLINUX_SYMBOL(_sinittext) = .;
27298+ INIT_TEXT
27299+ VMLINUX_SYMBOL(_einittext) = .;
27300+ . = ALIGN(PAGE_SIZE);
27301+ } :text.init
27302
27303- INIT_DATA_SECTION(16)
27304+ /*
27305+ * .exit.text is discard at runtime, not link time, to deal with
27306+ * references from .altinstructions and .eh_frame
27307+ */
27308+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27309+ EXIT_TEXT
27310+ . = ALIGN(16);
27311+ } :text.exit
27312+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
27313+
27314+ . = ALIGN(PAGE_SIZE);
27315+ INIT_DATA_SECTION(16) :init
27316
27317 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
27318 __x86_cpu_dev_start = .;
27319@@ -262,19 +342,12 @@ SECTIONS
27320 }
27321
27322 . = ALIGN(8);
27323- /*
27324- * .exit.text is discard at runtime, not link time, to deal with
27325- * references from .altinstructions and .eh_frame
27326- */
27327- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
27328- EXIT_TEXT
27329- }
27330
27331 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
27332 EXIT_DATA
27333 }
27334
27335-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
27336+#ifndef CONFIG_SMP
27337 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
27338 #endif
27339
27340@@ -293,16 +366,10 @@ SECTIONS
27341 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
27342 __smp_locks = .;
27343 *(.smp_locks)
27344- . = ALIGN(PAGE_SIZE);
27345 __smp_locks_end = .;
27346+ . = ALIGN(PAGE_SIZE);
27347 }
27348
27349-#ifdef CONFIG_X86_64
27350- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
27351- NOSAVE_DATA
27352- }
27353-#endif
27354-
27355 /* BSS */
27356 . = ALIGN(PAGE_SIZE);
27357 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
27358@@ -318,6 +385,7 @@ SECTIONS
27359 __brk_base = .;
27360 . += 64 * 1024; /* 64k alignment slop space */
27361 *(.brk_reservation) /* areas brk users have reserved */
27362+ . = ALIGN(HPAGE_SIZE);
27363 __brk_limit = .;
27364 }
27365
27366@@ -344,13 +412,12 @@ SECTIONS
27367 * for the boot processor.
27368 */
27369 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
27370-INIT_PER_CPU(gdt_page);
27371 INIT_PER_CPU(irq_stack_union);
27372
27373 /*
27374 * Build-time check on the image size:
27375 */
27376-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
27377+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
27378 "kernel image bigger than KERNEL_IMAGE_SIZE");
27379
27380 #ifdef CONFIG_SMP
27381diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
27382index 1f96f93..d5c8f7a 100644
27383--- a/arch/x86/kernel/vsyscall_64.c
27384+++ b/arch/x86/kernel/vsyscall_64.c
27385@@ -56,15 +56,13 @@
27386 DEFINE_VVAR(int, vgetcpu_mode);
27387 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
27388
27389-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
27390+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
27391
27392 static int __init vsyscall_setup(char *str)
27393 {
27394 if (str) {
27395 if (!strcmp("emulate", str))
27396 vsyscall_mode = EMULATE;
27397- else if (!strcmp("native", str))
27398- vsyscall_mode = NATIVE;
27399 else if (!strcmp("none", str))
27400 vsyscall_mode = NONE;
27401 else
27402@@ -323,8 +321,7 @@ do_ret:
27403 return true;
27404
27405 sigsegv:
27406- force_sig(SIGSEGV, current);
27407- return true;
27408+ do_group_exit(SIGKILL);
27409 }
27410
27411 /*
27412@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
27413 extern char __vvar_page;
27414 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
27415
27416- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
27417- vsyscall_mode == NATIVE
27418- ? PAGE_KERNEL_VSYSCALL
27419- : PAGE_KERNEL_VVAR);
27420+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
27421 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
27422 (unsigned long)VSYSCALL_START);
27423
27424diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
27425index 04068192..4d75aa6 100644
27426--- a/arch/x86/kernel/x8664_ksyms_64.c
27427+++ b/arch/x86/kernel/x8664_ksyms_64.c
27428@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
27429 EXPORT_SYMBOL(copy_user_generic_unrolled);
27430 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
27431 EXPORT_SYMBOL(__copy_user_nocache);
27432-EXPORT_SYMBOL(_copy_from_user);
27433-EXPORT_SYMBOL(_copy_to_user);
27434
27435 EXPORT_SYMBOL(copy_page);
27436 EXPORT_SYMBOL(clear_page);
27437@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
27438 EXPORT_SYMBOL(___preempt_schedule_context);
27439 #endif
27440 #endif
27441+
27442+#ifdef CONFIG_PAX_PER_CPU_PGD
27443+EXPORT_SYMBOL(cpu_pgd);
27444+#endif
27445diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
27446index 021783b..6511282 100644
27447--- a/arch/x86/kernel/x86_init.c
27448+++ b/arch/x86/kernel/x86_init.c
27449@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
27450 static void default_nmi_init(void) { };
27451 static int default_i8042_detect(void) { return 1; };
27452
27453-struct x86_platform_ops x86_platform = {
27454+struct x86_platform_ops x86_platform __read_only = {
27455 .calibrate_tsc = native_calibrate_tsc,
27456 .get_wallclock = mach_get_cmos_time,
27457 .set_wallclock = mach_set_rtc_mmss,
27458@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
27459 EXPORT_SYMBOL_GPL(x86_platform);
27460
27461 #if defined(CONFIG_PCI_MSI)
27462-struct x86_msi_ops x86_msi = {
27463+struct x86_msi_ops x86_msi __read_only = {
27464 .setup_msi_irqs = native_setup_msi_irqs,
27465 .compose_msi_msg = native_compose_msi_msg,
27466 .teardown_msi_irq = native_teardown_msi_irq,
27467@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
27468 }
27469 #endif
27470
27471-struct x86_io_apic_ops x86_io_apic_ops = {
27472+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
27473 .init = native_io_apic_init_mappings,
27474 .read = native_io_apic_read,
27475 .write = native_io_apic_write,
27476diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
27477index 422fd82..b2d262e 100644
27478--- a/arch/x86/kernel/xsave.c
27479+++ b/arch/x86/kernel/xsave.c
27480@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
27481
27482 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
27483 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
27484- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
27485+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
27486
27487 if (!use_xsave())
27488 return err;
27489
27490- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
27491+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
27492
27493 /*
27494 * Read the xstate_bv which we copied (directly from the cpu or
27495 * from the state in task struct) to the user buffers.
27496 */
27497- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
27498+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
27499
27500 /*
27501 * For legacy compatible, we always set FP/SSE bits in the bit
27502@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
27503 */
27504 xstate_bv |= XSTATE_FPSSE;
27505
27506- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
27507+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
27508
27509 return err;
27510 }
27511@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
27512 {
27513 int err;
27514
27515+ buf = (struct xsave_struct __user *)____m(buf);
27516 if (use_xsave())
27517 err = xsave_user(buf);
27518 else if (use_fxsr())
27519@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
27520 */
27521 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
27522 {
27523+ buf = (void __user *)____m(buf);
27524 if (use_xsave()) {
27525 if ((unsigned long)buf % 64 || fx_only) {
27526 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
27527diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
27528index c697625..a032162 100644
27529--- a/arch/x86/kvm/cpuid.c
27530+++ b/arch/x86/kvm/cpuid.c
27531@@ -156,15 +156,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
27532 struct kvm_cpuid2 *cpuid,
27533 struct kvm_cpuid_entry2 __user *entries)
27534 {
27535- int r;
27536+ int r, i;
27537
27538 r = -E2BIG;
27539 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
27540 goto out;
27541 r = -EFAULT;
27542- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
27543- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
27544+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
27545 goto out;
27546+ for (i = 0; i < cpuid->nent; ++i) {
27547+ struct kvm_cpuid_entry2 cpuid_entry;
27548+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
27549+ goto out;
27550+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
27551+ }
27552 vcpu->arch.cpuid_nent = cpuid->nent;
27553 kvm_apic_set_version(vcpu);
27554 kvm_x86_ops->cpuid_update(vcpu);
27555@@ -179,15 +184,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
27556 struct kvm_cpuid2 *cpuid,
27557 struct kvm_cpuid_entry2 __user *entries)
27558 {
27559- int r;
27560+ int r, i;
27561
27562 r = -E2BIG;
27563 if (cpuid->nent < vcpu->arch.cpuid_nent)
27564 goto out;
27565 r = -EFAULT;
27566- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
27567- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
27568+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
27569 goto out;
27570+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
27571+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
27572+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
27573+ goto out;
27574+ }
27575 return 0;
27576
27577 out:
27578diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
27579index d86ff15..e77b023 100644
27580--- a/arch/x86/kvm/lapic.c
27581+++ b/arch/x86/kvm/lapic.c
27582@@ -55,7 +55,7 @@
27583 #define APIC_BUS_CYCLE_NS 1
27584
27585 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
27586-#define apic_debug(fmt, arg...)
27587+#define apic_debug(fmt, arg...) do {} while (0)
27588
27589 #define APIC_LVT_NUM 6
27590 /* 14 is the version for Xeon and Pentium 8.4.8*/
27591diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
27592index ad75d77..a679d32 100644
27593--- a/arch/x86/kvm/paging_tmpl.h
27594+++ b/arch/x86/kvm/paging_tmpl.h
27595@@ -331,7 +331,7 @@ retry_walk:
27596 if (unlikely(kvm_is_error_hva(host_addr)))
27597 goto error;
27598
27599- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
27600+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
27601 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
27602 goto error;
27603 walker->ptep_user[walker->level - 1] = ptep_user;
27604diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
27605index c7168a5..09070fc 100644
27606--- a/arch/x86/kvm/svm.c
27607+++ b/arch/x86/kvm/svm.c
27608@@ -3497,7 +3497,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
27609 int cpu = raw_smp_processor_id();
27610
27611 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
27612+
27613+ pax_open_kernel();
27614 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
27615+ pax_close_kernel();
27616+
27617 load_TR_desc();
27618 }
27619
27620@@ -3898,6 +3902,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
27621 #endif
27622 #endif
27623
27624+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
27625+ __set_fs(current_thread_info()->addr_limit);
27626+#endif
27627+
27628 reload_tss(vcpu);
27629
27630 local_irq_disable();
27631diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
27632index da7837e..86c6ebf 100644
27633--- a/arch/x86/kvm/vmx.c
27634+++ b/arch/x86/kvm/vmx.c
27635@@ -1316,12 +1316,12 @@ static void vmcs_write64(unsigned long field, u64 value)
27636 #endif
27637 }
27638
27639-static void vmcs_clear_bits(unsigned long field, u32 mask)
27640+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
27641 {
27642 vmcs_writel(field, vmcs_readl(field) & ~mask);
27643 }
27644
27645-static void vmcs_set_bits(unsigned long field, u32 mask)
27646+static void vmcs_set_bits(unsigned long field, unsigned long mask)
27647 {
27648 vmcs_writel(field, vmcs_readl(field) | mask);
27649 }
27650@@ -1522,7 +1522,11 @@ static void reload_tss(void)
27651 struct desc_struct *descs;
27652
27653 descs = (void *)gdt->address;
27654+
27655+ pax_open_kernel();
27656 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
27657+ pax_close_kernel();
27658+
27659 load_TR_desc();
27660 }
27661
27662@@ -1746,6 +1750,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
27663 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
27664 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
27665
27666+#ifdef CONFIG_PAX_PER_CPU_PGD
27667+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
27668+#endif
27669+
27670 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
27671 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
27672 vmx->loaded_vmcs->cpu = cpu;
27673@@ -2033,7 +2041,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
27674 * reads and returns guest's timestamp counter "register"
27675 * guest_tsc = host_tsc + tsc_offset -- 21.3
27676 */
27677-static u64 guest_read_tsc(void)
27678+static u64 __intentional_overflow(-1) guest_read_tsc(void)
27679 {
27680 u64 host_tsc, tsc_offset;
27681
27682@@ -2987,8 +2995,11 @@ static __init int hardware_setup(void)
27683 if (!cpu_has_vmx_flexpriority())
27684 flexpriority_enabled = 0;
27685
27686- if (!cpu_has_vmx_tpr_shadow())
27687- kvm_x86_ops->update_cr8_intercept = NULL;
27688+ if (!cpu_has_vmx_tpr_shadow()) {
27689+ pax_open_kernel();
27690+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
27691+ pax_close_kernel();
27692+ }
27693
27694 if (enable_ept && !cpu_has_vmx_ept_2m_page())
27695 kvm_disable_largepages();
27696@@ -2999,13 +3010,15 @@ static __init int hardware_setup(void)
27697 if (!cpu_has_vmx_apicv())
27698 enable_apicv = 0;
27699
27700+ pax_open_kernel();
27701 if (enable_apicv)
27702- kvm_x86_ops->update_cr8_intercept = NULL;
27703+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
27704 else {
27705- kvm_x86_ops->hwapic_irr_update = NULL;
27706- kvm_x86_ops->deliver_posted_interrupt = NULL;
27707- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
27708+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
27709+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
27710+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
27711 }
27712+ pax_close_kernel();
27713
27714 if (nested)
27715 nested_vmx_setup_ctls_msrs();
27716@@ -4134,7 +4147,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
27717
27718 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
27719 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
27720+
27721+#ifndef CONFIG_PAX_PER_CPU_PGD
27722 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
27723+#endif
27724
27725 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
27726 #ifdef CONFIG_X86_64
27727@@ -4156,7 +4172,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
27728 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
27729 vmx->host_idt_base = dt.address;
27730
27731- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
27732+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
27733
27734 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
27735 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
27736@@ -7219,6 +7235,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27737 "jmp 2f \n\t"
27738 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
27739 "2: "
27740+
27741+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27742+ "ljmp %[cs],$3f\n\t"
27743+ "3: "
27744+#endif
27745+
27746 /* Save guest registers, load host registers, keep flags */
27747 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
27748 "pop %0 \n\t"
27749@@ -7271,6 +7293,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27750 #endif
27751 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
27752 [wordsize]"i"(sizeof(ulong))
27753+
27754+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27755+ ,[cs]"i"(__KERNEL_CS)
27756+#endif
27757+
27758 : "cc", "memory"
27759 #ifdef CONFIG_X86_64
27760 , "rax", "rbx", "rdi", "rsi"
27761@@ -7284,7 +7311,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27762 if (debugctlmsr)
27763 update_debugctlmsr(debugctlmsr);
27764
27765-#ifndef CONFIG_X86_64
27766+#ifdef CONFIG_X86_32
27767 /*
27768 * The sysexit path does not restore ds/es, so we must set them to
27769 * a reasonable value ourselves.
27770@@ -7293,8 +7320,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27771 * may be executed in interrupt context, which saves and restore segments
27772 * around it, nullifying its effect.
27773 */
27774- loadsegment(ds, __USER_DS);
27775- loadsegment(es, __USER_DS);
27776+ loadsegment(ds, __KERNEL_DS);
27777+ loadsegment(es, __KERNEL_DS);
27778+ loadsegment(ss, __KERNEL_DS);
27779+
27780+#ifdef CONFIG_PAX_KERNEXEC
27781+ loadsegment(fs, __KERNEL_PERCPU);
27782+#endif
27783+
27784+#ifdef CONFIG_PAX_MEMORY_UDEREF
27785+ __set_fs(current_thread_info()->addr_limit);
27786+#endif
27787+
27788 #endif
27789
27790 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
27791diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
27792index d89d51b..f3c612a 100644
27793--- a/arch/x86/kvm/x86.c
27794+++ b/arch/x86/kvm/x86.c
27795@@ -1791,8 +1791,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
27796 {
27797 struct kvm *kvm = vcpu->kvm;
27798 int lm = is_long_mode(vcpu);
27799- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
27800- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
27801+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
27802+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
27803 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
27804 : kvm->arch.xen_hvm_config.blob_size_32;
27805 u32 page_num = data & ~PAGE_MASK;
27806@@ -2676,6 +2676,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
27807 if (n < msr_list.nmsrs)
27808 goto out;
27809 r = -EFAULT;
27810+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
27811+ goto out;
27812 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
27813 num_msrs_to_save * sizeof(u32)))
27814 goto out;
27815@@ -5485,7 +5487,7 @@ static struct notifier_block pvclock_gtod_notifier = {
27816 };
27817 #endif
27818
27819-int kvm_arch_init(void *opaque)
27820+int kvm_arch_init(const void *opaque)
27821 {
27822 int r;
27823 struct kvm_x86_ops *ops = opaque;
27824diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
27825index bdf8532..f63c587 100644
27826--- a/arch/x86/lguest/boot.c
27827+++ b/arch/x86/lguest/boot.c
27828@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
27829 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
27830 * Launcher to reboot us.
27831 */
27832-static void lguest_restart(char *reason)
27833+static __noreturn void lguest_restart(char *reason)
27834 {
27835 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
27836+ BUG();
27837 }
27838
27839 /*G:050
27840diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
27841index 00933d5..3a64af9 100644
27842--- a/arch/x86/lib/atomic64_386_32.S
27843+++ b/arch/x86/lib/atomic64_386_32.S
27844@@ -48,6 +48,10 @@ BEGIN(read)
27845 movl (v), %eax
27846 movl 4(v), %edx
27847 RET_ENDP
27848+BEGIN(read_unchecked)
27849+ movl (v), %eax
27850+ movl 4(v), %edx
27851+RET_ENDP
27852 #undef v
27853
27854 #define v %esi
27855@@ -55,6 +59,10 @@ BEGIN(set)
27856 movl %ebx, (v)
27857 movl %ecx, 4(v)
27858 RET_ENDP
27859+BEGIN(set_unchecked)
27860+ movl %ebx, (v)
27861+ movl %ecx, 4(v)
27862+RET_ENDP
27863 #undef v
27864
27865 #define v %esi
27866@@ -70,6 +78,20 @@ RET_ENDP
27867 BEGIN(add)
27868 addl %eax, (v)
27869 adcl %edx, 4(v)
27870+
27871+#ifdef CONFIG_PAX_REFCOUNT
27872+ jno 0f
27873+ subl %eax, (v)
27874+ sbbl %edx, 4(v)
27875+ int $4
27876+0:
27877+ _ASM_EXTABLE(0b, 0b)
27878+#endif
27879+
27880+RET_ENDP
27881+BEGIN(add_unchecked)
27882+ addl %eax, (v)
27883+ adcl %edx, 4(v)
27884 RET_ENDP
27885 #undef v
27886
27887@@ -77,6 +99,24 @@ RET_ENDP
27888 BEGIN(add_return)
27889 addl (v), %eax
27890 adcl 4(v), %edx
27891+
27892+#ifdef CONFIG_PAX_REFCOUNT
27893+ into
27894+1234:
27895+ _ASM_EXTABLE(1234b, 2f)
27896+#endif
27897+
27898+ movl %eax, (v)
27899+ movl %edx, 4(v)
27900+
27901+#ifdef CONFIG_PAX_REFCOUNT
27902+2:
27903+#endif
27904+
27905+RET_ENDP
27906+BEGIN(add_return_unchecked)
27907+ addl (v), %eax
27908+ adcl 4(v), %edx
27909 movl %eax, (v)
27910 movl %edx, 4(v)
27911 RET_ENDP
27912@@ -86,6 +126,20 @@ RET_ENDP
27913 BEGIN(sub)
27914 subl %eax, (v)
27915 sbbl %edx, 4(v)
27916+
27917+#ifdef CONFIG_PAX_REFCOUNT
27918+ jno 0f
27919+ addl %eax, (v)
27920+ adcl %edx, 4(v)
27921+ int $4
27922+0:
27923+ _ASM_EXTABLE(0b, 0b)
27924+#endif
27925+
27926+RET_ENDP
27927+BEGIN(sub_unchecked)
27928+ subl %eax, (v)
27929+ sbbl %edx, 4(v)
27930 RET_ENDP
27931 #undef v
27932
27933@@ -96,6 +150,27 @@ BEGIN(sub_return)
27934 sbbl $0, %edx
27935 addl (v), %eax
27936 adcl 4(v), %edx
27937+
27938+#ifdef CONFIG_PAX_REFCOUNT
27939+ into
27940+1234:
27941+ _ASM_EXTABLE(1234b, 2f)
27942+#endif
27943+
27944+ movl %eax, (v)
27945+ movl %edx, 4(v)
27946+
27947+#ifdef CONFIG_PAX_REFCOUNT
27948+2:
27949+#endif
27950+
27951+RET_ENDP
27952+BEGIN(sub_return_unchecked)
27953+ negl %edx
27954+ negl %eax
27955+ sbbl $0, %edx
27956+ addl (v), %eax
27957+ adcl 4(v), %edx
27958 movl %eax, (v)
27959 movl %edx, 4(v)
27960 RET_ENDP
27961@@ -105,6 +180,20 @@ RET_ENDP
27962 BEGIN(inc)
27963 addl $1, (v)
27964 adcl $0, 4(v)
27965+
27966+#ifdef CONFIG_PAX_REFCOUNT
27967+ jno 0f
27968+ subl $1, (v)
27969+ sbbl $0, 4(v)
27970+ int $4
27971+0:
27972+ _ASM_EXTABLE(0b, 0b)
27973+#endif
27974+
27975+RET_ENDP
27976+BEGIN(inc_unchecked)
27977+ addl $1, (v)
27978+ adcl $0, 4(v)
27979 RET_ENDP
27980 #undef v
27981
27982@@ -114,6 +203,26 @@ BEGIN(inc_return)
27983 movl 4(v), %edx
27984 addl $1, %eax
27985 adcl $0, %edx
27986+
27987+#ifdef CONFIG_PAX_REFCOUNT
27988+ into
27989+1234:
27990+ _ASM_EXTABLE(1234b, 2f)
27991+#endif
27992+
27993+ movl %eax, (v)
27994+ movl %edx, 4(v)
27995+
27996+#ifdef CONFIG_PAX_REFCOUNT
27997+2:
27998+#endif
27999+
28000+RET_ENDP
28001+BEGIN(inc_return_unchecked)
28002+ movl (v), %eax
28003+ movl 4(v), %edx
28004+ addl $1, %eax
28005+ adcl $0, %edx
28006 movl %eax, (v)
28007 movl %edx, 4(v)
28008 RET_ENDP
28009@@ -123,6 +232,20 @@ RET_ENDP
28010 BEGIN(dec)
28011 subl $1, (v)
28012 sbbl $0, 4(v)
28013+
28014+#ifdef CONFIG_PAX_REFCOUNT
28015+ jno 0f
28016+ addl $1, (v)
28017+ adcl $0, 4(v)
28018+ int $4
28019+0:
28020+ _ASM_EXTABLE(0b, 0b)
28021+#endif
28022+
28023+RET_ENDP
28024+BEGIN(dec_unchecked)
28025+ subl $1, (v)
28026+ sbbl $0, 4(v)
28027 RET_ENDP
28028 #undef v
28029
28030@@ -132,6 +255,26 @@ BEGIN(dec_return)
28031 movl 4(v), %edx
28032 subl $1, %eax
28033 sbbl $0, %edx
28034+
28035+#ifdef CONFIG_PAX_REFCOUNT
28036+ into
28037+1234:
28038+ _ASM_EXTABLE(1234b, 2f)
28039+#endif
28040+
28041+ movl %eax, (v)
28042+ movl %edx, 4(v)
28043+
28044+#ifdef CONFIG_PAX_REFCOUNT
28045+2:
28046+#endif
28047+
28048+RET_ENDP
28049+BEGIN(dec_return_unchecked)
28050+ movl (v), %eax
28051+ movl 4(v), %edx
28052+ subl $1, %eax
28053+ sbbl $0, %edx
28054 movl %eax, (v)
28055 movl %edx, 4(v)
28056 RET_ENDP
28057@@ -143,6 +286,13 @@ BEGIN(add_unless)
28058 adcl %edx, %edi
28059 addl (v), %eax
28060 adcl 4(v), %edx
28061+
28062+#ifdef CONFIG_PAX_REFCOUNT
28063+ into
28064+1234:
28065+ _ASM_EXTABLE(1234b, 2f)
28066+#endif
28067+
28068 cmpl %eax, %ecx
28069 je 3f
28070 1:
28071@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
28072 1:
28073 addl $1, %eax
28074 adcl $0, %edx
28075+
28076+#ifdef CONFIG_PAX_REFCOUNT
28077+ into
28078+1234:
28079+ _ASM_EXTABLE(1234b, 2f)
28080+#endif
28081+
28082 movl %eax, (v)
28083 movl %edx, 4(v)
28084 movl $1, %eax
28085@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
28086 movl 4(v), %edx
28087 subl $1, %eax
28088 sbbl $0, %edx
28089+
28090+#ifdef CONFIG_PAX_REFCOUNT
28091+ into
28092+1234:
28093+ _ASM_EXTABLE(1234b, 1f)
28094+#endif
28095+
28096 js 1f
28097 movl %eax, (v)
28098 movl %edx, 4(v)
28099diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
28100index f5cc9eb..51fa319 100644
28101--- a/arch/x86/lib/atomic64_cx8_32.S
28102+++ b/arch/x86/lib/atomic64_cx8_32.S
28103@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
28104 CFI_STARTPROC
28105
28106 read64 %ecx
28107+ pax_force_retaddr
28108 ret
28109 CFI_ENDPROC
28110 ENDPROC(atomic64_read_cx8)
28111
28112+ENTRY(atomic64_read_unchecked_cx8)
28113+ CFI_STARTPROC
28114+
28115+ read64 %ecx
28116+ pax_force_retaddr
28117+ ret
28118+ CFI_ENDPROC
28119+ENDPROC(atomic64_read_unchecked_cx8)
28120+
28121 ENTRY(atomic64_set_cx8)
28122 CFI_STARTPROC
28123
28124@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
28125 cmpxchg8b (%esi)
28126 jne 1b
28127
28128+ pax_force_retaddr
28129 ret
28130 CFI_ENDPROC
28131 ENDPROC(atomic64_set_cx8)
28132
28133+ENTRY(atomic64_set_unchecked_cx8)
28134+ CFI_STARTPROC
28135+
28136+1:
28137+/* we don't need LOCK_PREFIX since aligned 64-bit writes
28138+ * are atomic on 586 and newer */
28139+ cmpxchg8b (%esi)
28140+ jne 1b
28141+
28142+ pax_force_retaddr
28143+ ret
28144+ CFI_ENDPROC
28145+ENDPROC(atomic64_set_unchecked_cx8)
28146+
28147 ENTRY(atomic64_xchg_cx8)
28148 CFI_STARTPROC
28149
28150@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
28151 cmpxchg8b (%esi)
28152 jne 1b
28153
28154+ pax_force_retaddr
28155 ret
28156 CFI_ENDPROC
28157 ENDPROC(atomic64_xchg_cx8)
28158
28159-.macro addsub_return func ins insc
28160-ENTRY(atomic64_\func\()_return_cx8)
28161+.macro addsub_return func ins insc unchecked=""
28162+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
28163 CFI_STARTPROC
28164 SAVE ebp
28165 SAVE ebx
28166@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
28167 movl %edx, %ecx
28168 \ins\()l %esi, %ebx
28169 \insc\()l %edi, %ecx
28170+
28171+.ifb \unchecked
28172+#ifdef CONFIG_PAX_REFCOUNT
28173+ into
28174+2:
28175+ _ASM_EXTABLE(2b, 3f)
28176+#endif
28177+.endif
28178+
28179 LOCK_PREFIX
28180 cmpxchg8b (%ebp)
28181 jne 1b
28182-
28183-10:
28184 movl %ebx, %eax
28185 movl %ecx, %edx
28186+
28187+.ifb \unchecked
28188+#ifdef CONFIG_PAX_REFCOUNT
28189+3:
28190+#endif
28191+.endif
28192+
28193 RESTORE edi
28194 RESTORE esi
28195 RESTORE ebx
28196 RESTORE ebp
28197+ pax_force_retaddr
28198 ret
28199 CFI_ENDPROC
28200-ENDPROC(atomic64_\func\()_return_cx8)
28201+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28202 .endm
28203
28204 addsub_return add add adc
28205 addsub_return sub sub sbb
28206+addsub_return add add adc _unchecked
28207+addsub_return sub sub sbb _unchecked
28208
28209-.macro incdec_return func ins insc
28210-ENTRY(atomic64_\func\()_return_cx8)
28211+.macro incdec_return func ins insc unchecked=""
28212+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
28213 CFI_STARTPROC
28214 SAVE ebx
28215
28216@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
28217 movl %edx, %ecx
28218 \ins\()l $1, %ebx
28219 \insc\()l $0, %ecx
28220+
28221+.ifb \unchecked
28222+#ifdef CONFIG_PAX_REFCOUNT
28223+ into
28224+2:
28225+ _ASM_EXTABLE(2b, 3f)
28226+#endif
28227+.endif
28228+
28229 LOCK_PREFIX
28230 cmpxchg8b (%esi)
28231 jne 1b
28232
28233-10:
28234 movl %ebx, %eax
28235 movl %ecx, %edx
28236+
28237+.ifb \unchecked
28238+#ifdef CONFIG_PAX_REFCOUNT
28239+3:
28240+#endif
28241+.endif
28242+
28243 RESTORE ebx
28244+ pax_force_retaddr
28245 ret
28246 CFI_ENDPROC
28247-ENDPROC(atomic64_\func\()_return_cx8)
28248+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28249 .endm
28250
28251 incdec_return inc add adc
28252 incdec_return dec sub sbb
28253+incdec_return inc add adc _unchecked
28254+incdec_return dec sub sbb _unchecked
28255
28256 ENTRY(atomic64_dec_if_positive_cx8)
28257 CFI_STARTPROC
28258@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
28259 movl %edx, %ecx
28260 subl $1, %ebx
28261 sbb $0, %ecx
28262+
28263+#ifdef CONFIG_PAX_REFCOUNT
28264+ into
28265+1234:
28266+ _ASM_EXTABLE(1234b, 2f)
28267+#endif
28268+
28269 js 2f
28270 LOCK_PREFIX
28271 cmpxchg8b (%esi)
28272@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
28273 movl %ebx, %eax
28274 movl %ecx, %edx
28275 RESTORE ebx
28276+ pax_force_retaddr
28277 ret
28278 CFI_ENDPROC
28279 ENDPROC(atomic64_dec_if_positive_cx8)
28280@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
28281 movl %edx, %ecx
28282 addl %ebp, %ebx
28283 adcl %edi, %ecx
28284+
28285+#ifdef CONFIG_PAX_REFCOUNT
28286+ into
28287+1234:
28288+ _ASM_EXTABLE(1234b, 3f)
28289+#endif
28290+
28291 LOCK_PREFIX
28292 cmpxchg8b (%esi)
28293 jne 1b
28294@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
28295 CFI_ADJUST_CFA_OFFSET -8
28296 RESTORE ebx
28297 RESTORE ebp
28298+ pax_force_retaddr
28299 ret
28300 4:
28301 cmpl %edx, 4(%esp)
28302@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
28303 xorl %ecx, %ecx
28304 addl $1, %ebx
28305 adcl %edx, %ecx
28306+
28307+#ifdef CONFIG_PAX_REFCOUNT
28308+ into
28309+1234:
28310+ _ASM_EXTABLE(1234b, 3f)
28311+#endif
28312+
28313 LOCK_PREFIX
28314 cmpxchg8b (%esi)
28315 jne 1b
28316@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
28317 movl $1, %eax
28318 3:
28319 RESTORE ebx
28320+ pax_force_retaddr
28321 ret
28322 CFI_ENDPROC
28323 ENDPROC(atomic64_inc_not_zero_cx8)
28324diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
28325index e78b8ee..7e173a8 100644
28326--- a/arch/x86/lib/checksum_32.S
28327+++ b/arch/x86/lib/checksum_32.S
28328@@ -29,7 +29,8 @@
28329 #include <asm/dwarf2.h>
28330 #include <asm/errno.h>
28331 #include <asm/asm.h>
28332-
28333+#include <asm/segment.h>
28334+
28335 /*
28336 * computes a partial checksum, e.g. for TCP/UDP fragments
28337 */
28338@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
28339
28340 #define ARGBASE 16
28341 #define FP 12
28342-
28343-ENTRY(csum_partial_copy_generic)
28344+
28345+ENTRY(csum_partial_copy_generic_to_user)
28346 CFI_STARTPROC
28347+
28348+#ifdef CONFIG_PAX_MEMORY_UDEREF
28349+ pushl_cfi %gs
28350+ popl_cfi %es
28351+ jmp csum_partial_copy_generic
28352+#endif
28353+
28354+ENTRY(csum_partial_copy_generic_from_user)
28355+
28356+#ifdef CONFIG_PAX_MEMORY_UDEREF
28357+ pushl_cfi %gs
28358+ popl_cfi %ds
28359+#endif
28360+
28361+ENTRY(csum_partial_copy_generic)
28362 subl $4,%esp
28363 CFI_ADJUST_CFA_OFFSET 4
28364 pushl_cfi %edi
28365@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
28366 jmp 4f
28367 SRC(1: movw (%esi), %bx )
28368 addl $2, %esi
28369-DST( movw %bx, (%edi) )
28370+DST( movw %bx, %es:(%edi) )
28371 addl $2, %edi
28372 addw %bx, %ax
28373 adcl $0, %eax
28374@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
28375 SRC(1: movl (%esi), %ebx )
28376 SRC( movl 4(%esi), %edx )
28377 adcl %ebx, %eax
28378-DST( movl %ebx, (%edi) )
28379+DST( movl %ebx, %es:(%edi) )
28380 adcl %edx, %eax
28381-DST( movl %edx, 4(%edi) )
28382+DST( movl %edx, %es:4(%edi) )
28383
28384 SRC( movl 8(%esi), %ebx )
28385 SRC( movl 12(%esi), %edx )
28386 adcl %ebx, %eax
28387-DST( movl %ebx, 8(%edi) )
28388+DST( movl %ebx, %es:8(%edi) )
28389 adcl %edx, %eax
28390-DST( movl %edx, 12(%edi) )
28391+DST( movl %edx, %es:12(%edi) )
28392
28393 SRC( movl 16(%esi), %ebx )
28394 SRC( movl 20(%esi), %edx )
28395 adcl %ebx, %eax
28396-DST( movl %ebx, 16(%edi) )
28397+DST( movl %ebx, %es:16(%edi) )
28398 adcl %edx, %eax
28399-DST( movl %edx, 20(%edi) )
28400+DST( movl %edx, %es:20(%edi) )
28401
28402 SRC( movl 24(%esi), %ebx )
28403 SRC( movl 28(%esi), %edx )
28404 adcl %ebx, %eax
28405-DST( movl %ebx, 24(%edi) )
28406+DST( movl %ebx, %es:24(%edi) )
28407 adcl %edx, %eax
28408-DST( movl %edx, 28(%edi) )
28409+DST( movl %edx, %es:28(%edi) )
28410
28411 lea 32(%esi), %esi
28412 lea 32(%edi), %edi
28413@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
28414 shrl $2, %edx # This clears CF
28415 SRC(3: movl (%esi), %ebx )
28416 adcl %ebx, %eax
28417-DST( movl %ebx, (%edi) )
28418+DST( movl %ebx, %es:(%edi) )
28419 lea 4(%esi), %esi
28420 lea 4(%edi), %edi
28421 dec %edx
28422@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
28423 jb 5f
28424 SRC( movw (%esi), %cx )
28425 leal 2(%esi), %esi
28426-DST( movw %cx, (%edi) )
28427+DST( movw %cx, %es:(%edi) )
28428 leal 2(%edi), %edi
28429 je 6f
28430 shll $16,%ecx
28431 SRC(5: movb (%esi), %cl )
28432-DST( movb %cl, (%edi) )
28433+DST( movb %cl, %es:(%edi) )
28434 6: addl %ecx, %eax
28435 adcl $0, %eax
28436 7:
28437@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
28438
28439 6001:
28440 movl ARGBASE+20(%esp), %ebx # src_err_ptr
28441- movl $-EFAULT, (%ebx)
28442+ movl $-EFAULT, %ss:(%ebx)
28443
28444 # zero the complete destination - computing the rest
28445 # is too much work
28446@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
28447
28448 6002:
28449 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28450- movl $-EFAULT,(%ebx)
28451+ movl $-EFAULT,%ss:(%ebx)
28452 jmp 5000b
28453
28454 .previous
28455
28456+ pushl_cfi %ss
28457+ popl_cfi %ds
28458+ pushl_cfi %ss
28459+ popl_cfi %es
28460 popl_cfi %ebx
28461 CFI_RESTORE ebx
28462 popl_cfi %esi
28463@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
28464 popl_cfi %ecx # equivalent to addl $4,%esp
28465 ret
28466 CFI_ENDPROC
28467-ENDPROC(csum_partial_copy_generic)
28468+ENDPROC(csum_partial_copy_generic_to_user)
28469
28470 #else
28471
28472 /* Version for PentiumII/PPro */
28473
28474 #define ROUND1(x) \
28475+ nop; nop; nop; \
28476 SRC(movl x(%esi), %ebx ) ; \
28477 addl %ebx, %eax ; \
28478- DST(movl %ebx, x(%edi) ) ;
28479+ DST(movl %ebx, %es:x(%edi)) ;
28480
28481 #define ROUND(x) \
28482+ nop; nop; nop; \
28483 SRC(movl x(%esi), %ebx ) ; \
28484 adcl %ebx, %eax ; \
28485- DST(movl %ebx, x(%edi) ) ;
28486+ DST(movl %ebx, %es:x(%edi)) ;
28487
28488 #define ARGBASE 12
28489-
28490-ENTRY(csum_partial_copy_generic)
28491+
28492+ENTRY(csum_partial_copy_generic_to_user)
28493 CFI_STARTPROC
28494+
28495+#ifdef CONFIG_PAX_MEMORY_UDEREF
28496+ pushl_cfi %gs
28497+ popl_cfi %es
28498+ jmp csum_partial_copy_generic
28499+#endif
28500+
28501+ENTRY(csum_partial_copy_generic_from_user)
28502+
28503+#ifdef CONFIG_PAX_MEMORY_UDEREF
28504+ pushl_cfi %gs
28505+ popl_cfi %ds
28506+#endif
28507+
28508+ENTRY(csum_partial_copy_generic)
28509 pushl_cfi %ebx
28510 CFI_REL_OFFSET ebx, 0
28511 pushl_cfi %edi
28512@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
28513 subl %ebx, %edi
28514 lea -1(%esi),%edx
28515 andl $-32,%edx
28516- lea 3f(%ebx,%ebx), %ebx
28517+ lea 3f(%ebx,%ebx,2), %ebx
28518 testl %esi, %esi
28519 jmp *%ebx
28520 1: addl $64,%esi
28521@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
28522 jb 5f
28523 SRC( movw (%esi), %dx )
28524 leal 2(%esi), %esi
28525-DST( movw %dx, (%edi) )
28526+DST( movw %dx, %es:(%edi) )
28527 leal 2(%edi), %edi
28528 je 6f
28529 shll $16,%edx
28530 5:
28531 SRC( movb (%esi), %dl )
28532-DST( movb %dl, (%edi) )
28533+DST( movb %dl, %es:(%edi) )
28534 6: addl %edx, %eax
28535 adcl $0, %eax
28536 7:
28537 .section .fixup, "ax"
28538 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
28539- movl $-EFAULT, (%ebx)
28540+ movl $-EFAULT, %ss:(%ebx)
28541 # zero the complete destination (computing the rest is too much work)
28542 movl ARGBASE+8(%esp),%edi # dst
28543 movl ARGBASE+12(%esp),%ecx # len
28544@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
28545 rep; stosb
28546 jmp 7b
28547 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28548- movl $-EFAULT, (%ebx)
28549+ movl $-EFAULT, %ss:(%ebx)
28550 jmp 7b
28551 .previous
28552
28553+#ifdef CONFIG_PAX_MEMORY_UDEREF
28554+ pushl_cfi %ss
28555+ popl_cfi %ds
28556+ pushl_cfi %ss
28557+ popl_cfi %es
28558+#endif
28559+
28560 popl_cfi %esi
28561 CFI_RESTORE esi
28562 popl_cfi %edi
28563@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
28564 CFI_RESTORE ebx
28565 ret
28566 CFI_ENDPROC
28567-ENDPROC(csum_partial_copy_generic)
28568+ENDPROC(csum_partial_copy_generic_to_user)
28569
28570 #undef ROUND
28571 #undef ROUND1
28572diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
28573index f2145cf..cea889d 100644
28574--- a/arch/x86/lib/clear_page_64.S
28575+++ b/arch/x86/lib/clear_page_64.S
28576@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
28577 movl $4096/8,%ecx
28578 xorl %eax,%eax
28579 rep stosq
28580+ pax_force_retaddr
28581 ret
28582 CFI_ENDPROC
28583 ENDPROC(clear_page_c)
28584@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
28585 movl $4096,%ecx
28586 xorl %eax,%eax
28587 rep stosb
28588+ pax_force_retaddr
28589 ret
28590 CFI_ENDPROC
28591 ENDPROC(clear_page_c_e)
28592@@ -43,6 +45,7 @@ ENTRY(clear_page)
28593 leaq 64(%rdi),%rdi
28594 jnz .Lloop
28595 nop
28596+ pax_force_retaddr
28597 ret
28598 CFI_ENDPROC
28599 .Lclear_page_end:
28600@@ -58,7 +61,7 @@ ENDPROC(clear_page)
28601
28602 #include <asm/cpufeature.h>
28603
28604- .section .altinstr_replacement,"ax"
28605+ .section .altinstr_replacement,"a"
28606 1: .byte 0xeb /* jmp <disp8> */
28607 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
28608 2: .byte 0xeb /* jmp <disp8> */
28609diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
28610index 1e572c5..2a162cd 100644
28611--- a/arch/x86/lib/cmpxchg16b_emu.S
28612+++ b/arch/x86/lib/cmpxchg16b_emu.S
28613@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
28614
28615 popf
28616 mov $1, %al
28617+ pax_force_retaddr
28618 ret
28619
28620 not_same:
28621 popf
28622 xor %al,%al
28623+ pax_force_retaddr
28624 ret
28625
28626 CFI_ENDPROC
28627diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
28628index 176cca6..e0d658e 100644
28629--- a/arch/x86/lib/copy_page_64.S
28630+++ b/arch/x86/lib/copy_page_64.S
28631@@ -9,6 +9,7 @@ copy_page_rep:
28632 CFI_STARTPROC
28633 movl $4096/8, %ecx
28634 rep movsq
28635+ pax_force_retaddr
28636 ret
28637 CFI_ENDPROC
28638 ENDPROC(copy_page_rep)
28639@@ -24,8 +25,8 @@ ENTRY(copy_page)
28640 CFI_ADJUST_CFA_OFFSET 2*8
28641 movq %rbx, (%rsp)
28642 CFI_REL_OFFSET rbx, 0
28643- movq %r12, 1*8(%rsp)
28644- CFI_REL_OFFSET r12, 1*8
28645+ movq %r13, 1*8(%rsp)
28646+ CFI_REL_OFFSET r13, 1*8
28647
28648 movl $(4096/64)-5, %ecx
28649 .p2align 4
28650@@ -38,7 +39,7 @@ ENTRY(copy_page)
28651 movq 0x8*4(%rsi), %r9
28652 movq 0x8*5(%rsi), %r10
28653 movq 0x8*6(%rsi), %r11
28654- movq 0x8*7(%rsi), %r12
28655+ movq 0x8*7(%rsi), %r13
28656
28657 prefetcht0 5*64(%rsi)
28658
28659@@ -49,7 +50,7 @@ ENTRY(copy_page)
28660 movq %r9, 0x8*4(%rdi)
28661 movq %r10, 0x8*5(%rdi)
28662 movq %r11, 0x8*6(%rdi)
28663- movq %r12, 0x8*7(%rdi)
28664+ movq %r13, 0x8*7(%rdi)
28665
28666 leaq 64 (%rsi), %rsi
28667 leaq 64 (%rdi), %rdi
28668@@ -68,7 +69,7 @@ ENTRY(copy_page)
28669 movq 0x8*4(%rsi), %r9
28670 movq 0x8*5(%rsi), %r10
28671 movq 0x8*6(%rsi), %r11
28672- movq 0x8*7(%rsi), %r12
28673+ movq 0x8*7(%rsi), %r13
28674
28675 movq %rax, 0x8*0(%rdi)
28676 movq %rbx, 0x8*1(%rdi)
28677@@ -77,7 +78,7 @@ ENTRY(copy_page)
28678 movq %r9, 0x8*4(%rdi)
28679 movq %r10, 0x8*5(%rdi)
28680 movq %r11, 0x8*6(%rdi)
28681- movq %r12, 0x8*7(%rdi)
28682+ movq %r13, 0x8*7(%rdi)
28683
28684 leaq 64(%rdi), %rdi
28685 leaq 64(%rsi), %rsi
28686@@ -85,10 +86,11 @@ ENTRY(copy_page)
28687
28688 movq (%rsp), %rbx
28689 CFI_RESTORE rbx
28690- movq 1*8(%rsp), %r12
28691- CFI_RESTORE r12
28692+ movq 1*8(%rsp), %r13
28693+ CFI_RESTORE r13
28694 addq $2*8, %rsp
28695 CFI_ADJUST_CFA_OFFSET -2*8
28696+ pax_force_retaddr
28697 ret
28698 .Lcopy_page_end:
28699 CFI_ENDPROC
28700@@ -99,7 +101,7 @@ ENDPROC(copy_page)
28701
28702 #include <asm/cpufeature.h>
28703
28704- .section .altinstr_replacement,"ax"
28705+ .section .altinstr_replacement,"a"
28706 1: .byte 0xeb /* jmp <disp8> */
28707 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
28708 2:
28709diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
28710index a30ca15..407412b 100644
28711--- a/arch/x86/lib/copy_user_64.S
28712+++ b/arch/x86/lib/copy_user_64.S
28713@@ -18,31 +18,7 @@
28714 #include <asm/alternative-asm.h>
28715 #include <asm/asm.h>
28716 #include <asm/smap.h>
28717-
28718-/*
28719- * By placing feature2 after feature1 in altinstructions section, we logically
28720- * implement:
28721- * If CPU has feature2, jmp to alt2 is used
28722- * else if CPU has feature1, jmp to alt1 is used
28723- * else jmp to orig is used.
28724- */
28725- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
28726-0:
28727- .byte 0xe9 /* 32bit jump */
28728- .long \orig-1f /* by default jump to orig */
28729-1:
28730- .section .altinstr_replacement,"ax"
28731-2: .byte 0xe9 /* near jump with 32bit immediate */
28732- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
28733-3: .byte 0xe9 /* near jump with 32bit immediate */
28734- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
28735- .previous
28736-
28737- .section .altinstructions,"a"
28738- altinstruction_entry 0b,2b,\feature1,5,5
28739- altinstruction_entry 0b,3b,\feature2,5,5
28740- .previous
28741- .endm
28742+#include <asm/pgtable.h>
28743
28744 .macro ALIGN_DESTINATION
28745 #ifdef FIX_ALIGNMENT
28746@@ -70,52 +46,6 @@
28747 #endif
28748 .endm
28749
28750-/* Standard copy_to_user with segment limit checking */
28751-ENTRY(_copy_to_user)
28752- CFI_STARTPROC
28753- GET_THREAD_INFO(%rax)
28754- movq %rdi,%rcx
28755- addq %rdx,%rcx
28756- jc bad_to_user
28757- cmpq TI_addr_limit(%rax),%rcx
28758- ja bad_to_user
28759- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
28760- copy_user_generic_unrolled,copy_user_generic_string, \
28761- copy_user_enhanced_fast_string
28762- CFI_ENDPROC
28763-ENDPROC(_copy_to_user)
28764-
28765-/* Standard copy_from_user with segment limit checking */
28766-ENTRY(_copy_from_user)
28767- CFI_STARTPROC
28768- GET_THREAD_INFO(%rax)
28769- movq %rsi,%rcx
28770- addq %rdx,%rcx
28771- jc bad_from_user
28772- cmpq TI_addr_limit(%rax),%rcx
28773- ja bad_from_user
28774- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
28775- copy_user_generic_unrolled,copy_user_generic_string, \
28776- copy_user_enhanced_fast_string
28777- CFI_ENDPROC
28778-ENDPROC(_copy_from_user)
28779-
28780- .section .fixup,"ax"
28781- /* must zero dest */
28782-ENTRY(bad_from_user)
28783-bad_from_user:
28784- CFI_STARTPROC
28785- movl %edx,%ecx
28786- xorl %eax,%eax
28787- rep
28788- stosb
28789-bad_to_user:
28790- movl %edx,%eax
28791- ret
28792- CFI_ENDPROC
28793-ENDPROC(bad_from_user)
28794- .previous
28795-
28796 /*
28797 * copy_user_generic_unrolled - memory copy with exception handling.
28798 * This version is for CPUs like P4 that don't have efficient micro
28799@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
28800 */
28801 ENTRY(copy_user_generic_unrolled)
28802 CFI_STARTPROC
28803+ ASM_PAX_OPEN_USERLAND
28804 ASM_STAC
28805 cmpl $8,%edx
28806 jb 20f /* less then 8 bytes, go to byte copy loop */
28807@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
28808 jnz 21b
28809 23: xor %eax,%eax
28810 ASM_CLAC
28811+ ASM_PAX_CLOSE_USERLAND
28812+ pax_force_retaddr
28813 ret
28814
28815 .section .fixup,"ax"
28816@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
28817 */
28818 ENTRY(copy_user_generic_string)
28819 CFI_STARTPROC
28820+ ASM_PAX_OPEN_USERLAND
28821 ASM_STAC
28822 andl %edx,%edx
28823 jz 4f
28824@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
28825 movsb
28826 4: xorl %eax,%eax
28827 ASM_CLAC
28828+ ASM_PAX_CLOSE_USERLAND
28829+ pax_force_retaddr
28830 ret
28831
28832 .section .fixup,"ax"
28833@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
28834 */
28835 ENTRY(copy_user_enhanced_fast_string)
28836 CFI_STARTPROC
28837+ ASM_PAX_OPEN_USERLAND
28838 ASM_STAC
28839 andl %edx,%edx
28840 jz 2f
28841@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
28842 movsb
28843 2: xorl %eax,%eax
28844 ASM_CLAC
28845+ ASM_PAX_CLOSE_USERLAND
28846+ pax_force_retaddr
28847 ret
28848
28849 .section .fixup,"ax"
28850diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
28851index 6a4f43c..c70fb52 100644
28852--- a/arch/x86/lib/copy_user_nocache_64.S
28853+++ b/arch/x86/lib/copy_user_nocache_64.S
28854@@ -8,6 +8,7 @@
28855
28856 #include <linux/linkage.h>
28857 #include <asm/dwarf2.h>
28858+#include <asm/alternative-asm.h>
28859
28860 #define FIX_ALIGNMENT 1
28861
28862@@ -16,6 +17,7 @@
28863 #include <asm/thread_info.h>
28864 #include <asm/asm.h>
28865 #include <asm/smap.h>
28866+#include <asm/pgtable.h>
28867
28868 .macro ALIGN_DESTINATION
28869 #ifdef FIX_ALIGNMENT
28870@@ -49,6 +51,16 @@
28871 */
28872 ENTRY(__copy_user_nocache)
28873 CFI_STARTPROC
28874+
28875+#ifdef CONFIG_PAX_MEMORY_UDEREF
28876+ mov pax_user_shadow_base,%rcx
28877+ cmp %rcx,%rsi
28878+ jae 1f
28879+ add %rcx,%rsi
28880+1:
28881+#endif
28882+
28883+ ASM_PAX_OPEN_USERLAND
28884 ASM_STAC
28885 cmpl $8,%edx
28886 jb 20f /* less then 8 bytes, go to byte copy loop */
28887@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
28888 jnz 21b
28889 23: xorl %eax,%eax
28890 ASM_CLAC
28891+ ASM_PAX_CLOSE_USERLAND
28892 sfence
28893+ pax_force_retaddr
28894 ret
28895
28896 .section .fixup,"ax"
28897diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
28898index 2419d5f..fe52d0e 100644
28899--- a/arch/x86/lib/csum-copy_64.S
28900+++ b/arch/x86/lib/csum-copy_64.S
28901@@ -9,6 +9,7 @@
28902 #include <asm/dwarf2.h>
28903 #include <asm/errno.h>
28904 #include <asm/asm.h>
28905+#include <asm/alternative-asm.h>
28906
28907 /*
28908 * Checksum copy with exception handling.
28909@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
28910 CFI_ADJUST_CFA_OFFSET 7*8
28911 movq %rbx, 2*8(%rsp)
28912 CFI_REL_OFFSET rbx, 2*8
28913- movq %r12, 3*8(%rsp)
28914- CFI_REL_OFFSET r12, 3*8
28915+ movq %r15, 3*8(%rsp)
28916+ CFI_REL_OFFSET r15, 3*8
28917 movq %r14, 4*8(%rsp)
28918 CFI_REL_OFFSET r14, 4*8
28919 movq %r13, 5*8(%rsp)
28920@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
28921 movl %edx, %ecx
28922
28923 xorl %r9d, %r9d
28924- movq %rcx, %r12
28925+ movq %rcx, %r15
28926
28927- shrq $6, %r12
28928+ shrq $6, %r15
28929 jz .Lhandle_tail /* < 64 */
28930
28931 clc
28932
28933 /* main loop. clear in 64 byte blocks */
28934 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
28935- /* r11: temp3, rdx: temp4, r12 loopcnt */
28936+ /* r11: temp3, rdx: temp4, r15 loopcnt */
28937 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
28938 .p2align 4
28939 .Lloop:
28940@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
28941 adcq %r14, %rax
28942 adcq %r13, %rax
28943
28944- decl %r12d
28945+ decl %r15d
28946
28947 dest
28948 movq %rbx, (%rsi)
28949@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
28950 .Lende:
28951 movq 2*8(%rsp), %rbx
28952 CFI_RESTORE rbx
28953- movq 3*8(%rsp), %r12
28954- CFI_RESTORE r12
28955+ movq 3*8(%rsp), %r15
28956+ CFI_RESTORE r15
28957 movq 4*8(%rsp), %r14
28958 CFI_RESTORE r14
28959 movq 5*8(%rsp), %r13
28960@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
28961 CFI_RESTORE rbp
28962 addq $7*8, %rsp
28963 CFI_ADJUST_CFA_OFFSET -7*8
28964+ pax_force_retaddr
28965 ret
28966 CFI_RESTORE_STATE
28967
28968diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
28969index 7609e0e..b449b98 100644
28970--- a/arch/x86/lib/csum-wrappers_64.c
28971+++ b/arch/x86/lib/csum-wrappers_64.c
28972@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
28973 len -= 2;
28974 }
28975 }
28976+ pax_open_userland();
28977 stac();
28978- isum = csum_partial_copy_generic((__force const void *)src,
28979+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
28980 dst, len, isum, errp, NULL);
28981 clac();
28982+ pax_close_userland();
28983 if (unlikely(*errp))
28984 goto out_err;
28985
28986@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
28987 }
28988
28989 *errp = 0;
28990+ pax_open_userland();
28991 stac();
28992- ret = csum_partial_copy_generic(src, (void __force *)dst,
28993+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
28994 len, isum, NULL, errp);
28995 clac();
28996+ pax_close_userland();
28997 return ret;
28998 }
28999 EXPORT_SYMBOL(csum_partial_copy_to_user);
29000diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
29001index a451235..1daa956 100644
29002--- a/arch/x86/lib/getuser.S
29003+++ b/arch/x86/lib/getuser.S
29004@@ -33,17 +33,40 @@
29005 #include <asm/thread_info.h>
29006 #include <asm/asm.h>
29007 #include <asm/smap.h>
29008+#include <asm/segment.h>
29009+#include <asm/pgtable.h>
29010+#include <asm/alternative-asm.h>
29011+
29012+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29013+#define __copyuser_seg gs;
29014+#else
29015+#define __copyuser_seg
29016+#endif
29017
29018 .text
29019 ENTRY(__get_user_1)
29020 CFI_STARTPROC
29021+
29022+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29023 GET_THREAD_INFO(%_ASM_DX)
29024 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29025 jae bad_get_user
29026 ASM_STAC
29027-1: movzbl (%_ASM_AX),%edx
29028+
29029+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29030+ mov pax_user_shadow_base,%_ASM_DX
29031+ cmp %_ASM_DX,%_ASM_AX
29032+ jae 1234f
29033+ add %_ASM_DX,%_ASM_AX
29034+1234:
29035+#endif
29036+
29037+#endif
29038+
29039+1: __copyuser_seg movzbl (%_ASM_AX),%edx
29040 xor %eax,%eax
29041 ASM_CLAC
29042+ pax_force_retaddr
29043 ret
29044 CFI_ENDPROC
29045 ENDPROC(__get_user_1)
29046@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
29047 ENTRY(__get_user_2)
29048 CFI_STARTPROC
29049 add $1,%_ASM_AX
29050+
29051+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29052 jc bad_get_user
29053 GET_THREAD_INFO(%_ASM_DX)
29054 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29055 jae bad_get_user
29056 ASM_STAC
29057-2: movzwl -1(%_ASM_AX),%edx
29058+
29059+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29060+ mov pax_user_shadow_base,%_ASM_DX
29061+ cmp %_ASM_DX,%_ASM_AX
29062+ jae 1234f
29063+ add %_ASM_DX,%_ASM_AX
29064+1234:
29065+#endif
29066+
29067+#endif
29068+
29069+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
29070 xor %eax,%eax
29071 ASM_CLAC
29072+ pax_force_retaddr
29073 ret
29074 CFI_ENDPROC
29075 ENDPROC(__get_user_2)
29076@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
29077 ENTRY(__get_user_4)
29078 CFI_STARTPROC
29079 add $3,%_ASM_AX
29080+
29081+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29082 jc bad_get_user
29083 GET_THREAD_INFO(%_ASM_DX)
29084 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29085 jae bad_get_user
29086 ASM_STAC
29087-3: movl -3(%_ASM_AX),%edx
29088+
29089+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29090+ mov pax_user_shadow_base,%_ASM_DX
29091+ cmp %_ASM_DX,%_ASM_AX
29092+ jae 1234f
29093+ add %_ASM_DX,%_ASM_AX
29094+1234:
29095+#endif
29096+
29097+#endif
29098+
29099+3: __copyuser_seg movl -3(%_ASM_AX),%edx
29100 xor %eax,%eax
29101 ASM_CLAC
29102+ pax_force_retaddr
29103 ret
29104 CFI_ENDPROC
29105 ENDPROC(__get_user_4)
29106@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
29107 GET_THREAD_INFO(%_ASM_DX)
29108 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29109 jae bad_get_user
29110+
29111+#ifdef CONFIG_PAX_MEMORY_UDEREF
29112+ mov pax_user_shadow_base,%_ASM_DX
29113+ cmp %_ASM_DX,%_ASM_AX
29114+ jae 1234f
29115+ add %_ASM_DX,%_ASM_AX
29116+1234:
29117+#endif
29118+
29119 ASM_STAC
29120 4: movq -7(%_ASM_AX),%rdx
29121 xor %eax,%eax
29122 ASM_CLAC
29123+ pax_force_retaddr
29124 ret
29125 #else
29126 add $7,%_ASM_AX
29127@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
29128 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29129 jae bad_get_user_8
29130 ASM_STAC
29131-4: movl -7(%_ASM_AX),%edx
29132-5: movl -3(%_ASM_AX),%ecx
29133+4: __copyuser_seg movl -7(%_ASM_AX),%edx
29134+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
29135 xor %eax,%eax
29136 ASM_CLAC
29137+ pax_force_retaddr
29138 ret
29139 #endif
29140 CFI_ENDPROC
29141@@ -113,6 +175,7 @@ bad_get_user:
29142 xor %edx,%edx
29143 mov $(-EFAULT),%_ASM_AX
29144 ASM_CLAC
29145+ pax_force_retaddr
29146 ret
29147 CFI_ENDPROC
29148 END(bad_get_user)
29149@@ -124,6 +187,7 @@ bad_get_user_8:
29150 xor %ecx,%ecx
29151 mov $(-EFAULT),%_ASM_AX
29152 ASM_CLAC
29153+ pax_force_retaddr
29154 ret
29155 CFI_ENDPROC
29156 END(bad_get_user_8)
29157diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
29158index 54fcffe..7be149e 100644
29159--- a/arch/x86/lib/insn.c
29160+++ b/arch/x86/lib/insn.c
29161@@ -20,8 +20,10 @@
29162
29163 #ifdef __KERNEL__
29164 #include <linux/string.h>
29165+#include <asm/pgtable_types.h>
29166 #else
29167 #include <string.h>
29168+#define ktla_ktva(addr) addr
29169 #endif
29170 #include <asm/inat.h>
29171 #include <asm/insn.h>
29172@@ -53,8 +55,8 @@
29173 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
29174 {
29175 memset(insn, 0, sizeof(*insn));
29176- insn->kaddr = kaddr;
29177- insn->next_byte = kaddr;
29178+ insn->kaddr = ktla_ktva(kaddr);
29179+ insn->next_byte = ktla_ktva(kaddr);
29180 insn->x86_64 = x86_64 ? 1 : 0;
29181 insn->opnd_bytes = 4;
29182 if (x86_64)
29183diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
29184index 05a95e7..326f2fa 100644
29185--- a/arch/x86/lib/iomap_copy_64.S
29186+++ b/arch/x86/lib/iomap_copy_64.S
29187@@ -17,6 +17,7 @@
29188
29189 #include <linux/linkage.h>
29190 #include <asm/dwarf2.h>
29191+#include <asm/alternative-asm.h>
29192
29193 /*
29194 * override generic version in lib/iomap_copy.c
29195@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
29196 CFI_STARTPROC
29197 movl %edx,%ecx
29198 rep movsd
29199+ pax_force_retaddr
29200 ret
29201 CFI_ENDPROC
29202 ENDPROC(__iowrite32_copy)
29203diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
29204index 56313a3..0db417e 100644
29205--- a/arch/x86/lib/memcpy_64.S
29206+++ b/arch/x86/lib/memcpy_64.S
29207@@ -24,7 +24,7 @@
29208 * This gets patched over the unrolled variant (below) via the
29209 * alternative instructions framework:
29210 */
29211- .section .altinstr_replacement, "ax", @progbits
29212+ .section .altinstr_replacement, "a", @progbits
29213 .Lmemcpy_c:
29214 movq %rdi, %rax
29215 movq %rdx, %rcx
29216@@ -33,6 +33,7 @@
29217 rep movsq
29218 movl %edx, %ecx
29219 rep movsb
29220+ pax_force_retaddr
29221 ret
29222 .Lmemcpy_e:
29223 .previous
29224@@ -44,11 +45,12 @@
29225 * This gets patched over the unrolled variant (below) via the
29226 * alternative instructions framework:
29227 */
29228- .section .altinstr_replacement, "ax", @progbits
29229+ .section .altinstr_replacement, "a", @progbits
29230 .Lmemcpy_c_e:
29231 movq %rdi, %rax
29232 movq %rdx, %rcx
29233 rep movsb
29234+ pax_force_retaddr
29235 ret
29236 .Lmemcpy_e_e:
29237 .previous
29238@@ -136,6 +138,7 @@ ENTRY(memcpy)
29239 movq %r9, 1*8(%rdi)
29240 movq %r10, -2*8(%rdi, %rdx)
29241 movq %r11, -1*8(%rdi, %rdx)
29242+ pax_force_retaddr
29243 retq
29244 .p2align 4
29245 .Lless_16bytes:
29246@@ -148,6 +151,7 @@ ENTRY(memcpy)
29247 movq -1*8(%rsi, %rdx), %r9
29248 movq %r8, 0*8(%rdi)
29249 movq %r9, -1*8(%rdi, %rdx)
29250+ pax_force_retaddr
29251 retq
29252 .p2align 4
29253 .Lless_8bytes:
29254@@ -161,6 +165,7 @@ ENTRY(memcpy)
29255 movl -4(%rsi, %rdx), %r8d
29256 movl %ecx, (%rdi)
29257 movl %r8d, -4(%rdi, %rdx)
29258+ pax_force_retaddr
29259 retq
29260 .p2align 4
29261 .Lless_3bytes:
29262@@ -179,6 +184,7 @@ ENTRY(memcpy)
29263 movb %cl, (%rdi)
29264
29265 .Lend:
29266+ pax_force_retaddr
29267 retq
29268 CFI_ENDPROC
29269 ENDPROC(memcpy)
29270diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
29271index 65268a6..dd1de11 100644
29272--- a/arch/x86/lib/memmove_64.S
29273+++ b/arch/x86/lib/memmove_64.S
29274@@ -202,14 +202,16 @@ ENTRY(memmove)
29275 movb (%rsi), %r11b
29276 movb %r11b, (%rdi)
29277 13:
29278+ pax_force_retaddr
29279 retq
29280 CFI_ENDPROC
29281
29282- .section .altinstr_replacement,"ax"
29283+ .section .altinstr_replacement,"a"
29284 .Lmemmove_begin_forward_efs:
29285 /* Forward moving data. */
29286 movq %rdx, %rcx
29287 rep movsb
29288+ pax_force_retaddr
29289 retq
29290 .Lmemmove_end_forward_efs:
29291 .previous
29292diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
29293index 2dcb380..2eb79fe 100644
29294--- a/arch/x86/lib/memset_64.S
29295+++ b/arch/x86/lib/memset_64.S
29296@@ -16,7 +16,7 @@
29297 *
29298 * rax original destination
29299 */
29300- .section .altinstr_replacement, "ax", @progbits
29301+ .section .altinstr_replacement, "a", @progbits
29302 .Lmemset_c:
29303 movq %rdi,%r9
29304 movq %rdx,%rcx
29305@@ -30,6 +30,7 @@
29306 movl %edx,%ecx
29307 rep stosb
29308 movq %r9,%rax
29309+ pax_force_retaddr
29310 ret
29311 .Lmemset_e:
29312 .previous
29313@@ -45,13 +46,14 @@
29314 *
29315 * rax original destination
29316 */
29317- .section .altinstr_replacement, "ax", @progbits
29318+ .section .altinstr_replacement, "a", @progbits
29319 .Lmemset_c_e:
29320 movq %rdi,%r9
29321 movb %sil,%al
29322 movq %rdx,%rcx
29323 rep stosb
29324 movq %r9,%rax
29325+ pax_force_retaddr
29326 ret
29327 .Lmemset_e_e:
29328 .previous
29329@@ -118,6 +120,7 @@ ENTRY(__memset)
29330
29331 .Lende:
29332 movq %r10,%rax
29333+ pax_force_retaddr
29334 ret
29335
29336 CFI_RESTORE_STATE
29337diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
29338index c9f2d9b..e7fd2c0 100644
29339--- a/arch/x86/lib/mmx_32.c
29340+++ b/arch/x86/lib/mmx_32.c
29341@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29342 {
29343 void *p;
29344 int i;
29345+ unsigned long cr0;
29346
29347 if (unlikely(in_interrupt()))
29348 return __memcpy(to, from, len);
29349@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29350 kernel_fpu_begin();
29351
29352 __asm__ __volatile__ (
29353- "1: prefetch (%0)\n" /* This set is 28 bytes */
29354- " prefetch 64(%0)\n"
29355- " prefetch 128(%0)\n"
29356- " prefetch 192(%0)\n"
29357- " prefetch 256(%0)\n"
29358+ "1: prefetch (%1)\n" /* This set is 28 bytes */
29359+ " prefetch 64(%1)\n"
29360+ " prefetch 128(%1)\n"
29361+ " prefetch 192(%1)\n"
29362+ " prefetch 256(%1)\n"
29363 "2: \n"
29364 ".section .fixup, \"ax\"\n"
29365- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29366+ "3: \n"
29367+
29368+#ifdef CONFIG_PAX_KERNEXEC
29369+ " movl %%cr0, %0\n"
29370+ " movl %0, %%eax\n"
29371+ " andl $0xFFFEFFFF, %%eax\n"
29372+ " movl %%eax, %%cr0\n"
29373+#endif
29374+
29375+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29376+
29377+#ifdef CONFIG_PAX_KERNEXEC
29378+ " movl %0, %%cr0\n"
29379+#endif
29380+
29381 " jmp 2b\n"
29382 ".previous\n"
29383 _ASM_EXTABLE(1b, 3b)
29384- : : "r" (from));
29385+ : "=&r" (cr0) : "r" (from) : "ax");
29386
29387 for ( ; i > 5; i--) {
29388 __asm__ __volatile__ (
29389- "1: prefetch 320(%0)\n"
29390- "2: movq (%0), %%mm0\n"
29391- " movq 8(%0), %%mm1\n"
29392- " movq 16(%0), %%mm2\n"
29393- " movq 24(%0), %%mm3\n"
29394- " movq %%mm0, (%1)\n"
29395- " movq %%mm1, 8(%1)\n"
29396- " movq %%mm2, 16(%1)\n"
29397- " movq %%mm3, 24(%1)\n"
29398- " movq 32(%0), %%mm0\n"
29399- " movq 40(%0), %%mm1\n"
29400- " movq 48(%0), %%mm2\n"
29401- " movq 56(%0), %%mm3\n"
29402- " movq %%mm0, 32(%1)\n"
29403- " movq %%mm1, 40(%1)\n"
29404- " movq %%mm2, 48(%1)\n"
29405- " movq %%mm3, 56(%1)\n"
29406+ "1: prefetch 320(%1)\n"
29407+ "2: movq (%1), %%mm0\n"
29408+ " movq 8(%1), %%mm1\n"
29409+ " movq 16(%1), %%mm2\n"
29410+ " movq 24(%1), %%mm3\n"
29411+ " movq %%mm0, (%2)\n"
29412+ " movq %%mm1, 8(%2)\n"
29413+ " movq %%mm2, 16(%2)\n"
29414+ " movq %%mm3, 24(%2)\n"
29415+ " movq 32(%1), %%mm0\n"
29416+ " movq 40(%1), %%mm1\n"
29417+ " movq 48(%1), %%mm2\n"
29418+ " movq 56(%1), %%mm3\n"
29419+ " movq %%mm0, 32(%2)\n"
29420+ " movq %%mm1, 40(%2)\n"
29421+ " movq %%mm2, 48(%2)\n"
29422+ " movq %%mm3, 56(%2)\n"
29423 ".section .fixup, \"ax\"\n"
29424- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29425+ "3:\n"
29426+
29427+#ifdef CONFIG_PAX_KERNEXEC
29428+ " movl %%cr0, %0\n"
29429+ " movl %0, %%eax\n"
29430+ " andl $0xFFFEFFFF, %%eax\n"
29431+ " movl %%eax, %%cr0\n"
29432+#endif
29433+
29434+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29435+
29436+#ifdef CONFIG_PAX_KERNEXEC
29437+ " movl %0, %%cr0\n"
29438+#endif
29439+
29440 " jmp 2b\n"
29441 ".previous\n"
29442 _ASM_EXTABLE(1b, 3b)
29443- : : "r" (from), "r" (to) : "memory");
29444+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29445
29446 from += 64;
29447 to += 64;
29448@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
29449 static void fast_copy_page(void *to, void *from)
29450 {
29451 int i;
29452+ unsigned long cr0;
29453
29454 kernel_fpu_begin();
29455
29456@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
29457 * but that is for later. -AV
29458 */
29459 __asm__ __volatile__(
29460- "1: prefetch (%0)\n"
29461- " prefetch 64(%0)\n"
29462- " prefetch 128(%0)\n"
29463- " prefetch 192(%0)\n"
29464- " prefetch 256(%0)\n"
29465+ "1: prefetch (%1)\n"
29466+ " prefetch 64(%1)\n"
29467+ " prefetch 128(%1)\n"
29468+ " prefetch 192(%1)\n"
29469+ " prefetch 256(%1)\n"
29470 "2: \n"
29471 ".section .fixup, \"ax\"\n"
29472- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29473+ "3: \n"
29474+
29475+#ifdef CONFIG_PAX_KERNEXEC
29476+ " movl %%cr0, %0\n"
29477+ " movl %0, %%eax\n"
29478+ " andl $0xFFFEFFFF, %%eax\n"
29479+ " movl %%eax, %%cr0\n"
29480+#endif
29481+
29482+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29483+
29484+#ifdef CONFIG_PAX_KERNEXEC
29485+ " movl %0, %%cr0\n"
29486+#endif
29487+
29488 " jmp 2b\n"
29489 ".previous\n"
29490- _ASM_EXTABLE(1b, 3b) : : "r" (from));
29491+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
29492
29493 for (i = 0; i < (4096-320)/64; i++) {
29494 __asm__ __volatile__ (
29495- "1: prefetch 320(%0)\n"
29496- "2: movq (%0), %%mm0\n"
29497- " movntq %%mm0, (%1)\n"
29498- " movq 8(%0), %%mm1\n"
29499- " movntq %%mm1, 8(%1)\n"
29500- " movq 16(%0), %%mm2\n"
29501- " movntq %%mm2, 16(%1)\n"
29502- " movq 24(%0), %%mm3\n"
29503- " movntq %%mm3, 24(%1)\n"
29504- " movq 32(%0), %%mm4\n"
29505- " movntq %%mm4, 32(%1)\n"
29506- " movq 40(%0), %%mm5\n"
29507- " movntq %%mm5, 40(%1)\n"
29508- " movq 48(%0), %%mm6\n"
29509- " movntq %%mm6, 48(%1)\n"
29510- " movq 56(%0), %%mm7\n"
29511- " movntq %%mm7, 56(%1)\n"
29512+ "1: prefetch 320(%1)\n"
29513+ "2: movq (%1), %%mm0\n"
29514+ " movntq %%mm0, (%2)\n"
29515+ " movq 8(%1), %%mm1\n"
29516+ " movntq %%mm1, 8(%2)\n"
29517+ " movq 16(%1), %%mm2\n"
29518+ " movntq %%mm2, 16(%2)\n"
29519+ " movq 24(%1), %%mm3\n"
29520+ " movntq %%mm3, 24(%2)\n"
29521+ " movq 32(%1), %%mm4\n"
29522+ " movntq %%mm4, 32(%2)\n"
29523+ " movq 40(%1), %%mm5\n"
29524+ " movntq %%mm5, 40(%2)\n"
29525+ " movq 48(%1), %%mm6\n"
29526+ " movntq %%mm6, 48(%2)\n"
29527+ " movq 56(%1), %%mm7\n"
29528+ " movntq %%mm7, 56(%2)\n"
29529 ".section .fixup, \"ax\"\n"
29530- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29531+ "3:\n"
29532+
29533+#ifdef CONFIG_PAX_KERNEXEC
29534+ " movl %%cr0, %0\n"
29535+ " movl %0, %%eax\n"
29536+ " andl $0xFFFEFFFF, %%eax\n"
29537+ " movl %%eax, %%cr0\n"
29538+#endif
29539+
29540+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29541+
29542+#ifdef CONFIG_PAX_KERNEXEC
29543+ " movl %0, %%cr0\n"
29544+#endif
29545+
29546 " jmp 2b\n"
29547 ".previous\n"
29548- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
29549+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29550
29551 from += 64;
29552 to += 64;
29553@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
29554 static void fast_copy_page(void *to, void *from)
29555 {
29556 int i;
29557+ unsigned long cr0;
29558
29559 kernel_fpu_begin();
29560
29561 __asm__ __volatile__ (
29562- "1: prefetch (%0)\n"
29563- " prefetch 64(%0)\n"
29564- " prefetch 128(%0)\n"
29565- " prefetch 192(%0)\n"
29566- " prefetch 256(%0)\n"
29567+ "1: prefetch (%1)\n"
29568+ " prefetch 64(%1)\n"
29569+ " prefetch 128(%1)\n"
29570+ " prefetch 192(%1)\n"
29571+ " prefetch 256(%1)\n"
29572 "2: \n"
29573 ".section .fixup, \"ax\"\n"
29574- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29575+ "3: \n"
29576+
29577+#ifdef CONFIG_PAX_KERNEXEC
29578+ " movl %%cr0, %0\n"
29579+ " movl %0, %%eax\n"
29580+ " andl $0xFFFEFFFF, %%eax\n"
29581+ " movl %%eax, %%cr0\n"
29582+#endif
29583+
29584+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29585+
29586+#ifdef CONFIG_PAX_KERNEXEC
29587+ " movl %0, %%cr0\n"
29588+#endif
29589+
29590 " jmp 2b\n"
29591 ".previous\n"
29592- _ASM_EXTABLE(1b, 3b) : : "r" (from));
29593+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
29594
29595 for (i = 0; i < 4096/64; i++) {
29596 __asm__ __volatile__ (
29597- "1: prefetch 320(%0)\n"
29598- "2: movq (%0), %%mm0\n"
29599- " movq 8(%0), %%mm1\n"
29600- " movq 16(%0), %%mm2\n"
29601- " movq 24(%0), %%mm3\n"
29602- " movq %%mm0, (%1)\n"
29603- " movq %%mm1, 8(%1)\n"
29604- " movq %%mm2, 16(%1)\n"
29605- " movq %%mm3, 24(%1)\n"
29606- " movq 32(%0), %%mm0\n"
29607- " movq 40(%0), %%mm1\n"
29608- " movq 48(%0), %%mm2\n"
29609- " movq 56(%0), %%mm3\n"
29610- " movq %%mm0, 32(%1)\n"
29611- " movq %%mm1, 40(%1)\n"
29612- " movq %%mm2, 48(%1)\n"
29613- " movq %%mm3, 56(%1)\n"
29614+ "1: prefetch 320(%1)\n"
29615+ "2: movq (%1), %%mm0\n"
29616+ " movq 8(%1), %%mm1\n"
29617+ " movq 16(%1), %%mm2\n"
29618+ " movq 24(%1), %%mm3\n"
29619+ " movq %%mm0, (%2)\n"
29620+ " movq %%mm1, 8(%2)\n"
29621+ " movq %%mm2, 16(%2)\n"
29622+ " movq %%mm3, 24(%2)\n"
29623+ " movq 32(%1), %%mm0\n"
29624+ " movq 40(%1), %%mm1\n"
29625+ " movq 48(%1), %%mm2\n"
29626+ " movq 56(%1), %%mm3\n"
29627+ " movq %%mm0, 32(%2)\n"
29628+ " movq %%mm1, 40(%2)\n"
29629+ " movq %%mm2, 48(%2)\n"
29630+ " movq %%mm3, 56(%2)\n"
29631 ".section .fixup, \"ax\"\n"
29632- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29633+ "3:\n"
29634+
29635+#ifdef CONFIG_PAX_KERNEXEC
29636+ " movl %%cr0, %0\n"
29637+ " movl %0, %%eax\n"
29638+ " andl $0xFFFEFFFF, %%eax\n"
29639+ " movl %%eax, %%cr0\n"
29640+#endif
29641+
29642+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29643+
29644+#ifdef CONFIG_PAX_KERNEXEC
29645+ " movl %0, %%cr0\n"
29646+#endif
29647+
29648 " jmp 2b\n"
29649 ".previous\n"
29650 _ASM_EXTABLE(1b, 3b)
29651- : : "r" (from), "r" (to) : "memory");
29652+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29653
29654 from += 64;
29655 to += 64;
29656diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
29657index f6d13ee..d789440 100644
29658--- a/arch/x86/lib/msr-reg.S
29659+++ b/arch/x86/lib/msr-reg.S
29660@@ -3,6 +3,7 @@
29661 #include <asm/dwarf2.h>
29662 #include <asm/asm.h>
29663 #include <asm/msr.h>
29664+#include <asm/alternative-asm.h>
29665
29666 #ifdef CONFIG_X86_64
29667 /*
29668@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
29669 movl %edi, 28(%r10)
29670 popq_cfi %rbp
29671 popq_cfi %rbx
29672+ pax_force_retaddr
29673 ret
29674 3:
29675 CFI_RESTORE_STATE
29676diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
29677index fc6ba17..d4d989d 100644
29678--- a/arch/x86/lib/putuser.S
29679+++ b/arch/x86/lib/putuser.S
29680@@ -16,7 +16,9 @@
29681 #include <asm/errno.h>
29682 #include <asm/asm.h>
29683 #include <asm/smap.h>
29684-
29685+#include <asm/segment.h>
29686+#include <asm/pgtable.h>
29687+#include <asm/alternative-asm.h>
29688
29689 /*
29690 * __put_user_X
29691@@ -30,57 +32,125 @@
29692 * as they get called from within inline assembly.
29693 */
29694
29695-#define ENTER CFI_STARTPROC ; \
29696- GET_THREAD_INFO(%_ASM_BX)
29697-#define EXIT ASM_CLAC ; \
29698- ret ; \
29699+#define ENTER CFI_STARTPROC
29700+#define EXIT ASM_CLAC ; \
29701+ pax_force_retaddr ; \
29702+ ret ; \
29703 CFI_ENDPROC
29704
29705+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29706+#define _DEST %_ASM_CX,%_ASM_BX
29707+#else
29708+#define _DEST %_ASM_CX
29709+#endif
29710+
29711+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29712+#define __copyuser_seg gs;
29713+#else
29714+#define __copyuser_seg
29715+#endif
29716+
29717 .text
29718 ENTRY(__put_user_1)
29719 ENTER
29720+
29721+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29722+ GET_THREAD_INFO(%_ASM_BX)
29723 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
29724 jae bad_put_user
29725 ASM_STAC
29726-1: movb %al,(%_ASM_CX)
29727+
29728+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29729+ mov pax_user_shadow_base,%_ASM_BX
29730+ cmp %_ASM_BX,%_ASM_CX
29731+ jb 1234f
29732+ xor %ebx,%ebx
29733+1234:
29734+#endif
29735+
29736+#endif
29737+
29738+1: __copyuser_seg movb %al,(_DEST)
29739 xor %eax,%eax
29740 EXIT
29741 ENDPROC(__put_user_1)
29742
29743 ENTRY(__put_user_2)
29744 ENTER
29745+
29746+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29747+ GET_THREAD_INFO(%_ASM_BX)
29748 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29749 sub $1,%_ASM_BX
29750 cmp %_ASM_BX,%_ASM_CX
29751 jae bad_put_user
29752 ASM_STAC
29753-2: movw %ax,(%_ASM_CX)
29754+
29755+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29756+ mov pax_user_shadow_base,%_ASM_BX
29757+ cmp %_ASM_BX,%_ASM_CX
29758+ jb 1234f
29759+ xor %ebx,%ebx
29760+1234:
29761+#endif
29762+
29763+#endif
29764+
29765+2: __copyuser_seg movw %ax,(_DEST)
29766 xor %eax,%eax
29767 EXIT
29768 ENDPROC(__put_user_2)
29769
29770 ENTRY(__put_user_4)
29771 ENTER
29772+
29773+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29774+ GET_THREAD_INFO(%_ASM_BX)
29775 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29776 sub $3,%_ASM_BX
29777 cmp %_ASM_BX,%_ASM_CX
29778 jae bad_put_user
29779 ASM_STAC
29780-3: movl %eax,(%_ASM_CX)
29781+
29782+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29783+ mov pax_user_shadow_base,%_ASM_BX
29784+ cmp %_ASM_BX,%_ASM_CX
29785+ jb 1234f
29786+ xor %ebx,%ebx
29787+1234:
29788+#endif
29789+
29790+#endif
29791+
29792+3: __copyuser_seg movl %eax,(_DEST)
29793 xor %eax,%eax
29794 EXIT
29795 ENDPROC(__put_user_4)
29796
29797 ENTRY(__put_user_8)
29798 ENTER
29799+
29800+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29801+ GET_THREAD_INFO(%_ASM_BX)
29802 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29803 sub $7,%_ASM_BX
29804 cmp %_ASM_BX,%_ASM_CX
29805 jae bad_put_user
29806 ASM_STAC
29807-4: mov %_ASM_AX,(%_ASM_CX)
29808+
29809+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29810+ mov pax_user_shadow_base,%_ASM_BX
29811+ cmp %_ASM_BX,%_ASM_CX
29812+ jb 1234f
29813+ xor %ebx,%ebx
29814+1234:
29815+#endif
29816+
29817+#endif
29818+
29819+4: __copyuser_seg mov %_ASM_AX,(_DEST)
29820 #ifdef CONFIG_X86_32
29821-5: movl %edx,4(%_ASM_CX)
29822+5: __copyuser_seg movl %edx,4(_DEST)
29823 #endif
29824 xor %eax,%eax
29825 EXIT
29826diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
29827index 1cad221..de671ee 100644
29828--- a/arch/x86/lib/rwlock.S
29829+++ b/arch/x86/lib/rwlock.S
29830@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
29831 FRAME
29832 0: LOCK_PREFIX
29833 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29834+
29835+#ifdef CONFIG_PAX_REFCOUNT
29836+ jno 1234f
29837+ LOCK_PREFIX
29838+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29839+ int $4
29840+1234:
29841+ _ASM_EXTABLE(1234b, 1234b)
29842+#endif
29843+
29844 1: rep; nop
29845 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
29846 jne 1b
29847 LOCK_PREFIX
29848 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29849+
29850+#ifdef CONFIG_PAX_REFCOUNT
29851+ jno 1234f
29852+ LOCK_PREFIX
29853+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29854+ int $4
29855+1234:
29856+ _ASM_EXTABLE(1234b, 1234b)
29857+#endif
29858+
29859 jnz 0b
29860 ENDFRAME
29861+ pax_force_retaddr
29862 ret
29863 CFI_ENDPROC
29864 END(__write_lock_failed)
29865@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
29866 FRAME
29867 0: LOCK_PREFIX
29868 READ_LOCK_SIZE(inc) (%__lock_ptr)
29869+
29870+#ifdef CONFIG_PAX_REFCOUNT
29871+ jno 1234f
29872+ LOCK_PREFIX
29873+ READ_LOCK_SIZE(dec) (%__lock_ptr)
29874+ int $4
29875+1234:
29876+ _ASM_EXTABLE(1234b, 1234b)
29877+#endif
29878+
29879 1: rep; nop
29880 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
29881 js 1b
29882 LOCK_PREFIX
29883 READ_LOCK_SIZE(dec) (%__lock_ptr)
29884+
29885+#ifdef CONFIG_PAX_REFCOUNT
29886+ jno 1234f
29887+ LOCK_PREFIX
29888+ READ_LOCK_SIZE(inc) (%__lock_ptr)
29889+ int $4
29890+1234:
29891+ _ASM_EXTABLE(1234b, 1234b)
29892+#endif
29893+
29894 js 0b
29895 ENDFRAME
29896+ pax_force_retaddr
29897 ret
29898 CFI_ENDPROC
29899 END(__read_lock_failed)
29900diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
29901index 5dff5f0..cadebf4 100644
29902--- a/arch/x86/lib/rwsem.S
29903+++ b/arch/x86/lib/rwsem.S
29904@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
29905 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29906 CFI_RESTORE __ASM_REG(dx)
29907 restore_common_regs
29908+ pax_force_retaddr
29909 ret
29910 CFI_ENDPROC
29911 ENDPROC(call_rwsem_down_read_failed)
29912@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
29913 movq %rax,%rdi
29914 call rwsem_down_write_failed
29915 restore_common_regs
29916+ pax_force_retaddr
29917 ret
29918 CFI_ENDPROC
29919 ENDPROC(call_rwsem_down_write_failed)
29920@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
29921 movq %rax,%rdi
29922 call rwsem_wake
29923 restore_common_regs
29924-1: ret
29925+1: pax_force_retaddr
29926+ ret
29927 CFI_ENDPROC
29928 ENDPROC(call_rwsem_wake)
29929
29930@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
29931 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29932 CFI_RESTORE __ASM_REG(dx)
29933 restore_common_regs
29934+ pax_force_retaddr
29935 ret
29936 CFI_ENDPROC
29937 ENDPROC(call_rwsem_downgrade_wake)
29938diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
29939index a63efd6..8149fbe 100644
29940--- a/arch/x86/lib/thunk_64.S
29941+++ b/arch/x86/lib/thunk_64.S
29942@@ -8,6 +8,7 @@
29943 #include <linux/linkage.h>
29944 #include <asm/dwarf2.h>
29945 #include <asm/calling.h>
29946+#include <asm/alternative-asm.h>
29947
29948 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
29949 .macro THUNK name, func, put_ret_addr_in_rdi=0
29950@@ -15,11 +16,11 @@
29951 \name:
29952 CFI_STARTPROC
29953
29954- /* this one pushes 9 elems, the next one would be %rIP */
29955- SAVE_ARGS
29956+ /* this one pushes 15+1 elems, the next one would be %rIP */
29957+ SAVE_ARGS 8
29958
29959 .if \put_ret_addr_in_rdi
29960- movq_cfi_restore 9*8, rdi
29961+ movq_cfi_restore RIP, rdi
29962 .endif
29963
29964 call \func
29965@@ -38,8 +39,9 @@
29966
29967 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
29968 CFI_STARTPROC
29969- SAVE_ARGS
29970+ SAVE_ARGS 8
29971 restore:
29972- RESTORE_ARGS
29973+ RESTORE_ARGS 1,8
29974+ pax_force_retaddr
29975 ret
29976 CFI_ENDPROC
29977diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
29978index e2f5e21..4b22130 100644
29979--- a/arch/x86/lib/usercopy_32.c
29980+++ b/arch/x86/lib/usercopy_32.c
29981@@ -42,11 +42,13 @@ do { \
29982 int __d0; \
29983 might_fault(); \
29984 __asm__ __volatile__( \
29985+ __COPYUSER_SET_ES \
29986 ASM_STAC "\n" \
29987 "0: rep; stosl\n" \
29988 " movl %2,%0\n" \
29989 "1: rep; stosb\n" \
29990 "2: " ASM_CLAC "\n" \
29991+ __COPYUSER_RESTORE_ES \
29992 ".section .fixup,\"ax\"\n" \
29993 "3: lea 0(%2,%0,4),%0\n" \
29994 " jmp 2b\n" \
29995@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
29996
29997 #ifdef CONFIG_X86_INTEL_USERCOPY
29998 static unsigned long
29999-__copy_user_intel(void __user *to, const void *from, unsigned long size)
30000+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
30001 {
30002 int d0, d1;
30003 __asm__ __volatile__(
30004@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
30005 " .align 2,0x90\n"
30006 "3: movl 0(%4), %%eax\n"
30007 "4: movl 4(%4), %%edx\n"
30008- "5: movl %%eax, 0(%3)\n"
30009- "6: movl %%edx, 4(%3)\n"
30010+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
30011+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
30012 "7: movl 8(%4), %%eax\n"
30013 "8: movl 12(%4),%%edx\n"
30014- "9: movl %%eax, 8(%3)\n"
30015- "10: movl %%edx, 12(%3)\n"
30016+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
30017+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
30018 "11: movl 16(%4), %%eax\n"
30019 "12: movl 20(%4), %%edx\n"
30020- "13: movl %%eax, 16(%3)\n"
30021- "14: movl %%edx, 20(%3)\n"
30022+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
30023+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
30024 "15: movl 24(%4), %%eax\n"
30025 "16: movl 28(%4), %%edx\n"
30026- "17: movl %%eax, 24(%3)\n"
30027- "18: movl %%edx, 28(%3)\n"
30028+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
30029+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
30030 "19: movl 32(%4), %%eax\n"
30031 "20: movl 36(%4), %%edx\n"
30032- "21: movl %%eax, 32(%3)\n"
30033- "22: movl %%edx, 36(%3)\n"
30034+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
30035+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
30036 "23: movl 40(%4), %%eax\n"
30037 "24: movl 44(%4), %%edx\n"
30038- "25: movl %%eax, 40(%3)\n"
30039- "26: movl %%edx, 44(%3)\n"
30040+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
30041+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
30042 "27: movl 48(%4), %%eax\n"
30043 "28: movl 52(%4), %%edx\n"
30044- "29: movl %%eax, 48(%3)\n"
30045- "30: movl %%edx, 52(%3)\n"
30046+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
30047+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
30048 "31: movl 56(%4), %%eax\n"
30049 "32: movl 60(%4), %%edx\n"
30050- "33: movl %%eax, 56(%3)\n"
30051- "34: movl %%edx, 60(%3)\n"
30052+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
30053+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
30054 " addl $-64, %0\n"
30055 " addl $64, %4\n"
30056 " addl $64, %3\n"
30057@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
30058 " shrl $2, %0\n"
30059 " andl $3, %%eax\n"
30060 " cld\n"
30061+ __COPYUSER_SET_ES
30062 "99: rep; movsl\n"
30063 "36: movl %%eax, %0\n"
30064 "37: rep; movsb\n"
30065 "100:\n"
30066+ __COPYUSER_RESTORE_ES
30067+ ".section .fixup,\"ax\"\n"
30068+ "101: lea 0(%%eax,%0,4),%0\n"
30069+ " jmp 100b\n"
30070+ ".previous\n"
30071+ _ASM_EXTABLE(1b,100b)
30072+ _ASM_EXTABLE(2b,100b)
30073+ _ASM_EXTABLE(3b,100b)
30074+ _ASM_EXTABLE(4b,100b)
30075+ _ASM_EXTABLE(5b,100b)
30076+ _ASM_EXTABLE(6b,100b)
30077+ _ASM_EXTABLE(7b,100b)
30078+ _ASM_EXTABLE(8b,100b)
30079+ _ASM_EXTABLE(9b,100b)
30080+ _ASM_EXTABLE(10b,100b)
30081+ _ASM_EXTABLE(11b,100b)
30082+ _ASM_EXTABLE(12b,100b)
30083+ _ASM_EXTABLE(13b,100b)
30084+ _ASM_EXTABLE(14b,100b)
30085+ _ASM_EXTABLE(15b,100b)
30086+ _ASM_EXTABLE(16b,100b)
30087+ _ASM_EXTABLE(17b,100b)
30088+ _ASM_EXTABLE(18b,100b)
30089+ _ASM_EXTABLE(19b,100b)
30090+ _ASM_EXTABLE(20b,100b)
30091+ _ASM_EXTABLE(21b,100b)
30092+ _ASM_EXTABLE(22b,100b)
30093+ _ASM_EXTABLE(23b,100b)
30094+ _ASM_EXTABLE(24b,100b)
30095+ _ASM_EXTABLE(25b,100b)
30096+ _ASM_EXTABLE(26b,100b)
30097+ _ASM_EXTABLE(27b,100b)
30098+ _ASM_EXTABLE(28b,100b)
30099+ _ASM_EXTABLE(29b,100b)
30100+ _ASM_EXTABLE(30b,100b)
30101+ _ASM_EXTABLE(31b,100b)
30102+ _ASM_EXTABLE(32b,100b)
30103+ _ASM_EXTABLE(33b,100b)
30104+ _ASM_EXTABLE(34b,100b)
30105+ _ASM_EXTABLE(35b,100b)
30106+ _ASM_EXTABLE(36b,100b)
30107+ _ASM_EXTABLE(37b,100b)
30108+ _ASM_EXTABLE(99b,101b)
30109+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
30110+ : "1"(to), "2"(from), "0"(size)
30111+ : "eax", "edx", "memory");
30112+ return size;
30113+}
30114+
30115+static unsigned long
30116+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
30117+{
30118+ int d0, d1;
30119+ __asm__ __volatile__(
30120+ " .align 2,0x90\n"
30121+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
30122+ " cmpl $67, %0\n"
30123+ " jbe 3f\n"
30124+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
30125+ " .align 2,0x90\n"
30126+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
30127+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
30128+ "5: movl %%eax, 0(%3)\n"
30129+ "6: movl %%edx, 4(%3)\n"
30130+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
30131+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
30132+ "9: movl %%eax, 8(%3)\n"
30133+ "10: movl %%edx, 12(%3)\n"
30134+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
30135+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
30136+ "13: movl %%eax, 16(%3)\n"
30137+ "14: movl %%edx, 20(%3)\n"
30138+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
30139+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
30140+ "17: movl %%eax, 24(%3)\n"
30141+ "18: movl %%edx, 28(%3)\n"
30142+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
30143+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
30144+ "21: movl %%eax, 32(%3)\n"
30145+ "22: movl %%edx, 36(%3)\n"
30146+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
30147+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
30148+ "25: movl %%eax, 40(%3)\n"
30149+ "26: movl %%edx, 44(%3)\n"
30150+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
30151+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
30152+ "29: movl %%eax, 48(%3)\n"
30153+ "30: movl %%edx, 52(%3)\n"
30154+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
30155+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
30156+ "33: movl %%eax, 56(%3)\n"
30157+ "34: movl %%edx, 60(%3)\n"
30158+ " addl $-64, %0\n"
30159+ " addl $64, %4\n"
30160+ " addl $64, %3\n"
30161+ " cmpl $63, %0\n"
30162+ " ja 1b\n"
30163+ "35: movl %0, %%eax\n"
30164+ " shrl $2, %0\n"
30165+ " andl $3, %%eax\n"
30166+ " cld\n"
30167+ "99: rep; "__copyuser_seg" movsl\n"
30168+ "36: movl %%eax, %0\n"
30169+ "37: rep; "__copyuser_seg" movsb\n"
30170+ "100:\n"
30171 ".section .fixup,\"ax\"\n"
30172 "101: lea 0(%%eax,%0,4),%0\n"
30173 " jmp 100b\n"
30174@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30175 int d0, d1;
30176 __asm__ __volatile__(
30177 " .align 2,0x90\n"
30178- "0: movl 32(%4), %%eax\n"
30179+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30180 " cmpl $67, %0\n"
30181 " jbe 2f\n"
30182- "1: movl 64(%4), %%eax\n"
30183+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30184 " .align 2,0x90\n"
30185- "2: movl 0(%4), %%eax\n"
30186- "21: movl 4(%4), %%edx\n"
30187+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30188+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30189 " movl %%eax, 0(%3)\n"
30190 " movl %%edx, 4(%3)\n"
30191- "3: movl 8(%4), %%eax\n"
30192- "31: movl 12(%4),%%edx\n"
30193+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30194+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30195 " movl %%eax, 8(%3)\n"
30196 " movl %%edx, 12(%3)\n"
30197- "4: movl 16(%4), %%eax\n"
30198- "41: movl 20(%4), %%edx\n"
30199+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30200+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30201 " movl %%eax, 16(%3)\n"
30202 " movl %%edx, 20(%3)\n"
30203- "10: movl 24(%4), %%eax\n"
30204- "51: movl 28(%4), %%edx\n"
30205+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30206+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30207 " movl %%eax, 24(%3)\n"
30208 " movl %%edx, 28(%3)\n"
30209- "11: movl 32(%4), %%eax\n"
30210- "61: movl 36(%4), %%edx\n"
30211+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30212+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30213 " movl %%eax, 32(%3)\n"
30214 " movl %%edx, 36(%3)\n"
30215- "12: movl 40(%4), %%eax\n"
30216- "71: movl 44(%4), %%edx\n"
30217+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30218+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30219 " movl %%eax, 40(%3)\n"
30220 " movl %%edx, 44(%3)\n"
30221- "13: movl 48(%4), %%eax\n"
30222- "81: movl 52(%4), %%edx\n"
30223+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30224+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30225 " movl %%eax, 48(%3)\n"
30226 " movl %%edx, 52(%3)\n"
30227- "14: movl 56(%4), %%eax\n"
30228- "91: movl 60(%4), %%edx\n"
30229+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30230+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30231 " movl %%eax, 56(%3)\n"
30232 " movl %%edx, 60(%3)\n"
30233 " addl $-64, %0\n"
30234@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30235 " shrl $2, %0\n"
30236 " andl $3, %%eax\n"
30237 " cld\n"
30238- "6: rep; movsl\n"
30239+ "6: rep; "__copyuser_seg" movsl\n"
30240 " movl %%eax,%0\n"
30241- "7: rep; movsb\n"
30242+ "7: rep; "__copyuser_seg" movsb\n"
30243 "8:\n"
30244 ".section .fixup,\"ax\"\n"
30245 "9: lea 0(%%eax,%0,4),%0\n"
30246@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30247
30248 __asm__ __volatile__(
30249 " .align 2,0x90\n"
30250- "0: movl 32(%4), %%eax\n"
30251+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30252 " cmpl $67, %0\n"
30253 " jbe 2f\n"
30254- "1: movl 64(%4), %%eax\n"
30255+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30256 " .align 2,0x90\n"
30257- "2: movl 0(%4), %%eax\n"
30258- "21: movl 4(%4), %%edx\n"
30259+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30260+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30261 " movnti %%eax, 0(%3)\n"
30262 " movnti %%edx, 4(%3)\n"
30263- "3: movl 8(%4), %%eax\n"
30264- "31: movl 12(%4),%%edx\n"
30265+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30266+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30267 " movnti %%eax, 8(%3)\n"
30268 " movnti %%edx, 12(%3)\n"
30269- "4: movl 16(%4), %%eax\n"
30270- "41: movl 20(%4), %%edx\n"
30271+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30272+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30273 " movnti %%eax, 16(%3)\n"
30274 " movnti %%edx, 20(%3)\n"
30275- "10: movl 24(%4), %%eax\n"
30276- "51: movl 28(%4), %%edx\n"
30277+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30278+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30279 " movnti %%eax, 24(%3)\n"
30280 " movnti %%edx, 28(%3)\n"
30281- "11: movl 32(%4), %%eax\n"
30282- "61: movl 36(%4), %%edx\n"
30283+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30284+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30285 " movnti %%eax, 32(%3)\n"
30286 " movnti %%edx, 36(%3)\n"
30287- "12: movl 40(%4), %%eax\n"
30288- "71: movl 44(%4), %%edx\n"
30289+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30290+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30291 " movnti %%eax, 40(%3)\n"
30292 " movnti %%edx, 44(%3)\n"
30293- "13: movl 48(%4), %%eax\n"
30294- "81: movl 52(%4), %%edx\n"
30295+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30296+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30297 " movnti %%eax, 48(%3)\n"
30298 " movnti %%edx, 52(%3)\n"
30299- "14: movl 56(%4), %%eax\n"
30300- "91: movl 60(%4), %%edx\n"
30301+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30302+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30303 " movnti %%eax, 56(%3)\n"
30304 " movnti %%edx, 60(%3)\n"
30305 " addl $-64, %0\n"
30306@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30307 " shrl $2, %0\n"
30308 " andl $3, %%eax\n"
30309 " cld\n"
30310- "6: rep; movsl\n"
30311+ "6: rep; "__copyuser_seg" movsl\n"
30312 " movl %%eax,%0\n"
30313- "7: rep; movsb\n"
30314+ "7: rep; "__copyuser_seg" movsb\n"
30315 "8:\n"
30316 ".section .fixup,\"ax\"\n"
30317 "9: lea 0(%%eax,%0,4),%0\n"
30318@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
30319
30320 __asm__ __volatile__(
30321 " .align 2,0x90\n"
30322- "0: movl 32(%4), %%eax\n"
30323+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30324 " cmpl $67, %0\n"
30325 " jbe 2f\n"
30326- "1: movl 64(%4), %%eax\n"
30327+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30328 " .align 2,0x90\n"
30329- "2: movl 0(%4), %%eax\n"
30330- "21: movl 4(%4), %%edx\n"
30331+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30332+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30333 " movnti %%eax, 0(%3)\n"
30334 " movnti %%edx, 4(%3)\n"
30335- "3: movl 8(%4), %%eax\n"
30336- "31: movl 12(%4),%%edx\n"
30337+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30338+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30339 " movnti %%eax, 8(%3)\n"
30340 " movnti %%edx, 12(%3)\n"
30341- "4: movl 16(%4), %%eax\n"
30342- "41: movl 20(%4), %%edx\n"
30343+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30344+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30345 " movnti %%eax, 16(%3)\n"
30346 " movnti %%edx, 20(%3)\n"
30347- "10: movl 24(%4), %%eax\n"
30348- "51: movl 28(%4), %%edx\n"
30349+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30350+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30351 " movnti %%eax, 24(%3)\n"
30352 " movnti %%edx, 28(%3)\n"
30353- "11: movl 32(%4), %%eax\n"
30354- "61: movl 36(%4), %%edx\n"
30355+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30356+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30357 " movnti %%eax, 32(%3)\n"
30358 " movnti %%edx, 36(%3)\n"
30359- "12: movl 40(%4), %%eax\n"
30360- "71: movl 44(%4), %%edx\n"
30361+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30362+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30363 " movnti %%eax, 40(%3)\n"
30364 " movnti %%edx, 44(%3)\n"
30365- "13: movl 48(%4), %%eax\n"
30366- "81: movl 52(%4), %%edx\n"
30367+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30368+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30369 " movnti %%eax, 48(%3)\n"
30370 " movnti %%edx, 52(%3)\n"
30371- "14: movl 56(%4), %%eax\n"
30372- "91: movl 60(%4), %%edx\n"
30373+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30374+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30375 " movnti %%eax, 56(%3)\n"
30376 " movnti %%edx, 60(%3)\n"
30377 " addl $-64, %0\n"
30378@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
30379 " shrl $2, %0\n"
30380 " andl $3, %%eax\n"
30381 " cld\n"
30382- "6: rep; movsl\n"
30383+ "6: rep; "__copyuser_seg" movsl\n"
30384 " movl %%eax,%0\n"
30385- "7: rep; movsb\n"
30386+ "7: rep; "__copyuser_seg" movsb\n"
30387 "8:\n"
30388 ".section .fixup,\"ax\"\n"
30389 "9: lea 0(%%eax,%0,4),%0\n"
30390@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
30391 */
30392 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
30393 unsigned long size);
30394-unsigned long __copy_user_intel(void __user *to, const void *from,
30395+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
30396+ unsigned long size);
30397+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
30398 unsigned long size);
30399 unsigned long __copy_user_zeroing_intel_nocache(void *to,
30400 const void __user *from, unsigned long size);
30401 #endif /* CONFIG_X86_INTEL_USERCOPY */
30402
30403 /* Generic arbitrary sized copy. */
30404-#define __copy_user(to, from, size) \
30405+#define __copy_user(to, from, size, prefix, set, restore) \
30406 do { \
30407 int __d0, __d1, __d2; \
30408 __asm__ __volatile__( \
30409+ set \
30410 " cmp $7,%0\n" \
30411 " jbe 1f\n" \
30412 " movl %1,%0\n" \
30413 " negl %0\n" \
30414 " andl $7,%0\n" \
30415 " subl %0,%3\n" \
30416- "4: rep; movsb\n" \
30417+ "4: rep; "prefix"movsb\n" \
30418 " movl %3,%0\n" \
30419 " shrl $2,%0\n" \
30420 " andl $3,%3\n" \
30421 " .align 2,0x90\n" \
30422- "0: rep; movsl\n" \
30423+ "0: rep; "prefix"movsl\n" \
30424 " movl %3,%0\n" \
30425- "1: rep; movsb\n" \
30426+ "1: rep; "prefix"movsb\n" \
30427 "2:\n" \
30428+ restore \
30429 ".section .fixup,\"ax\"\n" \
30430 "5: addl %3,%0\n" \
30431 " jmp 2b\n" \
30432@@ -538,14 +650,14 @@ do { \
30433 " negl %0\n" \
30434 " andl $7,%0\n" \
30435 " subl %0,%3\n" \
30436- "4: rep; movsb\n" \
30437+ "4: rep; "__copyuser_seg"movsb\n" \
30438 " movl %3,%0\n" \
30439 " shrl $2,%0\n" \
30440 " andl $3,%3\n" \
30441 " .align 2,0x90\n" \
30442- "0: rep; movsl\n" \
30443+ "0: rep; "__copyuser_seg"movsl\n" \
30444 " movl %3,%0\n" \
30445- "1: rep; movsb\n" \
30446+ "1: rep; "__copyuser_seg"movsb\n" \
30447 "2:\n" \
30448 ".section .fixup,\"ax\"\n" \
30449 "5: addl %3,%0\n" \
30450@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
30451 {
30452 stac();
30453 if (movsl_is_ok(to, from, n))
30454- __copy_user(to, from, n);
30455+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
30456 else
30457- n = __copy_user_intel(to, from, n);
30458+ n = __generic_copy_to_user_intel(to, from, n);
30459 clac();
30460 return n;
30461 }
30462@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
30463 {
30464 stac();
30465 if (movsl_is_ok(to, from, n))
30466- __copy_user(to, from, n);
30467+ __copy_user(to, from, n, __copyuser_seg, "", "");
30468 else
30469- n = __copy_user_intel((void __user *)to,
30470- (const void *)from, n);
30471+ n = __generic_copy_from_user_intel(to, from, n);
30472 clac();
30473 return n;
30474 }
30475@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
30476 if (n > 64 && cpu_has_xmm2)
30477 n = __copy_user_intel_nocache(to, from, n);
30478 else
30479- __copy_user(to, from, n);
30480+ __copy_user(to, from, n, __copyuser_seg, "", "");
30481 #else
30482- __copy_user(to, from, n);
30483+ __copy_user(to, from, n, __copyuser_seg, "", "");
30484 #endif
30485 clac();
30486 return n;
30487 }
30488 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
30489
30490-/**
30491- * copy_to_user: - Copy a block of data into user space.
30492- * @to: Destination address, in user space.
30493- * @from: Source address, in kernel space.
30494- * @n: Number of bytes to copy.
30495- *
30496- * Context: User context only. This function may sleep.
30497- *
30498- * Copy data from kernel space to user space.
30499- *
30500- * Returns number of bytes that could not be copied.
30501- * On success, this will be zero.
30502- */
30503-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
30504+#ifdef CONFIG_PAX_MEMORY_UDEREF
30505+void __set_fs(mm_segment_t x)
30506 {
30507- if (access_ok(VERIFY_WRITE, to, n))
30508- n = __copy_to_user(to, from, n);
30509- return n;
30510+ switch (x.seg) {
30511+ case 0:
30512+ loadsegment(gs, 0);
30513+ break;
30514+ case TASK_SIZE_MAX:
30515+ loadsegment(gs, __USER_DS);
30516+ break;
30517+ case -1UL:
30518+ loadsegment(gs, __KERNEL_DS);
30519+ break;
30520+ default:
30521+ BUG();
30522+ }
30523 }
30524-EXPORT_SYMBOL(_copy_to_user);
30525+EXPORT_SYMBOL(__set_fs);
30526
30527-/**
30528- * copy_from_user: - Copy a block of data from user space.
30529- * @to: Destination address, in kernel space.
30530- * @from: Source address, in user space.
30531- * @n: Number of bytes to copy.
30532- *
30533- * Context: User context only. This function may sleep.
30534- *
30535- * Copy data from user space to kernel space.
30536- *
30537- * Returns number of bytes that could not be copied.
30538- * On success, this will be zero.
30539- *
30540- * If some data could not be copied, this function will pad the copied
30541- * data to the requested size using zero bytes.
30542- */
30543-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
30544+void set_fs(mm_segment_t x)
30545 {
30546- if (access_ok(VERIFY_READ, from, n))
30547- n = __copy_from_user(to, from, n);
30548- else
30549- memset(to, 0, n);
30550- return n;
30551+ current_thread_info()->addr_limit = x;
30552+ __set_fs(x);
30553 }
30554-EXPORT_SYMBOL(_copy_from_user);
30555+EXPORT_SYMBOL(set_fs);
30556+#endif
30557diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
30558index c905e89..01ab928 100644
30559--- a/arch/x86/lib/usercopy_64.c
30560+++ b/arch/x86/lib/usercopy_64.c
30561@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
30562 might_fault();
30563 /* no memory constraint because it doesn't change any memory gcc knows
30564 about */
30565+ pax_open_userland();
30566 stac();
30567 asm volatile(
30568 " testq %[size8],%[size8]\n"
30569@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
30570 _ASM_EXTABLE(0b,3b)
30571 _ASM_EXTABLE(1b,2b)
30572 : [size8] "=&c"(size), [dst] "=&D" (__d0)
30573- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
30574+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
30575 [zero] "r" (0UL), [eight] "r" (8UL));
30576 clac();
30577+ pax_close_userland();
30578 return size;
30579 }
30580 EXPORT_SYMBOL(__clear_user);
30581@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
30582 }
30583 EXPORT_SYMBOL(clear_user);
30584
30585-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
30586+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
30587 {
30588- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
30589- return copy_user_generic((__force void *)to, (__force void *)from, len);
30590- }
30591- return len;
30592+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
30593+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
30594+ return len;
30595 }
30596 EXPORT_SYMBOL(copy_in_user);
30597
30598@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
30599 * it is not necessary to optimize tail handling.
30600 */
30601 __visible unsigned long
30602-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
30603+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
30604 {
30605 char c;
30606 unsigned zero_len;
30607
30608+ clac();
30609+ pax_close_userland();
30610 for (; len; --len, to++) {
30611 if (__get_user_nocheck(c, from++, sizeof(char)))
30612 break;
30613@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
30614 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
30615 if (__put_user_nocheck(c, to++, sizeof(char)))
30616 break;
30617- clac();
30618 return len;
30619 }
30620diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
30621index 6a19ad9..1c48f9a 100644
30622--- a/arch/x86/mm/Makefile
30623+++ b/arch/x86/mm/Makefile
30624@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
30625 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
30626
30627 obj-$(CONFIG_MEMTEST) += memtest.o
30628+
30629+quote:="
30630+obj-$(CONFIG_X86_64) += uderef_64.o
30631+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
30632diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
30633index 903ec1e..c4166b2 100644
30634--- a/arch/x86/mm/extable.c
30635+++ b/arch/x86/mm/extable.c
30636@@ -6,12 +6,24 @@
30637 static inline unsigned long
30638 ex_insn_addr(const struct exception_table_entry *x)
30639 {
30640- return (unsigned long)&x->insn + x->insn;
30641+ unsigned long reloc = 0;
30642+
30643+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30644+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30645+#endif
30646+
30647+ return (unsigned long)&x->insn + x->insn + reloc;
30648 }
30649 static inline unsigned long
30650 ex_fixup_addr(const struct exception_table_entry *x)
30651 {
30652- return (unsigned long)&x->fixup + x->fixup;
30653+ unsigned long reloc = 0;
30654+
30655+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30656+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30657+#endif
30658+
30659+ return (unsigned long)&x->fixup + x->fixup + reloc;
30660 }
30661
30662 int fixup_exception(struct pt_regs *regs)
30663@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
30664 unsigned long new_ip;
30665
30666 #ifdef CONFIG_PNPBIOS
30667- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
30668+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
30669 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
30670 extern u32 pnp_bios_is_utter_crap;
30671 pnp_bios_is_utter_crap = 1;
30672@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
30673 i += 4;
30674 p->fixup -= i;
30675 i += 4;
30676+
30677+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30678+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
30679+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30680+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30681+#endif
30682+
30683 }
30684 }
30685
30686diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
30687index 9d591c8..2e61790 100644
30688--- a/arch/x86/mm/fault.c
30689+++ b/arch/x86/mm/fault.c
30690@@ -14,11 +14,18 @@
30691 #include <linux/hugetlb.h> /* hstate_index_to_shift */
30692 #include <linux/prefetch.h> /* prefetchw */
30693 #include <linux/context_tracking.h> /* exception_enter(), ... */
30694+#include <linux/unistd.h>
30695+#include <linux/compiler.h>
30696
30697 #include <asm/traps.h> /* dotraplinkage, ... */
30698 #include <asm/pgalloc.h> /* pgd_*(), ... */
30699 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
30700 #include <asm/fixmap.h> /* VSYSCALL_START */
30701+#include <asm/tlbflush.h>
30702+
30703+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30704+#include <asm/stacktrace.h>
30705+#endif
30706
30707 #define CREATE_TRACE_POINTS
30708 #include <asm/trace/exceptions.h>
30709@@ -59,7 +66,7 @@ static inline int __kprobes kprobes_fault(struct pt_regs *regs)
30710 int ret = 0;
30711
30712 /* kprobe_running() needs smp_processor_id() */
30713- if (kprobes_built_in() && !user_mode_vm(regs)) {
30714+ if (kprobes_built_in() && !user_mode(regs)) {
30715 preempt_disable();
30716 if (kprobe_running() && kprobe_fault_handler(regs, 14))
30717 ret = 1;
30718@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
30719 return !instr_lo || (instr_lo>>1) == 1;
30720 case 0x00:
30721 /* Prefetch instruction is 0x0F0D or 0x0F18 */
30722- if (probe_kernel_address(instr, opcode))
30723+ if (user_mode(regs)) {
30724+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
30725+ return 0;
30726+ } else if (probe_kernel_address(instr, opcode))
30727 return 0;
30728
30729 *prefetch = (instr_lo == 0xF) &&
30730@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
30731 while (instr < max_instr) {
30732 unsigned char opcode;
30733
30734- if (probe_kernel_address(instr, opcode))
30735+ if (user_mode(regs)) {
30736+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
30737+ break;
30738+ } else if (probe_kernel_address(instr, opcode))
30739 break;
30740
30741 instr++;
30742@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
30743 force_sig_info(si_signo, &info, tsk);
30744 }
30745
30746+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30747+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
30748+#endif
30749+
30750+#ifdef CONFIG_PAX_EMUTRAMP
30751+static int pax_handle_fetch_fault(struct pt_regs *regs);
30752+#endif
30753+
30754+#ifdef CONFIG_PAX_PAGEEXEC
30755+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
30756+{
30757+ pgd_t *pgd;
30758+ pud_t *pud;
30759+ pmd_t *pmd;
30760+
30761+ pgd = pgd_offset(mm, address);
30762+ if (!pgd_present(*pgd))
30763+ return NULL;
30764+ pud = pud_offset(pgd, address);
30765+ if (!pud_present(*pud))
30766+ return NULL;
30767+ pmd = pmd_offset(pud, address);
30768+ if (!pmd_present(*pmd))
30769+ return NULL;
30770+ return pmd;
30771+}
30772+#endif
30773+
30774 DEFINE_SPINLOCK(pgd_lock);
30775 LIST_HEAD(pgd_list);
30776
30777@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
30778 for (address = VMALLOC_START & PMD_MASK;
30779 address >= TASK_SIZE && address < FIXADDR_TOP;
30780 address += PMD_SIZE) {
30781+
30782+#ifdef CONFIG_PAX_PER_CPU_PGD
30783+ unsigned long cpu;
30784+#else
30785 struct page *page;
30786+#endif
30787
30788 spin_lock(&pgd_lock);
30789+
30790+#ifdef CONFIG_PAX_PER_CPU_PGD
30791+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
30792+ pgd_t *pgd = get_cpu_pgd(cpu, user);
30793+ pmd_t *ret;
30794+
30795+ ret = vmalloc_sync_one(pgd, address);
30796+ if (!ret)
30797+ break;
30798+ pgd = get_cpu_pgd(cpu, kernel);
30799+#else
30800 list_for_each_entry(page, &pgd_list, lru) {
30801+ pgd_t *pgd;
30802 spinlock_t *pgt_lock;
30803 pmd_t *ret;
30804
30805@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
30806 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
30807
30808 spin_lock(pgt_lock);
30809- ret = vmalloc_sync_one(page_address(page), address);
30810+ pgd = page_address(page);
30811+#endif
30812+
30813+ ret = vmalloc_sync_one(pgd, address);
30814+
30815+#ifndef CONFIG_PAX_PER_CPU_PGD
30816 spin_unlock(pgt_lock);
30817+#endif
30818
30819 if (!ret)
30820 break;
30821@@ -281,6 +345,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30822 * an interrupt in the middle of a task switch..
30823 */
30824 pgd_paddr = read_cr3();
30825+
30826+#ifdef CONFIG_PAX_PER_CPU_PGD
30827+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
30828+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
30829+#endif
30830+
30831 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
30832 if (!pmd_k)
30833 return -1;
30834@@ -376,11 +446,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30835 * happen within a race in page table update. In the later
30836 * case just flush:
30837 */
30838- pgd = pgd_offset(current->active_mm, address);
30839+
30840 pgd_ref = pgd_offset_k(address);
30841 if (pgd_none(*pgd_ref))
30842 return -1;
30843
30844+#ifdef CONFIG_PAX_PER_CPU_PGD
30845+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
30846+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
30847+ if (pgd_none(*pgd)) {
30848+ set_pgd(pgd, *pgd_ref);
30849+ arch_flush_lazy_mmu_mode();
30850+ } else {
30851+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
30852+ }
30853+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
30854+#else
30855+ pgd = pgd_offset(current->active_mm, address);
30856+#endif
30857+
30858 if (pgd_none(*pgd)) {
30859 set_pgd(pgd, *pgd_ref);
30860 arch_flush_lazy_mmu_mode();
30861@@ -546,7 +630,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
30862 static int is_errata100(struct pt_regs *regs, unsigned long address)
30863 {
30864 #ifdef CONFIG_X86_64
30865- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
30866+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
30867 return 1;
30868 #endif
30869 return 0;
30870@@ -573,7 +657,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
30871 }
30872
30873 static const char nx_warning[] = KERN_CRIT
30874-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
30875+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
30876
30877 static void
30878 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30879@@ -582,15 +666,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30880 if (!oops_may_print())
30881 return;
30882
30883- if (error_code & PF_INSTR) {
30884+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
30885 unsigned int level;
30886
30887 pte_t *pte = lookup_address(address, &level);
30888
30889 if (pte && pte_present(*pte) && !pte_exec(*pte))
30890- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
30891+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
30892 }
30893
30894+#ifdef CONFIG_PAX_KERNEXEC
30895+ if (init_mm.start_code <= address && address < init_mm.end_code) {
30896+ if (current->signal->curr_ip)
30897+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
30898+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
30899+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30900+ else
30901+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
30902+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30903+ }
30904+#endif
30905+
30906 printk(KERN_ALERT "BUG: unable to handle kernel ");
30907 if (address < PAGE_SIZE)
30908 printk(KERN_CONT "NULL pointer dereference");
30909@@ -771,6 +867,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
30910 return;
30911 }
30912 #endif
30913+
30914+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30915+ if (pax_is_fetch_fault(regs, error_code, address)) {
30916+
30917+#ifdef CONFIG_PAX_EMUTRAMP
30918+ switch (pax_handle_fetch_fault(regs)) {
30919+ case 2:
30920+ return;
30921+ }
30922+#endif
30923+
30924+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30925+ do_group_exit(SIGKILL);
30926+ }
30927+#endif
30928+
30929 /* Kernel addresses are always protection faults: */
30930 if (address >= TASK_SIZE)
30931 error_code |= PF_PROT;
30932@@ -856,7 +968,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
30933 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
30934 printk(KERN_ERR
30935 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
30936- tsk->comm, tsk->pid, address);
30937+ tsk->comm, task_pid_nr(tsk), address);
30938 code = BUS_MCEERR_AR;
30939 }
30940 #endif
30941@@ -910,6 +1022,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
30942 return 1;
30943 }
30944
30945+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30946+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
30947+{
30948+ pte_t *pte;
30949+ pmd_t *pmd;
30950+ spinlock_t *ptl;
30951+ unsigned char pte_mask;
30952+
30953+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
30954+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
30955+ return 0;
30956+
30957+ /* PaX: it's our fault, let's handle it if we can */
30958+
30959+ /* PaX: take a look at read faults before acquiring any locks */
30960+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
30961+ /* instruction fetch attempt from a protected page in user mode */
30962+ up_read(&mm->mmap_sem);
30963+
30964+#ifdef CONFIG_PAX_EMUTRAMP
30965+ switch (pax_handle_fetch_fault(regs)) {
30966+ case 2:
30967+ return 1;
30968+ }
30969+#endif
30970+
30971+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30972+ do_group_exit(SIGKILL);
30973+ }
30974+
30975+ pmd = pax_get_pmd(mm, address);
30976+ if (unlikely(!pmd))
30977+ return 0;
30978+
30979+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
30980+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
30981+ pte_unmap_unlock(pte, ptl);
30982+ return 0;
30983+ }
30984+
30985+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
30986+ /* write attempt to a protected page in user mode */
30987+ pte_unmap_unlock(pte, ptl);
30988+ return 0;
30989+ }
30990+
30991+#ifdef CONFIG_SMP
30992+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
30993+#else
30994+ if (likely(address > get_limit(regs->cs)))
30995+#endif
30996+ {
30997+ set_pte(pte, pte_mkread(*pte));
30998+ __flush_tlb_one(address);
30999+ pte_unmap_unlock(pte, ptl);
31000+ up_read(&mm->mmap_sem);
31001+ return 1;
31002+ }
31003+
31004+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
31005+
31006+ /*
31007+ * PaX: fill DTLB with user rights and retry
31008+ */
31009+ __asm__ __volatile__ (
31010+ "orb %2,(%1)\n"
31011+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
31012+/*
31013+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
31014+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
31015+ * page fault when examined during a TLB load attempt. this is true not only
31016+ * for PTEs holding a non-present entry but also present entries that will
31017+ * raise a page fault (such as those set up by PaX, or the copy-on-write
31018+ * mechanism). in effect it means that we do *not* need to flush the TLBs
31019+ * for our target pages since their PTEs are simply not in the TLBs at all.
31020+
31021+ * the best thing in omitting it is that we gain around 15-20% speed in the
31022+ * fast path of the page fault handler and can get rid of tracing since we
31023+ * can no longer flush unintended entries.
31024+ */
31025+ "invlpg (%0)\n"
31026+#endif
31027+ __copyuser_seg"testb $0,(%0)\n"
31028+ "xorb %3,(%1)\n"
31029+ :
31030+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
31031+ : "memory", "cc");
31032+ pte_unmap_unlock(pte, ptl);
31033+ up_read(&mm->mmap_sem);
31034+ return 1;
31035+}
31036+#endif
31037+
31038 /*
31039 * Handle a spurious fault caused by a stale TLB entry.
31040 *
31041@@ -976,6 +1181,9 @@ int show_unhandled_signals = 1;
31042 static inline int
31043 access_error(unsigned long error_code, struct vm_area_struct *vma)
31044 {
31045+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
31046+ return 1;
31047+
31048 if (error_code & PF_WRITE) {
31049 /* write, present and write, not present: */
31050 if (unlikely(!(vma->vm_flags & VM_WRITE)))
31051@@ -1004,7 +1212,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
31052 if (error_code & PF_USER)
31053 return false;
31054
31055- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
31056+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
31057 return false;
31058
31059 return true;
31060@@ -1031,6 +1239,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
31061 /* Get the faulting address: */
31062 address = read_cr2();
31063
31064+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31065+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
31066+ if (!search_exception_tables(regs->ip)) {
31067+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
31068+ bad_area_nosemaphore(regs, error_code, address);
31069+ return;
31070+ }
31071+ if (address < pax_user_shadow_base) {
31072+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
31073+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
31074+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
31075+ } else
31076+ address -= pax_user_shadow_base;
31077+ }
31078+#endif
31079+
31080 /*
31081 * Detect and handle instructions that would cause a page fault for
31082 * both a tracked kernel page and a userspace page.
31083@@ -1110,7 +1334,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
31084 * User-mode registers count as a user access even for any
31085 * potential system fault or CPU buglet:
31086 */
31087- if (user_mode_vm(regs)) {
31088+ if (user_mode(regs)) {
31089 local_irq_enable();
31090 error_code |= PF_USER;
31091 flags |= FAULT_FLAG_USER;
31092@@ -1157,6 +1381,11 @@ retry:
31093 might_sleep();
31094 }
31095
31096+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
31097+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
31098+ return;
31099+#endif
31100+
31101 vma = find_vma(mm, address);
31102 if (unlikely(!vma)) {
31103 bad_area(regs, error_code, address);
31104@@ -1168,18 +1397,24 @@ retry:
31105 bad_area(regs, error_code, address);
31106 return;
31107 }
31108- if (error_code & PF_USER) {
31109- /*
31110- * Accessing the stack below %sp is always a bug.
31111- * The large cushion allows instructions like enter
31112- * and pusha to work. ("enter $65535, $31" pushes
31113- * 32 pointers and then decrements %sp by 65535.)
31114- */
31115- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
31116- bad_area(regs, error_code, address);
31117- return;
31118- }
31119+ /*
31120+ * Accessing the stack below %sp is always a bug.
31121+ * The large cushion allows instructions like enter
31122+ * and pusha to work. ("enter $65535, $31" pushes
31123+ * 32 pointers and then decrements %sp by 65535.)
31124+ */
31125+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
31126+ bad_area(regs, error_code, address);
31127+ return;
31128 }
31129+
31130+#ifdef CONFIG_PAX_SEGMEXEC
31131+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
31132+ bad_area(regs, error_code, address);
31133+ return;
31134+ }
31135+#endif
31136+
31137 if (unlikely(expand_stack(vma, address))) {
31138 bad_area(regs, error_code, address);
31139 return;
31140@@ -1273,3 +1508,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
31141 __do_page_fault(regs, error_code);
31142 exception_exit(prev_state);
31143 }
31144+
31145+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31146+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
31147+{
31148+ struct mm_struct *mm = current->mm;
31149+ unsigned long ip = regs->ip;
31150+
31151+ if (v8086_mode(regs))
31152+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
31153+
31154+#ifdef CONFIG_PAX_PAGEEXEC
31155+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
31156+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
31157+ return true;
31158+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
31159+ return true;
31160+ return false;
31161+ }
31162+#endif
31163+
31164+#ifdef CONFIG_PAX_SEGMEXEC
31165+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
31166+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
31167+ return true;
31168+ return false;
31169+ }
31170+#endif
31171+
31172+ return false;
31173+}
31174+#endif
31175+
31176+#ifdef CONFIG_PAX_EMUTRAMP
31177+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
31178+{
31179+ int err;
31180+
31181+ do { /* PaX: libffi trampoline emulation */
31182+ unsigned char mov, jmp;
31183+ unsigned int addr1, addr2;
31184+
31185+#ifdef CONFIG_X86_64
31186+ if ((regs->ip + 9) >> 32)
31187+ break;
31188+#endif
31189+
31190+ err = get_user(mov, (unsigned char __user *)regs->ip);
31191+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31192+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31193+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31194+
31195+ if (err)
31196+ break;
31197+
31198+ if (mov == 0xB8 && jmp == 0xE9) {
31199+ regs->ax = addr1;
31200+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31201+ return 2;
31202+ }
31203+ } while (0);
31204+
31205+ do { /* PaX: gcc trampoline emulation #1 */
31206+ unsigned char mov1, mov2;
31207+ unsigned short jmp;
31208+ unsigned int addr1, addr2;
31209+
31210+#ifdef CONFIG_X86_64
31211+ if ((regs->ip + 11) >> 32)
31212+ break;
31213+#endif
31214+
31215+ err = get_user(mov1, (unsigned char __user *)regs->ip);
31216+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31217+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
31218+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31219+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
31220+
31221+ if (err)
31222+ break;
31223+
31224+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
31225+ regs->cx = addr1;
31226+ regs->ax = addr2;
31227+ regs->ip = addr2;
31228+ return 2;
31229+ }
31230+ } while (0);
31231+
31232+ do { /* PaX: gcc trampoline emulation #2 */
31233+ unsigned char mov, jmp;
31234+ unsigned int addr1, addr2;
31235+
31236+#ifdef CONFIG_X86_64
31237+ if ((regs->ip + 9) >> 32)
31238+ break;
31239+#endif
31240+
31241+ err = get_user(mov, (unsigned char __user *)regs->ip);
31242+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31243+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31244+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31245+
31246+ if (err)
31247+ break;
31248+
31249+ if (mov == 0xB9 && jmp == 0xE9) {
31250+ regs->cx = addr1;
31251+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31252+ return 2;
31253+ }
31254+ } while (0);
31255+
31256+ return 1; /* PaX in action */
31257+}
31258+
31259+#ifdef CONFIG_X86_64
31260+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
31261+{
31262+ int err;
31263+
31264+ do { /* PaX: libffi trampoline emulation */
31265+ unsigned short mov1, mov2, jmp1;
31266+ unsigned char stcclc, jmp2;
31267+ unsigned long addr1, addr2;
31268+
31269+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31270+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31271+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31272+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31273+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
31274+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
31275+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
31276+
31277+ if (err)
31278+ break;
31279+
31280+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31281+ regs->r11 = addr1;
31282+ regs->r10 = addr2;
31283+ if (stcclc == 0xF8)
31284+ regs->flags &= ~X86_EFLAGS_CF;
31285+ else
31286+ regs->flags |= X86_EFLAGS_CF;
31287+ regs->ip = addr1;
31288+ return 2;
31289+ }
31290+ } while (0);
31291+
31292+ do { /* PaX: gcc trampoline emulation #1 */
31293+ unsigned short mov1, mov2, jmp1;
31294+ unsigned char jmp2;
31295+ unsigned int addr1;
31296+ unsigned long addr2;
31297+
31298+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31299+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
31300+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
31301+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
31302+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
31303+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
31304+
31305+ if (err)
31306+ break;
31307+
31308+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31309+ regs->r11 = addr1;
31310+ regs->r10 = addr2;
31311+ regs->ip = addr1;
31312+ return 2;
31313+ }
31314+ } while (0);
31315+
31316+ do { /* PaX: gcc trampoline emulation #2 */
31317+ unsigned short mov1, mov2, jmp1;
31318+ unsigned char jmp2;
31319+ unsigned long addr1, addr2;
31320+
31321+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31322+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31323+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31324+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31325+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
31326+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
31327+
31328+ if (err)
31329+ break;
31330+
31331+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31332+ regs->r11 = addr1;
31333+ regs->r10 = addr2;
31334+ regs->ip = addr1;
31335+ return 2;
31336+ }
31337+ } while (0);
31338+
31339+ return 1; /* PaX in action */
31340+}
31341+#endif
31342+
31343+/*
31344+ * PaX: decide what to do with offenders (regs->ip = fault address)
31345+ *
31346+ * returns 1 when task should be killed
31347+ * 2 when gcc trampoline was detected
31348+ */
31349+static int pax_handle_fetch_fault(struct pt_regs *regs)
31350+{
31351+ if (v8086_mode(regs))
31352+ return 1;
31353+
31354+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
31355+ return 1;
31356+
31357+#ifdef CONFIG_X86_32
31358+ return pax_handle_fetch_fault_32(regs);
31359+#else
31360+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
31361+ return pax_handle_fetch_fault_32(regs);
31362+ else
31363+ return pax_handle_fetch_fault_64(regs);
31364+#endif
31365+}
31366+#endif
31367+
31368+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31369+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
31370+{
31371+ long i;
31372+
31373+ printk(KERN_ERR "PAX: bytes at PC: ");
31374+ for (i = 0; i < 20; i++) {
31375+ unsigned char c;
31376+ if (get_user(c, (unsigned char __force_user *)pc+i))
31377+ printk(KERN_CONT "?? ");
31378+ else
31379+ printk(KERN_CONT "%02x ", c);
31380+ }
31381+ printk("\n");
31382+
31383+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
31384+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
31385+ unsigned long c;
31386+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
31387+#ifdef CONFIG_X86_32
31388+ printk(KERN_CONT "???????? ");
31389+#else
31390+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
31391+ printk(KERN_CONT "???????? ???????? ");
31392+ else
31393+ printk(KERN_CONT "???????????????? ");
31394+#endif
31395+ } else {
31396+#ifdef CONFIG_X86_64
31397+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
31398+ printk(KERN_CONT "%08x ", (unsigned int)c);
31399+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
31400+ } else
31401+#endif
31402+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
31403+ }
31404+ }
31405+ printk("\n");
31406+}
31407+#endif
31408+
31409+/**
31410+ * probe_kernel_write(): safely attempt to write to a location
31411+ * @dst: address to write to
31412+ * @src: pointer to the data that shall be written
31413+ * @size: size of the data chunk
31414+ *
31415+ * Safely write to address @dst from the buffer at @src. If a kernel fault
31416+ * happens, handle that and return -EFAULT.
31417+ */
31418+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
31419+{
31420+ long ret;
31421+ mm_segment_t old_fs = get_fs();
31422+
31423+ set_fs(KERNEL_DS);
31424+ pagefault_disable();
31425+ pax_open_kernel();
31426+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
31427+ pax_close_kernel();
31428+ pagefault_enable();
31429+ set_fs(old_fs);
31430+
31431+ return ret ? -EFAULT : 0;
31432+}
31433diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
31434index 0596e8e..9de0b1c 100644
31435--- a/arch/x86/mm/gup.c
31436+++ b/arch/x86/mm/gup.c
31437@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
31438 addr = start;
31439 len = (unsigned long) nr_pages << PAGE_SHIFT;
31440 end = start + len;
31441- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31442+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31443 (void __user *)start, len)))
31444 return 0;
31445
31446@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
31447 goto slow_irqon;
31448 #endif
31449
31450+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31451+ (void __user *)start, len)))
31452+ return 0;
31453+
31454 /*
31455 * XXX: batch / limit 'nr', to avoid large irq off latency
31456 * needs some instrumenting to determine the common sizes used by
31457diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
31458index 4500142..53a363c 100644
31459--- a/arch/x86/mm/highmem_32.c
31460+++ b/arch/x86/mm/highmem_32.c
31461@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
31462 idx = type + KM_TYPE_NR*smp_processor_id();
31463 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31464 BUG_ON(!pte_none(*(kmap_pte-idx)));
31465+
31466+ pax_open_kernel();
31467 set_pte(kmap_pte-idx, mk_pte(page, prot));
31468+ pax_close_kernel();
31469+
31470 arch_flush_lazy_mmu_mode();
31471
31472 return (void *)vaddr;
31473diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
31474index 9d980d8..6bbfacb 100644
31475--- a/arch/x86/mm/hugetlbpage.c
31476+++ b/arch/x86/mm/hugetlbpage.c
31477@@ -92,23 +92,30 @@ int pmd_huge_support(void)
31478 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
31479 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
31480 unsigned long addr, unsigned long len,
31481- unsigned long pgoff, unsigned long flags)
31482+ unsigned long pgoff, unsigned long flags, unsigned long offset)
31483 {
31484 struct hstate *h = hstate_file(file);
31485 struct vm_unmapped_area_info info;
31486-
31487+
31488 info.flags = 0;
31489 info.length = len;
31490 info.low_limit = TASK_UNMAPPED_BASE;
31491+
31492+#ifdef CONFIG_PAX_RANDMMAP
31493+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
31494+ info.low_limit += current->mm->delta_mmap;
31495+#endif
31496+
31497 info.high_limit = TASK_SIZE;
31498 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
31499 info.align_offset = 0;
31500+ info.threadstack_offset = offset;
31501 return vm_unmapped_area(&info);
31502 }
31503
31504 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31505 unsigned long addr0, unsigned long len,
31506- unsigned long pgoff, unsigned long flags)
31507+ unsigned long pgoff, unsigned long flags, unsigned long offset)
31508 {
31509 struct hstate *h = hstate_file(file);
31510 struct vm_unmapped_area_info info;
31511@@ -120,6 +127,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31512 info.high_limit = current->mm->mmap_base;
31513 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
31514 info.align_offset = 0;
31515+ info.threadstack_offset = offset;
31516 addr = vm_unmapped_area(&info);
31517
31518 /*
31519@@ -132,6 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31520 VM_BUG_ON(addr != -ENOMEM);
31521 info.flags = 0;
31522 info.low_limit = TASK_UNMAPPED_BASE;
31523+
31524+#ifdef CONFIG_PAX_RANDMMAP
31525+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
31526+ info.low_limit += current->mm->delta_mmap;
31527+#endif
31528+
31529 info.high_limit = TASK_SIZE;
31530 addr = vm_unmapped_area(&info);
31531 }
31532@@ -146,10 +160,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
31533 struct hstate *h = hstate_file(file);
31534 struct mm_struct *mm = current->mm;
31535 struct vm_area_struct *vma;
31536+ unsigned long pax_task_size = TASK_SIZE;
31537+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
31538
31539 if (len & ~huge_page_mask(h))
31540 return -EINVAL;
31541- if (len > TASK_SIZE)
31542+
31543+#ifdef CONFIG_PAX_SEGMEXEC
31544+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
31545+ pax_task_size = SEGMEXEC_TASK_SIZE;
31546+#endif
31547+
31548+ pax_task_size -= PAGE_SIZE;
31549+
31550+ if (len > pax_task_size)
31551 return -ENOMEM;
31552
31553 if (flags & MAP_FIXED) {
31554@@ -158,19 +182,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
31555 return addr;
31556 }
31557
31558+#ifdef CONFIG_PAX_RANDMMAP
31559+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31560+#endif
31561+
31562 if (addr) {
31563 addr = ALIGN(addr, huge_page_size(h));
31564 vma = find_vma(mm, addr);
31565- if (TASK_SIZE - len >= addr &&
31566- (!vma || addr + len <= vma->vm_start))
31567+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
31568 return addr;
31569 }
31570 if (mm->get_unmapped_area == arch_get_unmapped_area)
31571 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
31572- pgoff, flags);
31573+ pgoff, flags, offset);
31574 else
31575 return hugetlb_get_unmapped_area_topdown(file, addr, len,
31576- pgoff, flags);
31577+ pgoff, flags, offset);
31578 }
31579
31580 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
31581diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
31582index f971306..e83e0f6 100644
31583--- a/arch/x86/mm/init.c
31584+++ b/arch/x86/mm/init.c
31585@@ -4,6 +4,7 @@
31586 #include <linux/swap.h>
31587 #include <linux/memblock.h>
31588 #include <linux/bootmem.h> /* for max_low_pfn */
31589+#include <linux/tboot.h>
31590
31591 #include <asm/cacheflush.h>
31592 #include <asm/e820.h>
31593@@ -17,6 +18,8 @@
31594 #include <asm/proto.h>
31595 #include <asm/dma.h> /* for MAX_DMA_PFN */
31596 #include <asm/microcode.h>
31597+#include <asm/desc.h>
31598+#include <asm/bios_ebda.h>
31599
31600 #include "mm_internal.h"
31601
31602@@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
31603 early_ioremap_page_table_range_init();
31604 #endif
31605
31606+#ifdef CONFIG_PAX_PER_CPU_PGD
31607+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
31608+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
31609+ KERNEL_PGD_PTRS);
31610+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
31611+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
31612+ KERNEL_PGD_PTRS);
31613+ load_cr3(get_cpu_pgd(0, kernel));
31614+#else
31615 load_cr3(swapper_pg_dir);
31616+#endif
31617+
31618 __flush_tlb_all();
31619
31620 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
31621@@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
31622 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
31623 * mmio resources as well as potential bios/acpi data regions.
31624 */
31625+
31626+#ifdef CONFIG_GRKERNSEC_KMEM
31627+static unsigned int ebda_start __read_only;
31628+static unsigned int ebda_end __read_only;
31629+#endif
31630+
31631 int devmem_is_allowed(unsigned long pagenr)
31632 {
31633- if (pagenr < 256)
31634+#ifdef CONFIG_GRKERNSEC_KMEM
31635+ /* allow BDA */
31636+ if (!pagenr)
31637 return 1;
31638+ /* allow EBDA */
31639+ if (pagenr >= ebda_start && pagenr < ebda_end)
31640+ return 1;
31641+ /* if tboot is in use, allow access to its hardcoded serial log range */
31642+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
31643+ return 1;
31644+#else
31645+ if (!pagenr)
31646+ return 1;
31647+#ifdef CONFIG_VM86
31648+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
31649+ return 1;
31650+#endif
31651+#endif
31652+
31653+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
31654+ return 1;
31655+#ifdef CONFIG_GRKERNSEC_KMEM
31656+ /* throw out everything else below 1MB */
31657+ if (pagenr <= 256)
31658+ return 0;
31659+#endif
31660 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
31661 return 0;
31662 if (!page_is_ram(pagenr))
31663@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
31664 #endif
31665 }
31666
31667+#ifdef CONFIG_GRKERNSEC_KMEM
31668+static inline void gr_init_ebda(void)
31669+{
31670+ unsigned int ebda_addr;
31671+ unsigned int ebda_size = 0;
31672+
31673+ ebda_addr = get_bios_ebda();
31674+ if (ebda_addr) {
31675+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
31676+ ebda_size <<= 10;
31677+ }
31678+ if (ebda_addr && ebda_size) {
31679+ ebda_start = ebda_addr >> PAGE_SHIFT;
31680+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
31681+ } else {
31682+ ebda_start = 0x9f000 >> PAGE_SHIFT;
31683+ ebda_end = 0xa0000 >> PAGE_SHIFT;
31684+ }
31685+}
31686+#else
31687+static inline void gr_init_ebda(void) { }
31688+#endif
31689+
31690 void free_initmem(void)
31691 {
31692+#ifdef CONFIG_PAX_KERNEXEC
31693+#ifdef CONFIG_X86_32
31694+ /* PaX: limit KERNEL_CS to actual size */
31695+ unsigned long addr, limit;
31696+ struct desc_struct d;
31697+ int cpu;
31698+#else
31699+ pgd_t *pgd;
31700+ pud_t *pud;
31701+ pmd_t *pmd;
31702+ unsigned long addr, end;
31703+#endif
31704+#endif
31705+
31706+ gr_init_ebda();
31707+
31708+#ifdef CONFIG_PAX_KERNEXEC
31709+#ifdef CONFIG_X86_32
31710+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
31711+ limit = (limit - 1UL) >> PAGE_SHIFT;
31712+
31713+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
31714+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
31715+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
31716+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
31717+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
31718+ }
31719+
31720+ /* PaX: make KERNEL_CS read-only */
31721+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
31722+ if (!paravirt_enabled())
31723+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
31724+/*
31725+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
31726+ pgd = pgd_offset_k(addr);
31727+ pud = pud_offset(pgd, addr);
31728+ pmd = pmd_offset(pud, addr);
31729+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31730+ }
31731+*/
31732+#ifdef CONFIG_X86_PAE
31733+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
31734+/*
31735+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
31736+ pgd = pgd_offset_k(addr);
31737+ pud = pud_offset(pgd, addr);
31738+ pmd = pmd_offset(pud, addr);
31739+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
31740+ }
31741+*/
31742+#endif
31743+
31744+#ifdef CONFIG_MODULES
31745+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
31746+#endif
31747+
31748+#else
31749+ /* PaX: make kernel code/rodata read-only, rest non-executable */
31750+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
31751+ pgd = pgd_offset_k(addr);
31752+ pud = pud_offset(pgd, addr);
31753+ pmd = pmd_offset(pud, addr);
31754+ if (!pmd_present(*pmd))
31755+ continue;
31756+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
31757+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31758+ else
31759+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
31760+ }
31761+
31762+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
31763+ end = addr + KERNEL_IMAGE_SIZE;
31764+ for (; addr < end; addr += PMD_SIZE) {
31765+ pgd = pgd_offset_k(addr);
31766+ pud = pud_offset(pgd, addr);
31767+ pmd = pmd_offset(pud, addr);
31768+ if (!pmd_present(*pmd))
31769+ continue;
31770+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
31771+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31772+ }
31773+#endif
31774+
31775+ flush_tlb_all();
31776+#endif
31777+
31778 free_init_pages("unused kernel",
31779 (unsigned long)(&__init_begin),
31780 (unsigned long)(&__init_end));
31781diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
31782index 4287f1f..3b99c71 100644
31783--- a/arch/x86/mm/init_32.c
31784+++ b/arch/x86/mm/init_32.c
31785@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
31786 bool __read_mostly __vmalloc_start_set = false;
31787
31788 /*
31789- * Creates a middle page table and puts a pointer to it in the
31790- * given global directory entry. This only returns the gd entry
31791- * in non-PAE compilation mode, since the middle layer is folded.
31792- */
31793-static pmd_t * __init one_md_table_init(pgd_t *pgd)
31794-{
31795- pud_t *pud;
31796- pmd_t *pmd_table;
31797-
31798-#ifdef CONFIG_X86_PAE
31799- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
31800- pmd_table = (pmd_t *)alloc_low_page();
31801- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
31802- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
31803- pud = pud_offset(pgd, 0);
31804- BUG_ON(pmd_table != pmd_offset(pud, 0));
31805-
31806- return pmd_table;
31807- }
31808-#endif
31809- pud = pud_offset(pgd, 0);
31810- pmd_table = pmd_offset(pud, 0);
31811-
31812- return pmd_table;
31813-}
31814-
31815-/*
31816 * Create a page table and place a pointer to it in a middle page
31817 * directory entry:
31818 */
31819@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
31820 pte_t *page_table = (pte_t *)alloc_low_page();
31821
31822 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
31823+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31824+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
31825+#else
31826 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
31827+#endif
31828 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
31829 }
31830
31831 return pte_offset_kernel(pmd, 0);
31832 }
31833
31834+static pmd_t * __init one_md_table_init(pgd_t *pgd)
31835+{
31836+ pud_t *pud;
31837+ pmd_t *pmd_table;
31838+
31839+ pud = pud_offset(pgd, 0);
31840+ pmd_table = pmd_offset(pud, 0);
31841+
31842+ return pmd_table;
31843+}
31844+
31845 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
31846 {
31847 int pgd_idx = pgd_index(vaddr);
31848@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31849 int pgd_idx, pmd_idx;
31850 unsigned long vaddr;
31851 pgd_t *pgd;
31852+ pud_t *pud;
31853 pmd_t *pmd;
31854 pte_t *pte = NULL;
31855 unsigned long count = page_table_range_init_count(start, end);
31856@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31857 pgd = pgd_base + pgd_idx;
31858
31859 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
31860- pmd = one_md_table_init(pgd);
31861- pmd = pmd + pmd_index(vaddr);
31862+ pud = pud_offset(pgd, vaddr);
31863+ pmd = pmd_offset(pud, vaddr);
31864+
31865+#ifdef CONFIG_X86_PAE
31866+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31867+#endif
31868+
31869 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
31870 pmd++, pmd_idx++) {
31871 pte = page_table_kmap_check(one_page_table_init(pmd),
31872@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31873 }
31874 }
31875
31876-static inline int is_kernel_text(unsigned long addr)
31877+static inline int is_kernel_text(unsigned long start, unsigned long end)
31878 {
31879- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
31880- return 1;
31881- return 0;
31882+ if ((start > ktla_ktva((unsigned long)_etext) ||
31883+ end <= ktla_ktva((unsigned long)_stext)) &&
31884+ (start > ktla_ktva((unsigned long)_einittext) ||
31885+ end <= ktla_ktva((unsigned long)_sinittext)) &&
31886+
31887+#ifdef CONFIG_ACPI_SLEEP
31888+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
31889+#endif
31890+
31891+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
31892+ return 0;
31893+ return 1;
31894 }
31895
31896 /*
31897@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
31898 unsigned long last_map_addr = end;
31899 unsigned long start_pfn, end_pfn;
31900 pgd_t *pgd_base = swapper_pg_dir;
31901- int pgd_idx, pmd_idx, pte_ofs;
31902+ unsigned int pgd_idx, pmd_idx, pte_ofs;
31903 unsigned long pfn;
31904 pgd_t *pgd;
31905+ pud_t *pud;
31906 pmd_t *pmd;
31907 pte_t *pte;
31908 unsigned pages_2m, pages_4k;
31909@@ -291,8 +295,13 @@ repeat:
31910 pfn = start_pfn;
31911 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31912 pgd = pgd_base + pgd_idx;
31913- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
31914- pmd = one_md_table_init(pgd);
31915+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
31916+ pud = pud_offset(pgd, 0);
31917+ pmd = pmd_offset(pud, 0);
31918+
31919+#ifdef CONFIG_X86_PAE
31920+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31921+#endif
31922
31923 if (pfn >= end_pfn)
31924 continue;
31925@@ -304,14 +313,13 @@ repeat:
31926 #endif
31927 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
31928 pmd++, pmd_idx++) {
31929- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
31930+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
31931
31932 /*
31933 * Map with big pages if possible, otherwise
31934 * create normal page tables:
31935 */
31936 if (use_pse) {
31937- unsigned int addr2;
31938 pgprot_t prot = PAGE_KERNEL_LARGE;
31939 /*
31940 * first pass will use the same initial
31941@@ -322,11 +330,7 @@ repeat:
31942 _PAGE_PSE);
31943
31944 pfn &= PMD_MASK >> PAGE_SHIFT;
31945- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
31946- PAGE_OFFSET + PAGE_SIZE-1;
31947-
31948- if (is_kernel_text(addr) ||
31949- is_kernel_text(addr2))
31950+ if (is_kernel_text(address, address + PMD_SIZE))
31951 prot = PAGE_KERNEL_LARGE_EXEC;
31952
31953 pages_2m++;
31954@@ -343,7 +347,7 @@ repeat:
31955 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31956 pte += pte_ofs;
31957 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
31958- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
31959+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
31960 pgprot_t prot = PAGE_KERNEL;
31961 /*
31962 * first pass will use the same initial
31963@@ -351,7 +355,7 @@ repeat:
31964 */
31965 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
31966
31967- if (is_kernel_text(addr))
31968+ if (is_kernel_text(address, address + PAGE_SIZE))
31969 prot = PAGE_KERNEL_EXEC;
31970
31971 pages_4k++;
31972@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
31973
31974 pud = pud_offset(pgd, va);
31975 pmd = pmd_offset(pud, va);
31976- if (!pmd_present(*pmd))
31977+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
31978 break;
31979
31980 /* should not be large page here */
31981@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
31982
31983 static void __init pagetable_init(void)
31984 {
31985- pgd_t *pgd_base = swapper_pg_dir;
31986-
31987- permanent_kmaps_init(pgd_base);
31988+ permanent_kmaps_init(swapper_pg_dir);
31989 }
31990
31991-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31992+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31993 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31994
31995 /* user-defined highmem size */
31996@@ -787,10 +789,10 @@ void __init mem_init(void)
31997 ((unsigned long)&__init_end -
31998 (unsigned long)&__init_begin) >> 10,
31999
32000- (unsigned long)&_etext, (unsigned long)&_edata,
32001- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
32002+ (unsigned long)&_sdata, (unsigned long)&_edata,
32003+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
32004
32005- (unsigned long)&_text, (unsigned long)&_etext,
32006+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
32007 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
32008
32009 /*
32010@@ -880,6 +882,7 @@ void set_kernel_text_rw(void)
32011 if (!kernel_set_to_readonly)
32012 return;
32013
32014+ start = ktla_ktva(start);
32015 pr_debug("Set kernel text: %lx - %lx for read write\n",
32016 start, start+size);
32017
32018@@ -894,6 +897,7 @@ void set_kernel_text_ro(void)
32019 if (!kernel_set_to_readonly)
32020 return;
32021
32022+ start = ktla_ktva(start);
32023 pr_debug("Set kernel text: %lx - %lx for read only\n",
32024 start, start+size);
32025
32026@@ -922,6 +926,7 @@ void mark_rodata_ro(void)
32027 unsigned long start = PFN_ALIGN(_text);
32028 unsigned long size = PFN_ALIGN(_etext) - start;
32029
32030+ start = ktla_ktva(start);
32031 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
32032 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
32033 size >> 10);
32034diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
32035index 104d56a..62ba13f1 100644
32036--- a/arch/x86/mm/init_64.c
32037+++ b/arch/x86/mm/init_64.c
32038@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
32039 * around without checking the pgd every time.
32040 */
32041
32042-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
32043+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
32044 EXPORT_SYMBOL_GPL(__supported_pte_mask);
32045
32046 int force_personality32;
32047@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32048
32049 for (address = start; address <= end; address += PGDIR_SIZE) {
32050 const pgd_t *pgd_ref = pgd_offset_k(address);
32051+
32052+#ifdef CONFIG_PAX_PER_CPU_PGD
32053+ unsigned long cpu;
32054+#else
32055 struct page *page;
32056+#endif
32057
32058 if (pgd_none(*pgd_ref))
32059 continue;
32060
32061 spin_lock(&pgd_lock);
32062+
32063+#ifdef CONFIG_PAX_PER_CPU_PGD
32064+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32065+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
32066+
32067+ if (pgd_none(*pgd))
32068+ set_pgd(pgd, *pgd_ref);
32069+ else
32070+ BUG_ON(pgd_page_vaddr(*pgd)
32071+ != pgd_page_vaddr(*pgd_ref));
32072+ pgd = pgd_offset_cpu(cpu, kernel, address);
32073+#else
32074 list_for_each_entry(page, &pgd_list, lru) {
32075 pgd_t *pgd;
32076 spinlock_t *pgt_lock;
32077@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32078 /* the pgt_lock only for Xen */
32079 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32080 spin_lock(pgt_lock);
32081+#endif
32082
32083 if (pgd_none(*pgd))
32084 set_pgd(pgd, *pgd_ref);
32085@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32086 BUG_ON(pgd_page_vaddr(*pgd)
32087 != pgd_page_vaddr(*pgd_ref));
32088
32089+#ifndef CONFIG_PAX_PER_CPU_PGD
32090 spin_unlock(pgt_lock);
32091+#endif
32092+
32093 }
32094 spin_unlock(&pgd_lock);
32095 }
32096@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
32097 {
32098 if (pgd_none(*pgd)) {
32099 pud_t *pud = (pud_t *)spp_getpage();
32100- pgd_populate(&init_mm, pgd, pud);
32101+ pgd_populate_kernel(&init_mm, pgd, pud);
32102 if (pud != pud_offset(pgd, 0))
32103 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
32104 pud, pud_offset(pgd, 0));
32105@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
32106 {
32107 if (pud_none(*pud)) {
32108 pmd_t *pmd = (pmd_t *) spp_getpage();
32109- pud_populate(&init_mm, pud, pmd);
32110+ pud_populate_kernel(&init_mm, pud, pmd);
32111 if (pmd != pmd_offset(pud, 0))
32112 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
32113 pmd, pmd_offset(pud, 0));
32114@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
32115 pmd = fill_pmd(pud, vaddr);
32116 pte = fill_pte(pmd, vaddr);
32117
32118+ pax_open_kernel();
32119 set_pte(pte, new_pte);
32120+ pax_close_kernel();
32121
32122 /*
32123 * It's enough to flush this one mapping.
32124@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
32125 pgd = pgd_offset_k((unsigned long)__va(phys));
32126 if (pgd_none(*pgd)) {
32127 pud = (pud_t *) spp_getpage();
32128- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
32129- _PAGE_USER));
32130+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
32131 }
32132 pud = pud_offset(pgd, (unsigned long)__va(phys));
32133 if (pud_none(*pud)) {
32134 pmd = (pmd_t *) spp_getpage();
32135- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
32136- _PAGE_USER));
32137+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
32138 }
32139 pmd = pmd_offset(pud, phys);
32140 BUG_ON(!pmd_none(*pmd));
32141@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
32142 prot);
32143
32144 spin_lock(&init_mm.page_table_lock);
32145- pud_populate(&init_mm, pud, pmd);
32146+ pud_populate_kernel(&init_mm, pud, pmd);
32147 spin_unlock(&init_mm.page_table_lock);
32148 }
32149 __flush_tlb_all();
32150@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
32151 page_size_mask);
32152
32153 spin_lock(&init_mm.page_table_lock);
32154- pgd_populate(&init_mm, pgd, pud);
32155+ pgd_populate_kernel(&init_mm, pgd, pud);
32156 spin_unlock(&init_mm.page_table_lock);
32157 pgd_changed = true;
32158 }
32159@@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr)
32160 static struct vm_area_struct gate_vma = {
32161 .vm_start = VSYSCALL_START,
32162 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
32163- .vm_page_prot = PAGE_READONLY_EXEC,
32164- .vm_flags = VM_READ | VM_EXEC
32165+ .vm_page_prot = PAGE_READONLY,
32166+ .vm_flags = VM_READ
32167 };
32168
32169 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
32170@@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long addr)
32171
32172 const char *arch_vma_name(struct vm_area_struct *vma)
32173 {
32174- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
32175+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
32176 return "[vdso]";
32177 if (vma == &gate_vma)
32178 return "[vsyscall]";
32179diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
32180index 7b179b4..6bd17777 100644
32181--- a/arch/x86/mm/iomap_32.c
32182+++ b/arch/x86/mm/iomap_32.c
32183@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
32184 type = kmap_atomic_idx_push();
32185 idx = type + KM_TYPE_NR * smp_processor_id();
32186 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32187+
32188+ pax_open_kernel();
32189 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
32190+ pax_close_kernel();
32191+
32192 arch_flush_lazy_mmu_mode();
32193
32194 return (void *)vaddr;
32195diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
32196index 799580c..72f9fe0 100644
32197--- a/arch/x86/mm/ioremap.c
32198+++ b/arch/x86/mm/ioremap.c
32199@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
32200 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
32201 int is_ram = page_is_ram(pfn);
32202
32203- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
32204+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
32205 return NULL;
32206 WARN_ON_ONCE(is_ram);
32207 }
32208@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
32209 *
32210 * Caller must ensure there is only one unmapping for the same pointer.
32211 */
32212-void iounmap(volatile void __iomem *addr)
32213+void iounmap(const volatile void __iomem *addr)
32214 {
32215 struct vm_struct *p, *o;
32216
32217@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32218
32219 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
32220 if (page_is_ram(start >> PAGE_SHIFT))
32221+#ifdef CONFIG_HIGHMEM
32222+ if ((start >> PAGE_SHIFT) < max_low_pfn)
32223+#endif
32224 return __va(phys);
32225
32226 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
32227@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32228 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
32229 {
32230 if (page_is_ram(phys >> PAGE_SHIFT))
32231+#ifdef CONFIG_HIGHMEM
32232+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
32233+#endif
32234 return;
32235
32236 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
32237@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
32238 early_param("early_ioremap_debug", early_ioremap_debug_setup);
32239
32240 static __initdata int after_paging_init;
32241-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
32242+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
32243
32244 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
32245 {
32246@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
32247 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
32248
32249 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
32250- memset(bm_pte, 0, sizeof(bm_pte));
32251- pmd_populate_kernel(&init_mm, pmd, bm_pte);
32252+ pmd_populate_user(&init_mm, pmd, bm_pte);
32253
32254 /*
32255 * The boot-ioremap range spans multiple pmds, for which
32256diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
32257index d87dd6d..bf3fa66 100644
32258--- a/arch/x86/mm/kmemcheck/kmemcheck.c
32259+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
32260@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
32261 * memory (e.g. tracked pages)? For now, we need this to avoid
32262 * invoking kmemcheck for PnP BIOS calls.
32263 */
32264- if (regs->flags & X86_VM_MASK)
32265+ if (v8086_mode(regs))
32266 return false;
32267- if (regs->cs != __KERNEL_CS)
32268+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
32269 return false;
32270
32271 pte = kmemcheck_pte_lookup(address);
32272diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
32273index 25e7e13..1964579 100644
32274--- a/arch/x86/mm/mmap.c
32275+++ b/arch/x86/mm/mmap.c
32276@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
32277 * Leave an at least ~128 MB hole with possible stack randomization.
32278 */
32279 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
32280-#define MAX_GAP (TASK_SIZE/6*5)
32281+#define MAX_GAP (pax_task_size/6*5)
32282
32283 static int mmap_is_legacy(void)
32284 {
32285@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
32286 return rnd << PAGE_SHIFT;
32287 }
32288
32289-static unsigned long mmap_base(void)
32290+static unsigned long mmap_base(struct mm_struct *mm)
32291 {
32292 unsigned long gap = rlimit(RLIMIT_STACK);
32293+ unsigned long pax_task_size = TASK_SIZE;
32294+
32295+#ifdef CONFIG_PAX_SEGMEXEC
32296+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32297+ pax_task_size = SEGMEXEC_TASK_SIZE;
32298+#endif
32299
32300 if (gap < MIN_GAP)
32301 gap = MIN_GAP;
32302 else if (gap > MAX_GAP)
32303 gap = MAX_GAP;
32304
32305- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
32306+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
32307 }
32308
32309 /*
32310 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
32311 * does, but not when emulating X86_32
32312 */
32313-static unsigned long mmap_legacy_base(void)
32314+static unsigned long mmap_legacy_base(struct mm_struct *mm)
32315 {
32316- if (mmap_is_ia32())
32317+ if (mmap_is_ia32()) {
32318+
32319+#ifdef CONFIG_PAX_SEGMEXEC
32320+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32321+ return SEGMEXEC_TASK_UNMAPPED_BASE;
32322+ else
32323+#endif
32324+
32325 return TASK_UNMAPPED_BASE;
32326- else
32327+ } else
32328 return TASK_UNMAPPED_BASE + mmap_rnd();
32329 }
32330
32331@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
32332 */
32333 void arch_pick_mmap_layout(struct mm_struct *mm)
32334 {
32335- mm->mmap_legacy_base = mmap_legacy_base();
32336- mm->mmap_base = mmap_base();
32337+ mm->mmap_legacy_base = mmap_legacy_base(mm);
32338+ mm->mmap_base = mmap_base(mm);
32339+
32340+#ifdef CONFIG_PAX_RANDMMAP
32341+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
32342+ mm->mmap_legacy_base += mm->delta_mmap;
32343+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
32344+ }
32345+#endif
32346
32347 if (mmap_is_legacy()) {
32348 mm->mmap_base = mm->mmap_legacy_base;
32349diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
32350index 0057a7a..95c7edd 100644
32351--- a/arch/x86/mm/mmio-mod.c
32352+++ b/arch/x86/mm/mmio-mod.c
32353@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
32354 break;
32355 default:
32356 {
32357- unsigned char *ip = (unsigned char *)instptr;
32358+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
32359 my_trace->opcode = MMIO_UNKNOWN_OP;
32360 my_trace->width = 0;
32361 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
32362@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
32363 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32364 void __iomem *addr)
32365 {
32366- static atomic_t next_id;
32367+ static atomic_unchecked_t next_id;
32368 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
32369 /* These are page-unaligned. */
32370 struct mmiotrace_map map = {
32371@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32372 .private = trace
32373 },
32374 .phys = offset,
32375- .id = atomic_inc_return(&next_id)
32376+ .id = atomic_inc_return_unchecked(&next_id)
32377 };
32378 map.map_id = trace->id;
32379
32380@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
32381 ioremap_trace_core(offset, size, addr);
32382 }
32383
32384-static void iounmap_trace_core(volatile void __iomem *addr)
32385+static void iounmap_trace_core(const volatile void __iomem *addr)
32386 {
32387 struct mmiotrace_map map = {
32388 .phys = 0,
32389@@ -328,7 +328,7 @@ not_enabled:
32390 }
32391 }
32392
32393-void mmiotrace_iounmap(volatile void __iomem *addr)
32394+void mmiotrace_iounmap(const volatile void __iomem *addr)
32395 {
32396 might_sleep();
32397 if (is_enabled()) /* recheck and proper locking in *_core() */
32398diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
32399index 24aec58..c39fe8b 100644
32400--- a/arch/x86/mm/numa.c
32401+++ b/arch/x86/mm/numa.c
32402@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
32403 return true;
32404 }
32405
32406-static int __init numa_register_memblks(struct numa_meminfo *mi)
32407+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
32408 {
32409 unsigned long uninitialized_var(pfn_align);
32410 int i, nid;
32411diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
32412index d0b1773..4c3327c 100644
32413--- a/arch/x86/mm/pageattr-test.c
32414+++ b/arch/x86/mm/pageattr-test.c
32415@@ -36,7 +36,7 @@ enum {
32416
32417 static int pte_testbit(pte_t pte)
32418 {
32419- return pte_flags(pte) & _PAGE_UNUSED1;
32420+ return pte_flags(pte) & _PAGE_CPA_TEST;
32421 }
32422
32423 struct split_state {
32424diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
32425index bb32480..75f2f5e 100644
32426--- a/arch/x86/mm/pageattr.c
32427+++ b/arch/x86/mm/pageattr.c
32428@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32429 */
32430 #ifdef CONFIG_PCI_BIOS
32431 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
32432- pgprot_val(forbidden) |= _PAGE_NX;
32433+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32434 #endif
32435
32436 /*
32437@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32438 * Does not cover __inittext since that is gone later on. On
32439 * 64bit we do not enforce !NX on the low mapping
32440 */
32441- if (within(address, (unsigned long)_text, (unsigned long)_etext))
32442- pgprot_val(forbidden) |= _PAGE_NX;
32443+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
32444+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32445
32446+#ifdef CONFIG_DEBUG_RODATA
32447 /*
32448 * The .rodata section needs to be read-only. Using the pfn
32449 * catches all aliases.
32450@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32451 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
32452 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
32453 pgprot_val(forbidden) |= _PAGE_RW;
32454+#endif
32455
32456 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
32457 /*
32458@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32459 }
32460 #endif
32461
32462+#ifdef CONFIG_PAX_KERNEXEC
32463+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
32464+ pgprot_val(forbidden) |= _PAGE_RW;
32465+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32466+ }
32467+#endif
32468+
32469 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
32470
32471 return prot;
32472@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
32473 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
32474 {
32475 /* change init_mm */
32476+ pax_open_kernel();
32477 set_pte_atomic(kpte, pte);
32478+
32479 #ifdef CONFIG_X86_32
32480 if (!SHARED_KERNEL_PMD) {
32481+
32482+#ifdef CONFIG_PAX_PER_CPU_PGD
32483+ unsigned long cpu;
32484+#else
32485 struct page *page;
32486+#endif
32487
32488+#ifdef CONFIG_PAX_PER_CPU_PGD
32489+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32490+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
32491+#else
32492 list_for_each_entry(page, &pgd_list, lru) {
32493- pgd_t *pgd;
32494+ pgd_t *pgd = (pgd_t *)page_address(page);
32495+#endif
32496+
32497 pud_t *pud;
32498 pmd_t *pmd;
32499
32500- pgd = (pgd_t *)page_address(page) + pgd_index(address);
32501+ pgd += pgd_index(address);
32502 pud = pud_offset(pgd, address);
32503 pmd = pmd_offset(pud, address);
32504 set_pte_atomic((pte_t *)pmd, pte);
32505 }
32506 }
32507 #endif
32508+ pax_close_kernel();
32509 }
32510
32511 static int
32512diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
32513index 6574388..87e9bef 100644
32514--- a/arch/x86/mm/pat.c
32515+++ b/arch/x86/mm/pat.c
32516@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
32517
32518 if (!entry) {
32519 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
32520- current->comm, current->pid, start, end - 1);
32521+ current->comm, task_pid_nr(current), start, end - 1);
32522 return -EINVAL;
32523 }
32524
32525@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32526
32527 while (cursor < to) {
32528 if (!devmem_is_allowed(pfn)) {
32529- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
32530- current->comm, from, to - 1);
32531+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
32532+ current->comm, from, to - 1, cursor);
32533 return 0;
32534 }
32535 cursor += PAGE_SIZE;
32536@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
32537 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
32538 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
32539 "for [mem %#010Lx-%#010Lx]\n",
32540- current->comm, current->pid,
32541+ current->comm, task_pid_nr(current),
32542 cattr_name(flags),
32543 base, (unsigned long long)(base + size-1));
32544 return -EINVAL;
32545@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
32546 flags = lookup_memtype(paddr);
32547 if (want_flags != flags) {
32548 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
32549- current->comm, current->pid,
32550+ current->comm, task_pid_nr(current),
32551 cattr_name(want_flags),
32552 (unsigned long long)paddr,
32553 (unsigned long long)(paddr + size - 1),
32554@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
32555 free_memtype(paddr, paddr + size);
32556 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
32557 " for [mem %#010Lx-%#010Lx], got %s\n",
32558- current->comm, current->pid,
32559+ current->comm, task_pid_nr(current),
32560 cattr_name(want_flags),
32561 (unsigned long long)paddr,
32562 (unsigned long long)(paddr + size - 1),
32563diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
32564index 415f6c4..d319983 100644
32565--- a/arch/x86/mm/pat_rbtree.c
32566+++ b/arch/x86/mm/pat_rbtree.c
32567@@ -160,7 +160,7 @@ success:
32568
32569 failure:
32570 printk(KERN_INFO "%s:%d conflicting memory types "
32571- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
32572+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
32573 end, cattr_name(found_type), cattr_name(match->type));
32574 return -EBUSY;
32575 }
32576diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
32577index 9f0614d..92ae64a 100644
32578--- a/arch/x86/mm/pf_in.c
32579+++ b/arch/x86/mm/pf_in.c
32580@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
32581 int i;
32582 enum reason_type rv = OTHERS;
32583
32584- p = (unsigned char *)ins_addr;
32585+ p = (unsigned char *)ktla_ktva(ins_addr);
32586 p += skip_prefix(p, &prf);
32587 p += get_opcode(p, &opcode);
32588
32589@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
32590 struct prefix_bits prf;
32591 int i;
32592
32593- p = (unsigned char *)ins_addr;
32594+ p = (unsigned char *)ktla_ktva(ins_addr);
32595 p += skip_prefix(p, &prf);
32596 p += get_opcode(p, &opcode);
32597
32598@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
32599 struct prefix_bits prf;
32600 int i;
32601
32602- p = (unsigned char *)ins_addr;
32603+ p = (unsigned char *)ktla_ktva(ins_addr);
32604 p += skip_prefix(p, &prf);
32605 p += get_opcode(p, &opcode);
32606
32607@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
32608 struct prefix_bits prf;
32609 int i;
32610
32611- p = (unsigned char *)ins_addr;
32612+ p = (unsigned char *)ktla_ktva(ins_addr);
32613 p += skip_prefix(p, &prf);
32614 p += get_opcode(p, &opcode);
32615 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
32616@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
32617 struct prefix_bits prf;
32618 int i;
32619
32620- p = (unsigned char *)ins_addr;
32621+ p = (unsigned char *)ktla_ktva(ins_addr);
32622 p += skip_prefix(p, &prf);
32623 p += get_opcode(p, &opcode);
32624 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
32625diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
32626index c96314a..433b127 100644
32627--- a/arch/x86/mm/pgtable.c
32628+++ b/arch/x86/mm/pgtable.c
32629@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
32630 list_del(&page->lru);
32631 }
32632
32633-#define UNSHARED_PTRS_PER_PGD \
32634- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
32635+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32636+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
32637
32638+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
32639+{
32640+ unsigned int count = USER_PGD_PTRS;
32641
32642+ if (!pax_user_shadow_base)
32643+ return;
32644+
32645+ while (count--)
32646+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
32647+}
32648+#endif
32649+
32650+#ifdef CONFIG_PAX_PER_CPU_PGD
32651+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
32652+{
32653+ unsigned int count = USER_PGD_PTRS;
32654+
32655+ while (count--) {
32656+ pgd_t pgd;
32657+
32658+#ifdef CONFIG_X86_64
32659+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
32660+#else
32661+ pgd = *src++;
32662+#endif
32663+
32664+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32665+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
32666+#endif
32667+
32668+ *dst++ = pgd;
32669+ }
32670+
32671+}
32672+#endif
32673+
32674+#ifdef CONFIG_X86_64
32675+#define pxd_t pud_t
32676+#define pyd_t pgd_t
32677+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
32678+#define pgtable_pxd_page_ctor(page) true
32679+#define pgtable_pxd_page_dtor(page)
32680+#define pxd_free(mm, pud) pud_free((mm), (pud))
32681+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
32682+#define pyd_offset(mm, address) pgd_offset((mm), (address))
32683+#define PYD_SIZE PGDIR_SIZE
32684+#else
32685+#define pxd_t pmd_t
32686+#define pyd_t pud_t
32687+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
32688+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
32689+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
32690+#define pxd_free(mm, pud) pmd_free((mm), (pud))
32691+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
32692+#define pyd_offset(mm, address) pud_offset((mm), (address))
32693+#define PYD_SIZE PUD_SIZE
32694+#endif
32695+
32696+#ifdef CONFIG_PAX_PER_CPU_PGD
32697+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
32698+static inline void pgd_dtor(pgd_t *pgd) {}
32699+#else
32700 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
32701 {
32702 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
32703@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
32704 pgd_list_del(pgd);
32705 spin_unlock(&pgd_lock);
32706 }
32707+#endif
32708
32709 /*
32710 * List of all pgd's needed for non-PAE so it can invalidate entries
32711@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
32712 * -- nyc
32713 */
32714
32715-#ifdef CONFIG_X86_PAE
32716+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
32717 /*
32718 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
32719 * updating the top-level pagetable entries to guarantee the
32720@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
32721 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
32722 * and initialize the kernel pmds here.
32723 */
32724-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
32725+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
32726
32727 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
32728 {
32729@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
32730 */
32731 flush_tlb_mm(mm);
32732 }
32733+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
32734+#define PREALLOCATED_PXDS USER_PGD_PTRS
32735 #else /* !CONFIG_X86_PAE */
32736
32737 /* No need to prepopulate any pagetable entries in non-PAE modes. */
32738-#define PREALLOCATED_PMDS 0
32739+#define PREALLOCATED_PXDS 0
32740
32741 #endif /* CONFIG_X86_PAE */
32742
32743-static void free_pmds(pmd_t *pmds[])
32744+static void free_pxds(pxd_t *pxds[])
32745 {
32746 int i;
32747
32748- for(i = 0; i < PREALLOCATED_PMDS; i++)
32749- if (pmds[i]) {
32750- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
32751- free_page((unsigned long)pmds[i]);
32752+ for(i = 0; i < PREALLOCATED_PXDS; i++)
32753+ if (pxds[i]) {
32754+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
32755+ free_page((unsigned long)pxds[i]);
32756 }
32757 }
32758
32759-static int preallocate_pmds(pmd_t *pmds[])
32760+static int preallocate_pxds(pxd_t *pxds[])
32761 {
32762 int i;
32763 bool failed = false;
32764
32765- for(i = 0; i < PREALLOCATED_PMDS; i++) {
32766- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
32767- if (!pmd)
32768+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
32769+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
32770+ if (!pxd)
32771 failed = true;
32772- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
32773- free_page((unsigned long)pmd);
32774- pmd = NULL;
32775+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
32776+ free_page((unsigned long)pxd);
32777+ pxd = NULL;
32778 failed = true;
32779 }
32780- pmds[i] = pmd;
32781+ pxds[i] = pxd;
32782 }
32783
32784 if (failed) {
32785- free_pmds(pmds);
32786+ free_pxds(pxds);
32787 return -ENOMEM;
32788 }
32789
32790@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
32791 * preallocate which never got a corresponding vma will need to be
32792 * freed manually.
32793 */
32794-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
32795+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
32796 {
32797 int i;
32798
32799- for(i = 0; i < PREALLOCATED_PMDS; i++) {
32800+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
32801 pgd_t pgd = pgdp[i];
32802
32803 if (pgd_val(pgd) != 0) {
32804- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
32805+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
32806
32807- pgdp[i] = native_make_pgd(0);
32808+ set_pgd(pgdp + i, native_make_pgd(0));
32809
32810- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
32811- pmd_free(mm, pmd);
32812+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
32813+ pxd_free(mm, pxd);
32814 }
32815 }
32816 }
32817
32818-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
32819+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
32820 {
32821- pud_t *pud;
32822+ pyd_t *pyd;
32823 int i;
32824
32825- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
32826+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
32827 return;
32828
32829- pud = pud_offset(pgd, 0);
32830-
32831- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
32832- pmd_t *pmd = pmds[i];
32833+#ifdef CONFIG_X86_64
32834+ pyd = pyd_offset(mm, 0L);
32835+#else
32836+ pyd = pyd_offset(pgd, 0L);
32837+#endif
32838
32839+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
32840+ pxd_t *pxd = pxds[i];
32841 if (i >= KERNEL_PGD_BOUNDARY)
32842- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32843- sizeof(pmd_t) * PTRS_PER_PMD);
32844+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32845+ sizeof(pxd_t) * PTRS_PER_PMD);
32846
32847- pud_populate(mm, pud, pmd);
32848+ pyd_populate(mm, pyd, pxd);
32849 }
32850 }
32851
32852 pgd_t *pgd_alloc(struct mm_struct *mm)
32853 {
32854 pgd_t *pgd;
32855- pmd_t *pmds[PREALLOCATED_PMDS];
32856+ pxd_t *pxds[PREALLOCATED_PXDS];
32857
32858 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
32859
32860@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32861
32862 mm->pgd = pgd;
32863
32864- if (preallocate_pmds(pmds) != 0)
32865+ if (preallocate_pxds(pxds) != 0)
32866 goto out_free_pgd;
32867
32868 if (paravirt_pgd_alloc(mm) != 0)
32869- goto out_free_pmds;
32870+ goto out_free_pxds;
32871
32872 /*
32873 * Make sure that pre-populating the pmds is atomic with
32874@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32875 spin_lock(&pgd_lock);
32876
32877 pgd_ctor(mm, pgd);
32878- pgd_prepopulate_pmd(mm, pgd, pmds);
32879+ pgd_prepopulate_pxd(mm, pgd, pxds);
32880
32881 spin_unlock(&pgd_lock);
32882
32883 return pgd;
32884
32885-out_free_pmds:
32886- free_pmds(pmds);
32887+out_free_pxds:
32888+ free_pxds(pxds);
32889 out_free_pgd:
32890 free_page((unsigned long)pgd);
32891 out:
32892@@ -313,7 +380,7 @@ out:
32893
32894 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
32895 {
32896- pgd_mop_up_pmds(mm, pgd);
32897+ pgd_mop_up_pxds(mm, pgd);
32898 pgd_dtor(pgd);
32899 paravirt_pgd_free(mm, pgd);
32900 free_page((unsigned long)pgd);
32901diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
32902index a69bcb8..19068ab 100644
32903--- a/arch/x86/mm/pgtable_32.c
32904+++ b/arch/x86/mm/pgtable_32.c
32905@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
32906 return;
32907 }
32908 pte = pte_offset_kernel(pmd, vaddr);
32909+
32910+ pax_open_kernel();
32911 if (pte_val(pteval))
32912 set_pte_at(&init_mm, vaddr, pte, pteval);
32913 else
32914 pte_clear(&init_mm, vaddr, pte);
32915+ pax_close_kernel();
32916
32917 /*
32918 * It's enough to flush this one mapping.
32919diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
32920index e666cbb..61788c45 100644
32921--- a/arch/x86/mm/physaddr.c
32922+++ b/arch/x86/mm/physaddr.c
32923@@ -10,7 +10,7 @@
32924 #ifdef CONFIG_X86_64
32925
32926 #ifdef CONFIG_DEBUG_VIRTUAL
32927-unsigned long __phys_addr(unsigned long x)
32928+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32929 {
32930 unsigned long y = x - __START_KERNEL_map;
32931
32932@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
32933 #else
32934
32935 #ifdef CONFIG_DEBUG_VIRTUAL
32936-unsigned long __phys_addr(unsigned long x)
32937+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32938 {
32939 unsigned long phys_addr = x - PAGE_OFFSET;
32940 /* VMALLOC_* aren't constants */
32941diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
32942index 90555bf..f5f1828 100644
32943--- a/arch/x86/mm/setup_nx.c
32944+++ b/arch/x86/mm/setup_nx.c
32945@@ -5,8 +5,10 @@
32946 #include <asm/pgtable.h>
32947 #include <asm/proto.h>
32948
32949+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32950 static int disable_nx;
32951
32952+#ifndef CONFIG_PAX_PAGEEXEC
32953 /*
32954 * noexec = on|off
32955 *
32956@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
32957 return 0;
32958 }
32959 early_param("noexec", noexec_setup);
32960+#endif
32961+
32962+#endif
32963
32964 void x86_configure_nx(void)
32965 {
32966+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32967 if (cpu_has_nx && !disable_nx)
32968 __supported_pte_mask |= _PAGE_NX;
32969 else
32970+#endif
32971 __supported_pte_mask &= ~_PAGE_NX;
32972 }
32973
32974diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
32975index ae699b3..f1b2ad2 100644
32976--- a/arch/x86/mm/tlb.c
32977+++ b/arch/x86/mm/tlb.c
32978@@ -48,7 +48,11 @@ void leave_mm(int cpu)
32979 BUG();
32980 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
32981 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
32982+
32983+#ifndef CONFIG_PAX_PER_CPU_PGD
32984 load_cr3(swapper_pg_dir);
32985+#endif
32986+
32987 }
32988 }
32989 EXPORT_SYMBOL_GPL(leave_mm);
32990diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
32991new file mode 100644
32992index 0000000..dace51c
32993--- /dev/null
32994+++ b/arch/x86/mm/uderef_64.c
32995@@ -0,0 +1,37 @@
32996+#include <linux/mm.h>
32997+#include <asm/pgtable.h>
32998+#include <asm/uaccess.h>
32999+
33000+#ifdef CONFIG_PAX_MEMORY_UDEREF
33001+/* PaX: due to the special call convention these functions must
33002+ * - remain leaf functions under all configurations,
33003+ * - never be called directly, only dereferenced from the wrappers.
33004+ */
33005+void __pax_open_userland(void)
33006+{
33007+ unsigned int cpu;
33008+
33009+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
33010+ return;
33011+
33012+ cpu = raw_get_cpu();
33013+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
33014+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
33015+ raw_put_cpu_no_resched();
33016+}
33017+EXPORT_SYMBOL(__pax_open_userland);
33018+
33019+void __pax_close_userland(void)
33020+{
33021+ unsigned int cpu;
33022+
33023+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
33024+ return;
33025+
33026+ cpu = raw_get_cpu();
33027+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
33028+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
33029+ raw_put_cpu_no_resched();
33030+}
33031+EXPORT_SYMBOL(__pax_close_userland);
33032+#endif
33033diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
33034index 877b9a1..a8ecf42 100644
33035--- a/arch/x86/net/bpf_jit.S
33036+++ b/arch/x86/net/bpf_jit.S
33037@@ -9,6 +9,7 @@
33038 */
33039 #include <linux/linkage.h>
33040 #include <asm/dwarf2.h>
33041+#include <asm/alternative-asm.h>
33042
33043 /*
33044 * Calling convention :
33045@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
33046 jle bpf_slow_path_word
33047 mov (SKBDATA,%rsi),%eax
33048 bswap %eax /* ntohl() */
33049+ pax_force_retaddr
33050 ret
33051
33052 sk_load_half:
33053@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
33054 jle bpf_slow_path_half
33055 movzwl (SKBDATA,%rsi),%eax
33056 rol $8,%ax # ntohs()
33057+ pax_force_retaddr
33058 ret
33059
33060 sk_load_byte:
33061@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
33062 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
33063 jle bpf_slow_path_byte
33064 movzbl (SKBDATA,%rsi),%eax
33065+ pax_force_retaddr
33066 ret
33067
33068 /**
33069@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
33070 movzbl (SKBDATA,%rsi),%ebx
33071 and $15,%bl
33072 shl $2,%bl
33073+ pax_force_retaddr
33074 ret
33075
33076 /* rsi contains offset and can be scratched */
33077@@ -109,6 +114,7 @@ bpf_slow_path_word:
33078 js bpf_error
33079 mov -12(%rbp),%eax
33080 bswap %eax
33081+ pax_force_retaddr
33082 ret
33083
33084 bpf_slow_path_half:
33085@@ -117,12 +123,14 @@ bpf_slow_path_half:
33086 mov -12(%rbp),%ax
33087 rol $8,%ax
33088 movzwl %ax,%eax
33089+ pax_force_retaddr
33090 ret
33091
33092 bpf_slow_path_byte:
33093 bpf_slow_path_common(1)
33094 js bpf_error
33095 movzbl -12(%rbp),%eax
33096+ pax_force_retaddr
33097 ret
33098
33099 bpf_slow_path_byte_msh:
33100@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
33101 and $15,%al
33102 shl $2,%al
33103 xchg %eax,%ebx
33104+ pax_force_retaddr
33105 ret
33106
33107 #define sk_negative_common(SIZE) \
33108@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
33109 sk_negative_common(4)
33110 mov (%rax), %eax
33111 bswap %eax
33112+ pax_force_retaddr
33113 ret
33114
33115 bpf_slow_path_half_neg:
33116@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
33117 mov (%rax),%ax
33118 rol $8,%ax
33119 movzwl %ax,%eax
33120+ pax_force_retaddr
33121 ret
33122
33123 bpf_slow_path_byte_neg:
33124@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
33125 .globl sk_load_byte_negative_offset
33126 sk_negative_common(1)
33127 movzbl (%rax), %eax
33128+ pax_force_retaddr
33129 ret
33130
33131 bpf_slow_path_byte_msh_neg:
33132@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
33133 and $15,%al
33134 shl $2,%al
33135 xchg %eax,%ebx
33136+ pax_force_retaddr
33137 ret
33138
33139 bpf_error:
33140@@ -197,4 +210,5 @@ bpf_error:
33141 xor %eax,%eax
33142 mov -8(%rbp),%rbx
33143 leaveq
33144+ pax_force_retaddr
33145 ret
33146diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
33147index 4ed75dd..8dfe0d5 100644
33148--- a/arch/x86/net/bpf_jit_comp.c
33149+++ b/arch/x86/net/bpf_jit_comp.c
33150@@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
33151 return ptr + len;
33152 }
33153
33154+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33155+#define MAX_INSTR_CODE_SIZE 96
33156+#else
33157+#define MAX_INSTR_CODE_SIZE 64
33158+#endif
33159+
33160 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
33161
33162 #define EMIT1(b1) EMIT(b1, 1)
33163 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
33164 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
33165 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
33166+
33167+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33168+/* original constant will appear in ecx */
33169+#define DILUTE_CONST_SEQUENCE(_off, _key) \
33170+do { \
33171+ /* mov ecx, randkey */ \
33172+ EMIT1(0xb9); \
33173+ EMIT(_key, 4); \
33174+ /* xor ecx, randkey ^ off */ \
33175+ EMIT2(0x81, 0xf1); \
33176+ EMIT((_key) ^ (_off), 4); \
33177+} while (0)
33178+
33179+#define EMIT1_off32(b1, _off) \
33180+do { \
33181+ switch (b1) { \
33182+ case 0x05: /* add eax, imm32 */ \
33183+ case 0x2d: /* sub eax, imm32 */ \
33184+ case 0x25: /* and eax, imm32 */ \
33185+ case 0x0d: /* or eax, imm32 */ \
33186+ case 0xb8: /* mov eax, imm32 */ \
33187+ case 0x35: /* xor eax, imm32 */ \
33188+ case 0x3d: /* cmp eax, imm32 */ \
33189+ case 0xa9: /* test eax, imm32 */ \
33190+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33191+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
33192+ break; \
33193+ case 0xbb: /* mov ebx, imm32 */ \
33194+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33195+ /* mov ebx, ecx */ \
33196+ EMIT2(0x89, 0xcb); \
33197+ break; \
33198+ case 0xbe: /* mov esi, imm32 */ \
33199+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33200+ /* mov esi, ecx */ \
33201+ EMIT2(0x89, 0xce); \
33202+ break; \
33203+ case 0xe8: /* call rel imm32, always to known funcs */ \
33204+ EMIT1(b1); \
33205+ EMIT(_off, 4); \
33206+ break; \
33207+ case 0xe9: /* jmp rel imm32 */ \
33208+ EMIT1(b1); \
33209+ EMIT(_off, 4); \
33210+ /* prevent fall-through, we're not called if off = 0 */ \
33211+ EMIT(0xcccccccc, 4); \
33212+ EMIT(0xcccccccc, 4); \
33213+ break; \
33214+ default: \
33215+ BUILD_BUG(); \
33216+ } \
33217+} while (0)
33218+
33219+#define EMIT2_off32(b1, b2, _off) \
33220+do { \
33221+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
33222+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
33223+ EMIT(randkey, 4); \
33224+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
33225+ EMIT((_off) - randkey, 4); \
33226+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
33227+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33228+ /* imul eax, ecx */ \
33229+ EMIT3(0x0f, 0xaf, 0xc1); \
33230+ } else { \
33231+ BUILD_BUG(); \
33232+ } \
33233+} while (0)
33234+#else
33235 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
33236+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
33237+#endif
33238
33239 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
33240 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
33241@@ -91,6 +168,24 @@ do { \
33242 #define X86_JBE 0x76
33243 #define X86_JA 0x77
33244
33245+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33246+#define APPEND_FLOW_VERIFY() \
33247+do { \
33248+ /* mov ecx, randkey */ \
33249+ EMIT1(0xb9); \
33250+ EMIT(randkey, 4); \
33251+ /* cmp ecx, randkey */ \
33252+ EMIT2(0x81, 0xf9); \
33253+ EMIT(randkey, 4); \
33254+ /* jz after 8 int 3s */ \
33255+ EMIT2(0x74, 0x08); \
33256+ EMIT(0xcccccccc, 4); \
33257+ EMIT(0xcccccccc, 4); \
33258+} while (0)
33259+#else
33260+#define APPEND_FLOW_VERIFY() do { } while (0)
33261+#endif
33262+
33263 #define EMIT_COND_JMP(op, offset) \
33264 do { \
33265 if (is_near(offset)) \
33266@@ -98,6 +193,7 @@ do { \
33267 else { \
33268 EMIT2(0x0f, op + 0x10); \
33269 EMIT(offset, 4); /* jxx .+off32 */ \
33270+ APPEND_FLOW_VERIFY(); \
33271 } \
33272 } while (0)
33273
33274@@ -145,55 +241,54 @@ static int pkt_type_offset(void)
33275 return -1;
33276 }
33277
33278-struct bpf_binary_header {
33279- unsigned int pages;
33280- /* Note : for security reasons, bpf code will follow a randomly
33281- * sized amount of int3 instructions
33282- */
33283- u8 image[];
33284-};
33285-
33286-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
33287+/* Note : for security reasons, bpf code will follow a randomly
33288+ * sized amount of int3 instructions
33289+ */
33290+static u8 *bpf_alloc_binary(unsigned int proglen,
33291 u8 **image_ptr)
33292 {
33293 unsigned int sz, hole;
33294- struct bpf_binary_header *header;
33295+ u8 *header;
33296
33297 /* Most of BPF filters are really small,
33298 * but if some of them fill a page, allow at least
33299 * 128 extra bytes to insert a random section of int3
33300 */
33301- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
33302- header = module_alloc(sz);
33303+ sz = round_up(proglen + 128, PAGE_SIZE);
33304+ header = module_alloc_exec(sz);
33305 if (!header)
33306 return NULL;
33307
33308+ pax_open_kernel();
33309 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
33310+ pax_close_kernel();
33311
33312- header->pages = sz / PAGE_SIZE;
33313- hole = sz - (proglen + sizeof(*header));
33314+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
33315
33316 /* insert a random number of int3 instructions before BPF code */
33317- *image_ptr = &header->image[prandom_u32() % hole];
33318+ *image_ptr = &header[prandom_u32() % hole];
33319 return header;
33320 }
33321
33322 void bpf_jit_compile(struct sk_filter *fp)
33323 {
33324- u8 temp[64];
33325+ u8 temp[MAX_INSTR_CODE_SIZE];
33326 u8 *prog;
33327 unsigned int proglen, oldproglen = 0;
33328 int ilen, i;
33329 int t_offset, f_offset;
33330 u8 t_op, f_op, seen = 0, pass;
33331 u8 *image = NULL;
33332- struct bpf_binary_header *header = NULL;
33333+ u8 *header = NULL;
33334 u8 *func;
33335 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
33336 unsigned int cleanup_addr; /* epilogue code offset */
33337 unsigned int *addrs;
33338 const struct sock_filter *filter = fp->insns;
33339 int flen = fp->len;
33340+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33341+ unsigned int randkey;
33342+#endif
33343
33344 if (!bpf_jit_enable)
33345 return;
33346@@ -202,11 +297,15 @@ void bpf_jit_compile(struct sk_filter *fp)
33347 if (addrs == NULL)
33348 return;
33349
33350+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33351+ randkey = get_random_int();
33352+#endif
33353+
33354 /* Before first pass, make a rough estimation of addrs[]
33355- * each bpf instruction is translated to less than 64 bytes
33356+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
33357 */
33358 for (proglen = 0, i = 0; i < flen; i++) {
33359- proglen += 64;
33360+ proglen += MAX_INSTR_CODE_SIZE;
33361 addrs[i] = proglen;
33362 }
33363 cleanup_addr = proglen; /* epilogue address */
33364@@ -317,10 +416,8 @@ void bpf_jit_compile(struct sk_filter *fp)
33365 case BPF_S_ALU_MUL_K: /* A *= K */
33366 if (is_imm8(K))
33367 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
33368- else {
33369- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
33370- EMIT(K, 4);
33371- }
33372+ else
33373+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
33374 break;
33375 case BPF_S_ALU_DIV_X: /* A /= X; */
33376 seen |= SEEN_XREG;
33377@@ -364,7 +461,11 @@ void bpf_jit_compile(struct sk_filter *fp)
33378 break;
33379 }
33380 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33381+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33382+ DILUTE_CONST_SEQUENCE(K, randkey);
33383+#else
33384 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33385+#endif
33386 EMIT2(0xf7, 0xf1); /* div %ecx */
33387 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
33388 break;
33389@@ -372,7 +473,11 @@ void bpf_jit_compile(struct sk_filter *fp)
33390 if (K == 1)
33391 break;
33392 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33393+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33394+ DILUTE_CONST_SEQUENCE(K, randkey);
33395+#else
33396 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33397+#endif
33398 EMIT2(0xf7, 0xf1); /* div %ecx */
33399 break;
33400 case BPF_S_ALU_AND_X:
33401@@ -643,8 +748,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
33402 if (is_imm8(K)) {
33403 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
33404 } else {
33405- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
33406- EMIT(K, 4);
33407+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
33408 }
33409 } else {
33410 EMIT2(0x89,0xde); /* mov %ebx,%esi */
33411@@ -734,10 +838,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33412 if (unlikely(proglen + ilen > oldproglen)) {
33413 pr_err("bpb_jit_compile fatal error\n");
33414 kfree(addrs);
33415- module_free(NULL, header);
33416+ module_free_exec(NULL, image);
33417 return;
33418 }
33419+ pax_open_kernel();
33420 memcpy(image + proglen, temp, ilen);
33421+ pax_close_kernel();
33422 }
33423 proglen += ilen;
33424 addrs[i] = proglen;
33425@@ -770,7 +876,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33426
33427 if (image) {
33428 bpf_flush_icache(header, image + proglen);
33429- set_memory_ro((unsigned long)header, header->pages);
33430 fp->bpf_func = (void *)image;
33431 }
33432 out:
33433@@ -782,10 +887,9 @@ static void bpf_jit_free_deferred(struct work_struct *work)
33434 {
33435 struct sk_filter *fp = container_of(work, struct sk_filter, work);
33436 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
33437- struct bpf_binary_header *header = (void *)addr;
33438
33439- set_memory_rw(addr, header->pages);
33440- module_free(NULL, header);
33441+ set_memory_rw(addr, 1);
33442+ module_free_exec(NULL, (void *)addr);
33443 kfree(fp);
33444 }
33445
33446diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
33447index 5d04be5..2beeaa2 100644
33448--- a/arch/x86/oprofile/backtrace.c
33449+++ b/arch/x86/oprofile/backtrace.c
33450@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
33451 struct stack_frame_ia32 *fp;
33452 unsigned long bytes;
33453
33454- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33455+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33456 if (bytes != 0)
33457 return NULL;
33458
33459- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
33460+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
33461
33462 oprofile_add_trace(bufhead[0].return_address);
33463
33464@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
33465 struct stack_frame bufhead[2];
33466 unsigned long bytes;
33467
33468- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33469+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33470 if (bytes != 0)
33471 return NULL;
33472
33473@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
33474 {
33475 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
33476
33477- if (!user_mode_vm(regs)) {
33478+ if (!user_mode(regs)) {
33479 unsigned long stack = kernel_stack_pointer(regs);
33480 if (depth)
33481 dump_trace(NULL, regs, (unsigned long *)stack, 0,
33482diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
33483index 6890d84..1dad1f1 100644
33484--- a/arch/x86/oprofile/nmi_int.c
33485+++ b/arch/x86/oprofile/nmi_int.c
33486@@ -23,6 +23,7 @@
33487 #include <asm/nmi.h>
33488 #include <asm/msr.h>
33489 #include <asm/apic.h>
33490+#include <asm/pgtable.h>
33491
33492 #include "op_counter.h"
33493 #include "op_x86_model.h"
33494@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
33495 if (ret)
33496 return ret;
33497
33498- if (!model->num_virt_counters)
33499- model->num_virt_counters = model->num_counters;
33500+ if (!model->num_virt_counters) {
33501+ pax_open_kernel();
33502+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
33503+ pax_close_kernel();
33504+ }
33505
33506 mux_init(ops);
33507
33508diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
33509index 50d86c0..7985318 100644
33510--- a/arch/x86/oprofile/op_model_amd.c
33511+++ b/arch/x86/oprofile/op_model_amd.c
33512@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
33513 num_counters = AMD64_NUM_COUNTERS;
33514 }
33515
33516- op_amd_spec.num_counters = num_counters;
33517- op_amd_spec.num_controls = num_counters;
33518- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
33519+ pax_open_kernel();
33520+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
33521+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
33522+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
33523+ pax_close_kernel();
33524
33525 return 0;
33526 }
33527diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
33528index d90528e..0127e2b 100644
33529--- a/arch/x86/oprofile/op_model_ppro.c
33530+++ b/arch/x86/oprofile/op_model_ppro.c
33531@@ -19,6 +19,7 @@
33532 #include <asm/msr.h>
33533 #include <asm/apic.h>
33534 #include <asm/nmi.h>
33535+#include <asm/pgtable.h>
33536
33537 #include "op_x86_model.h"
33538 #include "op_counter.h"
33539@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
33540
33541 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
33542
33543- op_arch_perfmon_spec.num_counters = num_counters;
33544- op_arch_perfmon_spec.num_controls = num_counters;
33545+ pax_open_kernel();
33546+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
33547+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
33548+ pax_close_kernel();
33549 }
33550
33551 static int arch_perfmon_init(struct oprofile_operations *ignore)
33552diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
33553index 71e8a67..6a313bb 100644
33554--- a/arch/x86/oprofile/op_x86_model.h
33555+++ b/arch/x86/oprofile/op_x86_model.h
33556@@ -52,7 +52,7 @@ struct op_x86_model_spec {
33557 void (*switch_ctrl)(struct op_x86_model_spec const *model,
33558 struct op_msrs const * const msrs);
33559 #endif
33560-};
33561+} __do_const;
33562
33563 struct op_counter_config;
33564
33565diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
33566index 51384ca..a25f51e 100644
33567--- a/arch/x86/pci/intel_mid_pci.c
33568+++ b/arch/x86/pci/intel_mid_pci.c
33569@@ -241,7 +241,7 @@ int __init intel_mid_pci_init(void)
33570 pr_info("Intel MID platform detected, using MID PCI ops\n");
33571 pci_mmcfg_late_init();
33572 pcibios_enable_irq = intel_mid_pci_irq_enable;
33573- pci_root_ops = intel_mid_pci_ops;
33574+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
33575 pci_soc_mode = 1;
33576 /* Continue with standard init */
33577 return 1;
33578diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
33579index 372e9b8..e775a6c 100644
33580--- a/arch/x86/pci/irq.c
33581+++ b/arch/x86/pci/irq.c
33582@@ -50,7 +50,7 @@ struct irq_router {
33583 struct irq_router_handler {
33584 u16 vendor;
33585 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
33586-};
33587+} __do_const;
33588
33589 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
33590 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
33591@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
33592 return 0;
33593 }
33594
33595-static __initdata struct irq_router_handler pirq_routers[] = {
33596+static __initconst const struct irq_router_handler pirq_routers[] = {
33597 { PCI_VENDOR_ID_INTEL, intel_router_probe },
33598 { PCI_VENDOR_ID_AL, ali_router_probe },
33599 { PCI_VENDOR_ID_ITE, ite_router_probe },
33600@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
33601 static void __init pirq_find_router(struct irq_router *r)
33602 {
33603 struct irq_routing_table *rt = pirq_table;
33604- struct irq_router_handler *h;
33605+ const struct irq_router_handler *h;
33606
33607 #ifdef CONFIG_PCI_BIOS
33608 if (!rt->signature) {
33609@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
33610 return 0;
33611 }
33612
33613-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
33614+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
33615 {
33616 .callback = fix_broken_hp_bios_irq9,
33617 .ident = "HP Pavilion N5400 Series Laptop",
33618diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
33619index c77b24a..c979855 100644
33620--- a/arch/x86/pci/pcbios.c
33621+++ b/arch/x86/pci/pcbios.c
33622@@ -79,7 +79,7 @@ union bios32 {
33623 static struct {
33624 unsigned long address;
33625 unsigned short segment;
33626-} bios32_indirect = { 0, __KERNEL_CS };
33627+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
33628
33629 /*
33630 * Returns the entry point for the given service, NULL on error
33631@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
33632 unsigned long length; /* %ecx */
33633 unsigned long entry; /* %edx */
33634 unsigned long flags;
33635+ struct desc_struct d, *gdt;
33636
33637 local_irq_save(flags);
33638- __asm__("lcall *(%%edi); cld"
33639+
33640+ gdt = get_cpu_gdt_table(smp_processor_id());
33641+
33642+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
33643+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
33644+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
33645+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
33646+
33647+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
33648 : "=a" (return_code),
33649 "=b" (address),
33650 "=c" (length),
33651 "=d" (entry)
33652 : "0" (service),
33653 "1" (0),
33654- "D" (&bios32_indirect));
33655+ "D" (&bios32_indirect),
33656+ "r"(__PCIBIOS_DS)
33657+ : "memory");
33658+
33659+ pax_open_kernel();
33660+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
33661+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
33662+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
33663+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
33664+ pax_close_kernel();
33665+
33666 local_irq_restore(flags);
33667
33668 switch (return_code) {
33669- case 0:
33670- return address + entry;
33671- case 0x80: /* Not present */
33672- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
33673- return 0;
33674- default: /* Shouldn't happen */
33675- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
33676- service, return_code);
33677+ case 0: {
33678+ int cpu;
33679+ unsigned char flags;
33680+
33681+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
33682+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
33683+ printk(KERN_WARNING "bios32_service: not valid\n");
33684 return 0;
33685+ }
33686+ address = address + PAGE_OFFSET;
33687+ length += 16UL; /* some BIOSs underreport this... */
33688+ flags = 4;
33689+ if (length >= 64*1024*1024) {
33690+ length >>= PAGE_SHIFT;
33691+ flags |= 8;
33692+ }
33693+
33694+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
33695+ gdt = get_cpu_gdt_table(cpu);
33696+ pack_descriptor(&d, address, length, 0x9b, flags);
33697+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
33698+ pack_descriptor(&d, address, length, 0x93, flags);
33699+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
33700+ }
33701+ return entry;
33702+ }
33703+ case 0x80: /* Not present */
33704+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
33705+ return 0;
33706+ default: /* Shouldn't happen */
33707+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
33708+ service, return_code);
33709+ return 0;
33710 }
33711 }
33712
33713 static struct {
33714 unsigned long address;
33715 unsigned short segment;
33716-} pci_indirect = { 0, __KERNEL_CS };
33717+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
33718
33719-static int pci_bios_present;
33720+static int pci_bios_present __read_only;
33721
33722 static int check_pcibios(void)
33723 {
33724@@ -131,11 +174,13 @@ static int check_pcibios(void)
33725 unsigned long flags, pcibios_entry;
33726
33727 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
33728- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
33729+ pci_indirect.address = pcibios_entry;
33730
33731 local_irq_save(flags);
33732- __asm__(
33733- "lcall *(%%edi); cld\n\t"
33734+ __asm__("movw %w6, %%ds\n\t"
33735+ "lcall *%%ss:(%%edi); cld\n\t"
33736+ "push %%ss\n\t"
33737+ "pop %%ds\n\t"
33738 "jc 1f\n\t"
33739 "xor %%ah, %%ah\n"
33740 "1:"
33741@@ -144,7 +189,8 @@ static int check_pcibios(void)
33742 "=b" (ebx),
33743 "=c" (ecx)
33744 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
33745- "D" (&pci_indirect)
33746+ "D" (&pci_indirect),
33747+ "r" (__PCIBIOS_DS)
33748 : "memory");
33749 local_irq_restore(flags);
33750
33751@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33752
33753 switch (len) {
33754 case 1:
33755- __asm__("lcall *(%%esi); cld\n\t"
33756+ __asm__("movw %w6, %%ds\n\t"
33757+ "lcall *%%ss:(%%esi); cld\n\t"
33758+ "push %%ss\n\t"
33759+ "pop %%ds\n\t"
33760 "jc 1f\n\t"
33761 "xor %%ah, %%ah\n"
33762 "1:"
33763@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33764 : "1" (PCIBIOS_READ_CONFIG_BYTE),
33765 "b" (bx),
33766 "D" ((long)reg),
33767- "S" (&pci_indirect));
33768+ "S" (&pci_indirect),
33769+ "r" (__PCIBIOS_DS));
33770 /*
33771 * Zero-extend the result beyond 8 bits, do not trust the
33772 * BIOS having done it:
33773@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33774 *value &= 0xff;
33775 break;
33776 case 2:
33777- __asm__("lcall *(%%esi); cld\n\t"
33778+ __asm__("movw %w6, %%ds\n\t"
33779+ "lcall *%%ss:(%%esi); cld\n\t"
33780+ "push %%ss\n\t"
33781+ "pop %%ds\n\t"
33782 "jc 1f\n\t"
33783 "xor %%ah, %%ah\n"
33784 "1:"
33785@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33786 : "1" (PCIBIOS_READ_CONFIG_WORD),
33787 "b" (bx),
33788 "D" ((long)reg),
33789- "S" (&pci_indirect));
33790+ "S" (&pci_indirect),
33791+ "r" (__PCIBIOS_DS));
33792 /*
33793 * Zero-extend the result beyond 16 bits, do not trust the
33794 * BIOS having done it:
33795@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33796 *value &= 0xffff;
33797 break;
33798 case 4:
33799- __asm__("lcall *(%%esi); cld\n\t"
33800+ __asm__("movw %w6, %%ds\n\t"
33801+ "lcall *%%ss:(%%esi); cld\n\t"
33802+ "push %%ss\n\t"
33803+ "pop %%ds\n\t"
33804 "jc 1f\n\t"
33805 "xor %%ah, %%ah\n"
33806 "1:"
33807@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33808 : "1" (PCIBIOS_READ_CONFIG_DWORD),
33809 "b" (bx),
33810 "D" ((long)reg),
33811- "S" (&pci_indirect));
33812+ "S" (&pci_indirect),
33813+ "r" (__PCIBIOS_DS));
33814 break;
33815 }
33816
33817@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33818
33819 switch (len) {
33820 case 1:
33821- __asm__("lcall *(%%esi); cld\n\t"
33822+ __asm__("movw %w6, %%ds\n\t"
33823+ "lcall *%%ss:(%%esi); cld\n\t"
33824+ "push %%ss\n\t"
33825+ "pop %%ds\n\t"
33826 "jc 1f\n\t"
33827 "xor %%ah, %%ah\n"
33828 "1:"
33829@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33830 "c" (value),
33831 "b" (bx),
33832 "D" ((long)reg),
33833- "S" (&pci_indirect));
33834+ "S" (&pci_indirect),
33835+ "r" (__PCIBIOS_DS));
33836 break;
33837 case 2:
33838- __asm__("lcall *(%%esi); cld\n\t"
33839+ __asm__("movw %w6, %%ds\n\t"
33840+ "lcall *%%ss:(%%esi); cld\n\t"
33841+ "push %%ss\n\t"
33842+ "pop %%ds\n\t"
33843 "jc 1f\n\t"
33844 "xor %%ah, %%ah\n"
33845 "1:"
33846@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33847 "c" (value),
33848 "b" (bx),
33849 "D" ((long)reg),
33850- "S" (&pci_indirect));
33851+ "S" (&pci_indirect),
33852+ "r" (__PCIBIOS_DS));
33853 break;
33854 case 4:
33855- __asm__("lcall *(%%esi); cld\n\t"
33856+ __asm__("movw %w6, %%ds\n\t"
33857+ "lcall *%%ss:(%%esi); cld\n\t"
33858+ "push %%ss\n\t"
33859+ "pop %%ds\n\t"
33860 "jc 1f\n\t"
33861 "xor %%ah, %%ah\n"
33862 "1:"
33863@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33864 "c" (value),
33865 "b" (bx),
33866 "D" ((long)reg),
33867- "S" (&pci_indirect));
33868+ "S" (&pci_indirect),
33869+ "r" (__PCIBIOS_DS));
33870 break;
33871 }
33872
33873@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33874
33875 DBG("PCI: Fetching IRQ routing table... ");
33876 __asm__("push %%es\n\t"
33877+ "movw %w8, %%ds\n\t"
33878 "push %%ds\n\t"
33879 "pop %%es\n\t"
33880- "lcall *(%%esi); cld\n\t"
33881+ "lcall *%%ss:(%%esi); cld\n\t"
33882 "pop %%es\n\t"
33883+ "push %%ss\n\t"
33884+ "pop %%ds\n"
33885 "jc 1f\n\t"
33886 "xor %%ah, %%ah\n"
33887 "1:"
33888@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33889 "1" (0),
33890 "D" ((long) &opt),
33891 "S" (&pci_indirect),
33892- "m" (opt)
33893+ "m" (opt),
33894+ "r" (__PCIBIOS_DS)
33895 : "memory");
33896 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
33897 if (ret & 0xff00)
33898@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33899 {
33900 int ret;
33901
33902- __asm__("lcall *(%%esi); cld\n\t"
33903+ __asm__("movw %w5, %%ds\n\t"
33904+ "lcall *%%ss:(%%esi); cld\n\t"
33905+ "push %%ss\n\t"
33906+ "pop %%ds\n"
33907 "jc 1f\n\t"
33908 "xor %%ah, %%ah\n"
33909 "1:"
33910@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33911 : "0" (PCIBIOS_SET_PCI_HW_INT),
33912 "b" ((dev->bus->number << 8) | dev->devfn),
33913 "c" ((irq << 8) | (pin + 10)),
33914- "S" (&pci_indirect));
33915+ "S" (&pci_indirect),
33916+ "r" (__PCIBIOS_DS));
33917 return !(ret & 0xff00);
33918 }
33919 EXPORT_SYMBOL(pcibios_set_irq_routing);
33920diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
33921index 40e4469..d915bf9 100644
33922--- a/arch/x86/platform/efi/efi_32.c
33923+++ b/arch/x86/platform/efi/efi_32.c
33924@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
33925 {
33926 struct desc_ptr gdt_descr;
33927
33928+#ifdef CONFIG_PAX_KERNEXEC
33929+ struct desc_struct d;
33930+#endif
33931+
33932 local_irq_save(efi_rt_eflags);
33933
33934 load_cr3(initial_page_table);
33935 __flush_tlb_all();
33936
33937+#ifdef CONFIG_PAX_KERNEXEC
33938+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
33939+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33940+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
33941+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33942+#endif
33943+
33944 gdt_descr.address = __pa(get_cpu_gdt_table(0));
33945 gdt_descr.size = GDT_SIZE - 1;
33946 load_gdt(&gdt_descr);
33947@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
33948 {
33949 struct desc_ptr gdt_descr;
33950
33951+#ifdef CONFIG_PAX_KERNEXEC
33952+ struct desc_struct d;
33953+
33954+ memset(&d, 0, sizeof d);
33955+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33956+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33957+#endif
33958+
33959 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
33960 gdt_descr.size = GDT_SIZE - 1;
33961 load_gdt(&gdt_descr);
33962
33963+#ifdef CONFIG_PAX_PER_CPU_PGD
33964+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33965+#else
33966 load_cr3(swapper_pg_dir);
33967+#endif
33968+
33969 __flush_tlb_all();
33970
33971 local_irq_restore(efi_rt_eflags);
33972diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
33973index 39a0e7f1..872396e 100644
33974--- a/arch/x86/platform/efi/efi_64.c
33975+++ b/arch/x86/platform/efi/efi_64.c
33976@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
33977 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
33978 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
33979 }
33980+
33981+#ifdef CONFIG_PAX_PER_CPU_PGD
33982+ load_cr3(swapper_pg_dir);
33983+#endif
33984+
33985 __flush_tlb_all();
33986 }
33987
33988@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
33989 for (pgd = 0; pgd < n_pgds; pgd++)
33990 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
33991 kfree(save_pgd);
33992+
33993+#ifdef CONFIG_PAX_PER_CPU_PGD
33994+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33995+#endif
33996+
33997 __flush_tlb_all();
33998 local_irq_restore(efi_flags);
33999 early_code_mapping_set_exec(0);
34000diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34001index fbe66e6..eae5e38 100644
34002--- a/arch/x86/platform/efi/efi_stub_32.S
34003+++ b/arch/x86/platform/efi/efi_stub_32.S
34004@@ -6,7 +6,9 @@
34005 */
34006
34007 #include <linux/linkage.h>
34008+#include <linux/init.h>
34009 #include <asm/page_types.h>
34010+#include <asm/segment.h>
34011
34012 /*
34013 * efi_call_phys(void *, ...) is a function with variable parameters.
34014@@ -20,7 +22,7 @@
34015 * service functions will comply with gcc calling convention, too.
34016 */
34017
34018-.text
34019+__INIT
34020 ENTRY(efi_call_phys)
34021 /*
34022 * 0. The function can only be called in Linux kernel. So CS has been
34023@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34024 * The mapping of lower virtual memory has been created in prelog and
34025 * epilog.
34026 */
34027- movl $1f, %edx
34028- subl $__PAGE_OFFSET, %edx
34029- jmp *%edx
34030+#ifdef CONFIG_PAX_KERNEXEC
34031+ movl $(__KERNEXEC_EFI_DS), %edx
34032+ mov %edx, %ds
34033+ mov %edx, %es
34034+ mov %edx, %ss
34035+ addl $2f,(1f)
34036+ ljmp *(1f)
34037+
34038+__INITDATA
34039+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34040+.previous
34041+
34042+2:
34043+ subl $2b,(1b)
34044+#else
34045+ jmp 1f-__PAGE_OFFSET
34046 1:
34047+#endif
34048
34049 /*
34050 * 2. Now on the top of stack is the return
34051@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34052 * parameter 2, ..., param n. To make things easy, we save the return
34053 * address of efi_call_phys in a global variable.
34054 */
34055- popl %edx
34056- movl %edx, saved_return_addr
34057- /* get the function pointer into ECX*/
34058- popl %ecx
34059- movl %ecx, efi_rt_function_ptr
34060- movl $2f, %edx
34061- subl $__PAGE_OFFSET, %edx
34062- pushl %edx
34063+ popl (saved_return_addr)
34064+ popl (efi_rt_function_ptr)
34065
34066 /*
34067 * 3. Clear PG bit in %CR0.
34068@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34069 /*
34070 * 5. Call the physical function.
34071 */
34072- jmp *%ecx
34073+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34074
34075-2:
34076 /*
34077 * 6. After EFI runtime service returns, control will return to
34078 * following instruction. We'd better readjust stack pointer first.
34079@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34080 movl %cr0, %edx
34081 orl $0x80000000, %edx
34082 movl %edx, %cr0
34083- jmp 1f
34084-1:
34085+
34086 /*
34087 * 8. Now restore the virtual mode from flat mode by
34088 * adding EIP with PAGE_OFFSET.
34089 */
34090- movl $1f, %edx
34091- jmp *%edx
34092+#ifdef CONFIG_PAX_KERNEXEC
34093+ movl $(__KERNEL_DS), %edx
34094+ mov %edx, %ds
34095+ mov %edx, %es
34096+ mov %edx, %ss
34097+ ljmp $(__KERNEL_CS),$1f
34098+#else
34099+ jmp 1f+__PAGE_OFFSET
34100+#endif
34101 1:
34102
34103 /*
34104 * 9. Balance the stack. And because EAX contain the return value,
34105 * we'd better not clobber it.
34106 */
34107- leal efi_rt_function_ptr, %edx
34108- movl (%edx), %ecx
34109- pushl %ecx
34110+ pushl (efi_rt_function_ptr)
34111
34112 /*
34113- * 10. Push the saved return address onto the stack and return.
34114+ * 10. Return to the saved return address.
34115 */
34116- leal saved_return_addr, %edx
34117- movl (%edx), %ecx
34118- pushl %ecx
34119- ret
34120+ jmpl *(saved_return_addr)
34121 ENDPROC(efi_call_phys)
34122 .previous
34123
34124-.data
34125+__INITDATA
34126 saved_return_addr:
34127 .long 0
34128 efi_rt_function_ptr:
34129diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34130index 4c07cca..2c8427d 100644
34131--- a/arch/x86/platform/efi/efi_stub_64.S
34132+++ b/arch/x86/platform/efi/efi_stub_64.S
34133@@ -7,6 +7,7 @@
34134 */
34135
34136 #include <linux/linkage.h>
34137+#include <asm/alternative-asm.h>
34138
34139 #define SAVE_XMM \
34140 mov %rsp, %rax; \
34141@@ -40,6 +41,7 @@ ENTRY(efi_call0)
34142 call *%rdi
34143 addq $32, %rsp
34144 RESTORE_XMM
34145+ pax_force_retaddr 0, 1
34146 ret
34147 ENDPROC(efi_call0)
34148
34149@@ -50,6 +52,7 @@ ENTRY(efi_call1)
34150 call *%rdi
34151 addq $32, %rsp
34152 RESTORE_XMM
34153+ pax_force_retaddr 0, 1
34154 ret
34155 ENDPROC(efi_call1)
34156
34157@@ -60,6 +63,7 @@ ENTRY(efi_call2)
34158 call *%rdi
34159 addq $32, %rsp
34160 RESTORE_XMM
34161+ pax_force_retaddr 0, 1
34162 ret
34163 ENDPROC(efi_call2)
34164
34165@@ -71,6 +75,7 @@ ENTRY(efi_call3)
34166 call *%rdi
34167 addq $32, %rsp
34168 RESTORE_XMM
34169+ pax_force_retaddr 0, 1
34170 ret
34171 ENDPROC(efi_call3)
34172
34173@@ -83,6 +88,7 @@ ENTRY(efi_call4)
34174 call *%rdi
34175 addq $32, %rsp
34176 RESTORE_XMM
34177+ pax_force_retaddr 0, 1
34178 ret
34179 ENDPROC(efi_call4)
34180
34181@@ -96,6 +102,7 @@ ENTRY(efi_call5)
34182 call *%rdi
34183 addq $48, %rsp
34184 RESTORE_XMM
34185+ pax_force_retaddr 0, 1
34186 ret
34187 ENDPROC(efi_call5)
34188
34189@@ -112,5 +119,6 @@ ENTRY(efi_call6)
34190 call *%rdi
34191 addq $48, %rsp
34192 RESTORE_XMM
34193+ pax_force_retaddr 0, 1
34194 ret
34195 ENDPROC(efi_call6)
34196diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
34197index f90e290..435f0dd 100644
34198--- a/arch/x86/platform/intel-mid/intel-mid.c
34199+++ b/arch/x86/platform/intel-mid/intel-mid.c
34200@@ -65,9 +65,10 @@ static void intel_mid_power_off(void)
34201 {
34202 }
34203
34204-static void intel_mid_reboot(void)
34205+static void __noreturn intel_mid_reboot(void)
34206 {
34207 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
34208+ BUG();
34209 }
34210
34211 static unsigned long __init intel_mid_calibrate_tsc(void)
34212diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
34213index d6ee929..3637cb5 100644
34214--- a/arch/x86/platform/olpc/olpc_dt.c
34215+++ b/arch/x86/platform/olpc/olpc_dt.c
34216@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
34217 return res;
34218 }
34219
34220-static struct of_pdt_ops prom_olpc_ops __initdata = {
34221+static struct of_pdt_ops prom_olpc_ops __initconst = {
34222 .nextprop = olpc_dt_nextprop,
34223 .getproplen = olpc_dt_getproplen,
34224 .getproperty = olpc_dt_getproperty,
34225diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
34226index 424f4c9..f2a2988 100644
34227--- a/arch/x86/power/cpu.c
34228+++ b/arch/x86/power/cpu.c
34229@@ -137,11 +137,8 @@ static void do_fpu_end(void)
34230 static void fix_processor_context(void)
34231 {
34232 int cpu = smp_processor_id();
34233- struct tss_struct *t = &per_cpu(init_tss, cpu);
34234-#ifdef CONFIG_X86_64
34235- struct desc_struct *desc = get_cpu_gdt_table(cpu);
34236- tss_desc tss;
34237-#endif
34238+ struct tss_struct *t = init_tss + cpu;
34239+
34240 set_tss_desc(cpu, t); /*
34241 * This just modifies memory; should not be
34242 * necessary. But... This is necessary, because
34243@@ -150,10 +147,6 @@ static void fix_processor_context(void)
34244 */
34245
34246 #ifdef CONFIG_X86_64
34247- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
34248- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
34249- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
34250-
34251 syscall_init(); /* This sets MSR_*STAR and related */
34252 #endif
34253 load_TR_desc(); /* This does ltr */
34254diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
34255index a44f457..9140171 100644
34256--- a/arch/x86/realmode/init.c
34257+++ b/arch/x86/realmode/init.c
34258@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
34259 __va(real_mode_header->trampoline_header);
34260
34261 #ifdef CONFIG_X86_32
34262- trampoline_header->start = __pa_symbol(startup_32_smp);
34263+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
34264+
34265+#ifdef CONFIG_PAX_KERNEXEC
34266+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
34267+#endif
34268+
34269+ trampoline_header->boot_cs = __BOOT_CS;
34270 trampoline_header->gdt_limit = __BOOT_DS + 7;
34271 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
34272 #else
34273@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
34274 *trampoline_cr4_features = read_cr4();
34275
34276 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
34277- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
34278+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
34279 trampoline_pgd[511] = init_level4_pgt[511].pgd;
34280 #endif
34281 }
34282diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
34283index 9cac825..4890b25 100644
34284--- a/arch/x86/realmode/rm/Makefile
34285+++ b/arch/x86/realmode/rm/Makefile
34286@@ -79,5 +79,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
34287 $(call cc-option, -fno-unit-at-a-time)) \
34288 $(call cc-option, -fno-stack-protector) \
34289 $(call cc-option, -mpreferred-stack-boundary=2)
34290+ifdef CONSTIFY_PLUGIN
34291+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
34292+endif
34293 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
34294 GCOV_PROFILE := n
34295diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
34296index a28221d..93c40f1 100644
34297--- a/arch/x86/realmode/rm/header.S
34298+++ b/arch/x86/realmode/rm/header.S
34299@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
34300 #endif
34301 /* APM/BIOS reboot */
34302 .long pa_machine_real_restart_asm
34303-#ifdef CONFIG_X86_64
34304+#ifdef CONFIG_X86_32
34305+ .long __KERNEL_CS
34306+#else
34307 .long __KERNEL32_CS
34308 #endif
34309 END(real_mode_header)
34310diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
34311index c1b2791..f9e31c7 100644
34312--- a/arch/x86/realmode/rm/trampoline_32.S
34313+++ b/arch/x86/realmode/rm/trampoline_32.S
34314@@ -25,6 +25,12 @@
34315 #include <asm/page_types.h>
34316 #include "realmode.h"
34317
34318+#ifdef CONFIG_PAX_KERNEXEC
34319+#define ta(X) (X)
34320+#else
34321+#define ta(X) (pa_ ## X)
34322+#endif
34323+
34324 .text
34325 .code16
34326
34327@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
34328
34329 cli # We should be safe anyway
34330
34331- movl tr_start, %eax # where we need to go
34332-
34333 movl $0xA5A5A5A5, trampoline_status
34334 # write marker for master knows we're running
34335
34336@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
34337 movw $1, %dx # protected mode (PE) bit
34338 lmsw %dx # into protected mode
34339
34340- ljmpl $__BOOT_CS, $pa_startup_32
34341+ ljmpl *(trampoline_header)
34342
34343 .section ".text32","ax"
34344 .code32
34345@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
34346 .balign 8
34347 GLOBAL(trampoline_header)
34348 tr_start: .space 4
34349- tr_gdt_pad: .space 2
34350+ tr_boot_cs: .space 2
34351 tr_gdt: .space 6
34352 END(trampoline_header)
34353
34354diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
34355index bb360dc..d0fd8f8 100644
34356--- a/arch/x86/realmode/rm/trampoline_64.S
34357+++ b/arch/x86/realmode/rm/trampoline_64.S
34358@@ -94,6 +94,7 @@ ENTRY(startup_32)
34359 movl %edx, %gs
34360
34361 movl pa_tr_cr4, %eax
34362+ andl $~X86_CR4_PCIDE, %eax
34363 movl %eax, %cr4 # Enable PAE mode
34364
34365 # Setup trampoline 4 level pagetables
34366@@ -107,7 +108,7 @@ ENTRY(startup_32)
34367 wrmsr
34368
34369 # Enable paging and in turn activate Long Mode
34370- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
34371+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
34372 movl %eax, %cr0
34373
34374 /*
34375diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
34376index e812034..c747134 100644
34377--- a/arch/x86/tools/Makefile
34378+++ b/arch/x86/tools/Makefile
34379@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
34380
34381 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
34382
34383-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
34384+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
34385 hostprogs-y += relocs
34386 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
34387 relocs: $(obj)/relocs
34388diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
34389index f7bab68..b6d9886 100644
34390--- a/arch/x86/tools/relocs.c
34391+++ b/arch/x86/tools/relocs.c
34392@@ -1,5 +1,7 @@
34393 /* This is included from relocs_32/64.c */
34394
34395+#include "../../../include/generated/autoconf.h"
34396+
34397 #define ElfW(type) _ElfW(ELF_BITS, type)
34398 #define _ElfW(bits, type) __ElfW(bits, type)
34399 #define __ElfW(bits, type) Elf##bits##_##type
34400@@ -11,6 +13,7 @@
34401 #define Elf_Sym ElfW(Sym)
34402
34403 static Elf_Ehdr ehdr;
34404+static Elf_Phdr *phdr;
34405
34406 struct relocs {
34407 uint32_t *offset;
34408@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
34409 }
34410 }
34411
34412+static void read_phdrs(FILE *fp)
34413+{
34414+ unsigned int i;
34415+
34416+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
34417+ if (!phdr) {
34418+ die("Unable to allocate %d program headers\n",
34419+ ehdr.e_phnum);
34420+ }
34421+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
34422+ die("Seek to %d failed: %s\n",
34423+ ehdr.e_phoff, strerror(errno));
34424+ }
34425+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
34426+ die("Cannot read ELF program headers: %s\n",
34427+ strerror(errno));
34428+ }
34429+ for(i = 0; i < ehdr.e_phnum; i++) {
34430+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
34431+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
34432+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
34433+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
34434+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
34435+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
34436+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
34437+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
34438+ }
34439+
34440+}
34441+
34442 static void read_shdrs(FILE *fp)
34443 {
34444- int i;
34445+ unsigned int i;
34446 Elf_Shdr shdr;
34447
34448 secs = calloc(ehdr.e_shnum, sizeof(struct section));
34449@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
34450
34451 static void read_strtabs(FILE *fp)
34452 {
34453- int i;
34454+ unsigned int i;
34455 for (i = 0; i < ehdr.e_shnum; i++) {
34456 struct section *sec = &secs[i];
34457 if (sec->shdr.sh_type != SHT_STRTAB) {
34458@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
34459
34460 static void read_symtabs(FILE *fp)
34461 {
34462- int i,j;
34463+ unsigned int i,j;
34464 for (i = 0; i < ehdr.e_shnum; i++) {
34465 struct section *sec = &secs[i];
34466 if (sec->shdr.sh_type != SHT_SYMTAB) {
34467@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
34468 }
34469
34470
34471-static void read_relocs(FILE *fp)
34472+static void read_relocs(FILE *fp, int use_real_mode)
34473 {
34474- int i,j;
34475+ unsigned int i,j;
34476+ uint32_t base;
34477+
34478 for (i = 0; i < ehdr.e_shnum; i++) {
34479 struct section *sec = &secs[i];
34480 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34481@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
34482 die("Cannot read symbol table: %s\n",
34483 strerror(errno));
34484 }
34485+ base = 0;
34486+
34487+#ifdef CONFIG_X86_32
34488+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
34489+ if (phdr[j].p_type != PT_LOAD )
34490+ continue;
34491+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
34492+ continue;
34493+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
34494+ break;
34495+ }
34496+#endif
34497+
34498 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
34499 Elf_Rel *rel = &sec->reltab[j];
34500- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
34501+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
34502 rel->r_info = elf_xword_to_cpu(rel->r_info);
34503 #if (SHT_REL_TYPE == SHT_RELA)
34504 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
34505@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
34506
34507 static void print_absolute_symbols(void)
34508 {
34509- int i;
34510+ unsigned int i;
34511 const char *format;
34512
34513 if (ELF_BITS == 64)
34514@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
34515 for (i = 0; i < ehdr.e_shnum; i++) {
34516 struct section *sec = &secs[i];
34517 char *sym_strtab;
34518- int j;
34519+ unsigned int j;
34520
34521 if (sec->shdr.sh_type != SHT_SYMTAB) {
34522 continue;
34523@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
34524
34525 static void print_absolute_relocs(void)
34526 {
34527- int i, printed = 0;
34528+ unsigned int i, printed = 0;
34529 const char *format;
34530
34531 if (ELF_BITS == 64)
34532@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
34533 struct section *sec_applies, *sec_symtab;
34534 char *sym_strtab;
34535 Elf_Sym *sh_symtab;
34536- int j;
34537+ unsigned int j;
34538 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34539 continue;
34540 }
34541@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
34542 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
34543 Elf_Sym *sym, const char *symname))
34544 {
34545- int i;
34546+ unsigned int i;
34547 /* Walk through the relocations */
34548 for (i = 0; i < ehdr.e_shnum; i++) {
34549 char *sym_strtab;
34550 Elf_Sym *sh_symtab;
34551 struct section *sec_applies, *sec_symtab;
34552- int j;
34553+ unsigned int j;
34554 struct section *sec = &secs[i];
34555
34556 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34557@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
34558 {
34559 unsigned r_type = ELF32_R_TYPE(rel->r_info);
34560 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
34561+ char *sym_strtab = sec->link->link->strtab;
34562+
34563+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
34564+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
34565+ return 0;
34566+
34567+#ifdef CONFIG_PAX_KERNEXEC
34568+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
34569+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
34570+ return 0;
34571+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
34572+ return 0;
34573+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
34574+ return 0;
34575+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
34576+ return 0;
34577+#endif
34578
34579 switch (r_type) {
34580 case R_386_NONE:
34581@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
34582
34583 static void emit_relocs(int as_text, int use_real_mode)
34584 {
34585- int i;
34586+ unsigned int i;
34587 int (*write_reloc)(uint32_t, FILE *) = write32;
34588 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
34589 const char *symname);
34590@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
34591 {
34592 regex_init(use_real_mode);
34593 read_ehdr(fp);
34594+ read_phdrs(fp);
34595 read_shdrs(fp);
34596 read_strtabs(fp);
34597 read_symtabs(fp);
34598- read_relocs(fp);
34599+ read_relocs(fp, use_real_mode);
34600 if (ELF_BITS == 64)
34601 percpu_init();
34602 if (show_absolute_syms) {
34603diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
34604index 80ffa5b..a33bd15 100644
34605--- a/arch/x86/um/tls_32.c
34606+++ b/arch/x86/um/tls_32.c
34607@@ -260,7 +260,7 @@ out:
34608 if (unlikely(task == current &&
34609 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
34610 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
34611- "without flushed TLS.", current->pid);
34612+ "without flushed TLS.", task_pid_nr(current));
34613 }
34614
34615 return 0;
34616diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
34617index fd14be1..e3c79c0 100644
34618--- a/arch/x86/vdso/Makefile
34619+++ b/arch/x86/vdso/Makefile
34620@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
34621 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
34622 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
34623
34624-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
34625+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
34626 GCOV_PROFILE := n
34627
34628 #
34629diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
34630index d6bfb87..876ee18 100644
34631--- a/arch/x86/vdso/vdso32-setup.c
34632+++ b/arch/x86/vdso/vdso32-setup.c
34633@@ -25,6 +25,7 @@
34634 #include <asm/tlbflush.h>
34635 #include <asm/vdso.h>
34636 #include <asm/proto.h>
34637+#include <asm/mman.h>
34638
34639 enum {
34640 VDSO_DISABLED = 0,
34641@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
34642 void enable_sep_cpu(void)
34643 {
34644 int cpu = get_cpu();
34645- struct tss_struct *tss = &per_cpu(init_tss, cpu);
34646+ struct tss_struct *tss = init_tss + cpu;
34647
34648 if (!boot_cpu_has(X86_FEATURE_SEP)) {
34649 put_cpu();
34650@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
34651 gate_vma.vm_start = FIXADDR_USER_START;
34652 gate_vma.vm_end = FIXADDR_USER_END;
34653 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
34654- gate_vma.vm_page_prot = __P101;
34655+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
34656
34657 return 0;
34658 }
34659@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34660 if (compat)
34661 addr = VDSO_HIGH_BASE;
34662 else {
34663- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
34664+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
34665 if (IS_ERR_VALUE(addr)) {
34666 ret = addr;
34667 goto up_fail;
34668 }
34669 }
34670
34671- current->mm->context.vdso = (void *)addr;
34672+ current->mm->context.vdso = addr;
34673
34674 if (compat_uses_vma || !compat) {
34675 /*
34676@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34677 }
34678
34679 current_thread_info()->sysenter_return =
34680- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
34681+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
34682
34683 up_fail:
34684 if (ret)
34685- current->mm->context.vdso = NULL;
34686+ current->mm->context.vdso = 0;
34687
34688 up_write(&mm->mmap_sem);
34689
34690@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
34691
34692 const char *arch_vma_name(struct vm_area_struct *vma)
34693 {
34694- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
34695+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
34696 return "[vdso]";
34697+
34698+#ifdef CONFIG_PAX_SEGMEXEC
34699+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
34700+ return "[vdso]";
34701+#endif
34702+
34703 return NULL;
34704 }
34705
34706@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
34707 * Check to see if the corresponding task was created in compat vdso
34708 * mode.
34709 */
34710- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
34711+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
34712 return &gate_vma;
34713 return NULL;
34714 }
34715diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
34716index 431e875..cbb23f3 100644
34717--- a/arch/x86/vdso/vma.c
34718+++ b/arch/x86/vdso/vma.c
34719@@ -16,8 +16,6 @@
34720 #include <asm/vdso.h>
34721 #include <asm/page.h>
34722
34723-unsigned int __read_mostly vdso_enabled = 1;
34724-
34725 extern char vdso_start[], vdso_end[];
34726 extern unsigned short vdso_sync_cpuid;
34727
34728@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
34729 * unaligned here as a result of stack start randomization.
34730 */
34731 addr = PAGE_ALIGN(addr);
34732- addr = align_vdso_addr(addr);
34733
34734 return addr;
34735 }
34736@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
34737 unsigned size)
34738 {
34739 struct mm_struct *mm = current->mm;
34740- unsigned long addr;
34741+ unsigned long addr = 0;
34742 int ret;
34743
34744- if (!vdso_enabled)
34745- return 0;
34746-
34747 down_write(&mm->mmap_sem);
34748+
34749+#ifdef CONFIG_PAX_RANDMMAP
34750+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
34751+#endif
34752+
34753 addr = vdso_addr(mm->start_stack, size);
34754+ addr = align_vdso_addr(addr);
34755 addr = get_unmapped_area(NULL, addr, size, 0, 0);
34756 if (IS_ERR_VALUE(addr)) {
34757 ret = addr;
34758 goto up_fail;
34759 }
34760
34761- current->mm->context.vdso = (void *)addr;
34762+ mm->context.vdso = addr;
34763
34764 ret = install_special_mapping(mm, addr, size,
34765 VM_READ|VM_EXEC|
34766 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
34767 pages);
34768- if (ret) {
34769- current->mm->context.vdso = NULL;
34770- goto up_fail;
34771- }
34772+ if (ret)
34773+ mm->context.vdso = 0;
34774
34775 up_fail:
34776 up_write(&mm->mmap_sem);
34777@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34778 vdsox32_size);
34779 }
34780 #endif
34781-
34782-static __init int vdso_setup(char *s)
34783-{
34784- vdso_enabled = simple_strtoul(s, NULL, 0);
34785- return 0;
34786-}
34787-__setup("vdso=", vdso_setup);
34788diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
34789index fa6ade7..73da73a5 100644
34790--- a/arch/x86/xen/enlighten.c
34791+++ b/arch/x86/xen/enlighten.c
34792@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
34793
34794 struct shared_info xen_dummy_shared_info;
34795
34796-void *xen_initial_gdt;
34797-
34798 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
34799 __read_mostly int xen_have_vector_callback;
34800 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
34801@@ -541,8 +539,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
34802 {
34803 unsigned long va = dtr->address;
34804 unsigned int size = dtr->size + 1;
34805- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34806- unsigned long frames[pages];
34807+ unsigned long frames[65536 / PAGE_SIZE];
34808 int f;
34809
34810 /*
34811@@ -590,8 +587,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34812 {
34813 unsigned long va = dtr->address;
34814 unsigned int size = dtr->size + 1;
34815- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34816- unsigned long frames[pages];
34817+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
34818 int f;
34819
34820 /*
34821@@ -599,7 +595,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34822 * 8-byte entries, or 16 4k pages..
34823 */
34824
34825- BUG_ON(size > 65536);
34826+ BUG_ON(size > GDT_SIZE);
34827 BUG_ON(va & ~PAGE_MASK);
34828
34829 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
34830@@ -988,7 +984,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
34831 return 0;
34832 }
34833
34834-static void set_xen_basic_apic_ops(void)
34835+static void __init set_xen_basic_apic_ops(void)
34836 {
34837 apic->read = xen_apic_read;
34838 apic->write = xen_apic_write;
34839@@ -1293,30 +1289,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
34840 #endif
34841 };
34842
34843-static void xen_reboot(int reason)
34844+static __noreturn void xen_reboot(int reason)
34845 {
34846 struct sched_shutdown r = { .reason = reason };
34847
34848- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
34849- BUG();
34850+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
34851+ BUG();
34852 }
34853
34854-static void xen_restart(char *msg)
34855+static __noreturn void xen_restart(char *msg)
34856 {
34857 xen_reboot(SHUTDOWN_reboot);
34858 }
34859
34860-static void xen_emergency_restart(void)
34861+static __noreturn void xen_emergency_restart(void)
34862 {
34863 xen_reboot(SHUTDOWN_reboot);
34864 }
34865
34866-static void xen_machine_halt(void)
34867+static __noreturn void xen_machine_halt(void)
34868 {
34869 xen_reboot(SHUTDOWN_poweroff);
34870 }
34871
34872-static void xen_machine_power_off(void)
34873+static __noreturn void xen_machine_power_off(void)
34874 {
34875 if (pm_power_off)
34876 pm_power_off();
34877@@ -1467,7 +1463,17 @@ asmlinkage void __init xen_start_kernel(void)
34878 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
34879
34880 /* Work out if we support NX */
34881- x86_configure_nx();
34882+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34883+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
34884+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
34885+ unsigned l, h;
34886+
34887+ __supported_pte_mask |= _PAGE_NX;
34888+ rdmsr(MSR_EFER, l, h);
34889+ l |= EFER_NX;
34890+ wrmsr(MSR_EFER, l, h);
34891+ }
34892+#endif
34893
34894 xen_setup_features();
34895
34896@@ -1498,13 +1504,6 @@ asmlinkage void __init xen_start_kernel(void)
34897
34898 machine_ops = xen_machine_ops;
34899
34900- /*
34901- * The only reliable way to retain the initial address of the
34902- * percpu gdt_page is to remember it here, so we can go and
34903- * mark it RW later, when the initial percpu area is freed.
34904- */
34905- xen_initial_gdt = &per_cpu(gdt_page, 0);
34906-
34907 xen_smp_init();
34908
34909 #ifdef CONFIG_ACPI_NUMA
34910diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
34911index ce563be..7327d91 100644
34912--- a/arch/x86/xen/mmu.c
34913+++ b/arch/x86/xen/mmu.c
34914@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
34915 return val;
34916 }
34917
34918-static pteval_t pte_pfn_to_mfn(pteval_t val)
34919+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
34920 {
34921 if (val & _PAGE_PRESENT) {
34922 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
34923@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34924 /* L3_k[510] -> level2_kernel_pgt
34925 * L3_i[511] -> level2_fixmap_pgt */
34926 convert_pfn_mfn(level3_kernel_pgt);
34927+ convert_pfn_mfn(level3_vmalloc_start_pgt);
34928+ convert_pfn_mfn(level3_vmalloc_end_pgt);
34929+ convert_pfn_mfn(level3_vmemmap_pgt);
34930
34931 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
34932 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
34933@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34934 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
34935 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
34936 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
34937+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
34938+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
34939+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
34940 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
34941 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
34942+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
34943 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
34944 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
34945
34946@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
34947 pv_mmu_ops.set_pud = xen_set_pud;
34948 #if PAGETABLE_LEVELS == 4
34949 pv_mmu_ops.set_pgd = xen_set_pgd;
34950+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
34951 #endif
34952
34953 /* This will work as long as patching hasn't happened yet
34954@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
34955 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
34956 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
34957 .set_pgd = xen_set_pgd_hyper,
34958+ .set_pgd_batched = xen_set_pgd_hyper,
34959
34960 .alloc_pud = xen_alloc_pmd_init,
34961 .release_pud = xen_release_pmd_init,
34962diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
34963index c36b325..b0f1518 100644
34964--- a/arch/x86/xen/smp.c
34965+++ b/arch/x86/xen/smp.c
34966@@ -274,17 +274,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
34967 native_smp_prepare_boot_cpu();
34968
34969 if (xen_pv_domain()) {
34970- /* We've switched to the "real" per-cpu gdt, so make sure the
34971- old memory can be recycled */
34972- make_lowmem_page_readwrite(xen_initial_gdt);
34973-
34974 #ifdef CONFIG_X86_32
34975 /*
34976 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
34977 * expects __USER_DS
34978 */
34979- loadsegment(ds, __USER_DS);
34980- loadsegment(es, __USER_DS);
34981+ loadsegment(ds, __KERNEL_DS);
34982+ loadsegment(es, __KERNEL_DS);
34983 #endif
34984
34985 xen_filter_cpu_maps();
34986@@ -364,7 +360,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34987 ctxt->user_regs.ss = __KERNEL_DS;
34988 #ifdef CONFIG_X86_32
34989 ctxt->user_regs.fs = __KERNEL_PERCPU;
34990- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
34991+ savesegment(gs, ctxt->user_regs.gs);
34992 #else
34993 ctxt->gs_base_kernel = per_cpu_offset(cpu);
34994 #endif
34995@@ -374,8 +370,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34996
34997 {
34998 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
34999- ctxt->user_regs.ds = __USER_DS;
35000- ctxt->user_regs.es = __USER_DS;
35001+ ctxt->user_regs.ds = __KERNEL_DS;
35002+ ctxt->user_regs.es = __KERNEL_DS;
35003
35004 xen_copy_trap_info(ctxt->trap_ctxt);
35005
35006@@ -420,13 +416,12 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35007 int rc;
35008
35009 per_cpu(current_task, cpu) = idle;
35010+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35011 #ifdef CONFIG_X86_32
35012 irq_ctx_init(cpu);
35013 #else
35014 clear_tsk_thread_flag(idle, TIF_FORK);
35015- per_cpu(kernel_stack, cpu) =
35016- (unsigned long)task_stack_page(idle) -
35017- KERNEL_STACK_OFFSET + THREAD_SIZE;
35018+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35019 #endif
35020 xen_setup_runstate_info(cpu);
35021 xen_setup_timer(cpu);
35022@@ -702,7 +697,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35023
35024 void __init xen_smp_init(void)
35025 {
35026- smp_ops = xen_smp_ops;
35027+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35028 xen_fill_possible_map();
35029 }
35030
35031diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35032index 33ca6e4..0ded929 100644
35033--- a/arch/x86/xen/xen-asm_32.S
35034+++ b/arch/x86/xen/xen-asm_32.S
35035@@ -84,14 +84,14 @@ ENTRY(xen_iret)
35036 ESP_OFFSET=4 # bytes pushed onto stack
35037
35038 /*
35039- * Store vcpu_info pointer for easy access. Do it this way to
35040- * avoid having to reload %fs
35041+ * Store vcpu_info pointer for easy access.
35042 */
35043 #ifdef CONFIG_SMP
35044- GET_THREAD_INFO(%eax)
35045- movl %ss:TI_cpu(%eax), %eax
35046- movl %ss:__per_cpu_offset(,%eax,4), %eax
35047- mov %ss:xen_vcpu(%eax), %eax
35048+ push %fs
35049+ mov $(__KERNEL_PERCPU), %eax
35050+ mov %eax, %fs
35051+ mov PER_CPU_VAR(xen_vcpu), %eax
35052+ pop %fs
35053 #else
35054 movl %ss:xen_vcpu, %eax
35055 #endif
35056diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35057index 7faed58..ba4427c 100644
35058--- a/arch/x86/xen/xen-head.S
35059+++ b/arch/x86/xen/xen-head.S
35060@@ -19,6 +19,17 @@ ENTRY(startup_xen)
35061 #ifdef CONFIG_X86_32
35062 mov %esi,xen_start_info
35063 mov $init_thread_union+THREAD_SIZE,%esp
35064+#ifdef CONFIG_SMP
35065+ movl $cpu_gdt_table,%edi
35066+ movl $__per_cpu_load,%eax
35067+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35068+ rorl $16,%eax
35069+ movb %al,__KERNEL_PERCPU + 4(%edi)
35070+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35071+ movl $__per_cpu_end - 1,%eax
35072+ subl $__per_cpu_start,%eax
35073+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35074+#endif
35075 #else
35076 mov %rsi,xen_start_info
35077 mov $init_thread_union+THREAD_SIZE,%rsp
35078diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35079index 95f8c61..611d6e8 100644
35080--- a/arch/x86/xen/xen-ops.h
35081+++ b/arch/x86/xen/xen-ops.h
35082@@ -10,8 +10,6 @@
35083 extern const char xen_hypervisor_callback[];
35084 extern const char xen_failsafe_callback[];
35085
35086-extern void *xen_initial_gdt;
35087-
35088 struct trap_info;
35089 void xen_copy_trap_info(struct trap_info *traps);
35090
35091diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
35092index 525bd3d..ef888b1 100644
35093--- a/arch/xtensa/variants/dc232b/include/variant/core.h
35094+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
35095@@ -119,9 +119,9 @@
35096 ----------------------------------------------------------------------*/
35097
35098 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
35099-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
35100 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
35101 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
35102+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35103
35104 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
35105 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
35106diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
35107index 2f33760..835e50a 100644
35108--- a/arch/xtensa/variants/fsf/include/variant/core.h
35109+++ b/arch/xtensa/variants/fsf/include/variant/core.h
35110@@ -11,6 +11,7 @@
35111 #ifndef _XTENSA_CORE_H
35112 #define _XTENSA_CORE_H
35113
35114+#include <linux/const.h>
35115
35116 /****************************************************************************
35117 Parameters Useful for Any Code, USER or PRIVILEGED
35118@@ -112,9 +113,9 @@
35119 ----------------------------------------------------------------------*/
35120
35121 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35122-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35123 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35124 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35125+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35126
35127 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
35128 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
35129diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
35130index af00795..2bb8105 100644
35131--- a/arch/xtensa/variants/s6000/include/variant/core.h
35132+++ b/arch/xtensa/variants/s6000/include/variant/core.h
35133@@ -11,6 +11,7 @@
35134 #ifndef _XTENSA_CORE_CONFIGURATION_H
35135 #define _XTENSA_CORE_CONFIGURATION_H
35136
35137+#include <linux/const.h>
35138
35139 /****************************************************************************
35140 Parameters Useful for Any Code, USER or PRIVILEGED
35141@@ -118,9 +119,9 @@
35142 ----------------------------------------------------------------------*/
35143
35144 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35145-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35146 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35147 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35148+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35149
35150 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
35151 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
35152diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
35153index 4e491d9..c8e18e4 100644
35154--- a/block/blk-cgroup.c
35155+++ b/block/blk-cgroup.c
35156@@ -812,7 +812,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
35157 static struct cgroup_subsys_state *
35158 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
35159 {
35160- static atomic64_t id_seq = ATOMIC64_INIT(0);
35161+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
35162 struct blkcg *blkcg;
35163
35164 if (!parent_css) {
35165@@ -826,7 +826,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
35166
35167 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
35168 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
35169- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
35170+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
35171 done:
35172 spin_lock_init(&blkcg->lock);
35173 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
35174diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
35175index 1855bf5..af12b06 100644
35176--- a/block/blk-iopoll.c
35177+++ b/block/blk-iopoll.c
35178@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
35179 }
35180 EXPORT_SYMBOL(blk_iopoll_complete);
35181
35182-static void blk_iopoll_softirq(struct softirq_action *h)
35183+static __latent_entropy void blk_iopoll_softirq(void)
35184 {
35185 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
35186 int rearm = 0, budget = blk_iopoll_budget;
35187diff --git a/block/blk-map.c b/block/blk-map.c
35188index 623e1cd..ca1e109 100644
35189--- a/block/blk-map.c
35190+++ b/block/blk-map.c
35191@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
35192 if (!len || !kbuf)
35193 return -EINVAL;
35194
35195- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
35196+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
35197 if (do_copy)
35198 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
35199 else
35200diff --git a/block/blk-softirq.c b/block/blk-softirq.c
35201index 57790c1..5e988dd 100644
35202--- a/block/blk-softirq.c
35203+++ b/block/blk-softirq.c
35204@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
35205 * Softirq action handler - move entries to local list and loop over them
35206 * while passing them to the queue registered handler.
35207 */
35208-static void blk_done_softirq(struct softirq_action *h)
35209+static __latent_entropy void blk_done_softirq(void)
35210 {
35211 struct list_head *cpu_list, local_list;
35212
35213diff --git a/block/bsg.c b/block/bsg.c
35214index 420a5a9..23834aa 100644
35215--- a/block/bsg.c
35216+++ b/block/bsg.c
35217@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
35218 struct sg_io_v4 *hdr, struct bsg_device *bd,
35219 fmode_t has_write_perm)
35220 {
35221+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35222+ unsigned char *cmdptr;
35223+
35224 if (hdr->request_len > BLK_MAX_CDB) {
35225 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
35226 if (!rq->cmd)
35227 return -ENOMEM;
35228- }
35229+ cmdptr = rq->cmd;
35230+ } else
35231+ cmdptr = tmpcmd;
35232
35233- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
35234+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
35235 hdr->request_len))
35236 return -EFAULT;
35237
35238+ if (cmdptr != rq->cmd)
35239+ memcpy(rq->cmd, cmdptr, hdr->request_len);
35240+
35241 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
35242 if (blk_verify_command(rq->cmd, has_write_perm))
35243 return -EPERM;
35244diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
35245index fbd5a67..f24fd95 100644
35246--- a/block/compat_ioctl.c
35247+++ b/block/compat_ioctl.c
35248@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
35249 cgc = compat_alloc_user_space(sizeof(*cgc));
35250 cgc32 = compat_ptr(arg);
35251
35252- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
35253+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
35254 get_user(data, &cgc32->buffer) ||
35255 put_user(compat_ptr(data), &cgc->buffer) ||
35256 copy_in_user(&cgc->buflen, &cgc32->buflen,
35257@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
35258 err |= __get_user(f->spec1, &uf->spec1);
35259 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
35260 err |= __get_user(name, &uf->name);
35261- f->name = compat_ptr(name);
35262+ f->name = (void __force_kernel *)compat_ptr(name);
35263 if (err) {
35264 err = -EFAULT;
35265 goto out;
35266diff --git a/block/genhd.c b/block/genhd.c
35267index 791f419..89f21c4 100644
35268--- a/block/genhd.c
35269+++ b/block/genhd.c
35270@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
35271
35272 /*
35273 * Register device numbers dev..(dev+range-1)
35274- * range must be nonzero
35275+ * Noop if @range is zero.
35276 * The hash chain is sorted on range, so that subranges can override.
35277 */
35278 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
35279 struct kobject *(*probe)(dev_t, int *, void *),
35280 int (*lock)(dev_t, void *), void *data)
35281 {
35282- kobj_map(bdev_map, devt, range, module, probe, lock, data);
35283+ if (range)
35284+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
35285 }
35286
35287 EXPORT_SYMBOL(blk_register_region);
35288
35289+/* undo blk_register_region(), noop if @range is zero */
35290 void blk_unregister_region(dev_t devt, unsigned long range)
35291 {
35292- kobj_unmap(bdev_map, devt, range);
35293+ if (range)
35294+ kobj_unmap(bdev_map, devt, range);
35295 }
35296
35297 EXPORT_SYMBOL(blk_unregister_region);
35298diff --git a/block/partitions/efi.c b/block/partitions/efi.c
35299index dc51f46..d5446a8 100644
35300--- a/block/partitions/efi.c
35301+++ b/block/partitions/efi.c
35302@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
35303 if (!gpt)
35304 return NULL;
35305
35306+ if (!le32_to_cpu(gpt->num_partition_entries))
35307+ return NULL;
35308+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
35309+ if (!pte)
35310+ return NULL;
35311+
35312 count = le32_to_cpu(gpt->num_partition_entries) *
35313 le32_to_cpu(gpt->sizeof_partition_entry);
35314- if (!count)
35315- return NULL;
35316- pte = kmalloc(count, GFP_KERNEL);
35317- if (!pte)
35318- return NULL;
35319-
35320 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
35321 (u8 *) pte, count) < count) {
35322 kfree(pte);
35323diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
35324index 625e3e4..b5339f9 100644
35325--- a/block/scsi_ioctl.c
35326+++ b/block/scsi_ioctl.c
35327@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
35328 return put_user(0, p);
35329 }
35330
35331-static int sg_get_timeout(struct request_queue *q)
35332+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
35333 {
35334 return jiffies_to_clock_t(q->sg_timeout);
35335 }
35336@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
35337 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
35338 struct sg_io_hdr *hdr, fmode_t mode)
35339 {
35340- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
35341+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35342+ unsigned char *cmdptr;
35343+
35344+ if (rq->cmd != rq->__cmd)
35345+ cmdptr = rq->cmd;
35346+ else
35347+ cmdptr = tmpcmd;
35348+
35349+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
35350 return -EFAULT;
35351+
35352+ if (cmdptr != rq->cmd)
35353+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
35354+
35355 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
35356 return -EPERM;
35357
35358@@ -415,6 +427,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35359 int err;
35360 unsigned int in_len, out_len, bytes, opcode, cmdlen;
35361 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
35362+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35363+ unsigned char *cmdptr;
35364
35365 if (!sic)
35366 return -EINVAL;
35367@@ -448,9 +462,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35368 */
35369 err = -EFAULT;
35370 rq->cmd_len = cmdlen;
35371- if (copy_from_user(rq->cmd, sic->data, cmdlen))
35372+
35373+ if (rq->cmd != rq->__cmd)
35374+ cmdptr = rq->cmd;
35375+ else
35376+ cmdptr = tmpcmd;
35377+
35378+ if (copy_from_user(cmdptr, sic->data, cmdlen))
35379 goto error;
35380
35381+ if (rq->cmd != cmdptr)
35382+ memcpy(rq->cmd, cmdptr, cmdlen);
35383+
35384 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
35385 goto error;
35386
35387diff --git a/crypto/cryptd.c b/crypto/cryptd.c
35388index 7bdd61b..afec999 100644
35389--- a/crypto/cryptd.c
35390+++ b/crypto/cryptd.c
35391@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
35392
35393 struct cryptd_blkcipher_request_ctx {
35394 crypto_completion_t complete;
35395-};
35396+} __no_const;
35397
35398 struct cryptd_hash_ctx {
35399 struct crypto_shash *child;
35400@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
35401
35402 struct cryptd_aead_request_ctx {
35403 crypto_completion_t complete;
35404-};
35405+} __no_const;
35406
35407 static void cryptd_queue_worker(struct work_struct *work);
35408
35409diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
35410index f8c920c..ab2cb5a 100644
35411--- a/crypto/pcrypt.c
35412+++ b/crypto/pcrypt.c
35413@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
35414 int ret;
35415
35416 pinst->kobj.kset = pcrypt_kset;
35417- ret = kobject_add(&pinst->kobj, NULL, name);
35418+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
35419 if (!ret)
35420 kobject_uevent(&pinst->kobj, KOBJ_ADD);
35421
35422diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
35423index 15dddc1..b61cf0c 100644
35424--- a/drivers/acpi/acpica/hwxfsleep.c
35425+++ b/drivers/acpi/acpica/hwxfsleep.c
35426@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
35427 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
35428
35429 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
35430- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
35431- acpi_hw_extended_sleep},
35432- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
35433- acpi_hw_extended_wake_prep},
35434- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
35435+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
35436+ .extended_function = acpi_hw_extended_sleep},
35437+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
35438+ .extended_function = acpi_hw_extended_wake_prep},
35439+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
35440+ .extended_function = acpi_hw_extended_wake}
35441 };
35442
35443 /*
35444diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
35445index 21ba34a..cb05966 100644
35446--- a/drivers/acpi/apei/apei-internal.h
35447+++ b/drivers/acpi/apei/apei-internal.h
35448@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
35449 struct apei_exec_ins_type {
35450 u32 flags;
35451 apei_exec_ins_func_t run;
35452-};
35453+} __do_const;
35454
35455 struct apei_exec_context {
35456 u32 ip;
35457diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
35458index a30bc31..b91c4d5 100644
35459--- a/drivers/acpi/apei/ghes.c
35460+++ b/drivers/acpi/apei/ghes.c
35461@@ -498,7 +498,7 @@ static void __ghes_print_estatus(const char *pfx,
35462 const struct acpi_hest_generic *generic,
35463 const struct acpi_generic_status *estatus)
35464 {
35465- static atomic_t seqno;
35466+ static atomic_unchecked_t seqno;
35467 unsigned int curr_seqno;
35468 char pfx_seq[64];
35469
35470@@ -509,7 +509,7 @@ static void __ghes_print_estatus(const char *pfx,
35471 else
35472 pfx = KERN_ERR;
35473 }
35474- curr_seqno = atomic_inc_return(&seqno);
35475+ curr_seqno = atomic_inc_return_unchecked(&seqno);
35476 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
35477 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
35478 pfx_seq, generic->header.source_id);
35479diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
35480index a83e3c6..c3d617f 100644
35481--- a/drivers/acpi/bgrt.c
35482+++ b/drivers/acpi/bgrt.c
35483@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
35484 if (!bgrt_image)
35485 return -ENODEV;
35486
35487- bin_attr_image.private = bgrt_image;
35488- bin_attr_image.size = bgrt_image_size;
35489+ pax_open_kernel();
35490+ *(void **)&bin_attr_image.private = bgrt_image;
35491+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
35492+ pax_close_kernel();
35493
35494 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
35495 if (!bgrt_kobj)
35496diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
35497index 078c4f7..410e272 100644
35498--- a/drivers/acpi/blacklist.c
35499+++ b/drivers/acpi/blacklist.c
35500@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
35501 u32 is_critical_error;
35502 };
35503
35504-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
35505+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
35506
35507 /*
35508 * POLICY: If *anything* doesn't work, put it on the blacklist.
35509@@ -164,7 +164,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
35510 return 0;
35511 }
35512
35513-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
35514+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
35515 {
35516 .callback = dmi_disable_osi_vista,
35517 .ident = "Fujitsu Siemens",
35518diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
35519index 12b62f2..dc2aac8 100644
35520--- a/drivers/acpi/custom_method.c
35521+++ b/drivers/acpi/custom_method.c
35522@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
35523 struct acpi_table_header table;
35524 acpi_status status;
35525
35526+#ifdef CONFIG_GRKERNSEC_KMEM
35527+ return -EPERM;
35528+#endif
35529+
35530 if (!(*ppos)) {
35531 /* parse the table header to get the table length */
35532 if (count <= sizeof(struct acpi_table_header))
35533diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
35534index 644516d..643937e 100644
35535--- a/drivers/acpi/processor_idle.c
35536+++ b/drivers/acpi/processor_idle.c
35537@@ -963,7 +963,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
35538 {
35539 int i, count = CPUIDLE_DRIVER_STATE_START;
35540 struct acpi_processor_cx *cx;
35541- struct cpuidle_state *state;
35542+ cpuidle_state_no_const *state;
35543 struct cpuidle_driver *drv = &acpi_idle_driver;
35544
35545 if (!pr->flags.power_setup_done)
35546diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
35547index 6dbc3ca..b8b59a0 100644
35548--- a/drivers/acpi/sysfs.c
35549+++ b/drivers/acpi/sysfs.c
35550@@ -425,11 +425,11 @@ static u32 num_counters;
35551 static struct attribute **all_attrs;
35552 static u32 acpi_gpe_count;
35553
35554-static struct attribute_group interrupt_stats_attr_group = {
35555+static attribute_group_no_const interrupt_stats_attr_group = {
35556 .name = "interrupts",
35557 };
35558
35559-static struct kobj_attribute *counter_attrs;
35560+static kobj_attribute_no_const *counter_attrs;
35561
35562 static void delete_gpe_attr_array(void)
35563 {
35564diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
35565index c482f8c..c832240 100644
35566--- a/drivers/ata/libahci.c
35567+++ b/drivers/ata/libahci.c
35568@@ -1239,7 +1239,7 @@ int ahci_kick_engine(struct ata_port *ap)
35569 }
35570 EXPORT_SYMBOL_GPL(ahci_kick_engine);
35571
35572-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
35573+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
35574 struct ata_taskfile *tf, int is_cmd, u16 flags,
35575 unsigned long timeout_msec)
35576 {
35577diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
35578index 1a3dbd1..dfc6e5c 100644
35579--- a/drivers/ata/libata-core.c
35580+++ b/drivers/ata/libata-core.c
35581@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
35582 static void ata_dev_xfermask(struct ata_device *dev);
35583 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
35584
35585-atomic_t ata_print_id = ATOMIC_INIT(0);
35586+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
35587
35588 struct ata_force_param {
35589 const char *name;
35590@@ -4850,7 +4850,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
35591 struct ata_port *ap;
35592 unsigned int tag;
35593
35594- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35595+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35596 ap = qc->ap;
35597
35598 qc->flags = 0;
35599@@ -4866,7 +4866,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
35600 struct ata_port *ap;
35601 struct ata_link *link;
35602
35603- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35604+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35605 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
35606 ap = qc->ap;
35607 link = qc->dev->link;
35608@@ -5985,6 +5985,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
35609 return;
35610
35611 spin_lock(&lock);
35612+ pax_open_kernel();
35613
35614 for (cur = ops->inherits; cur; cur = cur->inherits) {
35615 void **inherit = (void **)cur;
35616@@ -5998,8 +5999,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
35617 if (IS_ERR(*pp))
35618 *pp = NULL;
35619
35620- ops->inherits = NULL;
35621+ *(struct ata_port_operations **)&ops->inherits = NULL;
35622
35623+ pax_close_kernel();
35624 spin_unlock(&lock);
35625 }
35626
35627@@ -6192,7 +6194,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
35628
35629 /* give ports names and add SCSI hosts */
35630 for (i = 0; i < host->n_ports; i++) {
35631- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
35632+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
35633 host->ports[i]->local_port_no = i + 1;
35634 }
35635
35636diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
35637index ef8567d..8bdbd03 100644
35638--- a/drivers/ata/libata-scsi.c
35639+++ b/drivers/ata/libata-scsi.c
35640@@ -4147,7 +4147,7 @@ int ata_sas_port_init(struct ata_port *ap)
35641
35642 if (rc)
35643 return rc;
35644- ap->print_id = atomic_inc_return(&ata_print_id);
35645+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
35646 return 0;
35647 }
35648 EXPORT_SYMBOL_GPL(ata_sas_port_init);
35649diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
35650index 45b5ab3..98446b8 100644
35651--- a/drivers/ata/libata.h
35652+++ b/drivers/ata/libata.h
35653@@ -53,7 +53,7 @@ enum {
35654 ATA_DNXFER_QUIET = (1 << 31),
35655 };
35656
35657-extern atomic_t ata_print_id;
35658+extern atomic_unchecked_t ata_print_id;
35659 extern int atapi_passthru16;
35660 extern int libata_fua;
35661 extern int libata_noacpi;
35662diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
35663index 73492dd..ca2bff5 100644
35664--- a/drivers/ata/pata_arasan_cf.c
35665+++ b/drivers/ata/pata_arasan_cf.c
35666@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
35667 /* Handle platform specific quirks */
35668 if (quirk) {
35669 if (quirk & CF_BROKEN_PIO) {
35670- ap->ops->set_piomode = NULL;
35671+ pax_open_kernel();
35672+ *(void **)&ap->ops->set_piomode = NULL;
35673+ pax_close_kernel();
35674 ap->pio_mask = 0;
35675 }
35676 if (quirk & CF_BROKEN_MWDMA)
35677diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
35678index f9b983a..887b9d8 100644
35679--- a/drivers/atm/adummy.c
35680+++ b/drivers/atm/adummy.c
35681@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
35682 vcc->pop(vcc, skb);
35683 else
35684 dev_kfree_skb_any(skb);
35685- atomic_inc(&vcc->stats->tx);
35686+ atomic_inc_unchecked(&vcc->stats->tx);
35687
35688 return 0;
35689 }
35690diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
35691index 62a7607..cc4be104 100644
35692--- a/drivers/atm/ambassador.c
35693+++ b/drivers/atm/ambassador.c
35694@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
35695 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
35696
35697 // VC layer stats
35698- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35699+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35700
35701 // free the descriptor
35702 kfree (tx_descr);
35703@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
35704 dump_skb ("<<<", vc, skb);
35705
35706 // VC layer stats
35707- atomic_inc(&atm_vcc->stats->rx);
35708+ atomic_inc_unchecked(&atm_vcc->stats->rx);
35709 __net_timestamp(skb);
35710 // end of our responsibility
35711 atm_vcc->push (atm_vcc, skb);
35712@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
35713 } else {
35714 PRINTK (KERN_INFO, "dropped over-size frame");
35715 // should we count this?
35716- atomic_inc(&atm_vcc->stats->rx_drop);
35717+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35718 }
35719
35720 } else {
35721@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
35722 }
35723
35724 if (check_area (skb->data, skb->len)) {
35725- atomic_inc(&atm_vcc->stats->tx_err);
35726+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
35727 return -ENOMEM; // ?
35728 }
35729
35730diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
35731index 0e3f8f9..765a7a5 100644
35732--- a/drivers/atm/atmtcp.c
35733+++ b/drivers/atm/atmtcp.c
35734@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35735 if (vcc->pop) vcc->pop(vcc,skb);
35736 else dev_kfree_skb(skb);
35737 if (dev_data) return 0;
35738- atomic_inc(&vcc->stats->tx_err);
35739+ atomic_inc_unchecked(&vcc->stats->tx_err);
35740 return -ENOLINK;
35741 }
35742 size = skb->len+sizeof(struct atmtcp_hdr);
35743@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35744 if (!new_skb) {
35745 if (vcc->pop) vcc->pop(vcc,skb);
35746 else dev_kfree_skb(skb);
35747- atomic_inc(&vcc->stats->tx_err);
35748+ atomic_inc_unchecked(&vcc->stats->tx_err);
35749 return -ENOBUFS;
35750 }
35751 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
35752@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35753 if (vcc->pop) vcc->pop(vcc,skb);
35754 else dev_kfree_skb(skb);
35755 out_vcc->push(out_vcc,new_skb);
35756- atomic_inc(&vcc->stats->tx);
35757- atomic_inc(&out_vcc->stats->rx);
35758+ atomic_inc_unchecked(&vcc->stats->tx);
35759+ atomic_inc_unchecked(&out_vcc->stats->rx);
35760 return 0;
35761 }
35762
35763@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
35764 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
35765 read_unlock(&vcc_sklist_lock);
35766 if (!out_vcc) {
35767- atomic_inc(&vcc->stats->tx_err);
35768+ atomic_inc_unchecked(&vcc->stats->tx_err);
35769 goto done;
35770 }
35771 skb_pull(skb,sizeof(struct atmtcp_hdr));
35772@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
35773 __net_timestamp(new_skb);
35774 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
35775 out_vcc->push(out_vcc,new_skb);
35776- atomic_inc(&vcc->stats->tx);
35777- atomic_inc(&out_vcc->stats->rx);
35778+ atomic_inc_unchecked(&vcc->stats->tx);
35779+ atomic_inc_unchecked(&out_vcc->stats->rx);
35780 done:
35781 if (vcc->pop) vcc->pop(vcc,skb);
35782 else dev_kfree_skb(skb);
35783diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
35784index b1955ba..b179940 100644
35785--- a/drivers/atm/eni.c
35786+++ b/drivers/atm/eni.c
35787@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
35788 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
35789 vcc->dev->number);
35790 length = 0;
35791- atomic_inc(&vcc->stats->rx_err);
35792+ atomic_inc_unchecked(&vcc->stats->rx_err);
35793 }
35794 else {
35795 length = ATM_CELL_SIZE-1; /* no HEC */
35796@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
35797 size);
35798 }
35799 eff = length = 0;
35800- atomic_inc(&vcc->stats->rx_err);
35801+ atomic_inc_unchecked(&vcc->stats->rx_err);
35802 }
35803 else {
35804 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
35805@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
35806 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
35807 vcc->dev->number,vcc->vci,length,size << 2,descr);
35808 length = eff = 0;
35809- atomic_inc(&vcc->stats->rx_err);
35810+ atomic_inc_unchecked(&vcc->stats->rx_err);
35811 }
35812 }
35813 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
35814@@ -767,7 +767,7 @@ rx_dequeued++;
35815 vcc->push(vcc,skb);
35816 pushed++;
35817 }
35818- atomic_inc(&vcc->stats->rx);
35819+ atomic_inc_unchecked(&vcc->stats->rx);
35820 }
35821 wake_up(&eni_dev->rx_wait);
35822 }
35823@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
35824 PCI_DMA_TODEVICE);
35825 if (vcc->pop) vcc->pop(vcc,skb);
35826 else dev_kfree_skb_irq(skb);
35827- atomic_inc(&vcc->stats->tx);
35828+ atomic_inc_unchecked(&vcc->stats->tx);
35829 wake_up(&eni_dev->tx_wait);
35830 dma_complete++;
35831 }
35832diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
35833index b41c948..a002b17 100644
35834--- a/drivers/atm/firestream.c
35835+++ b/drivers/atm/firestream.c
35836@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
35837 }
35838 }
35839
35840- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35841+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35842
35843 fs_dprintk (FS_DEBUG_TXMEM, "i");
35844 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
35845@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35846 #endif
35847 skb_put (skb, qe->p1 & 0xffff);
35848 ATM_SKB(skb)->vcc = atm_vcc;
35849- atomic_inc(&atm_vcc->stats->rx);
35850+ atomic_inc_unchecked(&atm_vcc->stats->rx);
35851 __net_timestamp(skb);
35852 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
35853 atm_vcc->push (atm_vcc, skb);
35854@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35855 kfree (pe);
35856 }
35857 if (atm_vcc)
35858- atomic_inc(&atm_vcc->stats->rx_drop);
35859+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35860 break;
35861 case 0x1f: /* Reassembly abort: no buffers. */
35862 /* Silently increment error counter. */
35863 if (atm_vcc)
35864- atomic_inc(&atm_vcc->stats->rx_drop);
35865+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35866 break;
35867 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
35868 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
35869diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
35870index 204814e..cede831 100644
35871--- a/drivers/atm/fore200e.c
35872+++ b/drivers/atm/fore200e.c
35873@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
35874 #endif
35875 /* check error condition */
35876 if (*entry->status & STATUS_ERROR)
35877- atomic_inc(&vcc->stats->tx_err);
35878+ atomic_inc_unchecked(&vcc->stats->tx_err);
35879 else
35880- atomic_inc(&vcc->stats->tx);
35881+ atomic_inc_unchecked(&vcc->stats->tx);
35882 }
35883 }
35884
35885@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35886 if (skb == NULL) {
35887 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
35888
35889- atomic_inc(&vcc->stats->rx_drop);
35890+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35891 return -ENOMEM;
35892 }
35893
35894@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35895
35896 dev_kfree_skb_any(skb);
35897
35898- atomic_inc(&vcc->stats->rx_drop);
35899+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35900 return -ENOMEM;
35901 }
35902
35903 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35904
35905 vcc->push(vcc, skb);
35906- atomic_inc(&vcc->stats->rx);
35907+ atomic_inc_unchecked(&vcc->stats->rx);
35908
35909 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35910
35911@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
35912 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
35913 fore200e->atm_dev->number,
35914 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
35915- atomic_inc(&vcc->stats->rx_err);
35916+ atomic_inc_unchecked(&vcc->stats->rx_err);
35917 }
35918 }
35919
35920@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
35921 goto retry_here;
35922 }
35923
35924- atomic_inc(&vcc->stats->tx_err);
35925+ atomic_inc_unchecked(&vcc->stats->tx_err);
35926
35927 fore200e->tx_sat++;
35928 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
35929diff --git a/drivers/atm/he.c b/drivers/atm/he.c
35930index 8557adc..3fb5d55 100644
35931--- a/drivers/atm/he.c
35932+++ b/drivers/atm/he.c
35933@@ -1691,7 +1691,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35934
35935 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
35936 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
35937- atomic_inc(&vcc->stats->rx_drop);
35938+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35939 goto return_host_buffers;
35940 }
35941
35942@@ -1718,7 +1718,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35943 RBRQ_LEN_ERR(he_dev->rbrq_head)
35944 ? "LEN_ERR" : "",
35945 vcc->vpi, vcc->vci);
35946- atomic_inc(&vcc->stats->rx_err);
35947+ atomic_inc_unchecked(&vcc->stats->rx_err);
35948 goto return_host_buffers;
35949 }
35950
35951@@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35952 vcc->push(vcc, skb);
35953 spin_lock(&he_dev->global_lock);
35954
35955- atomic_inc(&vcc->stats->rx);
35956+ atomic_inc_unchecked(&vcc->stats->rx);
35957
35958 return_host_buffers:
35959 ++pdus_assembled;
35960@@ -2096,7 +2096,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
35961 tpd->vcc->pop(tpd->vcc, tpd->skb);
35962 else
35963 dev_kfree_skb_any(tpd->skb);
35964- atomic_inc(&tpd->vcc->stats->tx_err);
35965+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
35966 }
35967 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
35968 return;
35969@@ -2508,7 +2508,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35970 vcc->pop(vcc, skb);
35971 else
35972 dev_kfree_skb_any(skb);
35973- atomic_inc(&vcc->stats->tx_err);
35974+ atomic_inc_unchecked(&vcc->stats->tx_err);
35975 return -EINVAL;
35976 }
35977
35978@@ -2519,7 +2519,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35979 vcc->pop(vcc, skb);
35980 else
35981 dev_kfree_skb_any(skb);
35982- atomic_inc(&vcc->stats->tx_err);
35983+ atomic_inc_unchecked(&vcc->stats->tx_err);
35984 return -EINVAL;
35985 }
35986 #endif
35987@@ -2531,7 +2531,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35988 vcc->pop(vcc, skb);
35989 else
35990 dev_kfree_skb_any(skb);
35991- atomic_inc(&vcc->stats->tx_err);
35992+ atomic_inc_unchecked(&vcc->stats->tx_err);
35993 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35994 return -ENOMEM;
35995 }
35996@@ -2573,7 +2573,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35997 vcc->pop(vcc, skb);
35998 else
35999 dev_kfree_skb_any(skb);
36000- atomic_inc(&vcc->stats->tx_err);
36001+ atomic_inc_unchecked(&vcc->stats->tx_err);
36002 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36003 return -ENOMEM;
36004 }
36005@@ -2604,7 +2604,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36006 __enqueue_tpd(he_dev, tpd, cid);
36007 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36008
36009- atomic_inc(&vcc->stats->tx);
36010+ atomic_inc_unchecked(&vcc->stats->tx);
36011
36012 return 0;
36013 }
36014diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
36015index 1dc0519..1aadaf7 100644
36016--- a/drivers/atm/horizon.c
36017+++ b/drivers/atm/horizon.c
36018@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
36019 {
36020 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36021 // VC layer stats
36022- atomic_inc(&vcc->stats->rx);
36023+ atomic_inc_unchecked(&vcc->stats->rx);
36024 __net_timestamp(skb);
36025 // end of our responsibility
36026 vcc->push (vcc, skb);
36027@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36028 dev->tx_iovec = NULL;
36029
36030 // VC layer stats
36031- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36032+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36033
36034 // free the skb
36035 hrz_kfree_skb (skb);
36036diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36037index 1bdf104..9dc44b1 100644
36038--- a/drivers/atm/idt77252.c
36039+++ b/drivers/atm/idt77252.c
36040@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36041 else
36042 dev_kfree_skb(skb);
36043
36044- atomic_inc(&vcc->stats->tx);
36045+ atomic_inc_unchecked(&vcc->stats->tx);
36046 }
36047
36048 atomic_dec(&scq->used);
36049@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36050 if ((sb = dev_alloc_skb(64)) == NULL) {
36051 printk("%s: Can't allocate buffers for aal0.\n",
36052 card->name);
36053- atomic_add(i, &vcc->stats->rx_drop);
36054+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36055 break;
36056 }
36057 if (!atm_charge(vcc, sb->truesize)) {
36058 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
36059 card->name);
36060- atomic_add(i - 1, &vcc->stats->rx_drop);
36061+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
36062 dev_kfree_skb(sb);
36063 break;
36064 }
36065@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36066 ATM_SKB(sb)->vcc = vcc;
36067 __net_timestamp(sb);
36068 vcc->push(vcc, sb);
36069- atomic_inc(&vcc->stats->rx);
36070+ atomic_inc_unchecked(&vcc->stats->rx);
36071
36072 cell += ATM_CELL_PAYLOAD;
36073 }
36074@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36075 "(CDC: %08x)\n",
36076 card->name, len, rpp->len, readl(SAR_REG_CDC));
36077 recycle_rx_pool_skb(card, rpp);
36078- atomic_inc(&vcc->stats->rx_err);
36079+ atomic_inc_unchecked(&vcc->stats->rx_err);
36080 return;
36081 }
36082 if (stat & SAR_RSQE_CRC) {
36083 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
36084 recycle_rx_pool_skb(card, rpp);
36085- atomic_inc(&vcc->stats->rx_err);
36086+ atomic_inc_unchecked(&vcc->stats->rx_err);
36087 return;
36088 }
36089 if (skb_queue_len(&rpp->queue) > 1) {
36090@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36091 RXPRINTK("%s: Can't alloc RX skb.\n",
36092 card->name);
36093 recycle_rx_pool_skb(card, rpp);
36094- atomic_inc(&vcc->stats->rx_err);
36095+ atomic_inc_unchecked(&vcc->stats->rx_err);
36096 return;
36097 }
36098 if (!atm_charge(vcc, skb->truesize)) {
36099@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36100 __net_timestamp(skb);
36101
36102 vcc->push(vcc, skb);
36103- atomic_inc(&vcc->stats->rx);
36104+ atomic_inc_unchecked(&vcc->stats->rx);
36105
36106 return;
36107 }
36108@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36109 __net_timestamp(skb);
36110
36111 vcc->push(vcc, skb);
36112- atomic_inc(&vcc->stats->rx);
36113+ atomic_inc_unchecked(&vcc->stats->rx);
36114
36115 if (skb->truesize > SAR_FB_SIZE_3)
36116 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
36117@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
36118 if (vcc->qos.aal != ATM_AAL0) {
36119 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
36120 card->name, vpi, vci);
36121- atomic_inc(&vcc->stats->rx_drop);
36122+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36123 goto drop;
36124 }
36125
36126 if ((sb = dev_alloc_skb(64)) == NULL) {
36127 printk("%s: Can't allocate buffers for AAL0.\n",
36128 card->name);
36129- atomic_inc(&vcc->stats->rx_err);
36130+ atomic_inc_unchecked(&vcc->stats->rx_err);
36131 goto drop;
36132 }
36133
36134@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
36135 ATM_SKB(sb)->vcc = vcc;
36136 __net_timestamp(sb);
36137 vcc->push(vcc, sb);
36138- atomic_inc(&vcc->stats->rx);
36139+ atomic_inc_unchecked(&vcc->stats->rx);
36140
36141 drop:
36142 skb_pull(queue, 64);
36143@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36144
36145 if (vc == NULL) {
36146 printk("%s: NULL connection in send().\n", card->name);
36147- atomic_inc(&vcc->stats->tx_err);
36148+ atomic_inc_unchecked(&vcc->stats->tx_err);
36149 dev_kfree_skb(skb);
36150 return -EINVAL;
36151 }
36152 if (!test_bit(VCF_TX, &vc->flags)) {
36153 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
36154- atomic_inc(&vcc->stats->tx_err);
36155+ atomic_inc_unchecked(&vcc->stats->tx_err);
36156 dev_kfree_skb(skb);
36157 return -EINVAL;
36158 }
36159@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36160 break;
36161 default:
36162 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
36163- atomic_inc(&vcc->stats->tx_err);
36164+ atomic_inc_unchecked(&vcc->stats->tx_err);
36165 dev_kfree_skb(skb);
36166 return -EINVAL;
36167 }
36168
36169 if (skb_shinfo(skb)->nr_frags != 0) {
36170 printk("%s: No scatter-gather yet.\n", card->name);
36171- atomic_inc(&vcc->stats->tx_err);
36172+ atomic_inc_unchecked(&vcc->stats->tx_err);
36173 dev_kfree_skb(skb);
36174 return -EINVAL;
36175 }
36176@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36177
36178 err = queue_skb(card, vc, skb, oam);
36179 if (err) {
36180- atomic_inc(&vcc->stats->tx_err);
36181+ atomic_inc_unchecked(&vcc->stats->tx_err);
36182 dev_kfree_skb(skb);
36183 return err;
36184 }
36185@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
36186 skb = dev_alloc_skb(64);
36187 if (!skb) {
36188 printk("%s: Out of memory in send_oam().\n", card->name);
36189- atomic_inc(&vcc->stats->tx_err);
36190+ atomic_inc_unchecked(&vcc->stats->tx_err);
36191 return -ENOMEM;
36192 }
36193 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
36194diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
36195index 4217f29..88f547a 100644
36196--- a/drivers/atm/iphase.c
36197+++ b/drivers/atm/iphase.c
36198@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
36199 status = (u_short) (buf_desc_ptr->desc_mode);
36200 if (status & (RX_CER | RX_PTE | RX_OFL))
36201 {
36202- atomic_inc(&vcc->stats->rx_err);
36203+ atomic_inc_unchecked(&vcc->stats->rx_err);
36204 IF_ERR(printk("IA: bad packet, dropping it");)
36205 if (status & RX_CER) {
36206 IF_ERR(printk(" cause: packet CRC error\n");)
36207@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
36208 len = dma_addr - buf_addr;
36209 if (len > iadev->rx_buf_sz) {
36210 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
36211- atomic_inc(&vcc->stats->rx_err);
36212+ atomic_inc_unchecked(&vcc->stats->rx_err);
36213 goto out_free_desc;
36214 }
36215
36216@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36217 ia_vcc = INPH_IA_VCC(vcc);
36218 if (ia_vcc == NULL)
36219 {
36220- atomic_inc(&vcc->stats->rx_err);
36221+ atomic_inc_unchecked(&vcc->stats->rx_err);
36222 atm_return(vcc, skb->truesize);
36223 dev_kfree_skb_any(skb);
36224 goto INCR_DLE;
36225@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36226 if ((length > iadev->rx_buf_sz) || (length >
36227 (skb->len - sizeof(struct cpcs_trailer))))
36228 {
36229- atomic_inc(&vcc->stats->rx_err);
36230+ atomic_inc_unchecked(&vcc->stats->rx_err);
36231 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
36232 length, skb->len);)
36233 atm_return(vcc, skb->truesize);
36234@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36235
36236 IF_RX(printk("rx_dle_intr: skb push");)
36237 vcc->push(vcc,skb);
36238- atomic_inc(&vcc->stats->rx);
36239+ atomic_inc_unchecked(&vcc->stats->rx);
36240 iadev->rx_pkt_cnt++;
36241 }
36242 INCR_DLE:
36243@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
36244 {
36245 struct k_sonet_stats *stats;
36246 stats = &PRIV(_ia_dev[board])->sonet_stats;
36247- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
36248- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
36249- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
36250- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
36251- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
36252- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
36253- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
36254- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
36255- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
36256+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
36257+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
36258+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
36259+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
36260+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
36261+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
36262+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
36263+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
36264+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
36265 }
36266 ia_cmds.status = 0;
36267 break;
36268@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36269 if ((desc == 0) || (desc > iadev->num_tx_desc))
36270 {
36271 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
36272- atomic_inc(&vcc->stats->tx);
36273+ atomic_inc_unchecked(&vcc->stats->tx);
36274 if (vcc->pop)
36275 vcc->pop(vcc, skb);
36276 else
36277@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36278 ATM_DESC(skb) = vcc->vci;
36279 skb_queue_tail(&iadev->tx_dma_q, skb);
36280
36281- atomic_inc(&vcc->stats->tx);
36282+ atomic_inc_unchecked(&vcc->stats->tx);
36283 iadev->tx_pkt_cnt++;
36284 /* Increment transaction counter */
36285 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
36286
36287 #if 0
36288 /* add flow control logic */
36289- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
36290+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
36291 if (iavcc->vc_desc_cnt > 10) {
36292 vcc->tx_quota = vcc->tx_quota * 3 / 4;
36293 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
36294diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
36295index fa7d701..1e404c7 100644
36296--- a/drivers/atm/lanai.c
36297+++ b/drivers/atm/lanai.c
36298@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
36299 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
36300 lanai_endtx(lanai, lvcc);
36301 lanai_free_skb(lvcc->tx.atmvcc, skb);
36302- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
36303+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
36304 }
36305
36306 /* Try to fill the buffer - don't call unless there is backlog */
36307@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
36308 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
36309 __net_timestamp(skb);
36310 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
36311- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
36312+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
36313 out:
36314 lvcc->rx.buf.ptr = end;
36315 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
36316@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36317 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
36318 "vcc %d\n", lanai->number, (unsigned int) s, vci);
36319 lanai->stats.service_rxnotaal5++;
36320- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36321+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36322 return 0;
36323 }
36324 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
36325@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36326 int bytes;
36327 read_unlock(&vcc_sklist_lock);
36328 DPRINTK("got trashed rx pdu on vci %d\n", vci);
36329- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36330+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36331 lvcc->stats.x.aal5.service_trash++;
36332 bytes = (SERVICE_GET_END(s) * 16) -
36333 (((unsigned long) lvcc->rx.buf.ptr) -
36334@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36335 }
36336 if (s & SERVICE_STREAM) {
36337 read_unlock(&vcc_sklist_lock);
36338- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36339+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36340 lvcc->stats.x.aal5.service_stream++;
36341 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
36342 "PDU on VCI %d!\n", lanai->number, vci);
36343@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36344 return 0;
36345 }
36346 DPRINTK("got rx crc error on vci %d\n", vci);
36347- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36348+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36349 lvcc->stats.x.aal5.service_rxcrc++;
36350 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
36351 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
36352diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
36353index 5aca5f4..ce3a6b0 100644
36354--- a/drivers/atm/nicstar.c
36355+++ b/drivers/atm/nicstar.c
36356@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36357 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
36358 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
36359 card->index);
36360- atomic_inc(&vcc->stats->tx_err);
36361+ atomic_inc_unchecked(&vcc->stats->tx_err);
36362 dev_kfree_skb_any(skb);
36363 return -EINVAL;
36364 }
36365@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36366 if (!vc->tx) {
36367 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
36368 card->index);
36369- atomic_inc(&vcc->stats->tx_err);
36370+ atomic_inc_unchecked(&vcc->stats->tx_err);
36371 dev_kfree_skb_any(skb);
36372 return -EINVAL;
36373 }
36374@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36375 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
36376 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
36377 card->index);
36378- atomic_inc(&vcc->stats->tx_err);
36379+ atomic_inc_unchecked(&vcc->stats->tx_err);
36380 dev_kfree_skb_any(skb);
36381 return -EINVAL;
36382 }
36383
36384 if (skb_shinfo(skb)->nr_frags != 0) {
36385 printk("nicstar%d: No scatter-gather yet.\n", card->index);
36386- atomic_inc(&vcc->stats->tx_err);
36387+ atomic_inc_unchecked(&vcc->stats->tx_err);
36388 dev_kfree_skb_any(skb);
36389 return -EINVAL;
36390 }
36391@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36392 }
36393
36394 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
36395- atomic_inc(&vcc->stats->tx_err);
36396+ atomic_inc_unchecked(&vcc->stats->tx_err);
36397 dev_kfree_skb_any(skb);
36398 return -EIO;
36399 }
36400- atomic_inc(&vcc->stats->tx);
36401+ atomic_inc_unchecked(&vcc->stats->tx);
36402
36403 return 0;
36404 }
36405@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36406 printk
36407 ("nicstar%d: Can't allocate buffers for aal0.\n",
36408 card->index);
36409- atomic_add(i, &vcc->stats->rx_drop);
36410+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36411 break;
36412 }
36413 if (!atm_charge(vcc, sb->truesize)) {
36414 RXPRINTK
36415 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
36416 card->index);
36417- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36418+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36419 dev_kfree_skb_any(sb);
36420 break;
36421 }
36422@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36423 ATM_SKB(sb)->vcc = vcc;
36424 __net_timestamp(sb);
36425 vcc->push(vcc, sb);
36426- atomic_inc(&vcc->stats->rx);
36427+ atomic_inc_unchecked(&vcc->stats->rx);
36428 cell += ATM_CELL_PAYLOAD;
36429 }
36430
36431@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36432 if (iovb == NULL) {
36433 printk("nicstar%d: Out of iovec buffers.\n",
36434 card->index);
36435- atomic_inc(&vcc->stats->rx_drop);
36436+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36437 recycle_rx_buf(card, skb);
36438 return;
36439 }
36440@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36441 small or large buffer itself. */
36442 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
36443 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
36444- atomic_inc(&vcc->stats->rx_err);
36445+ atomic_inc_unchecked(&vcc->stats->rx_err);
36446 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36447 NS_MAX_IOVECS);
36448 NS_PRV_IOVCNT(iovb) = 0;
36449@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36450 ("nicstar%d: Expected a small buffer, and this is not one.\n",
36451 card->index);
36452 which_list(card, skb);
36453- atomic_inc(&vcc->stats->rx_err);
36454+ atomic_inc_unchecked(&vcc->stats->rx_err);
36455 recycle_rx_buf(card, skb);
36456 vc->rx_iov = NULL;
36457 recycle_iov_buf(card, iovb);
36458@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36459 ("nicstar%d: Expected a large buffer, and this is not one.\n",
36460 card->index);
36461 which_list(card, skb);
36462- atomic_inc(&vcc->stats->rx_err);
36463+ atomic_inc_unchecked(&vcc->stats->rx_err);
36464 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36465 NS_PRV_IOVCNT(iovb));
36466 vc->rx_iov = NULL;
36467@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36468 printk(" - PDU size mismatch.\n");
36469 else
36470 printk(".\n");
36471- atomic_inc(&vcc->stats->rx_err);
36472+ atomic_inc_unchecked(&vcc->stats->rx_err);
36473 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36474 NS_PRV_IOVCNT(iovb));
36475 vc->rx_iov = NULL;
36476@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36477 /* skb points to a small buffer */
36478 if (!atm_charge(vcc, skb->truesize)) {
36479 push_rxbufs(card, skb);
36480- atomic_inc(&vcc->stats->rx_drop);
36481+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36482 } else {
36483 skb_put(skb, len);
36484 dequeue_sm_buf(card, skb);
36485@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36486 ATM_SKB(skb)->vcc = vcc;
36487 __net_timestamp(skb);
36488 vcc->push(vcc, skb);
36489- atomic_inc(&vcc->stats->rx);
36490+ atomic_inc_unchecked(&vcc->stats->rx);
36491 }
36492 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
36493 struct sk_buff *sb;
36494@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36495 if (len <= NS_SMBUFSIZE) {
36496 if (!atm_charge(vcc, sb->truesize)) {
36497 push_rxbufs(card, sb);
36498- atomic_inc(&vcc->stats->rx_drop);
36499+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36500 } else {
36501 skb_put(sb, len);
36502 dequeue_sm_buf(card, sb);
36503@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36504 ATM_SKB(sb)->vcc = vcc;
36505 __net_timestamp(sb);
36506 vcc->push(vcc, sb);
36507- atomic_inc(&vcc->stats->rx);
36508+ atomic_inc_unchecked(&vcc->stats->rx);
36509 }
36510
36511 push_rxbufs(card, skb);
36512@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36513
36514 if (!atm_charge(vcc, skb->truesize)) {
36515 push_rxbufs(card, skb);
36516- atomic_inc(&vcc->stats->rx_drop);
36517+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36518 } else {
36519 dequeue_lg_buf(card, skb);
36520 #ifdef NS_USE_DESTRUCTORS
36521@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36522 ATM_SKB(skb)->vcc = vcc;
36523 __net_timestamp(skb);
36524 vcc->push(vcc, skb);
36525- atomic_inc(&vcc->stats->rx);
36526+ atomic_inc_unchecked(&vcc->stats->rx);
36527 }
36528
36529 push_rxbufs(card, sb);
36530@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36531 printk
36532 ("nicstar%d: Out of huge buffers.\n",
36533 card->index);
36534- atomic_inc(&vcc->stats->rx_drop);
36535+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36536 recycle_iovec_rx_bufs(card,
36537 (struct iovec *)
36538 iovb->data,
36539@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36540 card->hbpool.count++;
36541 } else
36542 dev_kfree_skb_any(hb);
36543- atomic_inc(&vcc->stats->rx_drop);
36544+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36545 } else {
36546 /* Copy the small buffer to the huge buffer */
36547 sb = (struct sk_buff *)iov->iov_base;
36548@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36549 #endif /* NS_USE_DESTRUCTORS */
36550 __net_timestamp(hb);
36551 vcc->push(vcc, hb);
36552- atomic_inc(&vcc->stats->rx);
36553+ atomic_inc_unchecked(&vcc->stats->rx);
36554 }
36555 }
36556
36557diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
36558index 32784d1..4a8434a 100644
36559--- a/drivers/atm/solos-pci.c
36560+++ b/drivers/atm/solos-pci.c
36561@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
36562 }
36563 atm_charge(vcc, skb->truesize);
36564 vcc->push(vcc, skb);
36565- atomic_inc(&vcc->stats->rx);
36566+ atomic_inc_unchecked(&vcc->stats->rx);
36567 break;
36568
36569 case PKT_STATUS:
36570@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
36571 vcc = SKB_CB(oldskb)->vcc;
36572
36573 if (vcc) {
36574- atomic_inc(&vcc->stats->tx);
36575+ atomic_inc_unchecked(&vcc->stats->tx);
36576 solos_pop(vcc, oldskb);
36577 } else {
36578 dev_kfree_skb_irq(oldskb);
36579diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
36580index 0215934..ce9f5b1 100644
36581--- a/drivers/atm/suni.c
36582+++ b/drivers/atm/suni.c
36583@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
36584
36585
36586 #define ADD_LIMITED(s,v) \
36587- atomic_add((v),&stats->s); \
36588- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
36589+ atomic_add_unchecked((v),&stats->s); \
36590+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
36591
36592
36593 static void suni_hz(unsigned long from_timer)
36594diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
36595index 5120a96..e2572bd 100644
36596--- a/drivers/atm/uPD98402.c
36597+++ b/drivers/atm/uPD98402.c
36598@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
36599 struct sonet_stats tmp;
36600 int error = 0;
36601
36602- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
36603+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
36604 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
36605 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
36606 if (zero && !error) {
36607@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
36608
36609
36610 #define ADD_LIMITED(s,v) \
36611- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
36612- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
36613- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
36614+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
36615+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
36616+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
36617
36618
36619 static void stat_event(struct atm_dev *dev)
36620@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
36621 if (reason & uPD98402_INT_PFM) stat_event(dev);
36622 if (reason & uPD98402_INT_PCO) {
36623 (void) GET(PCOCR); /* clear interrupt cause */
36624- atomic_add(GET(HECCT),
36625+ atomic_add_unchecked(GET(HECCT),
36626 &PRIV(dev)->sonet_stats.uncorr_hcs);
36627 }
36628 if ((reason & uPD98402_INT_RFO) &&
36629@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
36630 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
36631 uPD98402_INT_LOS),PIMR); /* enable them */
36632 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
36633- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
36634- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
36635- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
36636+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
36637+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
36638+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
36639 return 0;
36640 }
36641
36642diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
36643index 969c3c2..9b72956 100644
36644--- a/drivers/atm/zatm.c
36645+++ b/drivers/atm/zatm.c
36646@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
36647 }
36648 if (!size) {
36649 dev_kfree_skb_irq(skb);
36650- if (vcc) atomic_inc(&vcc->stats->rx_err);
36651+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
36652 continue;
36653 }
36654 if (!atm_charge(vcc,skb->truesize)) {
36655@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
36656 skb->len = size;
36657 ATM_SKB(skb)->vcc = vcc;
36658 vcc->push(vcc,skb);
36659- atomic_inc(&vcc->stats->rx);
36660+ atomic_inc_unchecked(&vcc->stats->rx);
36661 }
36662 zout(pos & 0xffff,MTA(mbx));
36663 #if 0 /* probably a stupid idea */
36664@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
36665 skb_queue_head(&zatm_vcc->backlog,skb);
36666 break;
36667 }
36668- atomic_inc(&vcc->stats->tx);
36669+ atomic_inc_unchecked(&vcc->stats->tx);
36670 wake_up(&zatm_vcc->tx_wait);
36671 }
36672
36673diff --git a/drivers/base/bus.c b/drivers/base/bus.c
36674index 73f6c29..b0c0e13 100644
36675--- a/drivers/base/bus.c
36676+++ b/drivers/base/bus.c
36677@@ -1115,7 +1115,7 @@ int subsys_interface_register(struct subsys_interface *sif)
36678 return -EINVAL;
36679
36680 mutex_lock(&subsys->p->mutex);
36681- list_add_tail(&sif->node, &subsys->p->interfaces);
36682+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
36683 if (sif->add_dev) {
36684 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
36685 while ((dev = subsys_dev_iter_next(&iter)))
36686@@ -1140,7 +1140,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
36687 subsys = sif->subsys;
36688
36689 mutex_lock(&subsys->p->mutex);
36690- list_del_init(&sif->node);
36691+ pax_list_del_init((struct list_head *)&sif->node);
36692 if (sif->remove_dev) {
36693 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
36694 while ((dev = subsys_dev_iter_next(&iter)))
36695diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
36696index 0f38201..6c2b444 100644
36697--- a/drivers/base/devtmpfs.c
36698+++ b/drivers/base/devtmpfs.c
36699@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
36700 if (!thread)
36701 return 0;
36702
36703- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
36704+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
36705 if (err)
36706 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
36707 else
36708@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
36709 *err = sys_unshare(CLONE_NEWNS);
36710 if (*err)
36711 goto out;
36712- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
36713+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
36714 if (*err)
36715 goto out;
36716- sys_chdir("/.."); /* will traverse into overmounted root */
36717- sys_chroot(".");
36718+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
36719+ sys_chroot((char __force_user *)".");
36720 complete(&setup_done);
36721 while (1) {
36722 spin_lock(&req_lock);
36723diff --git a/drivers/base/node.c b/drivers/base/node.c
36724index bc9f43b..29703b8 100644
36725--- a/drivers/base/node.c
36726+++ b/drivers/base/node.c
36727@@ -620,7 +620,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
36728 struct node_attr {
36729 struct device_attribute attr;
36730 enum node_states state;
36731-};
36732+} __do_const;
36733
36734 static ssize_t show_node_state(struct device *dev,
36735 struct device_attribute *attr, char *buf)
36736diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
36737index bfb8955..42c9b9a 100644
36738--- a/drivers/base/power/domain.c
36739+++ b/drivers/base/power/domain.c
36740@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
36741 {
36742 struct cpuidle_driver *cpuidle_drv;
36743 struct gpd_cpu_data *cpu_data;
36744- struct cpuidle_state *idle_state;
36745+ cpuidle_state_no_const *idle_state;
36746 int ret = 0;
36747
36748 if (IS_ERR_OR_NULL(genpd) || state < 0)
36749@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
36750 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
36751 {
36752 struct gpd_cpu_data *cpu_data;
36753- struct cpuidle_state *idle_state;
36754+ cpuidle_state_no_const *idle_state;
36755 int ret = 0;
36756
36757 if (IS_ERR_OR_NULL(genpd))
36758diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
36759index 03e089a..0e9560c 100644
36760--- a/drivers/base/power/sysfs.c
36761+++ b/drivers/base/power/sysfs.c
36762@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
36763 return -EIO;
36764 }
36765 }
36766- return sprintf(buf, p);
36767+ return sprintf(buf, "%s", p);
36768 }
36769
36770 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
36771diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
36772index 2d56f41..8830f19 100644
36773--- a/drivers/base/power/wakeup.c
36774+++ b/drivers/base/power/wakeup.c
36775@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
36776 * They need to be modified together atomically, so it's better to use one
36777 * atomic variable to hold them both.
36778 */
36779-static atomic_t combined_event_count = ATOMIC_INIT(0);
36780+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
36781
36782 #define IN_PROGRESS_BITS (sizeof(int) * 4)
36783 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
36784
36785 static void split_counters(unsigned int *cnt, unsigned int *inpr)
36786 {
36787- unsigned int comb = atomic_read(&combined_event_count);
36788+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
36789
36790 *cnt = (comb >> IN_PROGRESS_BITS);
36791 *inpr = comb & MAX_IN_PROGRESS;
36792@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
36793 ws->start_prevent_time = ws->last_time;
36794
36795 /* Increment the counter of events in progress. */
36796- cec = atomic_inc_return(&combined_event_count);
36797+ cec = atomic_inc_return_unchecked(&combined_event_count);
36798
36799 trace_wakeup_source_activate(ws->name, cec);
36800 }
36801@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
36802 * Increment the counter of registered wakeup events and decrement the
36803 * couter of wakeup events in progress simultaneously.
36804 */
36805- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
36806+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
36807 trace_wakeup_source_deactivate(ws->name, cec);
36808
36809 split_counters(&cnt, &inpr);
36810diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
36811index e8d11b6..7b1b36f 100644
36812--- a/drivers/base/syscore.c
36813+++ b/drivers/base/syscore.c
36814@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
36815 void register_syscore_ops(struct syscore_ops *ops)
36816 {
36817 mutex_lock(&syscore_ops_lock);
36818- list_add_tail(&ops->node, &syscore_ops_list);
36819+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
36820 mutex_unlock(&syscore_ops_lock);
36821 }
36822 EXPORT_SYMBOL_GPL(register_syscore_ops);
36823@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
36824 void unregister_syscore_ops(struct syscore_ops *ops)
36825 {
36826 mutex_lock(&syscore_ops_lock);
36827- list_del(&ops->node);
36828+ pax_list_del((struct list_head *)&ops->node);
36829 mutex_unlock(&syscore_ops_lock);
36830 }
36831 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
36832diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
36833index b35fc4f..c902870 100644
36834--- a/drivers/block/cciss.c
36835+++ b/drivers/block/cciss.c
36836@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
36837 while (!list_empty(&h->reqQ)) {
36838 c = list_entry(h->reqQ.next, CommandList_struct, list);
36839 /* can't do anything if fifo is full */
36840- if ((h->access.fifo_full(h))) {
36841+ if ((h->access->fifo_full(h))) {
36842 dev_warn(&h->pdev->dev, "fifo full\n");
36843 break;
36844 }
36845@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
36846 h->Qdepth--;
36847
36848 /* Tell the controller execute command */
36849- h->access.submit_command(h, c);
36850+ h->access->submit_command(h, c);
36851
36852 /* Put job onto the completed Q */
36853 addQ(&h->cmpQ, c);
36854@@ -3447,17 +3447,17 @@ startio:
36855
36856 static inline unsigned long get_next_completion(ctlr_info_t *h)
36857 {
36858- return h->access.command_completed(h);
36859+ return h->access->command_completed(h);
36860 }
36861
36862 static inline int interrupt_pending(ctlr_info_t *h)
36863 {
36864- return h->access.intr_pending(h);
36865+ return h->access->intr_pending(h);
36866 }
36867
36868 static inline long interrupt_not_for_us(ctlr_info_t *h)
36869 {
36870- return ((h->access.intr_pending(h) == 0) ||
36871+ return ((h->access->intr_pending(h) == 0) ||
36872 (h->interrupts_enabled == 0));
36873 }
36874
36875@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
36876 u32 a;
36877
36878 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36879- return h->access.command_completed(h);
36880+ return h->access->command_completed(h);
36881
36882 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36883 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36884@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
36885 trans_support & CFGTBL_Trans_use_short_tags);
36886
36887 /* Change the access methods to the performant access methods */
36888- h->access = SA5_performant_access;
36889+ h->access = &SA5_performant_access;
36890 h->transMethod = CFGTBL_Trans_Performant;
36891
36892 return;
36893@@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h)
36894 if (prod_index < 0)
36895 return -ENODEV;
36896 h->product_name = products[prod_index].product_name;
36897- h->access = *(products[prod_index].access);
36898+ h->access = products[prod_index].access;
36899
36900 if (cciss_board_disabled(h)) {
36901 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36902@@ -5059,7 +5059,7 @@ reinit_after_soft_reset:
36903 }
36904
36905 /* make sure the board interrupts are off */
36906- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36907+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36908 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
36909 if (rc)
36910 goto clean2;
36911@@ -5109,7 +5109,7 @@ reinit_after_soft_reset:
36912 * fake ones to scoop up any residual completions.
36913 */
36914 spin_lock_irqsave(&h->lock, flags);
36915- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36916+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36917 spin_unlock_irqrestore(&h->lock, flags);
36918 free_irq(h->intr[h->intr_mode], h);
36919 rc = cciss_request_irq(h, cciss_msix_discard_completions,
36920@@ -5129,9 +5129,9 @@ reinit_after_soft_reset:
36921 dev_info(&h->pdev->dev, "Board READY.\n");
36922 dev_info(&h->pdev->dev,
36923 "Waiting for stale completions to drain.\n");
36924- h->access.set_intr_mask(h, CCISS_INTR_ON);
36925+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36926 msleep(10000);
36927- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36928+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36929
36930 rc = controller_reset_failed(h->cfgtable);
36931 if (rc)
36932@@ -5154,7 +5154,7 @@ reinit_after_soft_reset:
36933 cciss_scsi_setup(h);
36934
36935 /* Turn the interrupts on so we can service requests */
36936- h->access.set_intr_mask(h, CCISS_INTR_ON);
36937+ h->access->set_intr_mask(h, CCISS_INTR_ON);
36938
36939 /* Get the firmware version */
36940 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
36941@@ -5226,7 +5226,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
36942 kfree(flush_buf);
36943 if (return_code != IO_OK)
36944 dev_warn(&h->pdev->dev, "Error flushing cache\n");
36945- h->access.set_intr_mask(h, CCISS_INTR_OFF);
36946+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
36947 free_irq(h->intr[h->intr_mode], h);
36948 }
36949
36950diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
36951index 7fda30e..2f27946 100644
36952--- a/drivers/block/cciss.h
36953+++ b/drivers/block/cciss.h
36954@@ -101,7 +101,7 @@ struct ctlr_info
36955 /* information about each logical volume */
36956 drive_info_struct *drv[CISS_MAX_LUN];
36957
36958- struct access_method access;
36959+ struct access_method *access;
36960
36961 /* queue and queue Info */
36962 struct list_head reqQ;
36963@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
36964 }
36965
36966 static struct access_method SA5_access = {
36967- SA5_submit_command,
36968- SA5_intr_mask,
36969- SA5_fifo_full,
36970- SA5_intr_pending,
36971- SA5_completed,
36972+ .submit_command = SA5_submit_command,
36973+ .set_intr_mask = SA5_intr_mask,
36974+ .fifo_full = SA5_fifo_full,
36975+ .intr_pending = SA5_intr_pending,
36976+ .command_completed = SA5_completed,
36977 };
36978
36979 static struct access_method SA5B_access = {
36980- SA5_submit_command,
36981- SA5B_intr_mask,
36982- SA5_fifo_full,
36983- SA5B_intr_pending,
36984- SA5_completed,
36985+ .submit_command = SA5_submit_command,
36986+ .set_intr_mask = SA5B_intr_mask,
36987+ .fifo_full = SA5_fifo_full,
36988+ .intr_pending = SA5B_intr_pending,
36989+ .command_completed = SA5_completed,
36990 };
36991
36992 static struct access_method SA5_performant_access = {
36993- SA5_submit_command,
36994- SA5_performant_intr_mask,
36995- SA5_fifo_full,
36996- SA5_performant_intr_pending,
36997- SA5_performant_completed,
36998+ .submit_command = SA5_submit_command,
36999+ .set_intr_mask = SA5_performant_intr_mask,
37000+ .fifo_full = SA5_fifo_full,
37001+ .intr_pending = SA5_performant_intr_pending,
37002+ .command_completed = SA5_performant_completed,
37003 };
37004
37005 struct board_type {
37006diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
37007index 2b94403..fd6ad1f 100644
37008--- a/drivers/block/cpqarray.c
37009+++ b/drivers/block/cpqarray.c
37010@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37011 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
37012 goto Enomem4;
37013 }
37014- hba[i]->access.set_intr_mask(hba[i], 0);
37015+ hba[i]->access->set_intr_mask(hba[i], 0);
37016 if (request_irq(hba[i]->intr, do_ida_intr,
37017 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
37018 {
37019@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37020 add_timer(&hba[i]->timer);
37021
37022 /* Enable IRQ now that spinlock and rate limit timer are set up */
37023- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37024+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37025
37026 for(j=0; j<NWD; j++) {
37027 struct gendisk *disk = ida_gendisk[i][j];
37028@@ -694,7 +694,7 @@ DBGINFO(
37029 for(i=0; i<NR_PRODUCTS; i++) {
37030 if (board_id == products[i].board_id) {
37031 c->product_name = products[i].product_name;
37032- c->access = *(products[i].access);
37033+ c->access = products[i].access;
37034 break;
37035 }
37036 }
37037@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
37038 hba[ctlr]->intr = intr;
37039 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
37040 hba[ctlr]->product_name = products[j].product_name;
37041- hba[ctlr]->access = *(products[j].access);
37042+ hba[ctlr]->access = products[j].access;
37043 hba[ctlr]->ctlr = ctlr;
37044 hba[ctlr]->board_id = board_id;
37045 hba[ctlr]->pci_dev = NULL; /* not PCI */
37046@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
37047
37048 while((c = h->reqQ) != NULL) {
37049 /* Can't do anything if we're busy */
37050- if (h->access.fifo_full(h) == 0)
37051+ if (h->access->fifo_full(h) == 0)
37052 return;
37053
37054 /* Get the first entry from the request Q */
37055@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
37056 h->Qdepth--;
37057
37058 /* Tell the controller to do our bidding */
37059- h->access.submit_command(h, c);
37060+ h->access->submit_command(h, c);
37061
37062 /* Get onto the completion Q */
37063 addQ(&h->cmpQ, c);
37064@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37065 unsigned long flags;
37066 __u32 a,a1;
37067
37068- istat = h->access.intr_pending(h);
37069+ istat = h->access->intr_pending(h);
37070 /* Is this interrupt for us? */
37071 if (istat == 0)
37072 return IRQ_NONE;
37073@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37074 */
37075 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
37076 if (istat & FIFO_NOT_EMPTY) {
37077- while((a = h->access.command_completed(h))) {
37078+ while((a = h->access->command_completed(h))) {
37079 a1 = a; a &= ~3;
37080 if ((c = h->cmpQ) == NULL)
37081 {
37082@@ -1448,11 +1448,11 @@ static int sendcmd(
37083 /*
37084 * Disable interrupt
37085 */
37086- info_p->access.set_intr_mask(info_p, 0);
37087+ info_p->access->set_intr_mask(info_p, 0);
37088 /* Make sure there is room in the command FIFO */
37089 /* Actually it should be completely empty at this time. */
37090 for (i = 200000; i > 0; i--) {
37091- temp = info_p->access.fifo_full(info_p);
37092+ temp = info_p->access->fifo_full(info_p);
37093 if (temp != 0) {
37094 break;
37095 }
37096@@ -1465,7 +1465,7 @@ DBG(
37097 /*
37098 * Send the cmd
37099 */
37100- info_p->access.submit_command(info_p, c);
37101+ info_p->access->submit_command(info_p, c);
37102 complete = pollcomplete(ctlr);
37103
37104 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
37105@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
37106 * we check the new geometry. Then turn interrupts back on when
37107 * we're done.
37108 */
37109- host->access.set_intr_mask(host, 0);
37110+ host->access->set_intr_mask(host, 0);
37111 getgeometry(ctlr);
37112- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
37113+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
37114
37115 for(i=0; i<NWD; i++) {
37116 struct gendisk *disk = ida_gendisk[ctlr][i];
37117@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
37118 /* Wait (up to 2 seconds) for a command to complete */
37119
37120 for (i = 200000; i > 0; i--) {
37121- done = hba[ctlr]->access.command_completed(hba[ctlr]);
37122+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
37123 if (done == 0) {
37124 udelay(10); /* a short fixed delay */
37125 } else
37126diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
37127index be73e9d..7fbf140 100644
37128--- a/drivers/block/cpqarray.h
37129+++ b/drivers/block/cpqarray.h
37130@@ -99,7 +99,7 @@ struct ctlr_info {
37131 drv_info_t drv[NWD];
37132 struct proc_dir_entry *proc;
37133
37134- struct access_method access;
37135+ struct access_method *access;
37136
37137 cmdlist_t *reqQ;
37138 cmdlist_t *cmpQ;
37139diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
37140index 0e06f0c..c47b81d 100644
37141--- a/drivers/block/drbd/drbd_int.h
37142+++ b/drivers/block/drbd/drbd_int.h
37143@@ -582,7 +582,7 @@ struct drbd_epoch {
37144 struct drbd_tconn *tconn;
37145 struct list_head list;
37146 unsigned int barrier_nr;
37147- atomic_t epoch_size; /* increased on every request added. */
37148+ atomic_unchecked_t epoch_size; /* increased on every request added. */
37149 atomic_t active; /* increased on every req. added, and dec on every finished. */
37150 unsigned long flags;
37151 };
37152@@ -1022,7 +1022,7 @@ struct drbd_conf {
37153 unsigned int al_tr_number;
37154 int al_tr_cycle;
37155 wait_queue_head_t seq_wait;
37156- atomic_t packet_seq;
37157+ atomic_unchecked_t packet_seq;
37158 unsigned int peer_seq;
37159 spinlock_t peer_seq_lock;
37160 unsigned int minor;
37161@@ -1573,7 +1573,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
37162 char __user *uoptval;
37163 int err;
37164
37165- uoptval = (char __user __force *)optval;
37166+ uoptval = (char __force_user *)optval;
37167
37168 set_fs(KERNEL_DS);
37169 if (level == SOL_SOCKET)
37170diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
37171index 89c497c..9c736ae 100644
37172--- a/drivers/block/drbd/drbd_interval.c
37173+++ b/drivers/block/drbd/drbd_interval.c
37174@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
37175 }
37176
37177 static const struct rb_augment_callbacks augment_callbacks = {
37178- augment_propagate,
37179- augment_copy,
37180- augment_rotate,
37181+ .propagate = augment_propagate,
37182+ .copy = augment_copy,
37183+ .rotate = augment_rotate,
37184 };
37185
37186 /**
37187diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
37188index 9e3818b..7b64c92 100644
37189--- a/drivers/block/drbd/drbd_main.c
37190+++ b/drivers/block/drbd/drbd_main.c
37191@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
37192 p->sector = sector;
37193 p->block_id = block_id;
37194 p->blksize = blksize;
37195- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
37196+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
37197 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
37198 }
37199
37200@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
37201 return -EIO;
37202 p->sector = cpu_to_be64(req->i.sector);
37203 p->block_id = (unsigned long)req;
37204- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
37205+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
37206 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
37207 if (mdev->state.conn >= C_SYNC_SOURCE &&
37208 mdev->state.conn <= C_PAUSED_SYNC_T)
37209@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
37210 {
37211 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
37212
37213- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
37214- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
37215+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
37216+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
37217 kfree(tconn->current_epoch);
37218
37219 idr_destroy(&tconn->volumes);
37220diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
37221index c706d50..5e1b472 100644
37222--- a/drivers/block/drbd/drbd_nl.c
37223+++ b/drivers/block/drbd/drbd_nl.c
37224@@ -3440,7 +3440,7 @@ out:
37225
37226 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
37227 {
37228- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
37229+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
37230 struct sk_buff *msg;
37231 struct drbd_genlmsghdr *d_out;
37232 unsigned seq;
37233@@ -3453,7 +3453,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
37234 return;
37235 }
37236
37237- seq = atomic_inc_return(&drbd_genl_seq);
37238+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
37239 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
37240 if (!msg)
37241 goto failed;
37242diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
37243index 6fa6673..b7f97e9 100644
37244--- a/drivers/block/drbd/drbd_receiver.c
37245+++ b/drivers/block/drbd/drbd_receiver.c
37246@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
37247 {
37248 int err;
37249
37250- atomic_set(&mdev->packet_seq, 0);
37251+ atomic_set_unchecked(&mdev->packet_seq, 0);
37252 mdev->peer_seq = 0;
37253
37254 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
37255@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
37256 do {
37257 next_epoch = NULL;
37258
37259- epoch_size = atomic_read(&epoch->epoch_size);
37260+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
37261
37262 switch (ev & ~EV_CLEANUP) {
37263 case EV_PUT:
37264@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
37265 rv = FE_DESTROYED;
37266 } else {
37267 epoch->flags = 0;
37268- atomic_set(&epoch->epoch_size, 0);
37269+ atomic_set_unchecked(&epoch->epoch_size, 0);
37270 /* atomic_set(&epoch->active, 0); is already zero */
37271 if (rv == FE_STILL_LIVE)
37272 rv = FE_RECYCLED;
37273@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
37274 conn_wait_active_ee_empty(tconn);
37275 drbd_flush(tconn);
37276
37277- if (atomic_read(&tconn->current_epoch->epoch_size)) {
37278+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
37279 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
37280 if (epoch)
37281 break;
37282@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
37283 }
37284
37285 epoch->flags = 0;
37286- atomic_set(&epoch->epoch_size, 0);
37287+ atomic_set_unchecked(&epoch->epoch_size, 0);
37288 atomic_set(&epoch->active, 0);
37289
37290 spin_lock(&tconn->epoch_lock);
37291- if (atomic_read(&tconn->current_epoch->epoch_size)) {
37292+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
37293 list_add(&epoch->list, &tconn->current_epoch->list);
37294 tconn->current_epoch = epoch;
37295 tconn->epochs++;
37296@@ -2163,7 +2163,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37297
37298 err = wait_for_and_update_peer_seq(mdev, peer_seq);
37299 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
37300- atomic_inc(&tconn->current_epoch->epoch_size);
37301+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
37302 err2 = drbd_drain_block(mdev, pi->size);
37303 if (!err)
37304 err = err2;
37305@@ -2197,7 +2197,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37306
37307 spin_lock(&tconn->epoch_lock);
37308 peer_req->epoch = tconn->current_epoch;
37309- atomic_inc(&peer_req->epoch->epoch_size);
37310+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
37311 atomic_inc(&peer_req->epoch->active);
37312 spin_unlock(&tconn->epoch_lock);
37313
37314@@ -4344,7 +4344,7 @@ struct data_cmd {
37315 int expect_payload;
37316 size_t pkt_size;
37317 int (*fn)(struct drbd_tconn *, struct packet_info *);
37318-};
37319+} __do_const;
37320
37321 static struct data_cmd drbd_cmd_handler[] = {
37322 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
37323@@ -4464,7 +4464,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
37324 if (!list_empty(&tconn->current_epoch->list))
37325 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
37326 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
37327- atomic_set(&tconn->current_epoch->epoch_size, 0);
37328+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
37329 tconn->send.seen_any_write_yet = false;
37330
37331 conn_info(tconn, "Connection closed\n");
37332@@ -5220,7 +5220,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
37333 struct asender_cmd {
37334 size_t pkt_size;
37335 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
37336-};
37337+} __do_const;
37338
37339 static struct asender_cmd asender_tbl[] = {
37340 [P_PING] = { 0, got_Ping },
37341diff --git a/drivers/block/loop.c b/drivers/block/loop.c
37342index c8dac73..1800093 100644
37343--- a/drivers/block/loop.c
37344+++ b/drivers/block/loop.c
37345@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
37346
37347 file_start_write(file);
37348 set_fs(get_ds());
37349- bw = file->f_op->write(file, buf, len, &pos);
37350+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
37351 set_fs(old_fs);
37352 file_end_write(file);
37353 if (likely(bw == len))
37354diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
37355index 83a598e..2de5ce3 100644
37356--- a/drivers/block/null_blk.c
37357+++ b/drivers/block/null_blk.c
37358@@ -407,14 +407,24 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
37359 return 0;
37360 }
37361
37362-static struct blk_mq_ops null_mq_ops = {
37363- .queue_rq = null_queue_rq,
37364- .map_queue = blk_mq_map_queue,
37365+static struct blk_mq_ops null_mq_single_ops = {
37366+ .queue_rq = null_queue_rq,
37367+ .map_queue = blk_mq_map_queue,
37368 .init_hctx = null_init_hctx,
37369+ .alloc_hctx = blk_mq_alloc_single_hw_queue,
37370+ .free_hctx = blk_mq_free_single_hw_queue,
37371+};
37372+
37373+static struct blk_mq_ops null_mq_per_node_ops = {
37374+ .queue_rq = null_queue_rq,
37375+ .map_queue = blk_mq_map_queue,
37376+ .init_hctx = null_init_hctx,
37377+ .alloc_hctx = null_alloc_hctx,
37378+ .free_hctx = null_free_hctx,
37379 };
37380
37381 static struct blk_mq_reg null_mq_reg = {
37382- .ops = &null_mq_ops,
37383+ .ops = &null_mq_single_ops,
37384 .queue_depth = 64,
37385 .cmd_size = sizeof(struct nullb_cmd),
37386 .flags = BLK_MQ_F_SHOULD_MERGE,
37387@@ -545,13 +555,8 @@ static int null_add_dev(void)
37388 null_mq_reg.queue_depth = hw_queue_depth;
37389 null_mq_reg.nr_hw_queues = submit_queues;
37390
37391- if (use_per_node_hctx) {
37392- null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
37393- null_mq_reg.ops->free_hctx = null_free_hctx;
37394- } else {
37395- null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
37396- null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
37397- }
37398+ if (use_per_node_hctx)
37399+ null_mq_reg.ops = &null_mq_per_node_ops;
37400
37401 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
37402 } else if (queue_mode == NULL_Q_BIO) {
37403diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
37404index ff8668c..f62167a 100644
37405--- a/drivers/block/pktcdvd.c
37406+++ b/drivers/block/pktcdvd.c
37407@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
37408
37409 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
37410 {
37411- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
37412+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
37413 }
37414
37415 /*
37416@@ -1883,7 +1883,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
37417 return -EROFS;
37418 }
37419 pd->settings.fp = ti.fp;
37420- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
37421+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
37422
37423 if (ti.nwa_v) {
37424 pd->nwa = be32_to_cpu(ti.next_writable);
37425diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
37426index e5565fb..71be10b4 100644
37427--- a/drivers/block/smart1,2.h
37428+++ b/drivers/block/smart1,2.h
37429@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
37430 }
37431
37432 static struct access_method smart4_access = {
37433- smart4_submit_command,
37434- smart4_intr_mask,
37435- smart4_fifo_full,
37436- smart4_intr_pending,
37437- smart4_completed,
37438+ .submit_command = smart4_submit_command,
37439+ .set_intr_mask = smart4_intr_mask,
37440+ .fifo_full = smart4_fifo_full,
37441+ .intr_pending = smart4_intr_pending,
37442+ .command_completed = smart4_completed,
37443 };
37444
37445 /*
37446@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
37447 }
37448
37449 static struct access_method smart2_access = {
37450- smart2_submit_command,
37451- smart2_intr_mask,
37452- smart2_fifo_full,
37453- smart2_intr_pending,
37454- smart2_completed,
37455+ .submit_command = smart2_submit_command,
37456+ .set_intr_mask = smart2_intr_mask,
37457+ .fifo_full = smart2_fifo_full,
37458+ .intr_pending = smart2_intr_pending,
37459+ .command_completed = smart2_completed,
37460 };
37461
37462 /*
37463@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
37464 }
37465
37466 static struct access_method smart2e_access = {
37467- smart2e_submit_command,
37468- smart2e_intr_mask,
37469- smart2e_fifo_full,
37470- smart2e_intr_pending,
37471- smart2e_completed,
37472+ .submit_command = smart2e_submit_command,
37473+ .set_intr_mask = smart2e_intr_mask,
37474+ .fifo_full = smart2e_fifo_full,
37475+ .intr_pending = smart2e_intr_pending,
37476+ .command_completed = smart2e_completed,
37477 };
37478
37479 /*
37480@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
37481 }
37482
37483 static struct access_method smart1_access = {
37484- smart1_submit_command,
37485- smart1_intr_mask,
37486- smart1_fifo_full,
37487- smart1_intr_pending,
37488- smart1_completed,
37489+ .submit_command = smart1_submit_command,
37490+ .set_intr_mask = smart1_intr_mask,
37491+ .fifo_full = smart1_fifo_full,
37492+ .intr_pending = smart1_intr_pending,
37493+ .command_completed = smart1_completed,
37494 };
37495diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
37496index f038dba..bb74c08 100644
37497--- a/drivers/bluetooth/btwilink.c
37498+++ b/drivers/bluetooth/btwilink.c
37499@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
37500
37501 static int bt_ti_probe(struct platform_device *pdev)
37502 {
37503- static struct ti_st *hst;
37504+ struct ti_st *hst;
37505 struct hci_dev *hdev;
37506 int err;
37507
37508diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
37509index b6739cb..962fd35 100644
37510--- a/drivers/bus/arm-cci.c
37511+++ b/drivers/bus/arm-cci.c
37512@@ -979,7 +979,7 @@ static int cci_probe(void)
37513
37514 nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
37515
37516- ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
37517+ ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
37518 if (!ports)
37519 return -ENOMEM;
37520
37521diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
37522index 8a3aff7..d7538c2 100644
37523--- a/drivers/cdrom/cdrom.c
37524+++ b/drivers/cdrom/cdrom.c
37525@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
37526 ENSURE(reset, CDC_RESET);
37527 ENSURE(generic_packet, CDC_GENERIC_PACKET);
37528 cdi->mc_flags = 0;
37529- cdo->n_minors = 0;
37530 cdi->options = CDO_USE_FFLAGS;
37531
37532 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
37533@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
37534 else
37535 cdi->cdda_method = CDDA_OLD;
37536
37537- if (!cdo->generic_packet)
37538- cdo->generic_packet = cdrom_dummy_generic_packet;
37539+ if (!cdo->generic_packet) {
37540+ pax_open_kernel();
37541+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
37542+ pax_close_kernel();
37543+ }
37544
37545 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
37546 mutex_lock(&cdrom_mutex);
37547@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
37548 if (cdi->exit)
37549 cdi->exit(cdi);
37550
37551- cdi->ops->n_minors--;
37552 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
37553 }
37554
37555@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
37556 */
37557 nr = nframes;
37558 do {
37559- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
37560+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
37561 if (cgc.buffer)
37562 break;
37563
37564@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
37565 struct cdrom_device_info *cdi;
37566 int ret;
37567
37568- ret = scnprintf(info + *pos, max_size - *pos, header);
37569+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
37570 if (!ret)
37571 return 1;
37572
37573diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
37574index 5980cb9..6d7bd7e 100644
37575--- a/drivers/cdrom/gdrom.c
37576+++ b/drivers/cdrom/gdrom.c
37577@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
37578 .audio_ioctl = gdrom_audio_ioctl,
37579 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
37580 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
37581- .n_minors = 1,
37582 };
37583
37584 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
37585diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
37586index fa3243d..8c98297 100644
37587--- a/drivers/char/Kconfig
37588+++ b/drivers/char/Kconfig
37589@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
37590
37591 config DEVKMEM
37592 bool "/dev/kmem virtual device support"
37593- default y
37594+ default n
37595+ depends on !GRKERNSEC_KMEM
37596 help
37597 Say Y here if you want to support the /dev/kmem device. The
37598 /dev/kmem device is rarely used, but can be used for certain
37599@@ -576,6 +577,7 @@ config DEVPORT
37600 bool
37601 depends on !M68K
37602 depends on ISA || PCI
37603+ depends on !GRKERNSEC_KMEM
37604 default y
37605
37606 source "drivers/s390/char/Kconfig"
37607diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
37608index a48e05b..6bac831 100644
37609--- a/drivers/char/agp/compat_ioctl.c
37610+++ b/drivers/char/agp/compat_ioctl.c
37611@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
37612 return -ENOMEM;
37613 }
37614
37615- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
37616+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
37617 sizeof(*usegment) * ureserve.seg_count)) {
37618 kfree(usegment);
37619 kfree(ksegment);
37620diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
37621index 1b19239..b87b143 100644
37622--- a/drivers/char/agp/frontend.c
37623+++ b/drivers/char/agp/frontend.c
37624@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
37625 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
37626 return -EFAULT;
37627
37628- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
37629+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
37630 return -EFAULT;
37631
37632 client = agp_find_client_by_pid(reserve.pid);
37633@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
37634 if (segment == NULL)
37635 return -ENOMEM;
37636
37637- if (copy_from_user(segment, (void __user *) reserve.seg_list,
37638+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
37639 sizeof(struct agp_segment) * reserve.seg_count)) {
37640 kfree(segment);
37641 return -EFAULT;
37642diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
37643index 4f94375..413694e 100644
37644--- a/drivers/char/genrtc.c
37645+++ b/drivers/char/genrtc.c
37646@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
37647 switch (cmd) {
37648
37649 case RTC_PLL_GET:
37650+ memset(&pll, 0, sizeof(pll));
37651 if (get_rtc_pll(&pll))
37652 return -EINVAL;
37653 else
37654diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
37655index 5d9c31d..c94ccb5 100644
37656--- a/drivers/char/hpet.c
37657+++ b/drivers/char/hpet.c
37658@@ -578,7 +578,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
37659 }
37660
37661 static int
37662-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
37663+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
37664 struct hpet_info *info)
37665 {
37666 struct hpet_timer __iomem *timer;
37667diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
37668index 86fe45c..c0ea948 100644
37669--- a/drivers/char/hw_random/intel-rng.c
37670+++ b/drivers/char/hw_random/intel-rng.c
37671@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
37672
37673 if (no_fwh_detect)
37674 return -ENODEV;
37675- printk(warning);
37676+ printk("%s", warning);
37677 return -EBUSY;
37678 }
37679
37680diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
37681index ec4e10f..f2a763b 100644
37682--- a/drivers/char/ipmi/ipmi_msghandler.c
37683+++ b/drivers/char/ipmi/ipmi_msghandler.c
37684@@ -420,7 +420,7 @@ struct ipmi_smi {
37685 struct proc_dir_entry *proc_dir;
37686 char proc_dir_name[10];
37687
37688- atomic_t stats[IPMI_NUM_STATS];
37689+ atomic_unchecked_t stats[IPMI_NUM_STATS];
37690
37691 /*
37692 * run_to_completion duplicate of smb_info, smi_info
37693@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
37694
37695
37696 #define ipmi_inc_stat(intf, stat) \
37697- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
37698+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
37699 #define ipmi_get_stat(intf, stat) \
37700- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
37701+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
37702
37703 static int is_lan_addr(struct ipmi_addr *addr)
37704 {
37705@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
37706 INIT_LIST_HEAD(&intf->cmd_rcvrs);
37707 init_waitqueue_head(&intf->waitq);
37708 for (i = 0; i < IPMI_NUM_STATS; i++)
37709- atomic_set(&intf->stats[i], 0);
37710+ atomic_set_unchecked(&intf->stats[i], 0);
37711
37712 intf->proc_dir = NULL;
37713
37714diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
37715index 15e4a60..b046093 100644
37716--- a/drivers/char/ipmi/ipmi_si_intf.c
37717+++ b/drivers/char/ipmi/ipmi_si_intf.c
37718@@ -280,7 +280,7 @@ struct smi_info {
37719 unsigned char slave_addr;
37720
37721 /* Counters and things for the proc filesystem. */
37722- atomic_t stats[SI_NUM_STATS];
37723+ atomic_unchecked_t stats[SI_NUM_STATS];
37724
37725 struct task_struct *thread;
37726
37727@@ -289,9 +289,9 @@ struct smi_info {
37728 };
37729
37730 #define smi_inc_stat(smi, stat) \
37731- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
37732+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
37733 #define smi_get_stat(smi, stat) \
37734- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
37735+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
37736
37737 #define SI_MAX_PARMS 4
37738
37739@@ -3324,7 +3324,7 @@ static int try_smi_init(struct smi_info *new_smi)
37740 atomic_set(&new_smi->req_events, 0);
37741 new_smi->run_to_completion = 0;
37742 for (i = 0; i < SI_NUM_STATS; i++)
37743- atomic_set(&new_smi->stats[i], 0);
37744+ atomic_set_unchecked(&new_smi->stats[i], 0);
37745
37746 new_smi->interrupt_disabled = 1;
37747 atomic_set(&new_smi->stop_operation, 0);
37748diff --git a/drivers/char/mem.c b/drivers/char/mem.c
37749index f895a8c..2bc9147 100644
37750--- a/drivers/char/mem.c
37751+++ b/drivers/char/mem.c
37752@@ -18,6 +18,7 @@
37753 #include <linux/raw.h>
37754 #include <linux/tty.h>
37755 #include <linux/capability.h>
37756+#include <linux/security.h>
37757 #include <linux/ptrace.h>
37758 #include <linux/device.h>
37759 #include <linux/highmem.h>
37760@@ -37,6 +38,10 @@
37761
37762 #define DEVPORT_MINOR 4
37763
37764+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
37765+extern const struct file_operations grsec_fops;
37766+#endif
37767+
37768 static inline unsigned long size_inside_page(unsigned long start,
37769 unsigned long size)
37770 {
37771@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37772
37773 while (cursor < to) {
37774 if (!devmem_is_allowed(pfn)) {
37775+#ifdef CONFIG_GRKERNSEC_KMEM
37776+ gr_handle_mem_readwrite(from, to);
37777+#else
37778 printk(KERN_INFO
37779 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
37780 current->comm, from, to);
37781+#endif
37782 return 0;
37783 }
37784 cursor += PAGE_SIZE;
37785@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37786 }
37787 return 1;
37788 }
37789+#elif defined(CONFIG_GRKERNSEC_KMEM)
37790+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37791+{
37792+ return 0;
37793+}
37794 #else
37795 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37796 {
37797@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
37798
37799 while (count > 0) {
37800 unsigned long remaining;
37801+ char *temp;
37802
37803 sz = size_inside_page(p, count);
37804
37805@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
37806 if (!ptr)
37807 return -EFAULT;
37808
37809- remaining = copy_to_user(buf, ptr, sz);
37810+#ifdef CONFIG_PAX_USERCOPY
37811+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
37812+ if (!temp) {
37813+ unxlate_dev_mem_ptr(p, ptr);
37814+ return -ENOMEM;
37815+ }
37816+ memcpy(temp, ptr, sz);
37817+#else
37818+ temp = ptr;
37819+#endif
37820+
37821+ remaining = copy_to_user(buf, temp, sz);
37822+
37823+#ifdef CONFIG_PAX_USERCOPY
37824+ kfree(temp);
37825+#endif
37826+
37827 unxlate_dev_mem_ptr(p, ptr);
37828 if (remaining)
37829 return -EFAULT;
37830@@ -364,9 +395,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37831 size_t count, loff_t *ppos)
37832 {
37833 unsigned long p = *ppos;
37834- ssize_t low_count, read, sz;
37835+ ssize_t low_count, read, sz, err = 0;
37836 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
37837- int err = 0;
37838
37839 read = 0;
37840 if (p < (unsigned long) high_memory) {
37841@@ -388,6 +418,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37842 }
37843 #endif
37844 while (low_count > 0) {
37845+ char *temp;
37846+
37847 sz = size_inside_page(p, low_count);
37848
37849 /*
37850@@ -397,7 +429,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37851 */
37852 kbuf = xlate_dev_kmem_ptr((char *)p);
37853
37854- if (copy_to_user(buf, kbuf, sz))
37855+#ifdef CONFIG_PAX_USERCOPY
37856+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
37857+ if (!temp)
37858+ return -ENOMEM;
37859+ memcpy(temp, kbuf, sz);
37860+#else
37861+ temp = kbuf;
37862+#endif
37863+
37864+ err = copy_to_user(buf, temp, sz);
37865+
37866+#ifdef CONFIG_PAX_USERCOPY
37867+ kfree(temp);
37868+#endif
37869+
37870+ if (err)
37871 return -EFAULT;
37872 buf += sz;
37873 p += sz;
37874@@ -822,6 +869,9 @@ static const struct memdev {
37875 #ifdef CONFIG_PRINTK
37876 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
37877 #endif
37878+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
37879+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
37880+#endif
37881 };
37882
37883 static int memory_open(struct inode *inode, struct file *filp)
37884@@ -893,7 +943,7 @@ static int __init chr_dev_init(void)
37885 continue;
37886
37887 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
37888- NULL, devlist[minor].name);
37889+ NULL, "%s", devlist[minor].name);
37890 }
37891
37892 return tty_init();
37893diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
37894index 9df78e2..01ba9ae 100644
37895--- a/drivers/char/nvram.c
37896+++ b/drivers/char/nvram.c
37897@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
37898
37899 spin_unlock_irq(&rtc_lock);
37900
37901- if (copy_to_user(buf, contents, tmp - contents))
37902+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
37903 return -EFAULT;
37904
37905 *ppos = i;
37906diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
37907index d39cca6..8c1e269 100644
37908--- a/drivers/char/pcmcia/synclink_cs.c
37909+++ b/drivers/char/pcmcia/synclink_cs.c
37910@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
37911
37912 if (debug_level >= DEBUG_LEVEL_INFO)
37913 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
37914- __FILE__, __LINE__, info->device_name, port->count);
37915+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
37916
37917- WARN_ON(!port->count);
37918+ WARN_ON(!atomic_read(&port->count));
37919
37920 if (tty_port_close_start(port, tty, filp) == 0)
37921 goto cleanup;
37922@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
37923 cleanup:
37924 if (debug_level >= DEBUG_LEVEL_INFO)
37925 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
37926- tty->driver->name, port->count);
37927+ tty->driver->name, atomic_read(&port->count));
37928 }
37929
37930 /* Wait until the transmitter is empty.
37931@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
37932
37933 if (debug_level >= DEBUG_LEVEL_INFO)
37934 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
37935- __FILE__, __LINE__, tty->driver->name, port->count);
37936+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
37937
37938 /* If port is closing, signal caller to try again */
37939 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
37940@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
37941 goto cleanup;
37942 }
37943 spin_lock(&port->lock);
37944- port->count++;
37945+ atomic_inc(&port->count);
37946 spin_unlock(&port->lock);
37947 spin_unlock_irqrestore(&info->netlock, flags);
37948
37949- if (port->count == 1) {
37950+ if (atomic_read(&port->count) == 1) {
37951 /* 1st open on this device, init hardware */
37952 retval = startup(info, tty);
37953 if (retval < 0)
37954@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
37955 unsigned short new_crctype;
37956
37957 /* return error if TTY interface open */
37958- if (info->port.count)
37959+ if (atomic_read(&info->port.count))
37960 return -EBUSY;
37961
37962 switch (encoding)
37963@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
37964
37965 /* arbitrate between network and tty opens */
37966 spin_lock_irqsave(&info->netlock, flags);
37967- if (info->port.count != 0 || info->netcount != 0) {
37968+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
37969 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
37970 spin_unlock_irqrestore(&info->netlock, flags);
37971 return -EBUSY;
37972@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37973 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
37974
37975 /* return error if TTY interface open */
37976- if (info->port.count)
37977+ if (atomic_read(&info->port.count))
37978 return -EBUSY;
37979
37980 if (cmd != SIOCWANDEV)
37981diff --git a/drivers/char/random.c b/drivers/char/random.c
37982index 429b75b..a4f540d 100644
37983--- a/drivers/char/random.c
37984+++ b/drivers/char/random.c
37985@@ -270,10 +270,17 @@
37986 /*
37987 * Configuration information
37988 */
37989+#ifdef CONFIG_GRKERNSEC_RANDNET
37990+#define INPUT_POOL_SHIFT 14
37991+#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
37992+#define OUTPUT_POOL_SHIFT 12
37993+#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
37994+#else
37995 #define INPUT_POOL_SHIFT 12
37996 #define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
37997 #define OUTPUT_POOL_SHIFT 10
37998 #define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
37999+#endif
38000 #define SEC_XFER_SIZE 512
38001 #define EXTRACT_SIZE 10
38002
38003@@ -284,9 +291,6 @@
38004 /*
38005 * To allow fractional bits to be tracked, the entropy_count field is
38006 * denominated in units of 1/8th bits.
38007- *
38008- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
38009- * credit_entropy_bits() needs to be 64 bits wide.
38010 */
38011 #define ENTROPY_SHIFT 3
38012 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
38013@@ -361,12 +365,19 @@ static struct poolinfo {
38014 #define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
38015 int tap1, tap2, tap3, tap4, tap5;
38016 } poolinfo_table[] = {
38017+#ifdef CONFIG_GRKERNSEC_RANDNET
38018+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
38019+ { S(512), 411, 308, 208, 104, 1 },
38020+ /* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 -- 105 */
38021+ { S(128), 104, 76, 51, 25, 1 },
38022+#else
38023 /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
38024 /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
38025 { S(128), 104, 76, 51, 25, 1 },
38026 /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
38027 /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
38028 { S(32), 26, 19, 14, 7, 1 },
38029+#endif
38030 #if 0
38031 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
38032 { S(2048), 1638, 1231, 819, 411, 1 },
38033@@ -524,8 +535,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
38034 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
38035 }
38036
38037- ACCESS_ONCE(r->input_rotate) = input_rotate;
38038- ACCESS_ONCE(r->add_ptr) = i;
38039+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
38040+ ACCESS_ONCE_RW(r->add_ptr) = i;
38041 smp_wmb();
38042
38043 if (out)
38044@@ -632,7 +643,7 @@ retry:
38045 /* The +2 corresponds to the /4 in the denominator */
38046
38047 do {
38048- unsigned int anfrac = min(pnfrac, pool_size/2);
38049+ u64 anfrac = min(pnfrac, pool_size/2);
38050 unsigned int add =
38051 ((pool_size - entropy_count)*anfrac*3) >> s;
38052
38053@@ -1151,7 +1162,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
38054
38055 extract_buf(r, tmp);
38056 i = min_t(int, nbytes, EXTRACT_SIZE);
38057- if (copy_to_user(buf, tmp, i)) {
38058+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
38059 ret = -EFAULT;
38060 break;
38061 }
38062@@ -1507,7 +1518,7 @@ EXPORT_SYMBOL(generate_random_uuid);
38063 #include <linux/sysctl.h>
38064
38065 static int min_read_thresh = 8, min_write_thresh;
38066-static int max_read_thresh = INPUT_POOL_WORDS * 32;
38067+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
38068 static int max_write_thresh = INPUT_POOL_WORDS * 32;
38069 static char sysctl_bootid[16];
38070
38071@@ -1523,7 +1534,7 @@ static char sysctl_bootid[16];
38072 static int proc_do_uuid(struct ctl_table *table, int write,
38073 void __user *buffer, size_t *lenp, loff_t *ppos)
38074 {
38075- struct ctl_table fake_table;
38076+ ctl_table_no_const fake_table;
38077 unsigned char buf[64], tmp_uuid[16], *uuid;
38078
38079 uuid = table->data;
38080@@ -1553,7 +1564,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
38081 static int proc_do_entropy(ctl_table *table, int write,
38082 void __user *buffer, size_t *lenp, loff_t *ppos)
38083 {
38084- ctl_table fake_table;
38085+ ctl_table_no_const fake_table;
38086 int entropy_count;
38087
38088 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
38089diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
38090index 7cc1fe22..b602d6b 100644
38091--- a/drivers/char/sonypi.c
38092+++ b/drivers/char/sonypi.c
38093@@ -54,6 +54,7 @@
38094
38095 #include <asm/uaccess.h>
38096 #include <asm/io.h>
38097+#include <asm/local.h>
38098
38099 #include <linux/sonypi.h>
38100
38101@@ -490,7 +491,7 @@ static struct sonypi_device {
38102 spinlock_t fifo_lock;
38103 wait_queue_head_t fifo_proc_list;
38104 struct fasync_struct *fifo_async;
38105- int open_count;
38106+ local_t open_count;
38107 int model;
38108 struct input_dev *input_jog_dev;
38109 struct input_dev *input_key_dev;
38110@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
38111 static int sonypi_misc_release(struct inode *inode, struct file *file)
38112 {
38113 mutex_lock(&sonypi_device.lock);
38114- sonypi_device.open_count--;
38115+ local_dec(&sonypi_device.open_count);
38116 mutex_unlock(&sonypi_device.lock);
38117 return 0;
38118 }
38119@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
38120 {
38121 mutex_lock(&sonypi_device.lock);
38122 /* Flush input queue on first open */
38123- if (!sonypi_device.open_count)
38124+ if (!local_read(&sonypi_device.open_count))
38125 kfifo_reset(&sonypi_device.fifo);
38126- sonypi_device.open_count++;
38127+ local_inc(&sonypi_device.open_count);
38128 mutex_unlock(&sonypi_device.lock);
38129
38130 return 0;
38131diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
38132index 64420b3..5c40b56 100644
38133--- a/drivers/char/tpm/tpm_acpi.c
38134+++ b/drivers/char/tpm/tpm_acpi.c
38135@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
38136 virt = acpi_os_map_memory(start, len);
38137 if (!virt) {
38138 kfree(log->bios_event_log);
38139+ log->bios_event_log = NULL;
38140 printk("%s: ERROR - Unable to map memory\n", __func__);
38141 return -EIO;
38142 }
38143
38144- memcpy_fromio(log->bios_event_log, virt, len);
38145+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
38146
38147 acpi_os_unmap_memory(virt, len);
38148 return 0;
38149diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
38150index 59f7cb2..bac8b6d 100644
38151--- a/drivers/char/tpm/tpm_eventlog.c
38152+++ b/drivers/char/tpm/tpm_eventlog.c
38153@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
38154 event = addr;
38155
38156 if ((event->event_type == 0 && event->event_size == 0) ||
38157- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
38158+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
38159 return NULL;
38160
38161 return addr;
38162@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
38163 return NULL;
38164
38165 if ((event->event_type == 0 && event->event_size == 0) ||
38166- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
38167+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
38168 return NULL;
38169
38170 (*pos)++;
38171@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
38172 int i;
38173
38174 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
38175- seq_putc(m, data[i]);
38176+ if (!seq_putc(m, data[i]))
38177+ return -EFAULT;
38178
38179 return 0;
38180 }
38181diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
38182index feea87c..18aefff 100644
38183--- a/drivers/char/virtio_console.c
38184+++ b/drivers/char/virtio_console.c
38185@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
38186 if (to_user) {
38187 ssize_t ret;
38188
38189- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
38190+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
38191 if (ret)
38192 return -EFAULT;
38193 } else {
38194@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
38195 if (!port_has_data(port) && !port->host_connected)
38196 return 0;
38197
38198- return fill_readbuf(port, ubuf, count, true);
38199+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
38200 }
38201
38202 static int wait_port_writable(struct port *port, bool nonblock)
38203diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
38204index a33f46f..a720eed 100644
38205--- a/drivers/clk/clk-composite.c
38206+++ b/drivers/clk/clk-composite.c
38207@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
38208 struct clk *clk;
38209 struct clk_init_data init;
38210 struct clk_composite *composite;
38211- struct clk_ops *clk_composite_ops;
38212+ clk_ops_no_const *clk_composite_ops;
38213
38214 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
38215 if (!composite) {
38216diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
38217index 81dd31a..ef5c542 100644
38218--- a/drivers/clk/socfpga/clk.c
38219+++ b/drivers/clk/socfpga/clk.c
38220@@ -22,6 +22,7 @@
38221 #include <linux/clk-provider.h>
38222 #include <linux/io.h>
38223 #include <linux/of.h>
38224+#include <asm/pgtable.h>
38225
38226 /* Clock Manager offsets */
38227 #define CLKMGR_CTRL 0x0
38228@@ -152,8 +153,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
38229 streq(clk_name, "periph_pll") ||
38230 streq(clk_name, "sdram_pll")) {
38231 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
38232- clk_pll_ops.enable = clk_gate_ops.enable;
38233- clk_pll_ops.disable = clk_gate_ops.disable;
38234+ pax_open_kernel();
38235+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
38236+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
38237+ pax_close_kernel();
38238 }
38239
38240 clk = clk_register(NULL, &socfpga_clk->hw.hw);
38241@@ -244,7 +247,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
38242 return parent_rate / div;
38243 }
38244
38245-static struct clk_ops gateclk_ops = {
38246+static clk_ops_no_const gateclk_ops __read_only = {
38247 .recalc_rate = socfpga_clk_recalc_rate,
38248 .get_parent = socfpga_clk_get_parent,
38249 .set_parent = socfpga_clk_set_parent,
38250diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
38251index caf41eb..223d27a 100644
38252--- a/drivers/cpufreq/acpi-cpufreq.c
38253+++ b/drivers/cpufreq/acpi-cpufreq.c
38254@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
38255 return sprintf(buf, "%u\n", boost_enabled);
38256 }
38257
38258-static struct global_attr global_boost = __ATTR(boost, 0644,
38259+static global_attr_no_const global_boost = __ATTR(boost, 0644,
38260 show_global_boost,
38261 store_global_boost);
38262
38263@@ -693,8 +693,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
38264 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
38265 per_cpu(acfreq_data, cpu) = data;
38266
38267- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
38268- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
38269+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
38270+ pax_open_kernel();
38271+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
38272+ pax_close_kernel();
38273+ }
38274
38275 result = acpi_processor_register_performance(data->acpi_data, cpu);
38276 if (result)
38277@@ -827,7 +830,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
38278 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
38279 break;
38280 case ACPI_ADR_SPACE_FIXED_HARDWARE:
38281- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
38282+ pax_open_kernel();
38283+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
38284+ pax_close_kernel();
38285 break;
38286 default:
38287 break;
38288diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
38289index 8d19f7c..6bc2daa 100644
38290--- a/drivers/cpufreq/cpufreq.c
38291+++ b/drivers/cpufreq/cpufreq.c
38292@@ -1885,7 +1885,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
38293 #endif
38294
38295 mutex_lock(&cpufreq_governor_mutex);
38296- list_del(&governor->governor_list);
38297+ pax_list_del(&governor->governor_list);
38298 mutex_unlock(&cpufreq_governor_mutex);
38299 return;
38300 }
38301@@ -2115,7 +2115,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
38302 return NOTIFY_OK;
38303 }
38304
38305-static struct notifier_block __refdata cpufreq_cpu_notifier = {
38306+static struct notifier_block cpufreq_cpu_notifier = {
38307 .notifier_call = cpufreq_cpu_callback,
38308 };
38309
38310@@ -2148,8 +2148,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
38311
38312 pr_debug("trying to register driver %s\n", driver_data->name);
38313
38314- if (driver_data->setpolicy)
38315- driver_data->flags |= CPUFREQ_CONST_LOOPS;
38316+ if (driver_data->setpolicy) {
38317+ pax_open_kernel();
38318+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
38319+ pax_close_kernel();
38320+ }
38321
38322 write_lock_irqsave(&cpufreq_driver_lock, flags);
38323 if (cpufreq_driver) {
38324diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
38325index e6be635..f8a90dc 100644
38326--- a/drivers/cpufreq/cpufreq_governor.c
38327+++ b/drivers/cpufreq/cpufreq_governor.c
38328@@ -187,7 +187,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38329 struct dbs_data *dbs_data;
38330 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
38331 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
38332- struct od_ops *od_ops = NULL;
38333+ const struct od_ops *od_ops = NULL;
38334 struct od_dbs_tuners *od_tuners = NULL;
38335 struct cs_dbs_tuners *cs_tuners = NULL;
38336 struct cpu_dbs_common_info *cpu_cdbs;
38337@@ -253,7 +253,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38338
38339 if ((cdata->governor == GOV_CONSERVATIVE) &&
38340 (!policy->governor->initialized)) {
38341- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38342+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38343
38344 cpufreq_register_notifier(cs_ops->notifier_block,
38345 CPUFREQ_TRANSITION_NOTIFIER);
38346@@ -273,7 +273,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38347
38348 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
38349 (policy->governor->initialized == 1)) {
38350- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38351+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38352
38353 cpufreq_unregister_notifier(cs_ops->notifier_block,
38354 CPUFREQ_TRANSITION_NOTIFIER);
38355diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
38356index b5f2b86..daa801b 100644
38357--- a/drivers/cpufreq/cpufreq_governor.h
38358+++ b/drivers/cpufreq/cpufreq_governor.h
38359@@ -205,7 +205,7 @@ struct common_dbs_data {
38360 void (*exit)(struct dbs_data *dbs_data);
38361
38362 /* Governor specific ops, see below */
38363- void *gov_ops;
38364+ const void *gov_ops;
38365 };
38366
38367 /* Governor Per policy data */
38368@@ -225,7 +225,7 @@ struct od_ops {
38369 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
38370 unsigned int freq_next, unsigned int relation);
38371 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
38372-};
38373+} __no_const;
38374
38375 struct cs_ops {
38376 struct notifier_block *notifier_block;
38377diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
38378index 18d4091..434be15 100644
38379--- a/drivers/cpufreq/cpufreq_ondemand.c
38380+++ b/drivers/cpufreq/cpufreq_ondemand.c
38381@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data)
38382
38383 define_get_cpu_dbs_routines(od_cpu_dbs_info);
38384
38385-static struct od_ops od_ops = {
38386+static struct od_ops od_ops __read_only = {
38387 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
38388 .powersave_bias_target = generic_powersave_bias_target,
38389 .freq_increase = dbs_freq_increase,
38390@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
38391 (struct cpufreq_policy *, unsigned int, unsigned int),
38392 unsigned int powersave_bias)
38393 {
38394- od_ops.powersave_bias_target = f;
38395+ pax_open_kernel();
38396+ *(void **)&od_ops.powersave_bias_target = f;
38397+ pax_close_kernel();
38398 od_set_powersave_bias(powersave_bias);
38399 }
38400 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
38401
38402 void od_unregister_powersave_bias_handler(void)
38403 {
38404- od_ops.powersave_bias_target = generic_powersave_bias_target;
38405+ pax_open_kernel();
38406+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
38407+ pax_close_kernel();
38408 od_set_powersave_bias(0);
38409 }
38410 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
38411diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
38412index 4cf0d28..5830372 100644
38413--- a/drivers/cpufreq/cpufreq_stats.c
38414+++ b/drivers/cpufreq/cpufreq_stats.c
38415@@ -352,7 +352,7 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
38416 }
38417
38418 /* priority=1 so this will get called before cpufreq_remove_dev */
38419-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
38420+static struct notifier_block cpufreq_stat_cpu_notifier = {
38421 .notifier_call = cpufreq_stat_cpu_callback,
38422 .priority = 1,
38423 };
38424diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
38425index d51f17ed..9f43b15 100644
38426--- a/drivers/cpufreq/intel_pstate.c
38427+++ b/drivers/cpufreq/intel_pstate.c
38428@@ -112,10 +112,10 @@ struct pstate_funcs {
38429 struct cpu_defaults {
38430 struct pstate_adjust_policy pid_policy;
38431 struct pstate_funcs funcs;
38432-};
38433+} __do_const;
38434
38435 static struct pstate_adjust_policy pid_params;
38436-static struct pstate_funcs pstate_funcs;
38437+static struct pstate_funcs *pstate_funcs;
38438
38439 struct perf_limits {
38440 int no_turbo;
38441@@ -462,7 +462,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
38442
38443 cpu->pstate.current_pstate = pstate;
38444
38445- pstate_funcs.set(pstate);
38446+ pstate_funcs->set(pstate);
38447 }
38448
38449 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
38450@@ -484,9 +484,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
38451 {
38452 sprintf(cpu->name, "Intel 2nd generation core");
38453
38454- cpu->pstate.min_pstate = pstate_funcs.get_min();
38455- cpu->pstate.max_pstate = pstate_funcs.get_max();
38456- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
38457+ cpu->pstate.min_pstate = pstate_funcs->get_min();
38458+ cpu->pstate.max_pstate = pstate_funcs->get_max();
38459+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
38460
38461 /*
38462 * goto max pstate so we don't slow up boot if we are built-in if we are
38463@@ -750,9 +750,9 @@ static int intel_pstate_msrs_not_valid(void)
38464 rdmsrl(MSR_IA32_APERF, aperf);
38465 rdmsrl(MSR_IA32_MPERF, mperf);
38466
38467- if (!pstate_funcs.get_max() ||
38468- !pstate_funcs.get_min() ||
38469- !pstate_funcs.get_turbo())
38470+ if (!pstate_funcs->get_max() ||
38471+ !pstate_funcs->get_min() ||
38472+ !pstate_funcs->get_turbo())
38473 return -ENODEV;
38474
38475 rdmsrl(MSR_IA32_APERF, tmp);
38476@@ -766,7 +766,7 @@ static int intel_pstate_msrs_not_valid(void)
38477 return 0;
38478 }
38479
38480-static void copy_pid_params(struct pstate_adjust_policy *policy)
38481+static void copy_pid_params(const struct pstate_adjust_policy *policy)
38482 {
38483 pid_params.sample_rate_ms = policy->sample_rate_ms;
38484 pid_params.p_gain_pct = policy->p_gain_pct;
38485@@ -778,10 +778,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
38486
38487 static void copy_cpu_funcs(struct pstate_funcs *funcs)
38488 {
38489- pstate_funcs.get_max = funcs->get_max;
38490- pstate_funcs.get_min = funcs->get_min;
38491- pstate_funcs.get_turbo = funcs->get_turbo;
38492- pstate_funcs.set = funcs->set;
38493+ pstate_funcs = funcs;
38494 }
38495
38496 #if IS_ENABLED(CONFIG_ACPI)
38497diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
38498index 3d1cba9..0ab21d2 100644
38499--- a/drivers/cpufreq/p4-clockmod.c
38500+++ b/drivers/cpufreq/p4-clockmod.c
38501@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
38502 case 0x0F: /* Core Duo */
38503 case 0x16: /* Celeron Core */
38504 case 0x1C: /* Atom */
38505- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38506+ pax_open_kernel();
38507+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38508+ pax_close_kernel();
38509 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
38510 case 0x0D: /* Pentium M (Dothan) */
38511- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38512+ pax_open_kernel();
38513+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38514+ pax_close_kernel();
38515 /* fall through */
38516 case 0x09: /* Pentium M (Banias) */
38517 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
38518@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
38519
38520 /* on P-4s, the TSC runs with constant frequency independent whether
38521 * throttling is active or not. */
38522- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38523+ pax_open_kernel();
38524+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
38525+ pax_close_kernel();
38526
38527 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
38528 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
38529diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
38530index 724ffbd..ad83692 100644
38531--- a/drivers/cpufreq/sparc-us3-cpufreq.c
38532+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
38533@@ -18,14 +18,12 @@
38534 #include <asm/head.h>
38535 #include <asm/timer.h>
38536
38537-static struct cpufreq_driver *cpufreq_us3_driver;
38538-
38539 struct us3_freq_percpu_info {
38540 struct cpufreq_frequency_table table[4];
38541 };
38542
38543 /* Indexed by cpu number. */
38544-static struct us3_freq_percpu_info *us3_freq_table;
38545+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
38546
38547 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
38548 * in the Safari config register.
38549@@ -156,14 +154,26 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
38550
38551 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
38552 {
38553- if (cpufreq_us3_driver) {
38554- cpufreq_frequency_table_put_attr(policy->cpu);
38555- us3_freq_target(policy, 0);
38556- }
38557+ cpufreq_frequency_table_put_attr(policy->cpu);
38558+ us3_freq_target(policy, 0);
38559
38560 return 0;
38561 }
38562
38563+static int __init us3_freq_init(void);
38564+static void __exit us3_freq_exit(void);
38565+
38566+static struct cpufreq_driver cpufreq_us3_driver = {
38567+ .init = us3_freq_cpu_init,
38568+ .verify = cpufreq_generic_frequency_table_verify,
38569+ .target_index = us3_freq_target,
38570+ .get = us3_freq_get,
38571+ .exit = us3_freq_cpu_exit,
38572+ .owner = THIS_MODULE,
38573+ .name = "UltraSPARC-III",
38574+
38575+};
38576+
38577 static int __init us3_freq_init(void)
38578 {
38579 unsigned long manuf, impl, ver;
38580@@ -180,55 +190,15 @@ static int __init us3_freq_init(void)
38581 (impl == CHEETAH_IMPL ||
38582 impl == CHEETAH_PLUS_IMPL ||
38583 impl == JAGUAR_IMPL ||
38584- impl == PANTHER_IMPL)) {
38585- struct cpufreq_driver *driver;
38586-
38587- ret = -ENOMEM;
38588- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
38589- if (!driver)
38590- goto err_out;
38591-
38592- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
38593- GFP_KERNEL);
38594- if (!us3_freq_table)
38595- goto err_out;
38596-
38597- driver->init = us3_freq_cpu_init;
38598- driver->verify = cpufreq_generic_frequency_table_verify;
38599- driver->target_index = us3_freq_target;
38600- driver->get = us3_freq_get;
38601- driver->exit = us3_freq_cpu_exit;
38602- strcpy(driver->name, "UltraSPARC-III");
38603-
38604- cpufreq_us3_driver = driver;
38605- ret = cpufreq_register_driver(driver);
38606- if (ret)
38607- goto err_out;
38608-
38609- return 0;
38610-
38611-err_out:
38612- if (driver) {
38613- kfree(driver);
38614- cpufreq_us3_driver = NULL;
38615- }
38616- kfree(us3_freq_table);
38617- us3_freq_table = NULL;
38618- return ret;
38619- }
38620+ impl == PANTHER_IMPL))
38621+ return cpufreq_register_driver(&cpufreq_us3_driver);
38622
38623 return -ENODEV;
38624 }
38625
38626 static void __exit us3_freq_exit(void)
38627 {
38628- if (cpufreq_us3_driver) {
38629- cpufreq_unregister_driver(cpufreq_us3_driver);
38630- kfree(cpufreq_us3_driver);
38631- cpufreq_us3_driver = NULL;
38632- kfree(us3_freq_table);
38633- us3_freq_table = NULL;
38634- }
38635+ cpufreq_unregister_driver(&cpufreq_us3_driver);
38636 }
38637
38638 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
38639diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
38640index 4e1daca..e707b61 100644
38641--- a/drivers/cpufreq/speedstep-centrino.c
38642+++ b/drivers/cpufreq/speedstep-centrino.c
38643@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
38644 !cpu_has(cpu, X86_FEATURE_EST))
38645 return -ENODEV;
38646
38647- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
38648- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
38649+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
38650+ pax_open_kernel();
38651+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
38652+ pax_close_kernel();
38653+ }
38654
38655 if (policy->cpu != 0)
38656 return -ENODEV;
38657diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
38658index 06dbe7c..c2c8671 100644
38659--- a/drivers/cpuidle/driver.c
38660+++ b/drivers/cpuidle/driver.c
38661@@ -202,7 +202,7 @@ static int poll_idle(struct cpuidle_device *dev,
38662
38663 static void poll_idle_init(struct cpuidle_driver *drv)
38664 {
38665- struct cpuidle_state *state = &drv->states[0];
38666+ cpuidle_state_no_const *state = &drv->states[0];
38667
38668 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
38669 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
38670diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
38671index ca89412..a7b9c49 100644
38672--- a/drivers/cpuidle/governor.c
38673+++ b/drivers/cpuidle/governor.c
38674@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
38675 mutex_lock(&cpuidle_lock);
38676 if (__cpuidle_find_governor(gov->name) == NULL) {
38677 ret = 0;
38678- list_add_tail(&gov->governor_list, &cpuidle_governors);
38679+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
38680 if (!cpuidle_curr_governor ||
38681 cpuidle_curr_governor->rating < gov->rating)
38682 cpuidle_switch_governor(gov);
38683diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
38684index e918b6d..f87ea80 100644
38685--- a/drivers/cpuidle/sysfs.c
38686+++ b/drivers/cpuidle/sysfs.c
38687@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
38688 NULL
38689 };
38690
38691-static struct attribute_group cpuidle_attr_group = {
38692+static attribute_group_no_const cpuidle_attr_group = {
38693 .attrs = cpuidle_default_attrs,
38694 .name = "cpuidle",
38695 };
38696diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
38697index 12fea3e..1e28f47 100644
38698--- a/drivers/crypto/hifn_795x.c
38699+++ b/drivers/crypto/hifn_795x.c
38700@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
38701 MODULE_PARM_DESC(hifn_pll_ref,
38702 "PLL reference clock (pci[freq] or ext[freq], default ext)");
38703
38704-static atomic_t hifn_dev_number;
38705+static atomic_unchecked_t hifn_dev_number;
38706
38707 #define ACRYPTO_OP_DECRYPT 0
38708 #define ACRYPTO_OP_ENCRYPT 1
38709@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
38710 goto err_out_disable_pci_device;
38711
38712 snprintf(name, sizeof(name), "hifn%d",
38713- atomic_inc_return(&hifn_dev_number)-1);
38714+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
38715
38716 err = pci_request_regions(pdev, name);
38717 if (err)
38718diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
38719index a0b2f7e..1b6f028 100644
38720--- a/drivers/devfreq/devfreq.c
38721+++ b/drivers/devfreq/devfreq.c
38722@@ -607,7 +607,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
38723 goto err_out;
38724 }
38725
38726- list_add(&governor->node, &devfreq_governor_list);
38727+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
38728
38729 list_for_each_entry(devfreq, &devfreq_list, node) {
38730 int ret = 0;
38731@@ -695,7 +695,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
38732 }
38733 }
38734
38735- list_del(&governor->node);
38736+ pax_list_del((struct list_head *)&governor->node);
38737 err_out:
38738 mutex_unlock(&devfreq_list_lock);
38739
38740diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
38741index 0d765c0..60b7480 100644
38742--- a/drivers/dma/sh/shdmac.c
38743+++ b/drivers/dma/sh/shdmac.c
38744@@ -511,7 +511,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
38745 return ret;
38746 }
38747
38748-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
38749+static struct notifier_block sh_dmae_nmi_notifier = {
38750 .notifier_call = sh_dmae_nmi_handler,
38751
38752 /* Run before NMI debug handler and KGDB */
38753diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
38754index 1026743..80b081c 100644
38755--- a/drivers/edac/edac_device.c
38756+++ b/drivers/edac/edac_device.c
38757@@ -474,9 +474,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
38758 */
38759 int edac_device_alloc_index(void)
38760 {
38761- static atomic_t device_indexes = ATOMIC_INIT(0);
38762+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
38763
38764- return atomic_inc_return(&device_indexes) - 1;
38765+ return atomic_inc_return_unchecked(&device_indexes) - 1;
38766 }
38767 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
38768
38769diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
38770index 9f7e0e60..348c875 100644
38771--- a/drivers/edac/edac_mc_sysfs.c
38772+++ b/drivers/edac/edac_mc_sysfs.c
38773@@ -150,7 +150,7 @@ static const char * const edac_caps[] = {
38774 struct dev_ch_attribute {
38775 struct device_attribute attr;
38776 int channel;
38777-};
38778+} __do_const;
38779
38780 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
38781 struct dev_ch_attribute dev_attr_legacy_##_name = \
38782@@ -1007,14 +1007,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
38783 }
38784
38785 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
38786+ pax_open_kernel();
38787 if (mci->get_sdram_scrub_rate) {
38788- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
38789- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
38790+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
38791+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
38792 }
38793 if (mci->set_sdram_scrub_rate) {
38794- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
38795- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
38796+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
38797+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
38798 }
38799+ pax_close_kernel();
38800 err = device_create_file(&mci->dev,
38801 &dev_attr_sdram_scrub_rate);
38802 if (err) {
38803diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
38804index 2cf44b4d..6dd2dc7 100644
38805--- a/drivers/edac/edac_pci.c
38806+++ b/drivers/edac/edac_pci.c
38807@@ -29,7 +29,7 @@
38808
38809 static DEFINE_MUTEX(edac_pci_ctls_mutex);
38810 static LIST_HEAD(edac_pci_list);
38811-static atomic_t pci_indexes = ATOMIC_INIT(0);
38812+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
38813
38814 /*
38815 * edac_pci_alloc_ctl_info
38816@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
38817 */
38818 int edac_pci_alloc_index(void)
38819 {
38820- return atomic_inc_return(&pci_indexes) - 1;
38821+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
38822 }
38823 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
38824
38825diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
38826index e8658e4..22746d6 100644
38827--- a/drivers/edac/edac_pci_sysfs.c
38828+++ b/drivers/edac/edac_pci_sysfs.c
38829@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
38830 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
38831 static int edac_pci_poll_msec = 1000; /* one second workq period */
38832
38833-static atomic_t pci_parity_count = ATOMIC_INIT(0);
38834-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
38835+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
38836+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
38837
38838 static struct kobject *edac_pci_top_main_kobj;
38839 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
38840@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
38841 void *value;
38842 ssize_t(*show) (void *, char *);
38843 ssize_t(*store) (void *, const char *, size_t);
38844-};
38845+} __do_const;
38846
38847 /* Set of show/store abstract level functions for PCI Parity object */
38848 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
38849@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38850 edac_printk(KERN_CRIT, EDAC_PCI,
38851 "Signaled System Error on %s\n",
38852 pci_name(dev));
38853- atomic_inc(&pci_nonparity_count);
38854+ atomic_inc_unchecked(&pci_nonparity_count);
38855 }
38856
38857 if (status & (PCI_STATUS_PARITY)) {
38858@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38859 "Master Data Parity Error on %s\n",
38860 pci_name(dev));
38861
38862- atomic_inc(&pci_parity_count);
38863+ atomic_inc_unchecked(&pci_parity_count);
38864 }
38865
38866 if (status & (PCI_STATUS_DETECTED_PARITY)) {
38867@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38868 "Detected Parity Error on %s\n",
38869 pci_name(dev));
38870
38871- atomic_inc(&pci_parity_count);
38872+ atomic_inc_unchecked(&pci_parity_count);
38873 }
38874 }
38875
38876@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38877 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
38878 "Signaled System Error on %s\n",
38879 pci_name(dev));
38880- atomic_inc(&pci_nonparity_count);
38881+ atomic_inc_unchecked(&pci_nonparity_count);
38882 }
38883
38884 if (status & (PCI_STATUS_PARITY)) {
38885@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38886 "Master Data Parity Error on "
38887 "%s\n", pci_name(dev));
38888
38889- atomic_inc(&pci_parity_count);
38890+ atomic_inc_unchecked(&pci_parity_count);
38891 }
38892
38893 if (status & (PCI_STATUS_DETECTED_PARITY)) {
38894@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38895 "Detected Parity Error on %s\n",
38896 pci_name(dev));
38897
38898- atomic_inc(&pci_parity_count);
38899+ atomic_inc_unchecked(&pci_parity_count);
38900 }
38901 }
38902 }
38903@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
38904 if (!check_pci_errors)
38905 return;
38906
38907- before_count = atomic_read(&pci_parity_count);
38908+ before_count = atomic_read_unchecked(&pci_parity_count);
38909
38910 /* scan all PCI devices looking for a Parity Error on devices and
38911 * bridges.
38912@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
38913 /* Only if operator has selected panic on PCI Error */
38914 if (edac_pci_get_panic_on_pe()) {
38915 /* If the count is different 'after' from 'before' */
38916- if (before_count != atomic_read(&pci_parity_count))
38917+ if (before_count != atomic_read_unchecked(&pci_parity_count))
38918 panic("EDAC: PCI Parity Error");
38919 }
38920 }
38921diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
38922index 51b7e3a..aa8a3e8 100644
38923--- a/drivers/edac/mce_amd.h
38924+++ b/drivers/edac/mce_amd.h
38925@@ -77,7 +77,7 @@ struct amd_decoder_ops {
38926 bool (*mc0_mce)(u16, u8);
38927 bool (*mc1_mce)(u16, u8);
38928 bool (*mc2_mce)(u16, u8);
38929-};
38930+} __no_const;
38931
38932 void amd_report_gart_errors(bool);
38933 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
38934diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
38935index 57ea7f4..af06b76 100644
38936--- a/drivers/firewire/core-card.c
38937+++ b/drivers/firewire/core-card.c
38938@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
38939 const struct fw_card_driver *driver,
38940 struct device *device)
38941 {
38942- static atomic_t index = ATOMIC_INIT(-1);
38943+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
38944
38945- card->index = atomic_inc_return(&index);
38946+ card->index = atomic_inc_return_unchecked(&index);
38947 card->driver = driver;
38948 card->device = device;
38949 card->current_tlabel = 0;
38950@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
38951
38952 void fw_core_remove_card(struct fw_card *card)
38953 {
38954- struct fw_card_driver dummy_driver = dummy_driver_template;
38955+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
38956
38957 card->driver->update_phy_reg(card, 4,
38958 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
38959diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
38960index de4aa40..49ab1f2 100644
38961--- a/drivers/firewire/core-device.c
38962+++ b/drivers/firewire/core-device.c
38963@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
38964 struct config_rom_attribute {
38965 struct device_attribute attr;
38966 u32 key;
38967-};
38968+} __do_const;
38969
38970 static ssize_t show_immediate(struct device *dev,
38971 struct device_attribute *dattr, char *buf)
38972diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
38973index 0e79951..b180217 100644
38974--- a/drivers/firewire/core-transaction.c
38975+++ b/drivers/firewire/core-transaction.c
38976@@ -38,6 +38,7 @@
38977 #include <linux/timer.h>
38978 #include <linux/types.h>
38979 #include <linux/workqueue.h>
38980+#include <linux/sched.h>
38981
38982 #include <asm/byteorder.h>
38983
38984diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
38985index 515a42c..5ecf3ba 100644
38986--- a/drivers/firewire/core.h
38987+++ b/drivers/firewire/core.h
38988@@ -111,6 +111,7 @@ struct fw_card_driver {
38989
38990 int (*stop_iso)(struct fw_iso_context *ctx);
38991 };
38992+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
38993
38994 void fw_card_initialize(struct fw_card *card,
38995 const struct fw_card_driver *driver, struct device *device);
38996diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
38997index 94a58a0..f5eba42 100644
38998--- a/drivers/firmware/dmi-id.c
38999+++ b/drivers/firmware/dmi-id.c
39000@@ -16,7 +16,7 @@
39001 struct dmi_device_attribute{
39002 struct device_attribute dev_attr;
39003 int field;
39004-};
39005+} __do_const;
39006 #define to_dmi_dev_attr(_dev_attr) \
39007 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
39008
39009diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
39010index c7e81ff..94a7401 100644
39011--- a/drivers/firmware/dmi_scan.c
39012+++ b/drivers/firmware/dmi_scan.c
39013@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
39014 if (buf == NULL)
39015 return -1;
39016
39017- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
39018+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
39019
39020 iounmap(buf);
39021 return 0;
39022diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
39023index 1491dd4..aa910db 100644
39024--- a/drivers/firmware/efi/cper.c
39025+++ b/drivers/firmware/efi/cper.c
39026@@ -41,12 +41,12 @@
39027 */
39028 u64 cper_next_record_id(void)
39029 {
39030- static atomic64_t seq;
39031+ static atomic64_unchecked_t seq;
39032
39033- if (!atomic64_read(&seq))
39034- atomic64_set(&seq, ((u64)get_seconds()) << 32);
39035+ if (!atomic64_read_unchecked(&seq))
39036+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
39037
39038- return atomic64_inc_return(&seq);
39039+ return atomic64_inc_return_unchecked(&seq);
39040 }
39041 EXPORT_SYMBOL_GPL(cper_next_record_id);
39042
39043diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
39044index 2e2fbde..7676c8b 100644
39045--- a/drivers/firmware/efi/efi.c
39046+++ b/drivers/firmware/efi/efi.c
39047@@ -81,14 +81,16 @@ static struct attribute_group efi_subsys_attr_group = {
39048 };
39049
39050 static struct efivars generic_efivars;
39051-static struct efivar_operations generic_ops;
39052+static efivar_operations_no_const generic_ops __read_only;
39053
39054 static int generic_ops_register(void)
39055 {
39056- generic_ops.get_variable = efi.get_variable;
39057- generic_ops.set_variable = efi.set_variable;
39058- generic_ops.get_next_variable = efi.get_next_variable;
39059- generic_ops.query_variable_store = efi_query_variable_store;
39060+ pax_open_kernel();
39061+ *(void **)&generic_ops.get_variable = efi.get_variable;
39062+ *(void **)&generic_ops.set_variable = efi.set_variable;
39063+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
39064+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
39065+ pax_close_kernel();
39066
39067 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
39068 }
39069diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
39070index 3dc2482..7bd2f61 100644
39071--- a/drivers/firmware/efi/efivars.c
39072+++ b/drivers/firmware/efi/efivars.c
39073@@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
39074 static int
39075 create_efivars_bin_attributes(void)
39076 {
39077- struct bin_attribute *attr;
39078+ bin_attribute_no_const *attr;
39079 int error;
39080
39081 /* new_var */
39082diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
39083index 2a90ba6..07f3733 100644
39084--- a/drivers/firmware/google/memconsole.c
39085+++ b/drivers/firmware/google/memconsole.c
39086@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
39087 if (!found_memconsole())
39088 return -ENODEV;
39089
39090- memconsole_bin_attr.size = memconsole_length;
39091+ pax_open_kernel();
39092+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
39093+ pax_close_kernel();
39094
39095 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
39096
39097diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
39098index 814addb..0937d7f 100644
39099--- a/drivers/gpio/gpio-ich.c
39100+++ b/drivers/gpio/gpio-ich.c
39101@@ -71,7 +71,7 @@ struct ichx_desc {
39102 /* Some chipsets have quirks, let these use their own request/get */
39103 int (*request)(struct gpio_chip *chip, unsigned offset);
39104 int (*get)(struct gpio_chip *chip, unsigned offset);
39105-};
39106+} __do_const;
39107
39108 static struct {
39109 spinlock_t lock;
39110diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
39111index 9902732..64b62dd 100644
39112--- a/drivers/gpio/gpio-vr41xx.c
39113+++ b/drivers/gpio/gpio-vr41xx.c
39114@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
39115 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
39116 maskl, pendl, maskh, pendh);
39117
39118- atomic_inc(&irq_err_count);
39119+ atomic_inc_unchecked(&irq_err_count);
39120
39121 return -EINVAL;
39122 }
39123diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
39124index 62d0ff3..073dbf3 100644
39125--- a/drivers/gpu/drm/armada/armada_drv.c
39126+++ b/drivers/gpu/drm/armada/armada_drv.c
39127@@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev,
39128 {
39129 struct armada_private *priv = dev->dev_private;
39130
39131- /*
39132- * Yes, we really must jump through these hoops just to store a
39133- * _pointer_ to something into the kfifo. This is utterly insane
39134- * and idiotic, because it kfifo requires the _data_ pointed to by
39135- * the pointer const, not the pointer itself. Not only that, but
39136- * you have to pass a pointer _to_ the pointer you want stored.
39137- */
39138- const struct drm_framebuffer *silly_api_alert = fb;
39139- WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
39140+ WARN_ON(!kfifo_put(&priv->fb_unref, fb));
39141 schedule_work(&priv->fb_unref_work);
39142 }
39143
39144diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
39145index d6cf77c..2842146 100644
39146--- a/drivers/gpu/drm/drm_crtc.c
39147+++ b/drivers/gpu/drm/drm_crtc.c
39148@@ -3102,7 +3102,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
39149 goto done;
39150 }
39151
39152- if (copy_to_user(&enum_ptr[copied].name,
39153+ if (copy_to_user(enum_ptr[copied].name,
39154 &prop_enum->name, DRM_PROP_NAME_LEN)) {
39155 ret = -EFAULT;
39156 goto done;
39157diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
39158index 01361ab..891e821 100644
39159--- a/drivers/gpu/drm/drm_crtc_helper.c
39160+++ b/drivers/gpu/drm/drm_crtc_helper.c
39161@@ -338,7 +338,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
39162 struct drm_crtc *tmp;
39163 int crtc_mask = 1;
39164
39165- WARN(!crtc, "checking null crtc?\n");
39166+ BUG_ON(!crtc);
39167
39168 dev = crtc->dev;
39169
39170diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
39171index d9137e4..69b73a0 100644
39172--- a/drivers/gpu/drm/drm_drv.c
39173+++ b/drivers/gpu/drm/drm_drv.c
39174@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
39175 /**
39176 * Copy and IOCTL return string to user space
39177 */
39178-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
39179+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
39180 {
39181 int len;
39182
39183@@ -303,7 +303,7 @@ long drm_ioctl(struct file *filp,
39184 struct drm_file *file_priv = filp->private_data;
39185 struct drm_device *dev;
39186 const struct drm_ioctl_desc *ioctl = NULL;
39187- drm_ioctl_t *func;
39188+ drm_ioctl_no_const_t func;
39189 unsigned int nr = DRM_IOCTL_NR(cmd);
39190 int retcode = -EINVAL;
39191 char stack_kdata[128];
39192diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
39193index c5b929c..8a3b8be 100644
39194--- a/drivers/gpu/drm/drm_fops.c
39195+++ b/drivers/gpu/drm/drm_fops.c
39196@@ -97,7 +97,7 @@ int drm_open(struct inode *inode, struct file *filp)
39197 if (drm_device_is_unplugged(dev))
39198 return -ENODEV;
39199
39200- if (!dev->open_count++)
39201+ if (local_inc_return(&dev->open_count) == 1)
39202 need_setup = 1;
39203 mutex_lock(&dev->struct_mutex);
39204 old_imapping = inode->i_mapping;
39205@@ -127,7 +127,7 @@ err_undo:
39206 iput(container_of(dev->dev_mapping, struct inode, i_data));
39207 dev->dev_mapping = old_mapping;
39208 mutex_unlock(&dev->struct_mutex);
39209- dev->open_count--;
39210+ local_dec(&dev->open_count);
39211 return retcode;
39212 }
39213 EXPORT_SYMBOL(drm_open);
39214@@ -467,7 +467,7 @@ int drm_release(struct inode *inode, struct file *filp)
39215
39216 mutex_lock(&drm_global_mutex);
39217
39218- DRM_DEBUG("open_count = %d\n", dev->open_count);
39219+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
39220
39221 if (dev->driver->preclose)
39222 dev->driver->preclose(dev, file_priv);
39223@@ -476,10 +476,10 @@ int drm_release(struct inode *inode, struct file *filp)
39224 * Begin inline drm_release
39225 */
39226
39227- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
39228+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
39229 task_pid_nr(current),
39230 (long)old_encode_dev(file_priv->minor->device),
39231- dev->open_count);
39232+ local_read(&dev->open_count));
39233
39234 /* Release any auth tokens that might point to this file_priv,
39235 (do that under the drm_global_mutex) */
39236@@ -577,7 +577,7 @@ int drm_release(struct inode *inode, struct file *filp)
39237 * End inline drm_release
39238 */
39239
39240- if (!--dev->open_count) {
39241+ if (local_dec_and_test(&dev->open_count)) {
39242 if (atomic_read(&dev->ioctl_count)) {
39243 DRM_ERROR("Device busy: %d\n",
39244 atomic_read(&dev->ioctl_count));
39245diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
39246index 3d2e91c..d31c4c9 100644
39247--- a/drivers/gpu/drm/drm_global.c
39248+++ b/drivers/gpu/drm/drm_global.c
39249@@ -36,7 +36,7 @@
39250 struct drm_global_item {
39251 struct mutex mutex;
39252 void *object;
39253- int refcount;
39254+ atomic_t refcount;
39255 };
39256
39257 static struct drm_global_item glob[DRM_GLOBAL_NUM];
39258@@ -49,7 +49,7 @@ void drm_global_init(void)
39259 struct drm_global_item *item = &glob[i];
39260 mutex_init(&item->mutex);
39261 item->object = NULL;
39262- item->refcount = 0;
39263+ atomic_set(&item->refcount, 0);
39264 }
39265 }
39266
39267@@ -59,7 +59,7 @@ void drm_global_release(void)
39268 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
39269 struct drm_global_item *item = &glob[i];
39270 BUG_ON(item->object != NULL);
39271- BUG_ON(item->refcount != 0);
39272+ BUG_ON(atomic_read(&item->refcount) != 0);
39273 }
39274 }
39275
39276@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
39277 struct drm_global_item *item = &glob[ref->global_type];
39278
39279 mutex_lock(&item->mutex);
39280- if (item->refcount == 0) {
39281+ if (atomic_read(&item->refcount) == 0) {
39282 item->object = kzalloc(ref->size, GFP_KERNEL);
39283 if (unlikely(item->object == NULL)) {
39284 ret = -ENOMEM;
39285@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
39286 goto out_err;
39287
39288 }
39289- ++item->refcount;
39290+ atomic_inc(&item->refcount);
39291 ref->object = item->object;
39292 mutex_unlock(&item->mutex);
39293 return 0;
39294@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
39295 struct drm_global_item *item = &glob[ref->global_type];
39296
39297 mutex_lock(&item->mutex);
39298- BUG_ON(item->refcount == 0);
39299+ BUG_ON(atomic_read(&item->refcount) == 0);
39300 BUG_ON(ref->object != item->object);
39301- if (--item->refcount == 0) {
39302+ if (atomic_dec_and_test(&item->refcount)) {
39303 ref->release(ref);
39304 item->object = NULL;
39305 }
39306diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
39307index 7d5a152..d7186da 100644
39308--- a/drivers/gpu/drm/drm_info.c
39309+++ b/drivers/gpu/drm/drm_info.c
39310@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
39311 struct drm_local_map *map;
39312 struct drm_map_list *r_list;
39313
39314- /* Hardcoded from _DRM_FRAME_BUFFER,
39315- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
39316- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
39317- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
39318+ static const char * const types[] = {
39319+ [_DRM_FRAME_BUFFER] = "FB",
39320+ [_DRM_REGISTERS] = "REG",
39321+ [_DRM_SHM] = "SHM",
39322+ [_DRM_AGP] = "AGP",
39323+ [_DRM_SCATTER_GATHER] = "SG",
39324+ [_DRM_CONSISTENT] = "PCI",
39325+ [_DRM_GEM] = "GEM" };
39326 const char *type;
39327 int i;
39328
39329@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
39330 map = r_list->map;
39331 if (!map)
39332 continue;
39333- if (map->type < 0 || map->type > 5)
39334+ if (map->type >= ARRAY_SIZE(types))
39335 type = "??";
39336 else
39337 type = types[map->type];
39338@@ -257,7 +261,11 @@ int drm_vma_info(struct seq_file *m, void *data)
39339 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
39340 vma->vm_flags & VM_LOCKED ? 'l' : '-',
39341 vma->vm_flags & VM_IO ? 'i' : '-',
39342+#ifdef CONFIG_GRKERNSEC_HIDESYM
39343+ 0);
39344+#else
39345 vma->vm_pgoff);
39346+#endif
39347
39348 #if defined(__i386__)
39349 pgprot = pgprot_val(vma->vm_page_prot);
39350diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
39351index 2f4c434..dd12cd2 100644
39352--- a/drivers/gpu/drm/drm_ioc32.c
39353+++ b/drivers/gpu/drm/drm_ioc32.c
39354@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
39355 request = compat_alloc_user_space(nbytes);
39356 if (!access_ok(VERIFY_WRITE, request, nbytes))
39357 return -EFAULT;
39358- list = (struct drm_buf_desc *) (request + 1);
39359+ list = (struct drm_buf_desc __user *) (request + 1);
39360
39361 if (__put_user(count, &request->count)
39362 || __put_user(list, &request->list))
39363@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
39364 request = compat_alloc_user_space(nbytes);
39365 if (!access_ok(VERIFY_WRITE, request, nbytes))
39366 return -EFAULT;
39367- list = (struct drm_buf_pub *) (request + 1);
39368+ list = (struct drm_buf_pub __user *) (request + 1);
39369
39370 if (__put_user(count, &request->count)
39371 || __put_user(list, &request->list))
39372@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
39373 return 0;
39374 }
39375
39376-drm_ioctl_compat_t *drm_compat_ioctls[] = {
39377+drm_ioctl_compat_t drm_compat_ioctls[] = {
39378 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
39379 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
39380 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
39381@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
39382 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39383 {
39384 unsigned int nr = DRM_IOCTL_NR(cmd);
39385- drm_ioctl_compat_t *fn;
39386 int ret;
39387
39388 /* Assume that ioctls without an explicit compat routine will just
39389@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39390 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
39391 return drm_ioctl(filp, cmd, arg);
39392
39393- fn = drm_compat_ioctls[nr];
39394-
39395- if (fn != NULL)
39396- ret = (*fn) (filp, cmd, arg);
39397+ if (drm_compat_ioctls[nr] != NULL)
39398+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
39399 else
39400 ret = drm_ioctl(filp, cmd, arg);
39401
39402diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
39403index 66dd3a0..3bed6c4 100644
39404--- a/drivers/gpu/drm/drm_stub.c
39405+++ b/drivers/gpu/drm/drm_stub.c
39406@@ -403,7 +403,7 @@ void drm_unplug_dev(struct drm_device *dev)
39407
39408 drm_device_set_unplugged(dev);
39409
39410- if (dev->open_count == 0) {
39411+ if (local_read(&dev->open_count) == 0) {
39412 drm_put_dev(dev);
39413 }
39414 mutex_unlock(&drm_global_mutex);
39415diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
39416index c22c309..ae758c3 100644
39417--- a/drivers/gpu/drm/drm_sysfs.c
39418+++ b/drivers/gpu/drm/drm_sysfs.c
39419@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev)
39420 */
39421 int drm_sysfs_device_add(struct drm_minor *minor)
39422 {
39423- char *minor_str;
39424+ const char *minor_str;
39425 int r;
39426
39427 if (minor->type == DRM_MINOR_CONTROL)
39428diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
39429index d4d16ed..8fb0b51 100644
39430--- a/drivers/gpu/drm/i810/i810_drv.h
39431+++ b/drivers/gpu/drm/i810/i810_drv.h
39432@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
39433 int page_flipping;
39434
39435 wait_queue_head_t irq_queue;
39436- atomic_t irq_received;
39437- atomic_t irq_emitted;
39438+ atomic_unchecked_t irq_received;
39439+ atomic_unchecked_t irq_emitted;
39440
39441 int front_offset;
39442 } drm_i810_private_t;
39443diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
39444index 6ed45a9..eb6dc41 100644
39445--- a/drivers/gpu/drm/i915/i915_debugfs.c
39446+++ b/drivers/gpu/drm/i915/i915_debugfs.c
39447@@ -702,7 +702,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
39448 I915_READ(GTIMR));
39449 }
39450 seq_printf(m, "Interrupts received: %d\n",
39451- atomic_read(&dev_priv->irq_received));
39452+ atomic_read_unchecked(&dev_priv->irq_received));
39453 for_each_ring(ring, dev_priv, i) {
39454 if (INTEL_INFO(dev)->gen >= 6) {
39455 seq_printf(m,
39456diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
39457index 5c64842..f14bdf8 100644
39458--- a/drivers/gpu/drm/i915/i915_dma.c
39459+++ b/drivers/gpu/drm/i915/i915_dma.c
39460@@ -1271,7 +1271,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
39461 bool can_switch;
39462
39463 spin_lock(&dev->count_lock);
39464- can_switch = (dev->open_count == 0);
39465+ can_switch = (local_read(&dev->open_count) == 0);
39466 spin_unlock(&dev->count_lock);
39467 return can_switch;
39468 }
39469diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
39470index 90fcccb..b8aabc9 100644
39471--- a/drivers/gpu/drm/i915/i915_drv.h
39472+++ b/drivers/gpu/drm/i915/i915_drv.h
39473@@ -1325,7 +1325,7 @@ typedef struct drm_i915_private {
39474 drm_dma_handle_t *status_page_dmah;
39475 struct resource mch_res;
39476
39477- atomic_t irq_received;
39478+ atomic_unchecked_t irq_received;
39479
39480 /* protects the irq masks */
39481 spinlock_t irq_lock;
39482diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
39483index a3ba9a8..ee52ddd 100644
39484--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
39485+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
39486@@ -861,9 +861,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
39487
39488 static int
39489 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
39490- int count)
39491+ unsigned int count)
39492 {
39493- int i;
39494+ unsigned int i;
39495 unsigned relocs_total = 0;
39496 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
39497
39498diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
39499index 3c59584..500f2e9 100644
39500--- a/drivers/gpu/drm/i915/i915_ioc32.c
39501+++ b/drivers/gpu/drm/i915/i915_ioc32.c
39502@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
39503 (unsigned long)request);
39504 }
39505
39506-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
39507+static drm_ioctl_compat_t i915_compat_ioctls[] = {
39508 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
39509 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
39510 [DRM_I915_GETPARAM] = compat_i915_getparam,
39511@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
39512 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39513 {
39514 unsigned int nr = DRM_IOCTL_NR(cmd);
39515- drm_ioctl_compat_t *fn = NULL;
39516 int ret;
39517
39518 if (nr < DRM_COMMAND_BASE)
39519 return drm_compat_ioctl(filp, cmd, arg);
39520
39521- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
39522- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
39523-
39524- if (fn != NULL)
39525+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
39526+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
39527 ret = (*fn) (filp, cmd, arg);
39528- else
39529+ } else
39530 ret = drm_ioctl(filp, cmd, arg);
39531
39532 return ret;
39533diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
39534index f13d5ed..8e6f36d 100644
39535--- a/drivers/gpu/drm/i915/i915_irq.c
39536+++ b/drivers/gpu/drm/i915/i915_irq.c
39537@@ -1420,7 +1420,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
39538 int pipe;
39539 u32 pipe_stats[I915_MAX_PIPES];
39540
39541- atomic_inc(&dev_priv->irq_received);
39542+ atomic_inc_unchecked(&dev_priv->irq_received);
39543
39544 while (true) {
39545 iir = I915_READ(VLV_IIR);
39546@@ -1730,7 +1730,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
39547 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
39548 irqreturn_t ret = IRQ_NONE;
39549
39550- atomic_inc(&dev_priv->irq_received);
39551+ atomic_inc_unchecked(&dev_priv->irq_received);
39552
39553 /* We get interrupts on unclaimed registers, so check for this before we
39554 * do any I915_{READ,WRITE}. */
39555@@ -1800,7 +1800,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
39556 uint32_t tmp = 0;
39557 enum pipe pipe;
39558
39559- atomic_inc(&dev_priv->irq_received);
39560+ atomic_inc_unchecked(&dev_priv->irq_received);
39561
39562 master_ctl = I915_READ(GEN8_MASTER_IRQ);
39563 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
39564@@ -2624,7 +2624,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
39565 {
39566 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39567
39568- atomic_set(&dev_priv->irq_received, 0);
39569+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39570
39571 I915_WRITE(HWSTAM, 0xeffe);
39572
39573@@ -2642,7 +2642,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
39574 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39575 int pipe;
39576
39577- atomic_set(&dev_priv->irq_received, 0);
39578+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39579
39580 /* VLV magic */
39581 I915_WRITE(VLV_IMR, 0);
39582@@ -2673,7 +2673,7 @@ static void gen8_irq_preinstall(struct drm_device *dev)
39583 struct drm_i915_private *dev_priv = dev->dev_private;
39584 int pipe;
39585
39586- atomic_set(&dev_priv->irq_received, 0);
39587+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39588
39589 I915_WRITE(GEN8_MASTER_IRQ, 0);
39590 POSTING_READ(GEN8_MASTER_IRQ);
39591@@ -2999,7 +2999,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
39592 if (!dev_priv)
39593 return;
39594
39595- atomic_set(&dev_priv->irq_received, 0);
39596+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39597
39598 I915_WRITE(GEN8_MASTER_IRQ, 0);
39599
39600@@ -3093,7 +3093,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
39601 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39602 int pipe;
39603
39604- atomic_set(&dev_priv->irq_received, 0);
39605+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39606
39607 for_each_pipe(pipe)
39608 I915_WRITE(PIPESTAT(pipe), 0);
39609@@ -3179,7 +3179,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
39610 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
39611 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39612
39613- atomic_inc(&dev_priv->irq_received);
39614+ atomic_inc_unchecked(&dev_priv->irq_received);
39615
39616 iir = I915_READ16(IIR);
39617 if (iir == 0)
39618@@ -3254,7 +3254,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
39619 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39620 int pipe;
39621
39622- atomic_set(&dev_priv->irq_received, 0);
39623+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39624
39625 if (I915_HAS_HOTPLUG(dev)) {
39626 I915_WRITE(PORT_HOTPLUG_EN, 0);
39627@@ -3361,7 +3361,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
39628 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39629 int pipe, ret = IRQ_NONE;
39630
39631- atomic_inc(&dev_priv->irq_received);
39632+ atomic_inc_unchecked(&dev_priv->irq_received);
39633
39634 iir = I915_READ(IIR);
39635 do {
39636@@ -3488,7 +3488,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
39637 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39638 int pipe;
39639
39640- atomic_set(&dev_priv->irq_received, 0);
39641+ atomic_set_unchecked(&dev_priv->irq_received, 0);
39642
39643 I915_WRITE(PORT_HOTPLUG_EN, 0);
39644 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
39645@@ -3604,7 +3604,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
39646 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
39647 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39648
39649- atomic_inc(&dev_priv->irq_received);
39650+ atomic_inc_unchecked(&dev_priv->irq_received);
39651
39652 iir = I915_READ(IIR);
39653
39654diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
39655index 2bde35d..529646c 100644
39656--- a/drivers/gpu/drm/i915/intel_display.c
39657+++ b/drivers/gpu/drm/i915/intel_display.c
39658@@ -10492,13 +10492,13 @@ struct intel_quirk {
39659 int subsystem_vendor;
39660 int subsystem_device;
39661 void (*hook)(struct drm_device *dev);
39662-};
39663+} __do_const;
39664
39665 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
39666 struct intel_dmi_quirk {
39667 void (*hook)(struct drm_device *dev);
39668 const struct dmi_system_id (*dmi_id_list)[];
39669-};
39670+} __do_const;
39671
39672 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
39673 {
39674@@ -10506,18 +10506,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
39675 return 1;
39676 }
39677
39678-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
39679+static const struct dmi_system_id intel_dmi_quirks_table[] = {
39680 {
39681- .dmi_id_list = &(const struct dmi_system_id[]) {
39682- {
39683- .callback = intel_dmi_reverse_brightness,
39684- .ident = "NCR Corporation",
39685- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
39686- DMI_MATCH(DMI_PRODUCT_NAME, ""),
39687- },
39688- },
39689- { } /* terminating entry */
39690+ .callback = intel_dmi_reverse_brightness,
39691+ .ident = "NCR Corporation",
39692+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
39693+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
39694 },
39695+ },
39696+ { } /* terminating entry */
39697+};
39698+
39699+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
39700+ {
39701+ .dmi_id_list = &intel_dmi_quirks_table,
39702 .hook = quirk_invert_brightness,
39703 },
39704 };
39705diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
39706index ca4bc54..ee598a2 100644
39707--- a/drivers/gpu/drm/mga/mga_drv.h
39708+++ b/drivers/gpu/drm/mga/mga_drv.h
39709@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
39710 u32 clear_cmd;
39711 u32 maccess;
39712
39713- atomic_t vbl_received; /**< Number of vblanks received. */
39714+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
39715 wait_queue_head_t fence_queue;
39716- atomic_t last_fence_retired;
39717+ atomic_unchecked_t last_fence_retired;
39718 u32 next_fence_to_post;
39719
39720 unsigned int fb_cpp;
39721diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
39722index 709e90d..89a1c0d 100644
39723--- a/drivers/gpu/drm/mga/mga_ioc32.c
39724+++ b/drivers/gpu/drm/mga/mga_ioc32.c
39725@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
39726 return 0;
39727 }
39728
39729-drm_ioctl_compat_t *mga_compat_ioctls[] = {
39730+drm_ioctl_compat_t mga_compat_ioctls[] = {
39731 [DRM_MGA_INIT] = compat_mga_init,
39732 [DRM_MGA_GETPARAM] = compat_mga_getparam,
39733 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
39734@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
39735 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39736 {
39737 unsigned int nr = DRM_IOCTL_NR(cmd);
39738- drm_ioctl_compat_t *fn = NULL;
39739 int ret;
39740
39741 if (nr < DRM_COMMAND_BASE)
39742 return drm_compat_ioctl(filp, cmd, arg);
39743
39744- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
39745- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
39746-
39747- if (fn != NULL)
39748+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
39749+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
39750 ret = (*fn) (filp, cmd, arg);
39751- else
39752+ } else
39753 ret = drm_ioctl(filp, cmd, arg);
39754
39755 return ret;
39756diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
39757index 2b0ceb8..517e99e 100644
39758--- a/drivers/gpu/drm/mga/mga_irq.c
39759+++ b/drivers/gpu/drm/mga/mga_irq.c
39760@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
39761 if (crtc != 0)
39762 return 0;
39763
39764- return atomic_read(&dev_priv->vbl_received);
39765+ return atomic_read_unchecked(&dev_priv->vbl_received);
39766 }
39767
39768
39769@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39770 /* VBLANK interrupt */
39771 if (status & MGA_VLINEPEN) {
39772 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
39773- atomic_inc(&dev_priv->vbl_received);
39774+ atomic_inc_unchecked(&dev_priv->vbl_received);
39775 drm_handle_vblank(dev, 0);
39776 handled = 1;
39777 }
39778@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39779 if ((prim_start & ~0x03) != (prim_end & ~0x03))
39780 MGA_WRITE(MGA_PRIMEND, prim_end);
39781
39782- atomic_inc(&dev_priv->last_fence_retired);
39783+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
39784 DRM_WAKEUP(&dev_priv->fence_queue);
39785 handled = 1;
39786 }
39787@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
39788 * using fences.
39789 */
39790 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
39791- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
39792+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
39793 - *sequence) <= (1 << 23)));
39794
39795 *sequence = cur_fence;
39796diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
39797index 4c3feaa..26391ce 100644
39798--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
39799+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
39800@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
39801 struct bit_table {
39802 const char id;
39803 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
39804-};
39805+} __no_const;
39806
39807 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
39808
39809diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
39810index 4b0fb6c..67667a9 100644
39811--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
39812+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
39813@@ -96,7 +96,6 @@ struct nouveau_drm {
39814 struct drm_global_reference mem_global_ref;
39815 struct ttm_bo_global_ref bo_global_ref;
39816 struct ttm_bo_device bdev;
39817- atomic_t validate_sequence;
39818 int (*move)(struct nouveau_channel *,
39819 struct ttm_buffer_object *,
39820 struct ttm_mem_reg *, struct ttm_mem_reg *);
39821diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39822index c1a7e5a..38b8539 100644
39823--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39824+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39825@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
39826 unsigned long arg)
39827 {
39828 unsigned int nr = DRM_IOCTL_NR(cmd);
39829- drm_ioctl_compat_t *fn = NULL;
39830+ drm_ioctl_compat_t fn = NULL;
39831 int ret;
39832
39833 if (nr < DRM_COMMAND_BASE)
39834diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
39835index 19e3757..ad16478 100644
39836--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
39837+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
39838@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
39839 }
39840
39841 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
39842- nouveau_vram_manager_init,
39843- nouveau_vram_manager_fini,
39844- nouveau_vram_manager_new,
39845- nouveau_vram_manager_del,
39846- nouveau_vram_manager_debug
39847+ .init = nouveau_vram_manager_init,
39848+ .takedown = nouveau_vram_manager_fini,
39849+ .get_node = nouveau_vram_manager_new,
39850+ .put_node = nouveau_vram_manager_del,
39851+ .debug = nouveau_vram_manager_debug
39852 };
39853
39854 static int
39855@@ -198,11 +198,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
39856 }
39857
39858 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
39859- nouveau_gart_manager_init,
39860- nouveau_gart_manager_fini,
39861- nouveau_gart_manager_new,
39862- nouveau_gart_manager_del,
39863- nouveau_gart_manager_debug
39864+ .init = nouveau_gart_manager_init,
39865+ .takedown = nouveau_gart_manager_fini,
39866+ .get_node = nouveau_gart_manager_new,
39867+ .put_node = nouveau_gart_manager_del,
39868+ .debug = nouveau_gart_manager_debug
39869 };
39870
39871 #include <core/subdev/vm/nv04.h>
39872@@ -270,11 +270,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
39873 }
39874
39875 const struct ttm_mem_type_manager_func nv04_gart_manager = {
39876- nv04_gart_manager_init,
39877- nv04_gart_manager_fini,
39878- nv04_gart_manager_new,
39879- nv04_gart_manager_del,
39880- nv04_gart_manager_debug
39881+ .init = nv04_gart_manager_init,
39882+ .takedown = nv04_gart_manager_fini,
39883+ .get_node = nv04_gart_manager_new,
39884+ .put_node = nv04_gart_manager_del,
39885+ .debug = nv04_gart_manager_debug
39886 };
39887
39888 int
39889diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
39890index 81638d7..2e45854 100644
39891--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
39892+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
39893@@ -65,7 +65,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
39894 bool can_switch;
39895
39896 spin_lock(&dev->count_lock);
39897- can_switch = (dev->open_count == 0);
39898+ can_switch = (local_read(&dev->open_count) == 0);
39899 spin_unlock(&dev->count_lock);
39900 return can_switch;
39901 }
39902diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
39903index eb89653..613cf71 100644
39904--- a/drivers/gpu/drm/qxl/qxl_cmd.c
39905+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
39906@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
39907 int ret;
39908
39909 mutex_lock(&qdev->async_io_mutex);
39910- irq_num = atomic_read(&qdev->irq_received_io_cmd);
39911+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
39912 if (qdev->last_sent_io_cmd > irq_num) {
39913 if (intr)
39914 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
39915- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39916+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39917 else
39918 ret = wait_event_timeout(qdev->io_cmd_event,
39919- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39920+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39921 /* 0 is timeout, just bail the "hw" has gone away */
39922 if (ret <= 0)
39923 goto out;
39924- irq_num = atomic_read(&qdev->irq_received_io_cmd);
39925+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
39926 }
39927 outb(val, addr);
39928 qdev->last_sent_io_cmd = irq_num + 1;
39929 if (intr)
39930 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
39931- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39932+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39933 else
39934 ret = wait_event_timeout(qdev->io_cmd_event,
39935- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39936+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39937 out:
39938 if (ret > 0)
39939 ret = 0;
39940diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
39941index c3c2bbd..bc3c0fb 100644
39942--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
39943+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
39944@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
39945 struct drm_info_node *node = (struct drm_info_node *) m->private;
39946 struct qxl_device *qdev = node->minor->dev->dev_private;
39947
39948- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
39949- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
39950- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
39951- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
39952+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
39953+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
39954+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
39955+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
39956 seq_printf(m, "%d\n", qdev->irq_received_error);
39957 return 0;
39958 }
39959diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
39960index 7bda32f..dd98fc5 100644
39961--- a/drivers/gpu/drm/qxl/qxl_drv.h
39962+++ b/drivers/gpu/drm/qxl/qxl_drv.h
39963@@ -290,10 +290,10 @@ struct qxl_device {
39964 unsigned int last_sent_io_cmd;
39965
39966 /* interrupt handling */
39967- atomic_t irq_received;
39968- atomic_t irq_received_display;
39969- atomic_t irq_received_cursor;
39970- atomic_t irq_received_io_cmd;
39971+ atomic_unchecked_t irq_received;
39972+ atomic_unchecked_t irq_received_display;
39973+ atomic_unchecked_t irq_received_cursor;
39974+ atomic_unchecked_t irq_received_io_cmd;
39975 unsigned irq_received_error;
39976 wait_queue_head_t display_event;
39977 wait_queue_head_t cursor_event;
39978diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
39979index 7b95c75..9cffb4f 100644
39980--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
39981+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
39982@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
39983
39984 /* TODO copy slow path code from i915 */
39985 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
39986- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
39987+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
39988
39989 {
39990 struct qxl_drawable *draw = fb_cmd;
39991@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
39992 struct drm_qxl_reloc reloc;
39993
39994 if (DRM_COPY_FROM_USER(&reloc,
39995- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
39996+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
39997 sizeof(reloc))) {
39998 ret = -EFAULT;
39999 goto out_free_bos;
40000@@ -297,7 +297,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
40001 struct drm_qxl_command *commands =
40002 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
40003
40004- if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
40005+ if (DRM_COPY_FROM_USER(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
40006 sizeof(user_cmd)))
40007 return -EFAULT;
40008
40009diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
40010index 21393dc..329f3a9 100644
40011--- a/drivers/gpu/drm/qxl/qxl_irq.c
40012+++ b/drivers/gpu/drm/qxl/qxl_irq.c
40013@@ -33,19 +33,19 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
40014
40015 pending = xchg(&qdev->ram_header->int_pending, 0);
40016
40017- atomic_inc(&qdev->irq_received);
40018+ atomic_inc_unchecked(&qdev->irq_received);
40019
40020 if (pending & QXL_INTERRUPT_DISPLAY) {
40021- atomic_inc(&qdev->irq_received_display);
40022+ atomic_inc_unchecked(&qdev->irq_received_display);
40023 wake_up_all(&qdev->display_event);
40024 qxl_queue_garbage_collect(qdev, false);
40025 }
40026 if (pending & QXL_INTERRUPT_CURSOR) {
40027- atomic_inc(&qdev->irq_received_cursor);
40028+ atomic_inc_unchecked(&qdev->irq_received_cursor);
40029 wake_up_all(&qdev->cursor_event);
40030 }
40031 if (pending & QXL_INTERRUPT_IO_CMD) {
40032- atomic_inc(&qdev->irq_received_io_cmd);
40033+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
40034 wake_up_all(&qdev->io_cmd_event);
40035 }
40036 if (pending & QXL_INTERRUPT_ERROR) {
40037@@ -82,10 +82,10 @@ int qxl_irq_init(struct qxl_device *qdev)
40038 init_waitqueue_head(&qdev->io_cmd_event);
40039 INIT_WORK(&qdev->client_monitors_config_work,
40040 qxl_client_monitors_config_work_func);
40041- atomic_set(&qdev->irq_received, 0);
40042- atomic_set(&qdev->irq_received_display, 0);
40043- atomic_set(&qdev->irq_received_cursor, 0);
40044- atomic_set(&qdev->irq_received_io_cmd, 0);
40045+ atomic_set_unchecked(&qdev->irq_received, 0);
40046+ atomic_set_unchecked(&qdev->irq_received_display, 0);
40047+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
40048+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
40049 qdev->irq_received_error = 0;
40050 ret = drm_irq_install(qdev->ddev);
40051 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
40052diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
40053index c7e7e65..7dddd4d 100644
40054--- a/drivers/gpu/drm/qxl/qxl_ttm.c
40055+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
40056@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
40057 }
40058 }
40059
40060-static struct vm_operations_struct qxl_ttm_vm_ops;
40061+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
40062 static const struct vm_operations_struct *ttm_vm_ops;
40063
40064 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40065@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
40066 return r;
40067 if (unlikely(ttm_vm_ops == NULL)) {
40068 ttm_vm_ops = vma->vm_ops;
40069+ pax_open_kernel();
40070 qxl_ttm_vm_ops = *ttm_vm_ops;
40071 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
40072+ pax_close_kernel();
40073 }
40074 vma->vm_ops = &qxl_ttm_vm_ops;
40075 return 0;
40076@@ -560,25 +562,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
40077 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
40078 {
40079 #if defined(CONFIG_DEBUG_FS)
40080- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
40081- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
40082- unsigned i;
40083+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
40084+ {
40085+ .name = "qxl_mem_mm",
40086+ .show = &qxl_mm_dump_table,
40087+ },
40088+ {
40089+ .name = "qxl_surf_mm",
40090+ .show = &qxl_mm_dump_table,
40091+ }
40092+ };
40093
40094- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
40095- if (i == 0)
40096- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
40097- else
40098- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
40099- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
40100- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
40101- qxl_mem_types_list[i].driver_features = 0;
40102- if (i == 0)
40103- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40104- else
40105- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40106+ pax_open_kernel();
40107+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40108+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40109+ pax_close_kernel();
40110
40111- }
40112- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
40113+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
40114 #else
40115 return 0;
40116 #endif
40117diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
40118index c451257..0ad2134 100644
40119--- a/drivers/gpu/drm/r128/r128_cce.c
40120+++ b/drivers/gpu/drm/r128/r128_cce.c
40121@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
40122
40123 /* GH: Simple idle check.
40124 */
40125- atomic_set(&dev_priv->idle_count, 0);
40126+ atomic_set_unchecked(&dev_priv->idle_count, 0);
40127
40128 /* We don't support anything other than bus-mastering ring mode,
40129 * but the ring can be in either AGP or PCI space for the ring
40130diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
40131index 56eb5e3..c4ec43d 100644
40132--- a/drivers/gpu/drm/r128/r128_drv.h
40133+++ b/drivers/gpu/drm/r128/r128_drv.h
40134@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
40135 int is_pci;
40136 unsigned long cce_buffers_offset;
40137
40138- atomic_t idle_count;
40139+ atomic_unchecked_t idle_count;
40140
40141 int page_flipping;
40142 int current_page;
40143 u32 crtc_offset;
40144 u32 crtc_offset_cntl;
40145
40146- atomic_t vbl_received;
40147+ atomic_unchecked_t vbl_received;
40148
40149 u32 color_fmt;
40150 unsigned int front_offset;
40151diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
40152index a954c54..9cc595c 100644
40153--- a/drivers/gpu/drm/r128/r128_ioc32.c
40154+++ b/drivers/gpu/drm/r128/r128_ioc32.c
40155@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
40156 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
40157 }
40158
40159-drm_ioctl_compat_t *r128_compat_ioctls[] = {
40160+drm_ioctl_compat_t r128_compat_ioctls[] = {
40161 [DRM_R128_INIT] = compat_r128_init,
40162 [DRM_R128_DEPTH] = compat_r128_depth,
40163 [DRM_R128_STIPPLE] = compat_r128_stipple,
40164@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
40165 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40166 {
40167 unsigned int nr = DRM_IOCTL_NR(cmd);
40168- drm_ioctl_compat_t *fn = NULL;
40169 int ret;
40170
40171 if (nr < DRM_COMMAND_BASE)
40172 return drm_compat_ioctl(filp, cmd, arg);
40173
40174- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
40175- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
40176-
40177- if (fn != NULL)
40178+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
40179+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
40180 ret = (*fn) (filp, cmd, arg);
40181- else
40182+ } else
40183 ret = drm_ioctl(filp, cmd, arg);
40184
40185 return ret;
40186diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
40187index 2ea4f09..d391371 100644
40188--- a/drivers/gpu/drm/r128/r128_irq.c
40189+++ b/drivers/gpu/drm/r128/r128_irq.c
40190@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
40191 if (crtc != 0)
40192 return 0;
40193
40194- return atomic_read(&dev_priv->vbl_received);
40195+ return atomic_read_unchecked(&dev_priv->vbl_received);
40196 }
40197
40198 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
40199@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
40200 /* VBLANK interrupt */
40201 if (status & R128_CRTC_VBLANK_INT) {
40202 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
40203- atomic_inc(&dev_priv->vbl_received);
40204+ atomic_inc_unchecked(&dev_priv->vbl_received);
40205 drm_handle_vblank(dev, 0);
40206 return IRQ_HANDLED;
40207 }
40208diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
40209index 01dd9ae..6352f04 100644
40210--- a/drivers/gpu/drm/r128/r128_state.c
40211+++ b/drivers/gpu/drm/r128/r128_state.c
40212@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
40213
40214 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
40215 {
40216- if (atomic_read(&dev_priv->idle_count) == 0)
40217+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
40218 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
40219 else
40220- atomic_set(&dev_priv->idle_count, 0);
40221+ atomic_set_unchecked(&dev_priv->idle_count, 0);
40222 }
40223
40224 #endif
40225diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
40226index af85299..ed9ac8d 100644
40227--- a/drivers/gpu/drm/radeon/mkregtable.c
40228+++ b/drivers/gpu/drm/radeon/mkregtable.c
40229@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
40230 regex_t mask_rex;
40231 regmatch_t match[4];
40232 char buf[1024];
40233- size_t end;
40234+ long end;
40235 int len;
40236 int done = 0;
40237 int r;
40238 unsigned o;
40239 struct offset *offset;
40240 char last_reg_s[10];
40241- int last_reg;
40242+ unsigned long last_reg;
40243
40244 if (regcomp
40245 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
40246diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
40247index 39b033b..6efc056 100644
40248--- a/drivers/gpu/drm/radeon/radeon_device.c
40249+++ b/drivers/gpu/drm/radeon/radeon_device.c
40250@@ -1120,7 +1120,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
40251 bool can_switch;
40252
40253 spin_lock(&dev->count_lock);
40254- can_switch = (dev->open_count == 0);
40255+ can_switch = (local_read(&dev->open_count) == 0);
40256 spin_unlock(&dev->count_lock);
40257 return can_switch;
40258 }
40259diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
40260index 00e0d44..08381a4 100644
40261--- a/drivers/gpu/drm/radeon/radeon_drv.h
40262+++ b/drivers/gpu/drm/radeon/radeon_drv.h
40263@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
40264
40265 /* SW interrupt */
40266 wait_queue_head_t swi_queue;
40267- atomic_t swi_emitted;
40268+ atomic_unchecked_t swi_emitted;
40269 int vblank_crtc;
40270 uint32_t irq_enable_reg;
40271 uint32_t r500_disp_irq_reg;
40272diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
40273index bdb0f93..5ff558f 100644
40274--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
40275+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
40276@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
40277 request = compat_alloc_user_space(sizeof(*request));
40278 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
40279 || __put_user(req32.param, &request->param)
40280- || __put_user((void __user *)(unsigned long)req32.value,
40281+ || __put_user((unsigned long)req32.value,
40282 &request->value))
40283 return -EFAULT;
40284
40285@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
40286 #define compat_radeon_cp_setparam NULL
40287 #endif /* X86_64 || IA64 */
40288
40289-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
40290+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
40291 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
40292 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
40293 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
40294@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
40295 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40296 {
40297 unsigned int nr = DRM_IOCTL_NR(cmd);
40298- drm_ioctl_compat_t *fn = NULL;
40299 int ret;
40300
40301 if (nr < DRM_COMMAND_BASE)
40302 return drm_compat_ioctl(filp, cmd, arg);
40303
40304- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
40305- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
40306-
40307- if (fn != NULL)
40308+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
40309+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
40310 ret = (*fn) (filp, cmd, arg);
40311- else
40312+ } else
40313 ret = drm_ioctl(filp, cmd, arg);
40314
40315 return ret;
40316diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
40317index 8d68e97..9dcfed8 100644
40318--- a/drivers/gpu/drm/radeon/radeon_irq.c
40319+++ b/drivers/gpu/drm/radeon/radeon_irq.c
40320@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
40321 unsigned int ret;
40322 RING_LOCALS;
40323
40324- atomic_inc(&dev_priv->swi_emitted);
40325- ret = atomic_read(&dev_priv->swi_emitted);
40326+ atomic_inc_unchecked(&dev_priv->swi_emitted);
40327+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
40328
40329 BEGIN_RING(4);
40330 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
40331@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
40332 drm_radeon_private_t *dev_priv =
40333 (drm_radeon_private_t *) dev->dev_private;
40334
40335- atomic_set(&dev_priv->swi_emitted, 0);
40336+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
40337 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
40338
40339 dev->max_vblank_count = 0x001fffff;
40340diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
40341index 4d20910..6726b6d 100644
40342--- a/drivers/gpu/drm/radeon/radeon_state.c
40343+++ b/drivers/gpu/drm/radeon/radeon_state.c
40344@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
40345 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
40346 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
40347
40348- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
40349+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
40350 sarea_priv->nbox * sizeof(depth_boxes[0])))
40351 return -EFAULT;
40352
40353@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
40354 {
40355 drm_radeon_private_t *dev_priv = dev->dev_private;
40356 drm_radeon_getparam_t *param = data;
40357- int value;
40358+ int value = 0;
40359
40360 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
40361
40362diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
40363index 71245d6..94c556d 100644
40364--- a/drivers/gpu/drm/radeon/radeon_ttm.c
40365+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
40366@@ -784,7 +784,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
40367 man->size = size >> PAGE_SHIFT;
40368 }
40369
40370-static struct vm_operations_struct radeon_ttm_vm_ops;
40371+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
40372 static const struct vm_operations_struct *ttm_vm_ops = NULL;
40373
40374 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40375@@ -825,8 +825,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
40376 }
40377 if (unlikely(ttm_vm_ops == NULL)) {
40378 ttm_vm_ops = vma->vm_ops;
40379+ pax_open_kernel();
40380 radeon_ttm_vm_ops = *ttm_vm_ops;
40381 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
40382+ pax_close_kernel();
40383 }
40384 vma->vm_ops = &radeon_ttm_vm_ops;
40385 return 0;
40386@@ -855,38 +857,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
40387 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
40388 {
40389 #if defined(CONFIG_DEBUG_FS)
40390- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
40391- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
40392- unsigned i;
40393+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
40394+ {
40395+ .name = "radeon_vram_mm",
40396+ .show = &radeon_mm_dump_table,
40397+ },
40398+ {
40399+ .name = "radeon_gtt_mm",
40400+ .show = &radeon_mm_dump_table,
40401+ },
40402+ {
40403+ .name = "ttm_page_pool",
40404+ .show = &ttm_page_alloc_debugfs,
40405+ },
40406+ {
40407+ .name = "ttm_dma_page_pool",
40408+ .show = &ttm_dma_page_alloc_debugfs,
40409+ },
40410+ };
40411+ unsigned i = RADEON_DEBUGFS_MEM_TYPES + 1;
40412
40413- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
40414- if (i == 0)
40415- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
40416- else
40417- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
40418- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
40419- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
40420- radeon_mem_types_list[i].driver_features = 0;
40421- if (i == 0)
40422- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
40423- else
40424- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
40425-
40426- }
40427- /* Add ttm page pool to debugfs */
40428- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
40429- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
40430- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
40431- radeon_mem_types_list[i].driver_features = 0;
40432- radeon_mem_types_list[i++].data = NULL;
40433+ pax_open_kernel();
40434+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
40435+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
40436+ pax_close_kernel();
40437 #ifdef CONFIG_SWIOTLB
40438- if (swiotlb_nr_tbl()) {
40439- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
40440- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
40441- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
40442- radeon_mem_types_list[i].driver_features = 0;
40443- radeon_mem_types_list[i++].data = NULL;
40444- }
40445+ if (swiotlb_nr_tbl())
40446+ i++;
40447 #endif
40448 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
40449
40450diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
40451index ae1cb31..5b5b6b7c 100644
40452--- a/drivers/gpu/drm/tegra/dc.c
40453+++ b/drivers/gpu/drm/tegra/dc.c
40454@@ -1064,7 +1064,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
40455 }
40456
40457 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
40458- dc->debugfs_files[i].data = dc;
40459+ *(void **)&dc->debugfs_files[i].data = dc;
40460
40461 err = drm_debugfs_create_files(dc->debugfs_files,
40462 ARRAY_SIZE(debugfs_files),
40463diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
40464index 0cd9bc2..9759be4 100644
40465--- a/drivers/gpu/drm/tegra/hdmi.c
40466+++ b/drivers/gpu/drm/tegra/hdmi.c
40467@@ -57,7 +57,7 @@ struct tegra_hdmi {
40468 bool stereo;
40469 bool dvi;
40470
40471- struct drm_info_list *debugfs_files;
40472+ drm_info_list_no_const *debugfs_files;
40473 struct drm_minor *minor;
40474 struct dentry *debugfs;
40475 };
40476diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
40477index c58eba33..83c2728 100644
40478--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
40479+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
40480@@ -141,10 +141,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
40481 }
40482
40483 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
40484- ttm_bo_man_init,
40485- ttm_bo_man_takedown,
40486- ttm_bo_man_get_node,
40487- ttm_bo_man_put_node,
40488- ttm_bo_man_debug
40489+ .init = ttm_bo_man_init,
40490+ .takedown = ttm_bo_man_takedown,
40491+ .get_node = ttm_bo_man_get_node,
40492+ .put_node = ttm_bo_man_put_node,
40493+ .debug = ttm_bo_man_debug
40494 };
40495 EXPORT_SYMBOL(ttm_bo_manager_func);
40496diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
40497index dbc2def..0a9f710 100644
40498--- a/drivers/gpu/drm/ttm/ttm_memory.c
40499+++ b/drivers/gpu/drm/ttm/ttm_memory.c
40500@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
40501 zone->glob = glob;
40502 glob->zone_kernel = zone;
40503 ret = kobject_init_and_add(
40504- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
40505+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
40506 if (unlikely(ret != 0)) {
40507 kobject_put(&zone->kobj);
40508 return ret;
40509@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
40510 zone->glob = glob;
40511 glob->zone_dma32 = zone;
40512 ret = kobject_init_and_add(
40513- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
40514+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
40515 if (unlikely(ret != 0)) {
40516 kobject_put(&zone->kobj);
40517 return ret;
40518diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
40519index 863bef9..cba15cf 100644
40520--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
40521+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
40522@@ -391,9 +391,9 @@ out:
40523 static unsigned long
40524 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
40525 {
40526- static atomic_t start_pool = ATOMIC_INIT(0);
40527+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
40528 unsigned i;
40529- unsigned pool_offset = atomic_add_return(1, &start_pool);
40530+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
40531 struct ttm_page_pool *pool;
40532 int shrink_pages = sc->nr_to_scan;
40533 unsigned long freed = 0;
40534diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
40535index 97e9d61..bf23c461 100644
40536--- a/drivers/gpu/drm/udl/udl_fb.c
40537+++ b/drivers/gpu/drm/udl/udl_fb.c
40538@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
40539 fb_deferred_io_cleanup(info);
40540 kfree(info->fbdefio);
40541 info->fbdefio = NULL;
40542- info->fbops->fb_mmap = udl_fb_mmap;
40543 }
40544
40545 pr_warn("released /dev/fb%d user=%d count=%d\n",
40546diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
40547index a811ef2..ff99b05 100644
40548--- a/drivers/gpu/drm/via/via_drv.h
40549+++ b/drivers/gpu/drm/via/via_drv.h
40550@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
40551 typedef uint32_t maskarray_t[5];
40552
40553 typedef struct drm_via_irq {
40554- atomic_t irq_received;
40555+ atomic_unchecked_t irq_received;
40556 uint32_t pending_mask;
40557 uint32_t enable_mask;
40558 wait_queue_head_t irq_queue;
40559@@ -75,7 +75,7 @@ typedef struct drm_via_private {
40560 struct timeval last_vblank;
40561 int last_vblank_valid;
40562 unsigned usec_per_vblank;
40563- atomic_t vbl_received;
40564+ atomic_unchecked_t vbl_received;
40565 drm_via_state_t hc_state;
40566 char pci_buf[VIA_PCI_BUF_SIZE];
40567 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
40568diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
40569index ac98964..5dbf512 100644
40570--- a/drivers/gpu/drm/via/via_irq.c
40571+++ b/drivers/gpu/drm/via/via_irq.c
40572@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
40573 if (crtc != 0)
40574 return 0;
40575
40576- return atomic_read(&dev_priv->vbl_received);
40577+ return atomic_read_unchecked(&dev_priv->vbl_received);
40578 }
40579
40580 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
40581@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
40582
40583 status = VIA_READ(VIA_REG_INTERRUPT);
40584 if (status & VIA_IRQ_VBLANK_PENDING) {
40585- atomic_inc(&dev_priv->vbl_received);
40586- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
40587+ atomic_inc_unchecked(&dev_priv->vbl_received);
40588+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
40589 do_gettimeofday(&cur_vblank);
40590 if (dev_priv->last_vblank_valid) {
40591 dev_priv->usec_per_vblank =
40592@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
40593 dev_priv->last_vblank = cur_vblank;
40594 dev_priv->last_vblank_valid = 1;
40595 }
40596- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
40597+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
40598 DRM_DEBUG("US per vblank is: %u\n",
40599 dev_priv->usec_per_vblank);
40600 }
40601@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
40602
40603 for (i = 0; i < dev_priv->num_irqs; ++i) {
40604 if (status & cur_irq->pending_mask) {
40605- atomic_inc(&cur_irq->irq_received);
40606+ atomic_inc_unchecked(&cur_irq->irq_received);
40607 DRM_WAKEUP(&cur_irq->irq_queue);
40608 handled = 1;
40609 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
40610@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
40611 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
40612 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
40613 masks[irq][4]));
40614- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
40615+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
40616 } else {
40617 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
40618 (((cur_irq_sequence =
40619- atomic_read(&cur_irq->irq_received)) -
40620+ atomic_read_unchecked(&cur_irq->irq_received)) -
40621 *sequence) <= (1 << 23)));
40622 }
40623 *sequence = cur_irq_sequence;
40624@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
40625 }
40626
40627 for (i = 0; i < dev_priv->num_irqs; ++i) {
40628- atomic_set(&cur_irq->irq_received, 0);
40629+ atomic_set_unchecked(&cur_irq->irq_received, 0);
40630 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
40631 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
40632 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
40633@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
40634 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
40635 case VIA_IRQ_RELATIVE:
40636 irqwait->request.sequence +=
40637- atomic_read(&cur_irq->irq_received);
40638+ atomic_read_unchecked(&cur_irq->irq_received);
40639 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
40640 case VIA_IRQ_ABSOLUTE:
40641 break;
40642diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
40643index 20890ad..699e4f2 100644
40644--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
40645+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
40646@@ -342,7 +342,7 @@ struct vmw_private {
40647 * Fencing and IRQs.
40648 */
40649
40650- atomic_t marker_seq;
40651+ atomic_unchecked_t marker_seq;
40652 wait_queue_head_t fence_queue;
40653 wait_queue_head_t fifo_queue;
40654 int fence_queue_waiters; /* Protected by hw_mutex */
40655diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40656index 3eb1486..0a47ee9 100644
40657--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40658+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40659@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
40660 (unsigned int) min,
40661 (unsigned int) fifo->capabilities);
40662
40663- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
40664+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
40665 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
40666 vmw_marker_queue_init(&fifo->marker_queue);
40667 return vmw_fifo_send_fence(dev_priv, &dummy);
40668@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
40669 if (reserveable)
40670 iowrite32(bytes, fifo_mem +
40671 SVGA_FIFO_RESERVED);
40672- return fifo_mem + (next_cmd >> 2);
40673+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
40674 } else {
40675 need_bounce = true;
40676 }
40677@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
40678
40679 fm = vmw_fifo_reserve(dev_priv, bytes);
40680 if (unlikely(fm == NULL)) {
40681- *seqno = atomic_read(&dev_priv->marker_seq);
40682+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
40683 ret = -ENOMEM;
40684 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
40685 false, 3*HZ);
40686@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
40687 }
40688
40689 do {
40690- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
40691+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
40692 } while (*seqno == 0);
40693
40694 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
40695diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
40696index c5c054a..46f0548 100644
40697--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
40698+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
40699@@ -153,9 +153,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
40700 }
40701
40702 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
40703- vmw_gmrid_man_init,
40704- vmw_gmrid_man_takedown,
40705- vmw_gmrid_man_get_node,
40706- vmw_gmrid_man_put_node,
40707- vmw_gmrid_man_debug
40708+ .init = vmw_gmrid_man_init,
40709+ .takedown = vmw_gmrid_man_takedown,
40710+ .get_node = vmw_gmrid_man_get_node,
40711+ .put_node = vmw_gmrid_man_put_node,
40712+ .debug = vmw_gmrid_man_debug
40713 };
40714diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40715index 45d5b5a..f3f5e4e 100644
40716--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40717+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40718@@ -141,7 +141,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
40719 int ret;
40720
40721 num_clips = arg->num_clips;
40722- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
40723+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
40724
40725 if (unlikely(num_clips == 0))
40726 return 0;
40727@@ -225,7 +225,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
40728 int ret;
40729
40730 num_clips = arg->num_clips;
40731- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
40732+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
40733
40734 if (unlikely(num_clips == 0))
40735 return 0;
40736diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40737index 4640adb..e1384ed 100644
40738--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40739+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40740@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
40741 * emitted. Then the fence is stale and signaled.
40742 */
40743
40744- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
40745+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
40746 > VMW_FENCE_WRAP);
40747
40748 return ret;
40749@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
40750
40751 if (fifo_idle)
40752 down_read(&fifo_state->rwsem);
40753- signal_seq = atomic_read(&dev_priv->marker_seq);
40754+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
40755 ret = 0;
40756
40757 for (;;) {
40758diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40759index 8a8725c2..afed796 100644
40760--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40761+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40762@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
40763 while (!vmw_lag_lt(queue, us)) {
40764 spin_lock(&queue->lock);
40765 if (list_empty(&queue->head))
40766- seqno = atomic_read(&dev_priv->marker_seq);
40767+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
40768 else {
40769 marker = list_first_entry(&queue->head,
40770 struct vmw_marker, head);
40771diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
40772index ec0ae2d..dc0780b 100644
40773--- a/drivers/gpu/vga/vga_switcheroo.c
40774+++ b/drivers/gpu/vga/vga_switcheroo.c
40775@@ -643,7 +643,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
40776
40777 /* this version is for the case where the power switch is separate
40778 to the device being powered down. */
40779-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
40780+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
40781 {
40782 /* copy over all the bus versions */
40783 if (dev->bus && dev->bus->pm) {
40784@@ -688,7 +688,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
40785 return ret;
40786 }
40787
40788-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
40789+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
40790 {
40791 /* copy over all the bus versions */
40792 if (dev->bus && dev->bus->pm) {
40793diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
40794index 253fe23..0dfec5f 100644
40795--- a/drivers/hid/hid-core.c
40796+++ b/drivers/hid/hid-core.c
40797@@ -2416,7 +2416,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
40798
40799 int hid_add_device(struct hid_device *hdev)
40800 {
40801- static atomic_t id = ATOMIC_INIT(0);
40802+ static atomic_unchecked_t id = ATOMIC_INIT(0);
40803 int ret;
40804
40805 if (WARN_ON(hdev->status & HID_STAT_ADDED))
40806@@ -2450,7 +2450,7 @@ int hid_add_device(struct hid_device *hdev)
40807 /* XXX hack, any other cleaner solution after the driver core
40808 * is converted to allow more than 20 bytes as the device name? */
40809 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
40810- hdev->vendor, hdev->product, atomic_inc_return(&id));
40811+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
40812
40813 hid_debug_register(hdev, dev_name(&hdev->dev));
40814 ret = device_add(&hdev->dev);
40815diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
40816index c13fb5b..55a3802 100644
40817--- a/drivers/hid/hid-wiimote-debug.c
40818+++ b/drivers/hid/hid-wiimote-debug.c
40819@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
40820 else if (size == 0)
40821 return -EIO;
40822
40823- if (copy_to_user(u, buf, size))
40824+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
40825 return -EFAULT;
40826
40827 *off += size;
40828diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
40829index cedc6da..2c3da2a 100644
40830--- a/drivers/hid/uhid.c
40831+++ b/drivers/hid/uhid.c
40832@@ -47,7 +47,7 @@ struct uhid_device {
40833 struct mutex report_lock;
40834 wait_queue_head_t report_wait;
40835 atomic_t report_done;
40836- atomic_t report_id;
40837+ atomic_unchecked_t report_id;
40838 struct uhid_event report_buf;
40839 };
40840
40841@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
40842
40843 spin_lock_irqsave(&uhid->qlock, flags);
40844 ev->type = UHID_FEATURE;
40845- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
40846+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
40847 ev->u.feature.rnum = rnum;
40848 ev->u.feature.rtype = report_type;
40849
40850@@ -446,7 +446,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
40851 spin_lock_irqsave(&uhid->qlock, flags);
40852
40853 /* id for old report; drop it silently */
40854- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
40855+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
40856 goto unlock;
40857 if (atomic_read(&uhid->report_done))
40858 goto unlock;
40859diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
40860index cea623c..73011b0 100644
40861--- a/drivers/hv/channel.c
40862+++ b/drivers/hv/channel.c
40863@@ -362,8 +362,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
40864 int ret = 0;
40865 int t;
40866
40867- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
40868- atomic_inc(&vmbus_connection.next_gpadl_handle);
40869+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
40870+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
40871
40872 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
40873 if (ret)
40874diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
40875index f0c5e07..399256e 100644
40876--- a/drivers/hv/hv.c
40877+++ b/drivers/hv/hv.c
40878@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
40879 u64 output_address = (output) ? virt_to_phys(output) : 0;
40880 u32 output_address_hi = output_address >> 32;
40881 u32 output_address_lo = output_address & 0xFFFFFFFF;
40882- void *hypercall_page = hv_context.hypercall_page;
40883+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
40884
40885 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
40886 "=a"(hv_status_lo) : "d" (control_hi),
40887diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
40888index 7e17a54..a50a33d 100644
40889--- a/drivers/hv/hv_balloon.c
40890+++ b/drivers/hv/hv_balloon.c
40891@@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
40892
40893 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
40894 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
40895-static atomic_t trans_id = ATOMIC_INIT(0);
40896+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
40897
40898 static int dm_ring_size = (5 * PAGE_SIZE);
40899
40900@@ -886,7 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
40901 pr_info("Memory hot add failed\n");
40902
40903 dm->state = DM_INITIALIZED;
40904- resp.hdr.trans_id = atomic_inc_return(&trans_id);
40905+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40906 vmbus_sendpacket(dm->dev->channel, &resp,
40907 sizeof(struct dm_hot_add_response),
40908 (unsigned long)NULL,
40909@@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem_device *dm)
40910 memset(&status, 0, sizeof(struct dm_status));
40911 status.hdr.type = DM_STATUS_REPORT;
40912 status.hdr.size = sizeof(struct dm_status);
40913- status.hdr.trans_id = atomic_inc_return(&trans_id);
40914+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40915
40916 /*
40917 * The host expects the guest to report free memory.
40918@@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem_device *dm)
40919 * send the status. This can happen if we were interrupted
40920 * after we picked our transaction ID.
40921 */
40922- if (status.hdr.trans_id != atomic_read(&trans_id))
40923+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
40924 return;
40925
40926 vmbus_sendpacket(dm->dev->channel, &status,
40927@@ -1108,7 +1108,7 @@ static void balloon_up(struct work_struct *dummy)
40928 */
40929
40930 do {
40931- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
40932+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40933 ret = vmbus_sendpacket(dm_device.dev->channel,
40934 bl_resp,
40935 bl_resp->hdr.size,
40936@@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
40937
40938 memset(&resp, 0, sizeof(struct dm_unballoon_response));
40939 resp.hdr.type = DM_UNBALLOON_RESPONSE;
40940- resp.hdr.trans_id = atomic_inc_return(&trans_id);
40941+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40942 resp.hdr.size = sizeof(struct dm_unballoon_response);
40943
40944 vmbus_sendpacket(dm_device.dev->channel, &resp,
40945@@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynmem_device *dm,
40946 memset(&version_req, 0, sizeof(struct dm_version_request));
40947 version_req.hdr.type = DM_VERSION_REQUEST;
40948 version_req.hdr.size = sizeof(struct dm_version_request);
40949- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
40950+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40951 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
40952 version_req.is_last_attempt = 1;
40953
40954@@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_device *dev,
40955 memset(&version_req, 0, sizeof(struct dm_version_request));
40956 version_req.hdr.type = DM_VERSION_REQUEST;
40957 version_req.hdr.size = sizeof(struct dm_version_request);
40958- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
40959+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40960 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
40961 version_req.is_last_attempt = 0;
40962
40963@@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
40964 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
40965 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
40966 cap_msg.hdr.size = sizeof(struct dm_capabilities);
40967- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
40968+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40969
40970 cap_msg.caps.cap_bits.balloon = 1;
40971 cap_msg.caps.cap_bits.hot_add = 1;
40972diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
40973index e055176..c22ff1f 100644
40974--- a/drivers/hv/hyperv_vmbus.h
40975+++ b/drivers/hv/hyperv_vmbus.h
40976@@ -602,7 +602,7 @@ enum vmbus_connect_state {
40977 struct vmbus_connection {
40978 enum vmbus_connect_state conn_state;
40979
40980- atomic_t next_gpadl_handle;
40981+ atomic_unchecked_t next_gpadl_handle;
40982
40983 /*
40984 * Represents channel interrupts. Each bit position represents a
40985diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
40986index 48aad4f..c768fb2 100644
40987--- a/drivers/hv/vmbus_drv.c
40988+++ b/drivers/hv/vmbus_drv.c
40989@@ -846,10 +846,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
40990 {
40991 int ret = 0;
40992
40993- static atomic_t device_num = ATOMIC_INIT(0);
40994+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
40995
40996 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
40997- atomic_inc_return(&device_num));
40998+ atomic_inc_return_unchecked(&device_num));
40999
41000 child_device_obj->device.bus = &hv_bus;
41001 child_device_obj->device.parent = &hv_acpi_dev->dev;
41002diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
41003index 6a34f7f..aa4c3a6 100644
41004--- a/drivers/hwmon/acpi_power_meter.c
41005+++ b/drivers/hwmon/acpi_power_meter.c
41006@@ -117,7 +117,7 @@ struct sensor_template {
41007 struct device_attribute *devattr,
41008 const char *buf, size_t count);
41009 int index;
41010-};
41011+} __do_const;
41012
41013 /* Averaging interval */
41014 static int update_avg_interval(struct acpi_power_meter_resource *resource)
41015@@ -632,7 +632,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
41016 struct sensor_template *attrs)
41017 {
41018 struct device *dev = &resource->acpi_dev->dev;
41019- struct sensor_device_attribute *sensors =
41020+ sensor_device_attribute_no_const *sensors =
41021 &resource->sensors[resource->num_sensors];
41022 int res = 0;
41023
41024diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
41025index 3288f13..71cfb4e 100644
41026--- a/drivers/hwmon/applesmc.c
41027+++ b/drivers/hwmon/applesmc.c
41028@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
41029 {
41030 struct applesmc_node_group *grp;
41031 struct applesmc_dev_attr *node;
41032- struct attribute *attr;
41033+ attribute_no_const *attr;
41034 int ret, i;
41035
41036 for (grp = groups; grp->format; grp++) {
41037diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
41038index dafc63c..4abb96c 100644
41039--- a/drivers/hwmon/asus_atk0110.c
41040+++ b/drivers/hwmon/asus_atk0110.c
41041@@ -151,10 +151,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
41042 struct atk_sensor_data {
41043 struct list_head list;
41044 struct atk_data *data;
41045- struct device_attribute label_attr;
41046- struct device_attribute input_attr;
41047- struct device_attribute limit1_attr;
41048- struct device_attribute limit2_attr;
41049+ device_attribute_no_const label_attr;
41050+ device_attribute_no_const input_attr;
41051+ device_attribute_no_const limit1_attr;
41052+ device_attribute_no_const limit2_attr;
41053 char label_attr_name[ATTR_NAME_SIZE];
41054 char input_attr_name[ATTR_NAME_SIZE];
41055 char limit1_attr_name[ATTR_NAME_SIZE];
41056@@ -274,7 +274,7 @@ static ssize_t atk_name_show(struct device *dev,
41057 static struct device_attribute atk_name_attr =
41058 __ATTR(name, 0444, atk_name_show, NULL);
41059
41060-static void atk_init_attribute(struct device_attribute *attr, char *name,
41061+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
41062 sysfs_show_func show)
41063 {
41064 sysfs_attr_init(&attr->attr);
41065diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
41066index 9425098..7646cc5 100644
41067--- a/drivers/hwmon/coretemp.c
41068+++ b/drivers/hwmon/coretemp.c
41069@@ -797,7 +797,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
41070 return NOTIFY_OK;
41071 }
41072
41073-static struct notifier_block coretemp_cpu_notifier __refdata = {
41074+static struct notifier_block coretemp_cpu_notifier = {
41075 .notifier_call = coretemp_cpu_callback,
41076 };
41077
41078diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
41079index 632f1dc..57e6a58 100644
41080--- a/drivers/hwmon/ibmaem.c
41081+++ b/drivers/hwmon/ibmaem.c
41082@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
41083 struct aem_rw_sensor_template *rw)
41084 {
41085 struct device *dev = &data->pdev->dev;
41086- struct sensor_device_attribute *sensors = data->sensors;
41087+ sensor_device_attribute_no_const *sensors = data->sensors;
41088 int err;
41089
41090 /* Set up read-only sensors */
41091diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
41092index 708081b..fe2d4ab 100644
41093--- a/drivers/hwmon/iio_hwmon.c
41094+++ b/drivers/hwmon/iio_hwmon.c
41095@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
41096 {
41097 struct device *dev = &pdev->dev;
41098 struct iio_hwmon_state *st;
41099- struct sensor_device_attribute *a;
41100+ sensor_device_attribute_no_const *a;
41101 int ret, i;
41102 int in_i = 1, temp_i = 1, curr_i = 1;
41103 enum iio_chan_type type;
41104diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
41105index cf811c1..4c17110 100644
41106--- a/drivers/hwmon/nct6775.c
41107+++ b/drivers/hwmon/nct6775.c
41108@@ -944,10 +944,10 @@ static struct attribute_group *
41109 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
41110 int repeat)
41111 {
41112- struct attribute_group *group;
41113+ attribute_group_no_const *group;
41114 struct sensor_device_attr_u *su;
41115- struct sensor_device_attribute *a;
41116- struct sensor_device_attribute_2 *a2;
41117+ sensor_device_attribute_no_const *a;
41118+ sensor_device_attribute_2_no_const *a2;
41119 struct attribute **attrs;
41120 struct sensor_device_template **t;
41121 int i, count;
41122diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
41123index 3cbf66e..8c5cc2a 100644
41124--- a/drivers/hwmon/pmbus/pmbus_core.c
41125+++ b/drivers/hwmon/pmbus/pmbus_core.c
41126@@ -782,7 +782,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
41127 return 0;
41128 }
41129
41130-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
41131+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
41132 const char *name,
41133 umode_t mode,
41134 ssize_t (*show)(struct device *dev,
41135@@ -799,7 +799,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
41136 dev_attr->store = store;
41137 }
41138
41139-static void pmbus_attr_init(struct sensor_device_attribute *a,
41140+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
41141 const char *name,
41142 umode_t mode,
41143 ssize_t (*show)(struct device *dev,
41144@@ -821,7 +821,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
41145 u16 reg, u8 mask)
41146 {
41147 struct pmbus_boolean *boolean;
41148- struct sensor_device_attribute *a;
41149+ sensor_device_attribute_no_const *a;
41150
41151 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
41152 if (!boolean)
41153@@ -846,7 +846,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
41154 bool update, bool readonly)
41155 {
41156 struct pmbus_sensor *sensor;
41157- struct device_attribute *a;
41158+ device_attribute_no_const *a;
41159
41160 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
41161 if (!sensor)
41162@@ -877,7 +877,7 @@ static int pmbus_add_label(struct pmbus_data *data,
41163 const char *lstring, int index)
41164 {
41165 struct pmbus_label *label;
41166- struct device_attribute *a;
41167+ device_attribute_no_const *a;
41168
41169 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
41170 if (!label)
41171diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
41172index 97cd45a..ac54d8b 100644
41173--- a/drivers/hwmon/sht15.c
41174+++ b/drivers/hwmon/sht15.c
41175@@ -169,7 +169,7 @@ struct sht15_data {
41176 int supply_uv;
41177 bool supply_uv_valid;
41178 struct work_struct update_supply_work;
41179- atomic_t interrupt_handled;
41180+ atomic_unchecked_t interrupt_handled;
41181 };
41182
41183 /**
41184@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
41185 ret = gpio_direction_input(data->pdata->gpio_data);
41186 if (ret)
41187 return ret;
41188- atomic_set(&data->interrupt_handled, 0);
41189+ atomic_set_unchecked(&data->interrupt_handled, 0);
41190
41191 enable_irq(gpio_to_irq(data->pdata->gpio_data));
41192 if (gpio_get_value(data->pdata->gpio_data) == 0) {
41193 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
41194 /* Only relevant if the interrupt hasn't occurred. */
41195- if (!atomic_read(&data->interrupt_handled))
41196+ if (!atomic_read_unchecked(&data->interrupt_handled))
41197 schedule_work(&data->read_work);
41198 }
41199 ret = wait_event_timeout(data->wait_queue,
41200@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
41201
41202 /* First disable the interrupt */
41203 disable_irq_nosync(irq);
41204- atomic_inc(&data->interrupt_handled);
41205+ atomic_inc_unchecked(&data->interrupt_handled);
41206 /* Then schedule a reading work struct */
41207 if (data->state != SHT15_READING_NOTHING)
41208 schedule_work(&data->read_work);
41209@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
41210 * If not, then start the interrupt again - care here as could
41211 * have gone low in meantime so verify it hasn't!
41212 */
41213- atomic_set(&data->interrupt_handled, 0);
41214+ atomic_set_unchecked(&data->interrupt_handled, 0);
41215 enable_irq(gpio_to_irq(data->pdata->gpio_data));
41216 /* If still not occurred or another handler was scheduled */
41217 if (gpio_get_value(data->pdata->gpio_data)
41218- || atomic_read(&data->interrupt_handled))
41219+ || atomic_read_unchecked(&data->interrupt_handled))
41220 return;
41221 }
41222
41223diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
41224index 38944e9..ae9e5ed 100644
41225--- a/drivers/hwmon/via-cputemp.c
41226+++ b/drivers/hwmon/via-cputemp.c
41227@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
41228 return NOTIFY_OK;
41229 }
41230
41231-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
41232+static struct notifier_block via_cputemp_cpu_notifier = {
41233 .notifier_call = via_cputemp_cpu_callback,
41234 };
41235
41236diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
41237index 07f01ac..d79ad3d 100644
41238--- a/drivers/i2c/busses/i2c-amd756-s4882.c
41239+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
41240@@ -43,7 +43,7 @@
41241 extern struct i2c_adapter amd756_smbus;
41242
41243 static struct i2c_adapter *s4882_adapter;
41244-static struct i2c_algorithm *s4882_algo;
41245+static i2c_algorithm_no_const *s4882_algo;
41246
41247 /* Wrapper access functions for multiplexed SMBus */
41248 static DEFINE_MUTEX(amd756_lock);
41249diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
41250index 721f7eb..0fd2a09 100644
41251--- a/drivers/i2c/busses/i2c-diolan-u2c.c
41252+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
41253@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
41254 /* usb layer */
41255
41256 /* Send command to device, and get response. */
41257-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
41258+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
41259 {
41260 int ret = 0;
41261 int actual;
41262diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
41263index 2ca268d..c6acbdf 100644
41264--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
41265+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
41266@@ -41,7 +41,7 @@
41267 extern struct i2c_adapter *nforce2_smbus;
41268
41269 static struct i2c_adapter *s4985_adapter;
41270-static struct i2c_algorithm *s4985_algo;
41271+static i2c_algorithm_no_const *s4985_algo;
41272
41273 /* Wrapper access functions for multiplexed SMBus */
41274 static DEFINE_MUTEX(nforce2_lock);
41275diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
41276index 80b47e8..1a6040d9 100644
41277--- a/drivers/i2c/i2c-dev.c
41278+++ b/drivers/i2c/i2c-dev.c
41279@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
41280 break;
41281 }
41282
41283- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
41284+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
41285 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
41286 if (IS_ERR(rdwr_pa[i].buf)) {
41287 res = PTR_ERR(rdwr_pa[i].buf);
41288diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
41289index 0b510ba..4fbb5085 100644
41290--- a/drivers/ide/ide-cd.c
41291+++ b/drivers/ide/ide-cd.c
41292@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
41293 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
41294 if ((unsigned long)buf & alignment
41295 || blk_rq_bytes(rq) & q->dma_pad_mask
41296- || object_is_on_stack(buf))
41297+ || object_starts_on_stack(buf))
41298 drive->dma = 0;
41299 }
41300 }
41301diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
41302index 18f72e3..3722327 100644
41303--- a/drivers/iio/industrialio-core.c
41304+++ b/drivers/iio/industrialio-core.c
41305@@ -521,7 +521,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
41306 }
41307
41308 static
41309-int __iio_device_attr_init(struct device_attribute *dev_attr,
41310+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
41311 const char *postfix,
41312 struct iio_chan_spec const *chan,
41313 ssize_t (*readfunc)(struct device *dev,
41314diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
41315index f2ef7ef..743d02f 100644
41316--- a/drivers/infiniband/core/cm.c
41317+++ b/drivers/infiniband/core/cm.c
41318@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
41319
41320 struct cm_counter_group {
41321 struct kobject obj;
41322- atomic_long_t counter[CM_ATTR_COUNT];
41323+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
41324 };
41325
41326 struct cm_counter_attribute {
41327@@ -1392,7 +1392,7 @@ static void cm_dup_req_handler(struct cm_work *work,
41328 struct ib_mad_send_buf *msg = NULL;
41329 int ret;
41330
41331- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41332+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41333 counter[CM_REQ_COUNTER]);
41334
41335 /* Quick state check to discard duplicate REQs. */
41336@@ -1776,7 +1776,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
41337 if (!cm_id_priv)
41338 return;
41339
41340- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41341+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41342 counter[CM_REP_COUNTER]);
41343 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
41344 if (ret)
41345@@ -1943,7 +1943,7 @@ static int cm_rtu_handler(struct cm_work *work)
41346 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
41347 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
41348 spin_unlock_irq(&cm_id_priv->lock);
41349- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41350+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41351 counter[CM_RTU_COUNTER]);
41352 goto out;
41353 }
41354@@ -2126,7 +2126,7 @@ static int cm_dreq_handler(struct cm_work *work)
41355 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
41356 dreq_msg->local_comm_id);
41357 if (!cm_id_priv) {
41358- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41359+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41360 counter[CM_DREQ_COUNTER]);
41361 cm_issue_drep(work->port, work->mad_recv_wc);
41362 return -EINVAL;
41363@@ -2151,7 +2151,7 @@ static int cm_dreq_handler(struct cm_work *work)
41364 case IB_CM_MRA_REP_RCVD:
41365 break;
41366 case IB_CM_TIMEWAIT:
41367- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41368+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41369 counter[CM_DREQ_COUNTER]);
41370 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
41371 goto unlock;
41372@@ -2165,7 +2165,7 @@ static int cm_dreq_handler(struct cm_work *work)
41373 cm_free_msg(msg);
41374 goto deref;
41375 case IB_CM_DREQ_RCVD:
41376- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41377+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41378 counter[CM_DREQ_COUNTER]);
41379 goto unlock;
41380 default:
41381@@ -2532,7 +2532,7 @@ static int cm_mra_handler(struct cm_work *work)
41382 ib_modify_mad(cm_id_priv->av.port->mad_agent,
41383 cm_id_priv->msg, timeout)) {
41384 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
41385- atomic_long_inc(&work->port->
41386+ atomic_long_inc_unchecked(&work->port->
41387 counter_group[CM_RECV_DUPLICATES].
41388 counter[CM_MRA_COUNTER]);
41389 goto out;
41390@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
41391 break;
41392 case IB_CM_MRA_REQ_RCVD:
41393 case IB_CM_MRA_REP_RCVD:
41394- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41395+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41396 counter[CM_MRA_COUNTER]);
41397 /* fall through */
41398 default:
41399@@ -2703,7 +2703,7 @@ static int cm_lap_handler(struct cm_work *work)
41400 case IB_CM_LAP_IDLE:
41401 break;
41402 case IB_CM_MRA_LAP_SENT:
41403- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41404+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41405 counter[CM_LAP_COUNTER]);
41406 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
41407 goto unlock;
41408@@ -2719,7 +2719,7 @@ static int cm_lap_handler(struct cm_work *work)
41409 cm_free_msg(msg);
41410 goto deref;
41411 case IB_CM_LAP_RCVD:
41412- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41413+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41414 counter[CM_LAP_COUNTER]);
41415 goto unlock;
41416 default:
41417@@ -3003,7 +3003,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
41418 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
41419 if (cur_cm_id_priv) {
41420 spin_unlock_irq(&cm.lock);
41421- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41422+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41423 counter[CM_SIDR_REQ_COUNTER]);
41424 goto out; /* Duplicate message. */
41425 }
41426@@ -3215,10 +3215,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
41427 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
41428 msg->retries = 1;
41429
41430- atomic_long_add(1 + msg->retries,
41431+ atomic_long_add_unchecked(1 + msg->retries,
41432 &port->counter_group[CM_XMIT].counter[attr_index]);
41433 if (msg->retries)
41434- atomic_long_add(msg->retries,
41435+ atomic_long_add_unchecked(msg->retries,
41436 &port->counter_group[CM_XMIT_RETRIES].
41437 counter[attr_index]);
41438
41439@@ -3428,7 +3428,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
41440 }
41441
41442 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
41443- atomic_long_inc(&port->counter_group[CM_RECV].
41444+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
41445 counter[attr_id - CM_ATTR_ID_OFFSET]);
41446
41447 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
41448@@ -3633,7 +3633,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
41449 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
41450
41451 return sprintf(buf, "%ld\n",
41452- atomic_long_read(&group->counter[cm_attr->index]));
41453+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
41454 }
41455
41456 static const struct sysfs_ops cm_counter_ops = {
41457diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
41458index 9f5ad7c..588cd84 100644
41459--- a/drivers/infiniband/core/fmr_pool.c
41460+++ b/drivers/infiniband/core/fmr_pool.c
41461@@ -98,8 +98,8 @@ struct ib_fmr_pool {
41462
41463 struct task_struct *thread;
41464
41465- atomic_t req_ser;
41466- atomic_t flush_ser;
41467+ atomic_unchecked_t req_ser;
41468+ atomic_unchecked_t flush_ser;
41469
41470 wait_queue_head_t force_wait;
41471 };
41472@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
41473 struct ib_fmr_pool *pool = pool_ptr;
41474
41475 do {
41476- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
41477+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
41478 ib_fmr_batch_release(pool);
41479
41480- atomic_inc(&pool->flush_ser);
41481+ atomic_inc_unchecked(&pool->flush_ser);
41482 wake_up_interruptible(&pool->force_wait);
41483
41484 if (pool->flush_function)
41485@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
41486 }
41487
41488 set_current_state(TASK_INTERRUPTIBLE);
41489- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
41490+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
41491 !kthread_should_stop())
41492 schedule();
41493 __set_current_state(TASK_RUNNING);
41494@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
41495 pool->dirty_watermark = params->dirty_watermark;
41496 pool->dirty_len = 0;
41497 spin_lock_init(&pool->pool_lock);
41498- atomic_set(&pool->req_ser, 0);
41499- atomic_set(&pool->flush_ser, 0);
41500+ atomic_set_unchecked(&pool->req_ser, 0);
41501+ atomic_set_unchecked(&pool->flush_ser, 0);
41502 init_waitqueue_head(&pool->force_wait);
41503
41504 pool->thread = kthread_run(ib_fmr_cleanup_thread,
41505@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
41506 }
41507 spin_unlock_irq(&pool->pool_lock);
41508
41509- serial = atomic_inc_return(&pool->req_ser);
41510+ serial = atomic_inc_return_unchecked(&pool->req_ser);
41511 wake_up_process(pool->thread);
41512
41513 if (wait_event_interruptible(pool->force_wait,
41514- atomic_read(&pool->flush_ser) - serial >= 0))
41515+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
41516 return -EINTR;
41517
41518 return 0;
41519@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
41520 } else {
41521 list_add_tail(&fmr->list, &pool->dirty_list);
41522 if (++pool->dirty_len >= pool->dirty_watermark) {
41523- atomic_inc(&pool->req_ser);
41524+ atomic_inc_unchecked(&pool->req_ser);
41525 wake_up_process(pool->thread);
41526 }
41527 }
41528diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
41529index 84e4500..2c9beeb 100644
41530--- a/drivers/infiniband/hw/cxgb4/mem.c
41531+++ b/drivers/infiniband/hw/cxgb4/mem.c
41532@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
41533 int err;
41534 struct fw_ri_tpte tpt;
41535 u32 stag_idx;
41536- static atomic_t key;
41537+ static atomic_unchecked_t key;
41538
41539 if (c4iw_fatal_error(rdev))
41540 return -EIO;
41541@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
41542 if (rdev->stats.stag.cur > rdev->stats.stag.max)
41543 rdev->stats.stag.max = rdev->stats.stag.cur;
41544 mutex_unlock(&rdev->stats.lock);
41545- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
41546+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
41547 }
41548 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
41549 __func__, stag_state, type, pdid, stag_idx);
41550diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
41551index 644c2c7..ecf0879 100644
41552--- a/drivers/infiniband/hw/ipath/ipath_dma.c
41553+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
41554@@ -176,17 +176,17 @@ static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
41555 }
41556
41557 struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
41558- ipath_mapping_error,
41559- ipath_dma_map_single,
41560- ipath_dma_unmap_single,
41561- ipath_dma_map_page,
41562- ipath_dma_unmap_page,
41563- ipath_map_sg,
41564- ipath_unmap_sg,
41565- ipath_sg_dma_address,
41566- ipath_sg_dma_len,
41567- ipath_sync_single_for_cpu,
41568- ipath_sync_single_for_device,
41569- ipath_dma_alloc_coherent,
41570- ipath_dma_free_coherent
41571+ .mapping_error = ipath_mapping_error,
41572+ .map_single = ipath_dma_map_single,
41573+ .unmap_single = ipath_dma_unmap_single,
41574+ .map_page = ipath_dma_map_page,
41575+ .unmap_page = ipath_dma_unmap_page,
41576+ .map_sg = ipath_map_sg,
41577+ .unmap_sg = ipath_unmap_sg,
41578+ .dma_address = ipath_sg_dma_address,
41579+ .dma_len = ipath_sg_dma_len,
41580+ .sync_single_for_cpu = ipath_sync_single_for_cpu,
41581+ .sync_single_for_device = ipath_sync_single_for_device,
41582+ .alloc_coherent = ipath_dma_alloc_coherent,
41583+ .free_coherent = ipath_dma_free_coherent
41584 };
41585diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
41586index 79b3dbc..96e5fcc 100644
41587--- a/drivers/infiniband/hw/ipath/ipath_rc.c
41588+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
41589@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
41590 struct ib_atomic_eth *ateth;
41591 struct ipath_ack_entry *e;
41592 u64 vaddr;
41593- atomic64_t *maddr;
41594+ atomic64_unchecked_t *maddr;
41595 u64 sdata;
41596 u32 rkey;
41597 u8 next;
41598@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
41599 IB_ACCESS_REMOTE_ATOMIC)))
41600 goto nack_acc_unlck;
41601 /* Perform atomic OP and save result. */
41602- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
41603+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
41604 sdata = be64_to_cpu(ateth->swap_data);
41605 e = &qp->s_ack_queue[qp->r_head_ack_queue];
41606 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
41607- (u64) atomic64_add_return(sdata, maddr) - sdata :
41608+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
41609 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
41610 be64_to_cpu(ateth->compare_data),
41611 sdata);
41612diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
41613index 1f95bba..9530f87 100644
41614--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
41615+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
41616@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
41617 unsigned long flags;
41618 struct ib_wc wc;
41619 u64 sdata;
41620- atomic64_t *maddr;
41621+ atomic64_unchecked_t *maddr;
41622 enum ib_wc_status send_status;
41623
41624 /*
41625@@ -382,11 +382,11 @@ again:
41626 IB_ACCESS_REMOTE_ATOMIC)))
41627 goto acc_err;
41628 /* Perform atomic OP and save result. */
41629- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
41630+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
41631 sdata = wqe->wr.wr.atomic.compare_add;
41632 *(u64 *) sqp->s_sge.sge.vaddr =
41633 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
41634- (u64) atomic64_add_return(sdata, maddr) - sdata :
41635+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
41636 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
41637 sdata, wqe->wr.wr.atomic.swap);
41638 goto send_comp;
41639diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
41640index f2a3f48..673ec79 100644
41641--- a/drivers/infiniband/hw/mlx4/mad.c
41642+++ b/drivers/infiniband/hw/mlx4/mad.c
41643@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
41644
41645 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
41646 {
41647- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
41648+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
41649 cpu_to_be64(0xff00000000000000LL);
41650 }
41651
41652diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
41653index 25b2cdf..099ff97 100644
41654--- a/drivers/infiniband/hw/mlx4/mcg.c
41655+++ b/drivers/infiniband/hw/mlx4/mcg.c
41656@@ -1040,7 +1040,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
41657 {
41658 char name[20];
41659
41660- atomic_set(&ctx->tid, 0);
41661+ atomic_set_unchecked(&ctx->tid, 0);
41662 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
41663 ctx->mcg_wq = create_singlethread_workqueue(name);
41664 if (!ctx->mcg_wq)
41665diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
41666index 036b663..c9a8c73 100644
41667--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
41668+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
41669@@ -404,7 +404,7 @@ struct mlx4_ib_demux_ctx {
41670 struct list_head mcg_mgid0_list;
41671 struct workqueue_struct *mcg_wq;
41672 struct mlx4_ib_demux_pv_ctx **tun;
41673- atomic_t tid;
41674+ atomic_unchecked_t tid;
41675 int flushing; /* flushing the work queue */
41676 };
41677
41678diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
41679index 9d3e5c1..6f166df 100644
41680--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
41681+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
41682@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
41683 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
41684 }
41685
41686-int mthca_QUERY_FW(struct mthca_dev *dev)
41687+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
41688 {
41689 struct mthca_mailbox *mailbox;
41690 u32 *outbox;
41691@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41692 CMD_TIME_CLASS_B);
41693 }
41694
41695-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41696+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41697 int num_mtt)
41698 {
41699 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
41700@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
41701 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
41702 }
41703
41704-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41705+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41706 int eq_num)
41707 {
41708 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
41709@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
41710 CMD_TIME_CLASS_B);
41711 }
41712
41713-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
41714+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
41715 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
41716 void *in_mad, void *response_mad)
41717 {
41718diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
41719index 87897b9..7e79542 100644
41720--- a/drivers/infiniband/hw/mthca/mthca_main.c
41721+++ b/drivers/infiniband/hw/mthca/mthca_main.c
41722@@ -692,7 +692,7 @@ err_close:
41723 return err;
41724 }
41725
41726-static int mthca_setup_hca(struct mthca_dev *dev)
41727+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
41728 {
41729 int err;
41730
41731diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
41732index ed9a989..6aa5dc2 100644
41733--- a/drivers/infiniband/hw/mthca/mthca_mr.c
41734+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
41735@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
41736 * through the bitmaps)
41737 */
41738
41739-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
41740+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
41741 {
41742 int o;
41743 int m;
41744@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
41745 return key;
41746 }
41747
41748-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
41749+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
41750 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
41751 {
41752 struct mthca_mailbox *mailbox;
41753@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
41754 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
41755 }
41756
41757-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
41758+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
41759 u64 *buffer_list, int buffer_size_shift,
41760 int list_len, u64 iova, u64 total_size,
41761 u32 access, struct mthca_mr *mr)
41762diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
41763index 5b71d43..35a9e14 100644
41764--- a/drivers/infiniband/hw/mthca/mthca_provider.c
41765+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
41766@@ -763,7 +763,7 @@ unlock:
41767 return 0;
41768 }
41769
41770-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
41771+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
41772 {
41773 struct mthca_dev *dev = to_mdev(ibcq->device);
41774 struct mthca_cq *cq = to_mcq(ibcq);
41775diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
41776index 4291410..d2ab1fb 100644
41777--- a/drivers/infiniband/hw/nes/nes.c
41778+++ b/drivers/infiniband/hw/nes/nes.c
41779@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
41780 LIST_HEAD(nes_adapter_list);
41781 static LIST_HEAD(nes_dev_list);
41782
41783-atomic_t qps_destroyed;
41784+atomic_unchecked_t qps_destroyed;
41785
41786 static unsigned int ee_flsh_adapter;
41787 static unsigned int sysfs_nonidx_addr;
41788@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
41789 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
41790 struct nes_adapter *nesadapter = nesdev->nesadapter;
41791
41792- atomic_inc(&qps_destroyed);
41793+ atomic_inc_unchecked(&qps_destroyed);
41794
41795 /* Free the control structures */
41796
41797diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
41798index 33cc589..3bd6538 100644
41799--- a/drivers/infiniband/hw/nes/nes.h
41800+++ b/drivers/infiniband/hw/nes/nes.h
41801@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
41802 extern unsigned int wqm_quanta;
41803 extern struct list_head nes_adapter_list;
41804
41805-extern atomic_t cm_connects;
41806-extern atomic_t cm_accepts;
41807-extern atomic_t cm_disconnects;
41808-extern atomic_t cm_closes;
41809-extern atomic_t cm_connecteds;
41810-extern atomic_t cm_connect_reqs;
41811-extern atomic_t cm_rejects;
41812-extern atomic_t mod_qp_timouts;
41813-extern atomic_t qps_created;
41814-extern atomic_t qps_destroyed;
41815-extern atomic_t sw_qps_destroyed;
41816+extern atomic_unchecked_t cm_connects;
41817+extern atomic_unchecked_t cm_accepts;
41818+extern atomic_unchecked_t cm_disconnects;
41819+extern atomic_unchecked_t cm_closes;
41820+extern atomic_unchecked_t cm_connecteds;
41821+extern atomic_unchecked_t cm_connect_reqs;
41822+extern atomic_unchecked_t cm_rejects;
41823+extern atomic_unchecked_t mod_qp_timouts;
41824+extern atomic_unchecked_t qps_created;
41825+extern atomic_unchecked_t qps_destroyed;
41826+extern atomic_unchecked_t sw_qps_destroyed;
41827 extern u32 mh_detected;
41828 extern u32 mh_pauses_sent;
41829 extern u32 cm_packets_sent;
41830@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
41831 extern u32 cm_packets_received;
41832 extern u32 cm_packets_dropped;
41833 extern u32 cm_packets_retrans;
41834-extern atomic_t cm_listens_created;
41835-extern atomic_t cm_listens_destroyed;
41836+extern atomic_unchecked_t cm_listens_created;
41837+extern atomic_unchecked_t cm_listens_destroyed;
41838 extern u32 cm_backlog_drops;
41839-extern atomic_t cm_loopbacks;
41840-extern atomic_t cm_nodes_created;
41841-extern atomic_t cm_nodes_destroyed;
41842-extern atomic_t cm_accel_dropped_pkts;
41843-extern atomic_t cm_resets_recvd;
41844-extern atomic_t pau_qps_created;
41845-extern atomic_t pau_qps_destroyed;
41846+extern atomic_unchecked_t cm_loopbacks;
41847+extern atomic_unchecked_t cm_nodes_created;
41848+extern atomic_unchecked_t cm_nodes_destroyed;
41849+extern atomic_unchecked_t cm_accel_dropped_pkts;
41850+extern atomic_unchecked_t cm_resets_recvd;
41851+extern atomic_unchecked_t pau_qps_created;
41852+extern atomic_unchecked_t pau_qps_destroyed;
41853
41854 extern u32 int_mod_timer_init;
41855 extern u32 int_mod_cq_depth_256;
41856diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
41857index 6b29249..461d143 100644
41858--- a/drivers/infiniband/hw/nes/nes_cm.c
41859+++ b/drivers/infiniband/hw/nes/nes_cm.c
41860@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
41861 u32 cm_packets_retrans;
41862 u32 cm_packets_created;
41863 u32 cm_packets_received;
41864-atomic_t cm_listens_created;
41865-atomic_t cm_listens_destroyed;
41866+atomic_unchecked_t cm_listens_created;
41867+atomic_unchecked_t cm_listens_destroyed;
41868 u32 cm_backlog_drops;
41869-atomic_t cm_loopbacks;
41870-atomic_t cm_nodes_created;
41871-atomic_t cm_nodes_destroyed;
41872-atomic_t cm_accel_dropped_pkts;
41873-atomic_t cm_resets_recvd;
41874+atomic_unchecked_t cm_loopbacks;
41875+atomic_unchecked_t cm_nodes_created;
41876+atomic_unchecked_t cm_nodes_destroyed;
41877+atomic_unchecked_t cm_accel_dropped_pkts;
41878+atomic_unchecked_t cm_resets_recvd;
41879
41880 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
41881 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
41882@@ -133,28 +133,28 @@ static void print_core(struct nes_cm_core *core);
41883 /* instance of function pointers for client API */
41884 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
41885 static struct nes_cm_ops nes_cm_api = {
41886- mini_cm_accelerated,
41887- mini_cm_listen,
41888- mini_cm_del_listen,
41889- mini_cm_connect,
41890- mini_cm_close,
41891- mini_cm_accept,
41892- mini_cm_reject,
41893- mini_cm_recv_pkt,
41894- mini_cm_dealloc_core,
41895- mini_cm_get,
41896- mini_cm_set
41897+ .accelerated = mini_cm_accelerated,
41898+ .listen = mini_cm_listen,
41899+ .stop_listener = mini_cm_del_listen,
41900+ .connect = mini_cm_connect,
41901+ .close = mini_cm_close,
41902+ .accept = mini_cm_accept,
41903+ .reject = mini_cm_reject,
41904+ .recv_pkt = mini_cm_recv_pkt,
41905+ .destroy_cm_core = mini_cm_dealloc_core,
41906+ .get = mini_cm_get,
41907+ .set = mini_cm_set
41908 };
41909
41910 static struct nes_cm_core *g_cm_core;
41911
41912-atomic_t cm_connects;
41913-atomic_t cm_accepts;
41914-atomic_t cm_disconnects;
41915-atomic_t cm_closes;
41916-atomic_t cm_connecteds;
41917-atomic_t cm_connect_reqs;
41918-atomic_t cm_rejects;
41919+atomic_unchecked_t cm_connects;
41920+atomic_unchecked_t cm_accepts;
41921+atomic_unchecked_t cm_disconnects;
41922+atomic_unchecked_t cm_closes;
41923+atomic_unchecked_t cm_connecteds;
41924+atomic_unchecked_t cm_connect_reqs;
41925+atomic_unchecked_t cm_rejects;
41926
41927 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
41928 {
41929@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
41930 kfree(listener);
41931 listener = NULL;
41932 ret = 0;
41933- atomic_inc(&cm_listens_destroyed);
41934+ atomic_inc_unchecked(&cm_listens_destroyed);
41935 } else {
41936 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
41937 }
41938@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
41939 cm_node->rem_mac);
41940
41941 add_hte_node(cm_core, cm_node);
41942- atomic_inc(&cm_nodes_created);
41943+ atomic_inc_unchecked(&cm_nodes_created);
41944
41945 return cm_node;
41946 }
41947@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
41948 }
41949
41950 atomic_dec(&cm_core->node_cnt);
41951- atomic_inc(&cm_nodes_destroyed);
41952+ atomic_inc_unchecked(&cm_nodes_destroyed);
41953 nesqp = cm_node->nesqp;
41954 if (nesqp) {
41955 nesqp->cm_node = NULL;
41956@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
41957
41958 static void drop_packet(struct sk_buff *skb)
41959 {
41960- atomic_inc(&cm_accel_dropped_pkts);
41961+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
41962 dev_kfree_skb_any(skb);
41963 }
41964
41965@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
41966 {
41967
41968 int reset = 0; /* whether to send reset in case of err.. */
41969- atomic_inc(&cm_resets_recvd);
41970+ atomic_inc_unchecked(&cm_resets_recvd);
41971 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
41972 " refcnt=%d\n", cm_node, cm_node->state,
41973 atomic_read(&cm_node->ref_count));
41974@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
41975 rem_ref_cm_node(cm_node->cm_core, cm_node);
41976 return NULL;
41977 }
41978- atomic_inc(&cm_loopbacks);
41979+ atomic_inc_unchecked(&cm_loopbacks);
41980 loopbackremotenode->loopbackpartner = cm_node;
41981 loopbackremotenode->tcp_cntxt.rcv_wscale =
41982 NES_CM_DEFAULT_RCV_WND_SCALE;
41983@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
41984 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
41985 else {
41986 rem_ref_cm_node(cm_core, cm_node);
41987- atomic_inc(&cm_accel_dropped_pkts);
41988+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
41989 dev_kfree_skb_any(skb);
41990 }
41991 break;
41992@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
41993
41994 if ((cm_id) && (cm_id->event_handler)) {
41995 if (issue_disconn) {
41996- atomic_inc(&cm_disconnects);
41997+ atomic_inc_unchecked(&cm_disconnects);
41998 cm_event.event = IW_CM_EVENT_DISCONNECT;
41999 cm_event.status = disconn_status;
42000 cm_event.local_addr = cm_id->local_addr;
42001@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42002 }
42003
42004 if (issue_close) {
42005- atomic_inc(&cm_closes);
42006+ atomic_inc_unchecked(&cm_closes);
42007 nes_disconnect(nesqp, 1);
42008
42009 cm_id->provider_data = nesqp;
42010@@ -3035,7 +3035,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42011
42012 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
42013 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
42014- atomic_inc(&cm_accepts);
42015+ atomic_inc_unchecked(&cm_accepts);
42016
42017 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
42018 netdev_refcnt_read(nesvnic->netdev));
42019@@ -3224,7 +3224,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
42020 struct nes_cm_core *cm_core;
42021 u8 *start_buff;
42022
42023- atomic_inc(&cm_rejects);
42024+ atomic_inc_unchecked(&cm_rejects);
42025 cm_node = (struct nes_cm_node *)cm_id->provider_data;
42026 loopback = cm_node->loopbackpartner;
42027 cm_core = cm_node->cm_core;
42028@@ -3286,7 +3286,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42029 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
42030 ntohs(laddr->sin_port));
42031
42032- atomic_inc(&cm_connects);
42033+ atomic_inc_unchecked(&cm_connects);
42034 nesqp->active_conn = 1;
42035
42036 /* cache the cm_id in the qp */
42037@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
42038 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
42039 return err;
42040 }
42041- atomic_inc(&cm_listens_created);
42042+ atomic_inc_unchecked(&cm_listens_created);
42043 }
42044
42045 cm_id->add_ref(cm_id);
42046@@ -3505,7 +3505,7 @@ static void cm_event_connected(struct nes_cm_event *event)
42047
42048 if (nesqp->destroyed)
42049 return;
42050- atomic_inc(&cm_connecteds);
42051+ atomic_inc_unchecked(&cm_connecteds);
42052 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
42053 " local port 0x%04X. jiffies = %lu.\n",
42054 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
42055@@ -3686,7 +3686,7 @@ static void cm_event_reset(struct nes_cm_event *event)
42056
42057 cm_id->add_ref(cm_id);
42058 ret = cm_id->event_handler(cm_id, &cm_event);
42059- atomic_inc(&cm_closes);
42060+ atomic_inc_unchecked(&cm_closes);
42061 cm_event.event = IW_CM_EVENT_CLOSE;
42062 cm_event.status = 0;
42063 cm_event.provider_data = cm_id->provider_data;
42064@@ -3726,7 +3726,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
42065 return;
42066 cm_id = cm_node->cm_id;
42067
42068- atomic_inc(&cm_connect_reqs);
42069+ atomic_inc_unchecked(&cm_connect_reqs);
42070 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42071 cm_node, cm_id, jiffies);
42072
42073@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
42074 return;
42075 cm_id = cm_node->cm_id;
42076
42077- atomic_inc(&cm_connect_reqs);
42078+ atomic_inc_unchecked(&cm_connect_reqs);
42079 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42080 cm_node, cm_id, jiffies);
42081
42082diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
42083index 4166452..fc952c3 100644
42084--- a/drivers/infiniband/hw/nes/nes_mgt.c
42085+++ b/drivers/infiniband/hw/nes/nes_mgt.c
42086@@ -40,8 +40,8 @@
42087 #include "nes.h"
42088 #include "nes_mgt.h"
42089
42090-atomic_t pau_qps_created;
42091-atomic_t pau_qps_destroyed;
42092+atomic_unchecked_t pau_qps_created;
42093+atomic_unchecked_t pau_qps_destroyed;
42094
42095 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
42096 {
42097@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
42098 {
42099 struct sk_buff *skb;
42100 unsigned long flags;
42101- atomic_inc(&pau_qps_destroyed);
42102+ atomic_inc_unchecked(&pau_qps_destroyed);
42103
42104 /* Free packets that have not yet been forwarded */
42105 /* Lock is acquired by skb_dequeue when removing the skb */
42106@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
42107 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
42108 skb_queue_head_init(&nesqp->pau_list);
42109 spin_lock_init(&nesqp->pau_lock);
42110- atomic_inc(&pau_qps_created);
42111+ atomic_inc_unchecked(&pau_qps_created);
42112 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
42113 }
42114
42115diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
42116index 49eb511..a774366 100644
42117--- a/drivers/infiniband/hw/nes/nes_nic.c
42118+++ b/drivers/infiniband/hw/nes/nes_nic.c
42119@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
42120 target_stat_values[++index] = mh_detected;
42121 target_stat_values[++index] = mh_pauses_sent;
42122 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
42123- target_stat_values[++index] = atomic_read(&cm_connects);
42124- target_stat_values[++index] = atomic_read(&cm_accepts);
42125- target_stat_values[++index] = atomic_read(&cm_disconnects);
42126- target_stat_values[++index] = atomic_read(&cm_connecteds);
42127- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
42128- target_stat_values[++index] = atomic_read(&cm_rejects);
42129- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
42130- target_stat_values[++index] = atomic_read(&qps_created);
42131- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
42132- target_stat_values[++index] = atomic_read(&qps_destroyed);
42133- target_stat_values[++index] = atomic_read(&cm_closes);
42134+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
42135+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
42136+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
42137+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
42138+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
42139+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
42140+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
42141+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
42142+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
42143+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
42144+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
42145 target_stat_values[++index] = cm_packets_sent;
42146 target_stat_values[++index] = cm_packets_bounced;
42147 target_stat_values[++index] = cm_packets_created;
42148 target_stat_values[++index] = cm_packets_received;
42149 target_stat_values[++index] = cm_packets_dropped;
42150 target_stat_values[++index] = cm_packets_retrans;
42151- target_stat_values[++index] = atomic_read(&cm_listens_created);
42152- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
42153+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
42154+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
42155 target_stat_values[++index] = cm_backlog_drops;
42156- target_stat_values[++index] = atomic_read(&cm_loopbacks);
42157- target_stat_values[++index] = atomic_read(&cm_nodes_created);
42158- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
42159- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
42160- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
42161+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
42162+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
42163+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
42164+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
42165+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
42166 target_stat_values[++index] = nesadapter->free_4kpbl;
42167 target_stat_values[++index] = nesadapter->free_256pbl;
42168 target_stat_values[++index] = int_mod_timer_init;
42169 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
42170 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
42171 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
42172- target_stat_values[++index] = atomic_read(&pau_qps_created);
42173- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
42174+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
42175+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
42176 }
42177
42178 /**
42179diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
42180index 8308e36..ae0d3b5 100644
42181--- a/drivers/infiniband/hw/nes/nes_verbs.c
42182+++ b/drivers/infiniband/hw/nes/nes_verbs.c
42183@@ -46,9 +46,9 @@
42184
42185 #include <rdma/ib_umem.h>
42186
42187-atomic_t mod_qp_timouts;
42188-atomic_t qps_created;
42189-atomic_t sw_qps_destroyed;
42190+atomic_unchecked_t mod_qp_timouts;
42191+atomic_unchecked_t qps_created;
42192+atomic_unchecked_t sw_qps_destroyed;
42193
42194 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
42195
42196@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
42197 if (init_attr->create_flags)
42198 return ERR_PTR(-EINVAL);
42199
42200- atomic_inc(&qps_created);
42201+ atomic_inc_unchecked(&qps_created);
42202 switch (init_attr->qp_type) {
42203 case IB_QPT_RC:
42204 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
42205@@ -1466,7 +1466,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
42206 struct iw_cm_event cm_event;
42207 int ret = 0;
42208
42209- atomic_inc(&sw_qps_destroyed);
42210+ atomic_inc_unchecked(&sw_qps_destroyed);
42211 nesqp->destroyed = 1;
42212
42213 /* Blow away the connection if it exists. */
42214diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
42215index 1946101..09766d2 100644
42216--- a/drivers/infiniband/hw/qib/qib.h
42217+++ b/drivers/infiniband/hw/qib/qib.h
42218@@ -52,6 +52,7 @@
42219 #include <linux/kref.h>
42220 #include <linux/sched.h>
42221 #include <linux/kthread.h>
42222+#include <linux/slab.h>
42223
42224 #include "qib_common.h"
42225 #include "qib_verbs.h"
42226diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
42227index 24c41ba..102d71f 100644
42228--- a/drivers/input/gameport/gameport.c
42229+++ b/drivers/input/gameport/gameport.c
42230@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
42231 */
42232 static void gameport_init_port(struct gameport *gameport)
42233 {
42234- static atomic_t gameport_no = ATOMIC_INIT(0);
42235+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
42236
42237 __module_get(THIS_MODULE);
42238
42239 mutex_init(&gameport->drv_mutex);
42240 device_initialize(&gameport->dev);
42241 dev_set_name(&gameport->dev, "gameport%lu",
42242- (unsigned long)atomic_inc_return(&gameport_no) - 1);
42243+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
42244 gameport->dev.bus = &gameport_bus;
42245 gameport->dev.release = gameport_release_port;
42246 if (gameport->parent)
42247diff --git a/drivers/input/input.c b/drivers/input/input.c
42248index d2965e4..f52b7d7 100644
42249--- a/drivers/input/input.c
42250+++ b/drivers/input/input.c
42251@@ -1734,7 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
42252 */
42253 struct input_dev *input_allocate_device(void)
42254 {
42255- static atomic_t input_no = ATOMIC_INIT(0);
42256+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
42257 struct input_dev *dev;
42258
42259 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
42260@@ -1749,7 +1749,7 @@ struct input_dev *input_allocate_device(void)
42261 INIT_LIST_HEAD(&dev->node);
42262
42263 dev_set_name(&dev->dev, "input%ld",
42264- (unsigned long) atomic_inc_return(&input_no) - 1);
42265+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
42266
42267 __module_get(THIS_MODULE);
42268 }
42269diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
42270index 04c69af..5f92d00 100644
42271--- a/drivers/input/joystick/sidewinder.c
42272+++ b/drivers/input/joystick/sidewinder.c
42273@@ -30,6 +30,7 @@
42274 #include <linux/kernel.h>
42275 #include <linux/module.h>
42276 #include <linux/slab.h>
42277+#include <linux/sched.h>
42278 #include <linux/init.h>
42279 #include <linux/input.h>
42280 #include <linux/gameport.h>
42281diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
42282index 75e3b10..fb390fd 100644
42283--- a/drivers/input/joystick/xpad.c
42284+++ b/drivers/input/joystick/xpad.c
42285@@ -736,7 +736,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
42286
42287 static int xpad_led_probe(struct usb_xpad *xpad)
42288 {
42289- static atomic_t led_seq = ATOMIC_INIT(0);
42290+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
42291 long led_no;
42292 struct xpad_led *led;
42293 struct led_classdev *led_cdev;
42294@@ -749,7 +749,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
42295 if (!led)
42296 return -ENOMEM;
42297
42298- led_no = (long)atomic_inc_return(&led_seq) - 1;
42299+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
42300
42301 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
42302 led->xpad = xpad;
42303diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
42304index e204f26..8459f15 100644
42305--- a/drivers/input/misc/ims-pcu.c
42306+++ b/drivers/input/misc/ims-pcu.c
42307@@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
42308
42309 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
42310 {
42311- static atomic_t device_no = ATOMIC_INIT(0);
42312+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
42313
42314 const struct ims_pcu_device_info *info;
42315 u8 device_id;
42316@@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
42317 }
42318
42319 /* Device appears to be operable, complete initialization */
42320- pcu->device_no = atomic_inc_return(&device_no) - 1;
42321+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
42322
42323 error = ims_pcu_setup_backlight(pcu);
42324 if (error)
42325diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
42326index 2f0b39d..7370f13 100644
42327--- a/drivers/input/mouse/psmouse.h
42328+++ b/drivers/input/mouse/psmouse.h
42329@@ -116,7 +116,7 @@ struct psmouse_attribute {
42330 ssize_t (*set)(struct psmouse *psmouse, void *data,
42331 const char *buf, size_t count);
42332 bool protect;
42333-};
42334+} __do_const;
42335 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
42336
42337 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
42338diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
42339index 4c842c3..590b0bf 100644
42340--- a/drivers/input/mousedev.c
42341+++ b/drivers/input/mousedev.c
42342@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
42343
42344 spin_unlock_irq(&client->packet_lock);
42345
42346- if (copy_to_user(buffer, data, count))
42347+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
42348 return -EFAULT;
42349
42350 return count;
42351diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
42352index 8f4c4ab..5fc8a45 100644
42353--- a/drivers/input/serio/serio.c
42354+++ b/drivers/input/serio/serio.c
42355@@ -505,7 +505,7 @@ static void serio_release_port(struct device *dev)
42356 */
42357 static void serio_init_port(struct serio *serio)
42358 {
42359- static atomic_t serio_no = ATOMIC_INIT(0);
42360+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
42361
42362 __module_get(THIS_MODULE);
42363
42364@@ -516,7 +516,7 @@ static void serio_init_port(struct serio *serio)
42365 mutex_init(&serio->drv_mutex);
42366 device_initialize(&serio->dev);
42367 dev_set_name(&serio->dev, "serio%ld",
42368- (long)atomic_inc_return(&serio_no) - 1);
42369+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
42370 serio->dev.bus = &serio_bus;
42371 serio->dev.release = serio_release_port;
42372 serio->dev.groups = serio_device_attr_groups;
42373diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
42374index 59df2e7..8f1cafb 100644
42375--- a/drivers/input/serio/serio_raw.c
42376+++ b/drivers/input/serio/serio_raw.c
42377@@ -293,7 +293,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
42378
42379 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
42380 {
42381- static atomic_t serio_raw_no = ATOMIC_INIT(0);
42382+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
42383 struct serio_raw *serio_raw;
42384 int err;
42385
42386@@ -304,7 +304,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
42387 }
42388
42389 snprintf(serio_raw->name, sizeof(serio_raw->name),
42390- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
42391+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
42392 kref_init(&serio_raw->kref);
42393 INIT_LIST_HEAD(&serio_raw->client_list);
42394 init_waitqueue_head(&serio_raw->wait);
42395diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
42396index e5555fc..937986d 100644
42397--- a/drivers/iommu/iommu.c
42398+++ b/drivers/iommu/iommu.c
42399@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = {
42400 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
42401 {
42402 bus_register_notifier(bus, &iommu_bus_nb);
42403- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
42404+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
42405 }
42406
42407 /**
42408diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
42409index 39f81ae..2660096 100644
42410--- a/drivers/iommu/irq_remapping.c
42411+++ b/drivers/iommu/irq_remapping.c
42412@@ -356,7 +356,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
42413 void panic_if_irq_remap(const char *msg)
42414 {
42415 if (irq_remapping_enabled)
42416- panic(msg);
42417+ panic("%s", msg);
42418 }
42419
42420 static void ir_ack_apic_edge(struct irq_data *data)
42421@@ -377,10 +377,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
42422
42423 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
42424 {
42425- chip->irq_print_chip = ir_print_prefix;
42426- chip->irq_ack = ir_ack_apic_edge;
42427- chip->irq_eoi = ir_ack_apic_level;
42428- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
42429+ pax_open_kernel();
42430+ *(void **)&chip->irq_print_chip = ir_print_prefix;
42431+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
42432+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
42433+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
42434+ pax_close_kernel();
42435 }
42436
42437 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
42438diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
42439index 341c601..e5f407e 100644
42440--- a/drivers/irqchip/irq-gic.c
42441+++ b/drivers/irqchip/irq-gic.c
42442@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
42443 * Supported arch specific GIC irq extension.
42444 * Default make them NULL.
42445 */
42446-struct irq_chip gic_arch_extn = {
42447+irq_chip_no_const gic_arch_extn = {
42448 .irq_eoi = NULL,
42449 .irq_mask = NULL,
42450 .irq_unmask = NULL,
42451@@ -332,7 +332,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
42452 chained_irq_exit(chip, desc);
42453 }
42454
42455-static struct irq_chip gic_chip = {
42456+static irq_chip_no_const gic_chip __read_only = {
42457 .name = "GIC",
42458 .irq_mask = gic_mask_irq,
42459 .irq_unmask = gic_unmask_irq,
42460diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
42461index ac6f72b..81150f2 100644
42462--- a/drivers/isdn/capi/capi.c
42463+++ b/drivers/isdn/capi/capi.c
42464@@ -81,8 +81,8 @@ struct capiminor {
42465
42466 struct capi20_appl *ap;
42467 u32 ncci;
42468- atomic_t datahandle;
42469- atomic_t msgid;
42470+ atomic_unchecked_t datahandle;
42471+ atomic_unchecked_t msgid;
42472
42473 struct tty_port port;
42474 int ttyinstop;
42475@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
42476 capimsg_setu16(s, 2, mp->ap->applid);
42477 capimsg_setu8 (s, 4, CAPI_DATA_B3);
42478 capimsg_setu8 (s, 5, CAPI_RESP);
42479- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
42480+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
42481 capimsg_setu32(s, 8, mp->ncci);
42482 capimsg_setu16(s, 12, datahandle);
42483 }
42484@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
42485 mp->outbytes -= len;
42486 spin_unlock_bh(&mp->outlock);
42487
42488- datahandle = atomic_inc_return(&mp->datahandle);
42489+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
42490 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
42491 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
42492 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
42493 capimsg_setu16(skb->data, 2, mp->ap->applid);
42494 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
42495 capimsg_setu8 (skb->data, 5, CAPI_REQ);
42496- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
42497+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
42498 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
42499 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
42500 capimsg_setu16(skb->data, 16, len); /* Data length */
42501diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
42502index c44950d..10ac276 100644
42503--- a/drivers/isdn/gigaset/bas-gigaset.c
42504+++ b/drivers/isdn/gigaset/bas-gigaset.c
42505@@ -2564,22 +2564,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
42506
42507
42508 static const struct gigaset_ops gigops = {
42509- gigaset_write_cmd,
42510- gigaset_write_room,
42511- gigaset_chars_in_buffer,
42512- gigaset_brkchars,
42513- gigaset_init_bchannel,
42514- gigaset_close_bchannel,
42515- gigaset_initbcshw,
42516- gigaset_freebcshw,
42517- gigaset_reinitbcshw,
42518- gigaset_initcshw,
42519- gigaset_freecshw,
42520- gigaset_set_modem_ctrl,
42521- gigaset_baud_rate,
42522- gigaset_set_line_ctrl,
42523- gigaset_isoc_send_skb,
42524- gigaset_isoc_input,
42525+ .write_cmd = gigaset_write_cmd,
42526+ .write_room = gigaset_write_room,
42527+ .chars_in_buffer = gigaset_chars_in_buffer,
42528+ .brkchars = gigaset_brkchars,
42529+ .init_bchannel = gigaset_init_bchannel,
42530+ .close_bchannel = gigaset_close_bchannel,
42531+ .initbcshw = gigaset_initbcshw,
42532+ .freebcshw = gigaset_freebcshw,
42533+ .reinitbcshw = gigaset_reinitbcshw,
42534+ .initcshw = gigaset_initcshw,
42535+ .freecshw = gigaset_freecshw,
42536+ .set_modem_ctrl = gigaset_set_modem_ctrl,
42537+ .baud_rate = gigaset_baud_rate,
42538+ .set_line_ctrl = gigaset_set_line_ctrl,
42539+ .send_skb = gigaset_isoc_send_skb,
42540+ .handle_input = gigaset_isoc_input,
42541 };
42542
42543 /* bas_gigaset_init
42544diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
42545index 600c79b..3752bab 100644
42546--- a/drivers/isdn/gigaset/interface.c
42547+++ b/drivers/isdn/gigaset/interface.c
42548@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
42549 }
42550 tty->driver_data = cs;
42551
42552- ++cs->port.count;
42553+ atomic_inc(&cs->port.count);
42554
42555- if (cs->port.count == 1) {
42556+ if (atomic_read(&cs->port.count) == 1) {
42557 tty_port_tty_set(&cs->port, tty);
42558 cs->port.low_latency = 1;
42559 }
42560@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
42561
42562 if (!cs->connected)
42563 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
42564- else if (!cs->port.count)
42565+ else if (!atomic_read(&cs->port.count))
42566 dev_warn(cs->dev, "%s: device not opened\n", __func__);
42567- else if (!--cs->port.count)
42568+ else if (!atomic_dec_return(&cs->port.count))
42569 tty_port_tty_set(&cs->port, NULL);
42570
42571 mutex_unlock(&cs->mutex);
42572diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
42573index 8c91fd5..14f13ce 100644
42574--- a/drivers/isdn/gigaset/ser-gigaset.c
42575+++ b/drivers/isdn/gigaset/ser-gigaset.c
42576@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
42577 }
42578
42579 static const struct gigaset_ops ops = {
42580- gigaset_write_cmd,
42581- gigaset_write_room,
42582- gigaset_chars_in_buffer,
42583- gigaset_brkchars,
42584- gigaset_init_bchannel,
42585- gigaset_close_bchannel,
42586- gigaset_initbcshw,
42587- gigaset_freebcshw,
42588- gigaset_reinitbcshw,
42589- gigaset_initcshw,
42590- gigaset_freecshw,
42591- gigaset_set_modem_ctrl,
42592- gigaset_baud_rate,
42593- gigaset_set_line_ctrl,
42594- gigaset_m10x_send_skb, /* asyncdata.c */
42595- gigaset_m10x_input, /* asyncdata.c */
42596+ .write_cmd = gigaset_write_cmd,
42597+ .write_room = gigaset_write_room,
42598+ .chars_in_buffer = gigaset_chars_in_buffer,
42599+ .brkchars = gigaset_brkchars,
42600+ .init_bchannel = gigaset_init_bchannel,
42601+ .close_bchannel = gigaset_close_bchannel,
42602+ .initbcshw = gigaset_initbcshw,
42603+ .freebcshw = gigaset_freebcshw,
42604+ .reinitbcshw = gigaset_reinitbcshw,
42605+ .initcshw = gigaset_initcshw,
42606+ .freecshw = gigaset_freecshw,
42607+ .set_modem_ctrl = gigaset_set_modem_ctrl,
42608+ .baud_rate = gigaset_baud_rate,
42609+ .set_line_ctrl = gigaset_set_line_ctrl,
42610+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
42611+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
42612 };
42613
42614
42615diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
42616index d0a41cb..b953e50 100644
42617--- a/drivers/isdn/gigaset/usb-gigaset.c
42618+++ b/drivers/isdn/gigaset/usb-gigaset.c
42619@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
42620 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
42621 memcpy(cs->hw.usb->bchars, buf, 6);
42622 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
42623- 0, 0, &buf, 6, 2000);
42624+ 0, 0, buf, 6, 2000);
42625 }
42626
42627 static void gigaset_freebcshw(struct bc_state *bcs)
42628@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
42629 }
42630
42631 static const struct gigaset_ops ops = {
42632- gigaset_write_cmd,
42633- gigaset_write_room,
42634- gigaset_chars_in_buffer,
42635- gigaset_brkchars,
42636- gigaset_init_bchannel,
42637- gigaset_close_bchannel,
42638- gigaset_initbcshw,
42639- gigaset_freebcshw,
42640- gigaset_reinitbcshw,
42641- gigaset_initcshw,
42642- gigaset_freecshw,
42643- gigaset_set_modem_ctrl,
42644- gigaset_baud_rate,
42645- gigaset_set_line_ctrl,
42646- gigaset_m10x_send_skb,
42647- gigaset_m10x_input,
42648+ .write_cmd = gigaset_write_cmd,
42649+ .write_room = gigaset_write_room,
42650+ .chars_in_buffer = gigaset_chars_in_buffer,
42651+ .brkchars = gigaset_brkchars,
42652+ .init_bchannel = gigaset_init_bchannel,
42653+ .close_bchannel = gigaset_close_bchannel,
42654+ .initbcshw = gigaset_initbcshw,
42655+ .freebcshw = gigaset_freebcshw,
42656+ .reinitbcshw = gigaset_reinitbcshw,
42657+ .initcshw = gigaset_initcshw,
42658+ .freecshw = gigaset_freecshw,
42659+ .set_modem_ctrl = gigaset_set_modem_ctrl,
42660+ .baud_rate = gigaset_baud_rate,
42661+ .set_line_ctrl = gigaset_set_line_ctrl,
42662+ .send_skb = gigaset_m10x_send_skb,
42663+ .handle_input = gigaset_m10x_input,
42664 };
42665
42666 /*
42667diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
42668index 4d9b195..455075c 100644
42669--- a/drivers/isdn/hardware/avm/b1.c
42670+++ b/drivers/isdn/hardware/avm/b1.c
42671@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
42672 }
42673 if (left) {
42674 if (t4file->user) {
42675- if (copy_from_user(buf, dp, left))
42676+ if (left > sizeof buf || copy_from_user(buf, dp, left))
42677 return -EFAULT;
42678 } else {
42679 memcpy(buf, dp, left);
42680@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
42681 }
42682 if (left) {
42683 if (config->user) {
42684- if (copy_from_user(buf, dp, left))
42685+ if (left > sizeof buf || copy_from_user(buf, dp, left))
42686 return -EFAULT;
42687 } else {
42688 memcpy(buf, dp, left);
42689diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
42690index 9bb12ba..d4262f7 100644
42691--- a/drivers/isdn/i4l/isdn_common.c
42692+++ b/drivers/isdn/i4l/isdn_common.c
42693@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
42694 } else
42695 return -EINVAL;
42696 case IIOCDBGVAR:
42697+ if (!capable(CAP_SYS_RAWIO))
42698+ return -EPERM;
42699 if (arg) {
42700 if (copy_to_user(argp, &dev, sizeof(ulong)))
42701 return -EFAULT;
42702diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
42703index 91d5730..336523e 100644
42704--- a/drivers/isdn/i4l/isdn_concap.c
42705+++ b/drivers/isdn/i4l/isdn_concap.c
42706@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
42707 }
42708
42709 struct concap_device_ops isdn_concap_reliable_dl_dops = {
42710- &isdn_concap_dl_data_req,
42711- &isdn_concap_dl_connect_req,
42712- &isdn_concap_dl_disconn_req
42713+ .data_req = &isdn_concap_dl_data_req,
42714+ .connect_req = &isdn_concap_dl_connect_req,
42715+ .disconn_req = &isdn_concap_dl_disconn_req
42716 };
42717
42718 /* The following should better go into a dedicated source file such that
42719diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
42720index 3c5f249..5fac4d0 100644
42721--- a/drivers/isdn/i4l/isdn_tty.c
42722+++ b/drivers/isdn/i4l/isdn_tty.c
42723@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
42724
42725 #ifdef ISDN_DEBUG_MODEM_OPEN
42726 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
42727- port->count);
42728+ atomic_read(&port->count));
42729 #endif
42730- port->count++;
42731+ atomic_inc(&port->count);
42732 port->tty = tty;
42733 /*
42734 * Start up serial port
42735@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
42736 #endif
42737 return;
42738 }
42739- if ((tty->count == 1) && (port->count != 1)) {
42740+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
42741 /*
42742 * Uh, oh. tty->count is 1, which means that the tty
42743 * structure will be freed. Info->count should always
42744@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
42745 * serial port won't be shutdown.
42746 */
42747 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
42748- "info->count is %d\n", port->count);
42749- port->count = 1;
42750+ "info->count is %d\n", atomic_read(&port->count));
42751+ atomic_set(&port->count, 1);
42752 }
42753- if (--port->count < 0) {
42754+ if (atomic_dec_return(&port->count) < 0) {
42755 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
42756- info->line, port->count);
42757- port->count = 0;
42758+ info->line, atomic_read(&port->count));
42759+ atomic_set(&port->count, 0);
42760 }
42761- if (port->count) {
42762+ if (atomic_read(&port->count)) {
42763 #ifdef ISDN_DEBUG_MODEM_OPEN
42764 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
42765 #endif
42766@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
42767 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
42768 return;
42769 isdn_tty_shutdown(info);
42770- port->count = 0;
42771+ atomic_set(&port->count, 0);
42772 port->flags &= ~ASYNC_NORMAL_ACTIVE;
42773 port->tty = NULL;
42774 wake_up_interruptible(&port->open_wait);
42775@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
42776 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
42777 modem_info *info = &dev->mdm.info[i];
42778
42779- if (info->port.count == 0)
42780+ if (atomic_read(&info->port.count) == 0)
42781 continue;
42782 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
42783 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
42784diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
42785index e2d4e58..40cd045 100644
42786--- a/drivers/isdn/i4l/isdn_x25iface.c
42787+++ b/drivers/isdn/i4l/isdn_x25iface.c
42788@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
42789
42790
42791 static struct concap_proto_ops ix25_pops = {
42792- &isdn_x25iface_proto_new,
42793- &isdn_x25iface_proto_del,
42794- &isdn_x25iface_proto_restart,
42795- &isdn_x25iface_proto_close,
42796- &isdn_x25iface_xmit,
42797- &isdn_x25iface_receive,
42798- &isdn_x25iface_connect_ind,
42799- &isdn_x25iface_disconn_ind
42800+ .proto_new = &isdn_x25iface_proto_new,
42801+ .proto_del = &isdn_x25iface_proto_del,
42802+ .restart = &isdn_x25iface_proto_restart,
42803+ .close = &isdn_x25iface_proto_close,
42804+ .encap_and_xmit = &isdn_x25iface_xmit,
42805+ .data_ind = &isdn_x25iface_receive,
42806+ .connect_ind = &isdn_x25iface_connect_ind,
42807+ .disconn_ind = &isdn_x25iface_disconn_ind
42808 };
42809
42810 /* error message helper function */
42811diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
42812index 53d487f..f020f41 100644
42813--- a/drivers/isdn/icn/icn.c
42814+++ b/drivers/isdn/icn/icn.c
42815@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
42816 if (count > len)
42817 count = len;
42818 if (user) {
42819- if (copy_from_user(msg, buf, count))
42820+ if (count > sizeof msg || copy_from_user(msg, buf, count))
42821 return -EFAULT;
42822 } else
42823 memcpy(msg, buf, count);
42824diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
42825index a4f05c5..1433bc5 100644
42826--- a/drivers/isdn/mISDN/dsp_cmx.c
42827+++ b/drivers/isdn/mISDN/dsp_cmx.c
42828@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
42829 static u16 dsp_count; /* last sample count */
42830 static int dsp_count_valid; /* if we have last sample count */
42831
42832-void
42833+void __intentional_overflow(-1)
42834 dsp_cmx_send(void *arg)
42835 {
42836 struct dsp_conf *conf;
42837diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
42838index d93e245..e7ece6b 100644
42839--- a/drivers/leds/leds-clevo-mail.c
42840+++ b/drivers/leds/leds-clevo-mail.c
42841@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
42842 * detected as working, but in reality it is not) as low as
42843 * possible.
42844 */
42845-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
42846+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
42847 {
42848 .callback = clevo_mail_led_dmi_callback,
42849 .ident = "Clevo D410J",
42850diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
42851index 5b8f938..b73d657 100644
42852--- a/drivers/leds/leds-ss4200.c
42853+++ b/drivers/leds/leds-ss4200.c
42854@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
42855 * detected as working, but in reality it is not) as low as
42856 * possible.
42857 */
42858-static struct dmi_system_id nas_led_whitelist[] __initdata = {
42859+static struct dmi_system_id nas_led_whitelist[] __initconst = {
42860 {
42861 .callback = ss4200_led_dmi_callback,
42862 .ident = "Intel SS4200-E",
42863diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
42864index 0bf1e4e..b4bf44e 100644
42865--- a/drivers/lguest/core.c
42866+++ b/drivers/lguest/core.c
42867@@ -97,9 +97,17 @@ static __init int map_switcher(void)
42868 * The end address needs +1 because __get_vm_area allocates an
42869 * extra guard page, so we need space for that.
42870 */
42871+
42872+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
42873+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
42874+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
42875+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
42876+#else
42877 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
42878 VM_ALLOC, switcher_addr, switcher_addr
42879 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
42880+#endif
42881+
42882 if (!switcher_vma) {
42883 err = -ENOMEM;
42884 printk("lguest: could not map switcher pages high\n");
42885@@ -124,7 +132,7 @@ static __init int map_switcher(void)
42886 * Now the Switcher is mapped at the right address, we can't fail!
42887 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
42888 */
42889- memcpy(switcher_vma->addr, start_switcher_text,
42890+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
42891 end_switcher_text - start_switcher_text);
42892
42893 printk(KERN_INFO "lguest: mapped switcher at %p\n",
42894diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
42895index bfb39bb..08a603b 100644
42896--- a/drivers/lguest/page_tables.c
42897+++ b/drivers/lguest/page_tables.c
42898@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
42899 /*:*/
42900
42901 #ifdef CONFIG_X86_PAE
42902-static void release_pmd(pmd_t *spmd)
42903+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
42904 {
42905 /* If the entry's not present, there's nothing to release. */
42906 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
42907diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
42908index 922a1ac..9dd0c2a 100644
42909--- a/drivers/lguest/x86/core.c
42910+++ b/drivers/lguest/x86/core.c
42911@@ -59,7 +59,7 @@ static struct {
42912 /* Offset from where switcher.S was compiled to where we've copied it */
42913 static unsigned long switcher_offset(void)
42914 {
42915- return switcher_addr - (unsigned long)start_switcher_text;
42916+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
42917 }
42918
42919 /* This cpu's struct lguest_pages (after the Switcher text page) */
42920@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
42921 * These copies are pretty cheap, so we do them unconditionally: */
42922 /* Save the current Host top-level page directory.
42923 */
42924+
42925+#ifdef CONFIG_PAX_PER_CPU_PGD
42926+ pages->state.host_cr3 = read_cr3();
42927+#else
42928 pages->state.host_cr3 = __pa(current->mm->pgd);
42929+#endif
42930+
42931 /*
42932 * Set up the Guest's page tables to see this CPU's pages (and no
42933 * other CPU's pages).
42934@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
42935 * compiled-in switcher code and the high-mapped copy we just made.
42936 */
42937 for (i = 0; i < IDT_ENTRIES; i++)
42938- default_idt_entries[i] += switcher_offset();
42939+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
42940
42941 /*
42942 * Set up the Switcher's per-cpu areas.
42943@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
42944 * it will be undisturbed when we switch. To change %cs and jump we
42945 * need this structure to feed to Intel's "lcall" instruction.
42946 */
42947- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
42948+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
42949 lguest_entry.segment = LGUEST_CS;
42950
42951 /*
42952diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
42953index 40634b0..4f5855e 100644
42954--- a/drivers/lguest/x86/switcher_32.S
42955+++ b/drivers/lguest/x86/switcher_32.S
42956@@ -87,6 +87,7 @@
42957 #include <asm/page.h>
42958 #include <asm/segment.h>
42959 #include <asm/lguest.h>
42960+#include <asm/processor-flags.h>
42961
42962 // We mark the start of the code to copy
42963 // It's placed in .text tho it's never run here
42964@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
42965 // Changes type when we load it: damn Intel!
42966 // For after we switch over our page tables
42967 // That entry will be read-only: we'd crash.
42968+
42969+#ifdef CONFIG_PAX_KERNEXEC
42970+ mov %cr0, %edx
42971+ xor $X86_CR0_WP, %edx
42972+ mov %edx, %cr0
42973+#endif
42974+
42975 movl $(GDT_ENTRY_TSS*8), %edx
42976 ltr %dx
42977
42978@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
42979 // Let's clear it again for our return.
42980 // The GDT descriptor of the Host
42981 // Points to the table after two "size" bytes
42982- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
42983+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
42984 // Clear "used" from type field (byte 5, bit 2)
42985- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
42986+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
42987+
42988+#ifdef CONFIG_PAX_KERNEXEC
42989+ mov %cr0, %eax
42990+ xor $X86_CR0_WP, %eax
42991+ mov %eax, %cr0
42992+#endif
42993
42994 // Once our page table's switched, the Guest is live!
42995 // The Host fades as we run this final step.
42996@@ -295,13 +309,12 @@ deliver_to_host:
42997 // I consulted gcc, and it gave
42998 // These instructions, which I gladly credit:
42999 leal (%edx,%ebx,8), %eax
43000- movzwl (%eax),%edx
43001- movl 4(%eax), %eax
43002- xorw %ax, %ax
43003- orl %eax, %edx
43004+ movl 4(%eax), %edx
43005+ movw (%eax), %dx
43006 // Now the address of the handler's in %edx
43007 // We call it now: its "iret" drops us home.
43008- jmp *%edx
43009+ ljmp $__KERNEL_CS, $1f
43010+1: jmp *%edx
43011
43012 // Every interrupt can come to us here
43013 // But we must truly tell each apart.
43014diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
43015index 9762f1b..3e79734 100644
43016--- a/drivers/md/bcache/closure.h
43017+++ b/drivers/md/bcache/closure.h
43018@@ -483,7 +483,7 @@ static inline void closure_queue(struct closure *cl)
43019 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
43020 struct workqueue_struct *wq)
43021 {
43022- BUG_ON(object_is_on_stack(cl));
43023+ BUG_ON(object_starts_on_stack(cl));
43024 closure_set_ip(cl);
43025 cl->fn = fn;
43026 cl->wq = wq;
43027diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
43028index 12dc29b..1596277 100644
43029--- a/drivers/md/bitmap.c
43030+++ b/drivers/md/bitmap.c
43031@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
43032 chunk_kb ? "KB" : "B");
43033 if (bitmap->storage.file) {
43034 seq_printf(seq, ", file: ");
43035- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
43036+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
43037 }
43038
43039 seq_printf(seq, "\n");
43040diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
43041index 5152142..623d141 100644
43042--- a/drivers/md/dm-ioctl.c
43043+++ b/drivers/md/dm-ioctl.c
43044@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
43045 cmd == DM_LIST_VERSIONS_CMD)
43046 return 0;
43047
43048- if ((cmd == DM_DEV_CREATE_CMD)) {
43049+ if (cmd == DM_DEV_CREATE_CMD) {
43050 if (!*param->name) {
43051 DMWARN("name not supplied when creating device");
43052 return -EINVAL;
43053diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
43054index 9584443..9fc9ac9 100644
43055--- a/drivers/md/dm-raid1.c
43056+++ b/drivers/md/dm-raid1.c
43057@@ -40,7 +40,7 @@ enum dm_raid1_error {
43058
43059 struct mirror {
43060 struct mirror_set *ms;
43061- atomic_t error_count;
43062+ atomic_unchecked_t error_count;
43063 unsigned long error_type;
43064 struct dm_dev *dev;
43065 sector_t offset;
43066@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
43067 struct mirror *m;
43068
43069 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
43070- if (!atomic_read(&m->error_count))
43071+ if (!atomic_read_unchecked(&m->error_count))
43072 return m;
43073
43074 return NULL;
43075@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
43076 * simple way to tell if a device has encountered
43077 * errors.
43078 */
43079- atomic_inc(&m->error_count);
43080+ atomic_inc_unchecked(&m->error_count);
43081
43082 if (test_and_set_bit(error_type, &m->error_type))
43083 return;
43084@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
43085 struct mirror *m = get_default_mirror(ms);
43086
43087 do {
43088- if (likely(!atomic_read(&m->error_count)))
43089+ if (likely(!atomic_read_unchecked(&m->error_count)))
43090 return m;
43091
43092 if (m-- == ms->mirror)
43093@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
43094 {
43095 struct mirror *default_mirror = get_default_mirror(m->ms);
43096
43097- return !atomic_read(&default_mirror->error_count);
43098+ return !atomic_read_unchecked(&default_mirror->error_count);
43099 }
43100
43101 static int mirror_available(struct mirror_set *ms, struct bio *bio)
43102@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
43103 */
43104 if (likely(region_in_sync(ms, region, 1)))
43105 m = choose_mirror(ms, bio->bi_sector);
43106- else if (m && atomic_read(&m->error_count))
43107+ else if (m && atomic_read_unchecked(&m->error_count))
43108 m = NULL;
43109
43110 if (likely(m))
43111@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
43112 }
43113
43114 ms->mirror[mirror].ms = ms;
43115- atomic_set(&(ms->mirror[mirror].error_count), 0);
43116+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
43117 ms->mirror[mirror].error_type = 0;
43118 ms->mirror[mirror].offset = offset;
43119
43120@@ -1339,7 +1339,7 @@ static void mirror_resume(struct dm_target *ti)
43121 */
43122 static char device_status_char(struct mirror *m)
43123 {
43124- if (!atomic_read(&(m->error_count)))
43125+ if (!atomic_read_unchecked(&(m->error_count)))
43126 return 'A';
43127
43128 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
43129diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
43130index 28a9012..9c0f6a5 100644
43131--- a/drivers/md/dm-stats.c
43132+++ b/drivers/md/dm-stats.c
43133@@ -382,7 +382,7 @@ do_sync_free:
43134 synchronize_rcu_expedited();
43135 dm_stat_free(&s->rcu_head);
43136 } else {
43137- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
43138+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
43139 call_rcu(&s->rcu_head, dm_stat_free);
43140 }
43141 return 0;
43142@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
43143 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
43144 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
43145 ));
43146- ACCESS_ONCE(last->last_sector) = end_sector;
43147- ACCESS_ONCE(last->last_rw) = bi_rw;
43148+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
43149+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
43150 }
43151
43152 rcu_read_lock();
43153diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
43154index 73c1712..7347292 100644
43155--- a/drivers/md/dm-stripe.c
43156+++ b/drivers/md/dm-stripe.c
43157@@ -21,7 +21,7 @@ struct stripe {
43158 struct dm_dev *dev;
43159 sector_t physical_start;
43160
43161- atomic_t error_count;
43162+ atomic_unchecked_t error_count;
43163 };
43164
43165 struct stripe_c {
43166@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
43167 kfree(sc);
43168 return r;
43169 }
43170- atomic_set(&(sc->stripe[i].error_count), 0);
43171+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
43172 }
43173
43174 ti->private = sc;
43175@@ -327,7 +327,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
43176 DMEMIT("%d ", sc->stripes);
43177 for (i = 0; i < sc->stripes; i++) {
43178 DMEMIT("%s ", sc->stripe[i].dev->name);
43179- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
43180+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
43181 'D' : 'A';
43182 }
43183 buffer[i] = '\0';
43184@@ -372,8 +372,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
43185 */
43186 for (i = 0; i < sc->stripes; i++)
43187 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
43188- atomic_inc(&(sc->stripe[i].error_count));
43189- if (atomic_read(&(sc->stripe[i].error_count)) <
43190+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
43191+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
43192 DM_IO_ERROR_THRESHOLD)
43193 schedule_work(&sc->trigger_event);
43194 }
43195diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
43196index 3ba6a38..b0fa9b0 100644
43197--- a/drivers/md/dm-table.c
43198+++ b/drivers/md/dm-table.c
43199@@ -291,7 +291,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
43200 static int open_dev(struct dm_dev_internal *d, dev_t dev,
43201 struct mapped_device *md)
43202 {
43203- static char *_claim_ptr = "I belong to device-mapper";
43204+ static char _claim_ptr[] = "I belong to device-mapper";
43205 struct block_device *bdev;
43206
43207 int r;
43208@@ -359,7 +359,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
43209 if (!dev_size)
43210 return 0;
43211
43212- if ((start >= dev_size) || (start + len > dev_size)) {
43213+ if ((start >= dev_size) || (len > dev_size - start)) {
43214 DMWARN("%s: %s too small for target: "
43215 "start=%llu, len=%llu, dev_size=%llu",
43216 dm_device_name(ti->table->md), bdevname(bdev, b),
43217diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
43218index 8a30ad5..72792d3 100644
43219--- a/drivers/md/dm-thin-metadata.c
43220+++ b/drivers/md/dm-thin-metadata.c
43221@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
43222 {
43223 pmd->info.tm = pmd->tm;
43224 pmd->info.levels = 2;
43225- pmd->info.value_type.context = pmd->data_sm;
43226+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
43227 pmd->info.value_type.size = sizeof(__le64);
43228 pmd->info.value_type.inc = data_block_inc;
43229 pmd->info.value_type.dec = data_block_dec;
43230@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
43231
43232 pmd->bl_info.tm = pmd->tm;
43233 pmd->bl_info.levels = 1;
43234- pmd->bl_info.value_type.context = pmd->data_sm;
43235+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
43236 pmd->bl_info.value_type.size = sizeof(__le64);
43237 pmd->bl_info.value_type.inc = data_block_inc;
43238 pmd->bl_info.value_type.dec = data_block_dec;
43239diff --git a/drivers/md/dm.c b/drivers/md/dm.c
43240index 0704c52..0a33d61 100644
43241--- a/drivers/md/dm.c
43242+++ b/drivers/md/dm.c
43243@@ -185,9 +185,9 @@ struct mapped_device {
43244 /*
43245 * Event handling.
43246 */
43247- atomic_t event_nr;
43248+ atomic_unchecked_t event_nr;
43249 wait_queue_head_t eventq;
43250- atomic_t uevent_seq;
43251+ atomic_unchecked_t uevent_seq;
43252 struct list_head uevent_list;
43253 spinlock_t uevent_lock; /* Protect access to uevent_list */
43254
43255@@ -2021,8 +2021,8 @@ static struct mapped_device *alloc_dev(int minor)
43256 spin_lock_init(&md->deferred_lock);
43257 atomic_set(&md->holders, 1);
43258 atomic_set(&md->open_count, 0);
43259- atomic_set(&md->event_nr, 0);
43260- atomic_set(&md->uevent_seq, 0);
43261+ atomic_set_unchecked(&md->event_nr, 0);
43262+ atomic_set_unchecked(&md->uevent_seq, 0);
43263 INIT_LIST_HEAD(&md->uevent_list);
43264 spin_lock_init(&md->uevent_lock);
43265
43266@@ -2175,7 +2175,7 @@ static void event_callback(void *context)
43267
43268 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
43269
43270- atomic_inc(&md->event_nr);
43271+ atomic_inc_unchecked(&md->event_nr);
43272 wake_up(&md->eventq);
43273 }
43274
43275@@ -2868,18 +2868,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
43276
43277 uint32_t dm_next_uevent_seq(struct mapped_device *md)
43278 {
43279- return atomic_add_return(1, &md->uevent_seq);
43280+ return atomic_add_return_unchecked(1, &md->uevent_seq);
43281 }
43282
43283 uint32_t dm_get_event_nr(struct mapped_device *md)
43284 {
43285- return atomic_read(&md->event_nr);
43286+ return atomic_read_unchecked(&md->event_nr);
43287 }
43288
43289 int dm_wait_event(struct mapped_device *md, int event_nr)
43290 {
43291 return wait_event_interruptible(md->eventq,
43292- (event_nr != atomic_read(&md->event_nr)));
43293+ (event_nr != atomic_read_unchecked(&md->event_nr)));
43294 }
43295
43296 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
43297diff --git a/drivers/md/md.c b/drivers/md/md.c
43298index 369d919..ba7049c 100644
43299--- a/drivers/md/md.c
43300+++ b/drivers/md/md.c
43301@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
43302 * start build, activate spare
43303 */
43304 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
43305-static atomic_t md_event_count;
43306+static atomic_unchecked_t md_event_count;
43307 void md_new_event(struct mddev *mddev)
43308 {
43309- atomic_inc(&md_event_count);
43310+ atomic_inc_unchecked(&md_event_count);
43311 wake_up(&md_event_waiters);
43312 }
43313 EXPORT_SYMBOL_GPL(md_new_event);
43314@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
43315 */
43316 static void md_new_event_inintr(struct mddev *mddev)
43317 {
43318- atomic_inc(&md_event_count);
43319+ atomic_inc_unchecked(&md_event_count);
43320 wake_up(&md_event_waiters);
43321 }
43322
43323@@ -1463,7 +1463,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
43324 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
43325 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
43326 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
43327- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
43328+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
43329
43330 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
43331 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
43332@@ -1710,7 +1710,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
43333 else
43334 sb->resync_offset = cpu_to_le64(0);
43335
43336- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
43337+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
43338
43339 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
43340 sb->size = cpu_to_le64(mddev->dev_sectors);
43341@@ -2715,7 +2715,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
43342 static ssize_t
43343 errors_show(struct md_rdev *rdev, char *page)
43344 {
43345- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
43346+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
43347 }
43348
43349 static ssize_t
43350@@ -2724,7 +2724,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
43351 char *e;
43352 unsigned long n = simple_strtoul(buf, &e, 10);
43353 if (*buf && (*e == 0 || *e == '\n')) {
43354- atomic_set(&rdev->corrected_errors, n);
43355+ atomic_set_unchecked(&rdev->corrected_errors, n);
43356 return len;
43357 }
43358 return -EINVAL;
43359@@ -3173,8 +3173,8 @@ int md_rdev_init(struct md_rdev *rdev)
43360 rdev->sb_loaded = 0;
43361 rdev->bb_page = NULL;
43362 atomic_set(&rdev->nr_pending, 0);
43363- atomic_set(&rdev->read_errors, 0);
43364- atomic_set(&rdev->corrected_errors, 0);
43365+ atomic_set_unchecked(&rdev->read_errors, 0);
43366+ atomic_set_unchecked(&rdev->corrected_errors, 0);
43367
43368 INIT_LIST_HEAD(&rdev->same_set);
43369 init_waitqueue_head(&rdev->blocked_wait);
43370@@ -7038,7 +7038,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
43371
43372 spin_unlock(&pers_lock);
43373 seq_printf(seq, "\n");
43374- seq->poll_event = atomic_read(&md_event_count);
43375+ seq->poll_event = atomic_read_unchecked(&md_event_count);
43376 return 0;
43377 }
43378 if (v == (void*)2) {
43379@@ -7141,7 +7141,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
43380 return error;
43381
43382 seq = file->private_data;
43383- seq->poll_event = atomic_read(&md_event_count);
43384+ seq->poll_event = atomic_read_unchecked(&md_event_count);
43385 return error;
43386 }
43387
43388@@ -7155,7 +7155,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
43389 /* always allow read */
43390 mask = POLLIN | POLLRDNORM;
43391
43392- if (seq->poll_event != atomic_read(&md_event_count))
43393+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
43394 mask |= POLLERR | POLLPRI;
43395 return mask;
43396 }
43397@@ -7199,7 +7199,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
43398 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
43399 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
43400 (int)part_stat_read(&disk->part0, sectors[1]) -
43401- atomic_read(&disk->sync_io);
43402+ atomic_read_unchecked(&disk->sync_io);
43403 /* sync IO will cause sync_io to increase before the disk_stats
43404 * as sync_io is counted when a request starts, and
43405 * disk_stats is counted when it completes.
43406diff --git a/drivers/md/md.h b/drivers/md/md.h
43407index 0095ec8..c89277a 100644
43408--- a/drivers/md/md.h
43409+++ b/drivers/md/md.h
43410@@ -94,13 +94,13 @@ struct md_rdev {
43411 * only maintained for arrays that
43412 * support hot removal
43413 */
43414- atomic_t read_errors; /* number of consecutive read errors that
43415+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
43416 * we have tried to ignore.
43417 */
43418 struct timespec last_read_error; /* monotonic time since our
43419 * last read error
43420 */
43421- atomic_t corrected_errors; /* number of corrected read errors,
43422+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
43423 * for reporting to userspace and storing
43424 * in superblock.
43425 */
43426@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
43427
43428 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
43429 {
43430- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
43431+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
43432 }
43433
43434 struct md_personality
43435diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
43436index 3e6d115..ffecdeb 100644
43437--- a/drivers/md/persistent-data/dm-space-map.h
43438+++ b/drivers/md/persistent-data/dm-space-map.h
43439@@ -71,6 +71,7 @@ struct dm_space_map {
43440 dm_sm_threshold_fn fn,
43441 void *context);
43442 };
43443+typedef struct dm_space_map __no_const dm_space_map_no_const;
43444
43445 /*----------------------------------------------------------------*/
43446
43447diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
43448index a49cfcc..20b9a65 100644
43449--- a/drivers/md/raid1.c
43450+++ b/drivers/md/raid1.c
43451@@ -1921,7 +1921,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
43452 if (r1_sync_page_io(rdev, sect, s,
43453 bio->bi_io_vec[idx].bv_page,
43454 READ) != 0)
43455- atomic_add(s, &rdev->corrected_errors);
43456+ atomic_add_unchecked(s, &rdev->corrected_errors);
43457 }
43458 sectors -= s;
43459 sect += s;
43460@@ -2148,7 +2148,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
43461 test_bit(In_sync, &rdev->flags)) {
43462 if (r1_sync_page_io(rdev, sect, s,
43463 conf->tmppage, READ)) {
43464- atomic_add(s, &rdev->corrected_errors);
43465+ atomic_add_unchecked(s, &rdev->corrected_errors);
43466 printk(KERN_INFO
43467 "md/raid1:%s: read error corrected "
43468 "(%d sectors at %llu on %s)\n",
43469diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
43470index 06eeb99..770613e 100644
43471--- a/drivers/md/raid10.c
43472+++ b/drivers/md/raid10.c
43473@@ -1963,7 +1963,7 @@ static void end_sync_read(struct bio *bio, int error)
43474 /* The write handler will notice the lack of
43475 * R10BIO_Uptodate and record any errors etc
43476 */
43477- atomic_add(r10_bio->sectors,
43478+ atomic_add_unchecked(r10_bio->sectors,
43479 &conf->mirrors[d].rdev->corrected_errors);
43480
43481 /* for reconstruct, we always reschedule after a read.
43482@@ -2321,7 +2321,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
43483 {
43484 struct timespec cur_time_mon;
43485 unsigned long hours_since_last;
43486- unsigned int read_errors = atomic_read(&rdev->read_errors);
43487+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
43488
43489 ktime_get_ts(&cur_time_mon);
43490
43491@@ -2343,9 +2343,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
43492 * overflowing the shift of read_errors by hours_since_last.
43493 */
43494 if (hours_since_last >= 8 * sizeof(read_errors))
43495- atomic_set(&rdev->read_errors, 0);
43496+ atomic_set_unchecked(&rdev->read_errors, 0);
43497 else
43498- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
43499+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
43500 }
43501
43502 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
43503@@ -2399,8 +2399,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
43504 return;
43505
43506 check_decay_read_errors(mddev, rdev);
43507- atomic_inc(&rdev->read_errors);
43508- if (atomic_read(&rdev->read_errors) > max_read_errors) {
43509+ atomic_inc_unchecked(&rdev->read_errors);
43510+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
43511 char b[BDEVNAME_SIZE];
43512 bdevname(rdev->bdev, b);
43513
43514@@ -2408,7 +2408,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
43515 "md/raid10:%s: %s: Raid device exceeded "
43516 "read_error threshold [cur %d:max %d]\n",
43517 mdname(mddev), b,
43518- atomic_read(&rdev->read_errors), max_read_errors);
43519+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
43520 printk(KERN_NOTICE
43521 "md/raid10:%s: %s: Failing raid device\n",
43522 mdname(mddev), b);
43523@@ -2563,7 +2563,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
43524 sect +
43525 choose_data_offset(r10_bio, rdev)),
43526 bdevname(rdev->bdev, b));
43527- atomic_add(s, &rdev->corrected_errors);
43528+ atomic_add_unchecked(s, &rdev->corrected_errors);
43529 }
43530
43531 rdev_dec_pending(rdev, mddev);
43532diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
43533index 03f82ab..374bb38 100644
43534--- a/drivers/md/raid5.c
43535+++ b/drivers/md/raid5.c
43536@@ -1991,21 +1991,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
43537 mdname(conf->mddev), STRIPE_SECTORS,
43538 (unsigned long long)s,
43539 bdevname(rdev->bdev, b));
43540- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
43541+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
43542 clear_bit(R5_ReadError, &sh->dev[i].flags);
43543 clear_bit(R5_ReWrite, &sh->dev[i].flags);
43544 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
43545 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
43546
43547- if (atomic_read(&rdev->read_errors))
43548- atomic_set(&rdev->read_errors, 0);
43549+ if (atomic_read_unchecked(&rdev->read_errors))
43550+ atomic_set_unchecked(&rdev->read_errors, 0);
43551 } else {
43552 const char *bdn = bdevname(rdev->bdev, b);
43553 int retry = 0;
43554 int set_bad = 0;
43555
43556 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
43557- atomic_inc(&rdev->read_errors);
43558+ atomic_inc_unchecked(&rdev->read_errors);
43559 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
43560 printk_ratelimited(
43561 KERN_WARNING
43562@@ -2033,7 +2033,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
43563 mdname(conf->mddev),
43564 (unsigned long long)s,
43565 bdn);
43566- } else if (atomic_read(&rdev->read_errors)
43567+ } else if (atomic_read_unchecked(&rdev->read_errors)
43568 > conf->max_nr_stripes)
43569 printk(KERN_WARNING
43570 "md/raid:%s: Too many read errors, failing device %s.\n",
43571diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
43572index 983db75..ef9248c 100644
43573--- a/drivers/media/dvb-core/dvbdev.c
43574+++ b/drivers/media/dvb-core/dvbdev.c
43575@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
43576 const struct dvb_device *template, void *priv, int type)
43577 {
43578 struct dvb_device *dvbdev;
43579- struct file_operations *dvbdevfops;
43580+ file_operations_no_const *dvbdevfops;
43581 struct device *clsdev;
43582 int minor;
43583 int id;
43584diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
43585index 9b6c3bb..baeb5c7 100644
43586--- a/drivers/media/dvb-frontends/dib3000.h
43587+++ b/drivers/media/dvb-frontends/dib3000.h
43588@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
43589 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
43590 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
43591 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
43592-};
43593+} __no_const;
43594
43595 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
43596 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
43597diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
43598index ed8cb90..5ef7f79 100644
43599--- a/drivers/media/pci/cx88/cx88-video.c
43600+++ b/drivers/media/pci/cx88/cx88-video.c
43601@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
43602
43603 /* ------------------------------------------------------------------ */
43604
43605-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43606-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43607-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43608+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43609+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43610+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
43611
43612 module_param_array(video_nr, int, NULL, 0444);
43613 module_param_array(vbi_nr, int, NULL, 0444);
43614diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
43615index 802642d..5534900 100644
43616--- a/drivers/media/pci/ivtv/ivtv-driver.c
43617+++ b/drivers/media/pci/ivtv/ivtv-driver.c
43618@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
43619 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
43620
43621 /* ivtv instance counter */
43622-static atomic_t ivtv_instance = ATOMIC_INIT(0);
43623+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
43624
43625 /* Parameter declarations */
43626 static int cardtype[IVTV_MAX_CARDS];
43627diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
43628index dfd0a21..6bbb465 100644
43629--- a/drivers/media/platform/omap/omap_vout.c
43630+++ b/drivers/media/platform/omap/omap_vout.c
43631@@ -63,7 +63,6 @@ enum omap_vout_channels {
43632 OMAP_VIDEO2,
43633 };
43634
43635-static struct videobuf_queue_ops video_vbq_ops;
43636 /* Variables configurable through module params*/
43637 static u32 video1_numbuffers = 3;
43638 static u32 video2_numbuffers = 3;
43639@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
43640 {
43641 struct videobuf_queue *q;
43642 struct omap_vout_device *vout = NULL;
43643+ static struct videobuf_queue_ops video_vbq_ops = {
43644+ .buf_setup = omap_vout_buffer_setup,
43645+ .buf_prepare = omap_vout_buffer_prepare,
43646+ .buf_release = omap_vout_buffer_release,
43647+ .buf_queue = omap_vout_buffer_queue,
43648+ };
43649
43650 vout = video_drvdata(file);
43651 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
43652@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
43653 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
43654
43655 q = &vout->vbq;
43656- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
43657- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
43658- video_vbq_ops.buf_release = omap_vout_buffer_release;
43659- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
43660 spin_lock_init(&vout->vbq_lock);
43661
43662 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
43663diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
43664index fb2acc5..a2fcbdc4 100644
43665--- a/drivers/media/platform/s5p-tv/mixer.h
43666+++ b/drivers/media/platform/s5p-tv/mixer.h
43667@@ -156,7 +156,7 @@ struct mxr_layer {
43668 /** layer index (unique identifier) */
43669 int idx;
43670 /** callbacks for layer methods */
43671- struct mxr_layer_ops ops;
43672+ struct mxr_layer_ops *ops;
43673 /** format array */
43674 const struct mxr_format **fmt_array;
43675 /** size of format array */
43676diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
43677index 74344c7..a39e70e 100644
43678--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
43679+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
43680@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
43681 {
43682 struct mxr_layer *layer;
43683 int ret;
43684- struct mxr_layer_ops ops = {
43685+ static struct mxr_layer_ops ops = {
43686 .release = mxr_graph_layer_release,
43687 .buffer_set = mxr_graph_buffer_set,
43688 .stream_set = mxr_graph_stream_set,
43689diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
43690index b713403..53cb5ad 100644
43691--- a/drivers/media/platform/s5p-tv/mixer_reg.c
43692+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
43693@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
43694 layer->update_buf = next;
43695 }
43696
43697- layer->ops.buffer_set(layer, layer->update_buf);
43698+ layer->ops->buffer_set(layer, layer->update_buf);
43699
43700 if (done && done != layer->shadow_buf)
43701 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
43702diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
43703index 81b97db..b089ccd 100644
43704--- a/drivers/media/platform/s5p-tv/mixer_video.c
43705+++ b/drivers/media/platform/s5p-tv/mixer_video.c
43706@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
43707 layer->geo.src.height = layer->geo.src.full_height;
43708
43709 mxr_geometry_dump(mdev, &layer->geo);
43710- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
43711+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
43712 mxr_geometry_dump(mdev, &layer->geo);
43713 }
43714
43715@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
43716 layer->geo.dst.full_width = mbus_fmt.width;
43717 layer->geo.dst.full_height = mbus_fmt.height;
43718 layer->geo.dst.field = mbus_fmt.field;
43719- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
43720+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
43721
43722 mxr_geometry_dump(mdev, &layer->geo);
43723 }
43724@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
43725 /* set source size to highest accepted value */
43726 geo->src.full_width = max(geo->dst.full_width, pix->width);
43727 geo->src.full_height = max(geo->dst.full_height, pix->height);
43728- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
43729+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
43730 mxr_geometry_dump(mdev, &layer->geo);
43731 /* set cropping to total visible screen */
43732 geo->src.width = pix->width;
43733@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
43734 geo->src.x_offset = 0;
43735 geo->src.y_offset = 0;
43736 /* assure consistency of geometry */
43737- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
43738+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
43739 mxr_geometry_dump(mdev, &layer->geo);
43740 /* set full size to lowest possible value */
43741 geo->src.full_width = 0;
43742 geo->src.full_height = 0;
43743- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
43744+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
43745 mxr_geometry_dump(mdev, &layer->geo);
43746
43747 /* returning results */
43748@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
43749 target->width = s->r.width;
43750 target->height = s->r.height;
43751
43752- layer->ops.fix_geometry(layer, stage, s->flags);
43753+ layer->ops->fix_geometry(layer, stage, s->flags);
43754
43755 /* retrieve update selection rectangle */
43756 res.left = target->x_offset;
43757@@ -955,13 +955,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
43758 mxr_output_get(mdev);
43759
43760 mxr_layer_update_output(layer);
43761- layer->ops.format_set(layer);
43762+ layer->ops->format_set(layer);
43763 /* enabling layer in hardware */
43764 spin_lock_irqsave(&layer->enq_slock, flags);
43765 layer->state = MXR_LAYER_STREAMING;
43766 spin_unlock_irqrestore(&layer->enq_slock, flags);
43767
43768- layer->ops.stream_set(layer, MXR_ENABLE);
43769+ layer->ops->stream_set(layer, MXR_ENABLE);
43770 mxr_streamer_get(mdev);
43771
43772 return 0;
43773@@ -1031,7 +1031,7 @@ static int stop_streaming(struct vb2_queue *vq)
43774 spin_unlock_irqrestore(&layer->enq_slock, flags);
43775
43776 /* disabling layer in hardware */
43777- layer->ops.stream_set(layer, MXR_DISABLE);
43778+ layer->ops->stream_set(layer, MXR_DISABLE);
43779 /* remove one streamer */
43780 mxr_streamer_put(mdev);
43781 /* allow changes in output configuration */
43782@@ -1070,8 +1070,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
43783
43784 void mxr_layer_release(struct mxr_layer *layer)
43785 {
43786- if (layer->ops.release)
43787- layer->ops.release(layer);
43788+ if (layer->ops->release)
43789+ layer->ops->release(layer);
43790 }
43791
43792 void mxr_base_layer_release(struct mxr_layer *layer)
43793@@ -1097,7 +1097,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
43794
43795 layer->mdev = mdev;
43796 layer->idx = idx;
43797- layer->ops = *ops;
43798+ layer->ops = ops;
43799
43800 spin_lock_init(&layer->enq_slock);
43801 INIT_LIST_HEAD(&layer->enq_list);
43802diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
43803index c9388c4..ce71ece 100644
43804--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
43805+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
43806@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
43807 {
43808 struct mxr_layer *layer;
43809 int ret;
43810- struct mxr_layer_ops ops = {
43811+ static struct mxr_layer_ops ops = {
43812 .release = mxr_vp_layer_release,
43813 .buffer_set = mxr_vp_buffer_set,
43814 .stream_set = mxr_vp_stream_set,
43815diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
43816index 2d4e73b..8b4d5b6 100644
43817--- a/drivers/media/platform/vivi.c
43818+++ b/drivers/media/platform/vivi.c
43819@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
43820 MODULE_LICENSE("Dual BSD/GPL");
43821 MODULE_VERSION(VIVI_VERSION);
43822
43823-static unsigned video_nr = -1;
43824-module_param(video_nr, uint, 0644);
43825+static int video_nr = -1;
43826+module_param(video_nr, int, 0644);
43827 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
43828
43829 static unsigned n_devs = 1;
43830diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
43831index 545c04c..a14bded 100644
43832--- a/drivers/media/radio/radio-cadet.c
43833+++ b/drivers/media/radio/radio-cadet.c
43834@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
43835 unsigned char readbuf[RDS_BUFFER];
43836 int i = 0;
43837
43838+ if (count > RDS_BUFFER)
43839+ return -EFAULT;
43840 mutex_lock(&dev->lock);
43841 if (dev->rdsstat == 0)
43842 cadet_start_rds(dev);
43843@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
43844 while (i < count && dev->rdsin != dev->rdsout)
43845 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
43846
43847- if (i && copy_to_user(data, readbuf, i))
43848+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
43849 i = -EFAULT;
43850 unlock:
43851 mutex_unlock(&dev->lock);
43852diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
43853index 5236035..c622c74 100644
43854--- a/drivers/media/radio/radio-maxiradio.c
43855+++ b/drivers/media/radio/radio-maxiradio.c
43856@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
43857 /* TEA5757 pin mappings */
43858 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
43859
43860-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
43861+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
43862
43863 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
43864 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
43865diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
43866index 050b3bb..79f62b9 100644
43867--- a/drivers/media/radio/radio-shark.c
43868+++ b/drivers/media/radio/radio-shark.c
43869@@ -79,7 +79,7 @@ struct shark_device {
43870 u32 last_val;
43871 };
43872
43873-static atomic_t shark_instance = ATOMIC_INIT(0);
43874+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
43875
43876 static void shark_write_val(struct snd_tea575x *tea, u32 val)
43877 {
43878diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
43879index 8654e0d..0608a64 100644
43880--- a/drivers/media/radio/radio-shark2.c
43881+++ b/drivers/media/radio/radio-shark2.c
43882@@ -74,7 +74,7 @@ struct shark_device {
43883 u8 *transfer_buffer;
43884 };
43885
43886-static atomic_t shark_instance = ATOMIC_INIT(0);
43887+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
43888
43889 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
43890 {
43891diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
43892index 2fd9009..278cc1e 100644
43893--- a/drivers/media/radio/radio-si476x.c
43894+++ b/drivers/media/radio/radio-si476x.c
43895@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
43896 struct si476x_radio *radio;
43897 struct v4l2_ctrl *ctrl;
43898
43899- static atomic_t instance = ATOMIC_INIT(0);
43900+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
43901
43902 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
43903 if (!radio)
43904diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
43905index 46da365..3ba4206 100644
43906--- a/drivers/media/rc/rc-main.c
43907+++ b/drivers/media/rc/rc-main.c
43908@@ -1065,7 +1065,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
43909 int rc_register_device(struct rc_dev *dev)
43910 {
43911 static bool raw_init = false; /* raw decoders loaded? */
43912- static atomic_t devno = ATOMIC_INIT(0);
43913+ static atomic_unchecked_t devno = ATOMIC_INIT(0);
43914 struct rc_map *rc_map;
43915 const char *path;
43916 int rc;
43917@@ -1096,7 +1096,7 @@ int rc_register_device(struct rc_dev *dev)
43918 */
43919 mutex_lock(&dev->lock);
43920
43921- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
43922+ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
43923 dev_set_name(&dev->dev, "rc%ld", dev->devno);
43924 dev_set_drvdata(&dev->dev, dev);
43925 rc = device_add(&dev->dev);
43926diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
43927index 20e345d..da56fe4 100644
43928--- a/drivers/media/usb/dvb-usb/cxusb.c
43929+++ b/drivers/media/usb/dvb-usb/cxusb.c
43930@@ -1101,7 +1101,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
43931
43932 struct dib0700_adapter_state {
43933 int (*set_param_save) (struct dvb_frontend *);
43934-};
43935+} __no_const;
43936
43937 static int dib7070_set_param_override(struct dvb_frontend *fe)
43938 {
43939diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
43940index c1a63b2..dbcbfb6 100644
43941--- a/drivers/media/usb/dvb-usb/dw2102.c
43942+++ b/drivers/media/usb/dvb-usb/dw2102.c
43943@@ -121,7 +121,7 @@ struct su3000_state {
43944
43945 struct s6x0_state {
43946 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
43947-};
43948+} __no_const;
43949
43950 /* debug */
43951 static int dvb_usb_dw2102_debug;
43952diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43953index 8f7a6a4..eb0e1d4 100644
43954--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43955+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43956@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
43957 __u32 reserved;
43958 };
43959
43960-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43961+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
43962 enum v4l2_memory memory)
43963 {
43964 void __user *up_pln;
43965@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43966 return 0;
43967 }
43968
43969-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43970+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
43971 enum v4l2_memory memory)
43972 {
43973 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
43974@@ -425,7 +425,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
43975 * by passing a very big num_planes value */
43976 uplane = compat_alloc_user_space(num_planes *
43977 sizeof(struct v4l2_plane));
43978- kp->m.planes = uplane;
43979+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
43980
43981 while (--num_planes >= 0) {
43982 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
43983@@ -496,7 +496,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
43984 if (num_planes == 0)
43985 return 0;
43986
43987- uplane = kp->m.planes;
43988+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
43989 if (get_user(p, &up->m.planes))
43990 return -EFAULT;
43991 uplane32 = compat_ptr(p);
43992@@ -550,7 +550,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
43993 get_user(kp->capability, &up->capability) ||
43994 get_user(kp->flags, &up->flags))
43995 return -EFAULT;
43996- kp->base = compat_ptr(tmp);
43997+ kp->base = (void __force_kernel *)compat_ptr(tmp);
43998 get_v4l2_pix_format(&kp->fmt, &up->fmt);
43999 return 0;
44000 }
44001@@ -656,7 +656,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
44002 n * sizeof(struct v4l2_ext_control32)))
44003 return -EFAULT;
44004 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
44005- kp->controls = kcontrols;
44006+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
44007 while (--n >= 0) {
44008 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
44009 return -EFAULT;
44010@@ -678,7 +678,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
44011 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
44012 {
44013 struct v4l2_ext_control32 __user *ucontrols;
44014- struct v4l2_ext_control __user *kcontrols = kp->controls;
44015+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
44016 int n = kp->count;
44017 compat_caddr_t p;
44018
44019@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
44020 put_user(kp->start_block, &up->start_block) ||
44021 put_user(kp->blocks, &up->blocks) ||
44022 put_user(tmp, &up->edid) ||
44023- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
44024+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
44025 return -EFAULT;
44026 return 0;
44027 }
44028diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
44029index fb46790..ae1f8fa 100644
44030--- a/drivers/media/v4l2-core/v4l2-ctrls.c
44031+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
44032@@ -1396,8 +1396,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
44033 return 0;
44034
44035 case V4L2_CTRL_TYPE_STRING:
44036- len = strlen(c->string);
44037- if (len < ctrl->minimum)
44038+ len = strlen_user(c->string);
44039+ if (!len || len < ctrl->minimum)
44040 return -ERANGE;
44041 if ((len - ctrl->minimum) % ctrl->step)
44042 return -ERANGE;
44043diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
44044index 02d1b63..5fd6b16 100644
44045--- a/drivers/media/v4l2-core/v4l2-device.c
44046+++ b/drivers/media/v4l2-core/v4l2-device.c
44047@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
44048 EXPORT_SYMBOL_GPL(v4l2_device_put);
44049
44050 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
44051- atomic_t *instance)
44052+ atomic_unchecked_t *instance)
44053 {
44054- int num = atomic_inc_return(instance) - 1;
44055+ int num = atomic_inc_return_unchecked(instance) - 1;
44056 int len = strlen(basename);
44057
44058 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
44059diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
44060index 68e6b5e..8eb2aec 100644
44061--- a/drivers/media/v4l2-core/v4l2-ioctl.c
44062+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
44063@@ -1939,7 +1939,8 @@ struct v4l2_ioctl_info {
44064 struct file *file, void *fh, void *p);
44065 } u;
44066 void (*debug)(const void *arg, bool write_only);
44067-};
44068+} __do_const;
44069+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
44070
44071 /* This control needs a priority check */
44072 #define INFO_FL_PRIO (1 << 0)
44073@@ -2120,7 +2121,7 @@ static long __video_do_ioctl(struct file *file,
44074 struct video_device *vfd = video_devdata(file);
44075 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
44076 bool write_only = false;
44077- struct v4l2_ioctl_info default_info;
44078+ v4l2_ioctl_info_no_const default_info;
44079 const struct v4l2_ioctl_info *info;
44080 void *fh = file->private_data;
44081 struct v4l2_fh *vfh = NULL;
44082@@ -2194,7 +2195,7 @@ done:
44083 }
44084
44085 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44086- void * __user *user_ptr, void ***kernel_ptr)
44087+ void __user **user_ptr, void ***kernel_ptr)
44088 {
44089 int ret = 0;
44090
44091@@ -2210,7 +2211,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44092 ret = -EINVAL;
44093 break;
44094 }
44095- *user_ptr = (void __user *)buf->m.planes;
44096+ *user_ptr = (void __force_user *)buf->m.planes;
44097 *kernel_ptr = (void *)&buf->m.planes;
44098 *array_size = sizeof(struct v4l2_plane) * buf->length;
44099 ret = 1;
44100@@ -2245,7 +2246,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44101 ret = -EINVAL;
44102 break;
44103 }
44104- *user_ptr = (void __user *)ctrls->controls;
44105+ *user_ptr = (void __force_user *)ctrls->controls;
44106 *kernel_ptr = (void *)&ctrls->controls;
44107 *array_size = sizeof(struct v4l2_ext_control)
44108 * ctrls->count;
44109@@ -2340,7 +2341,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
44110 err = -ENOTTY;
44111
44112 if (has_array_args) {
44113- *kernel_ptr = user_ptr;
44114+ *kernel_ptr = (void __force_kernel *)user_ptr;
44115 if (copy_to_user(user_ptr, mbuf, array_size))
44116 err = -EFAULT;
44117 goto out_array_args;
44118diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
44119index 767ff4d..c69d259 100644
44120--- a/drivers/message/fusion/mptbase.c
44121+++ b/drivers/message/fusion/mptbase.c
44122@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
44123 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
44124 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
44125
44126+#ifdef CONFIG_GRKERNSEC_HIDESYM
44127+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
44128+#else
44129 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
44130 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
44131+#endif
44132+
44133 /*
44134 * Rounding UP to nearest 4-kB boundary here...
44135 */
44136@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
44137 ioc->facts.GlobalCredits);
44138
44139 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
44140+#ifdef CONFIG_GRKERNSEC_HIDESYM
44141+ NULL, NULL);
44142+#else
44143 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
44144+#endif
44145 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
44146 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
44147 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
44148diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
44149index dd239bd..689c4f7 100644
44150--- a/drivers/message/fusion/mptsas.c
44151+++ b/drivers/message/fusion/mptsas.c
44152@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
44153 return 0;
44154 }
44155
44156+static inline void
44157+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
44158+{
44159+ if (phy_info->port_details) {
44160+ phy_info->port_details->rphy = rphy;
44161+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
44162+ ioc->name, rphy));
44163+ }
44164+
44165+ if (rphy) {
44166+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
44167+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
44168+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
44169+ ioc->name, rphy, rphy->dev.release));
44170+ }
44171+}
44172+
44173 /* no mutex */
44174 static void
44175 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
44176@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
44177 return NULL;
44178 }
44179
44180-static inline void
44181-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
44182-{
44183- if (phy_info->port_details) {
44184- phy_info->port_details->rphy = rphy;
44185- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
44186- ioc->name, rphy));
44187- }
44188-
44189- if (rphy) {
44190- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
44191- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
44192- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
44193- ioc->name, rphy, rphy->dev.release));
44194- }
44195-}
44196-
44197 static inline struct sas_port *
44198 mptsas_get_port(struct mptsas_phyinfo *phy_info)
44199 {
44200diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
44201index 727819c..ad74694 100644
44202--- a/drivers/message/fusion/mptscsih.c
44203+++ b/drivers/message/fusion/mptscsih.c
44204@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
44205
44206 h = shost_priv(SChost);
44207
44208- if (h) {
44209- if (h->info_kbuf == NULL)
44210- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
44211- return h->info_kbuf;
44212- h->info_kbuf[0] = '\0';
44213+ if (!h)
44214+ return NULL;
44215
44216- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
44217- h->info_kbuf[size-1] = '\0';
44218- }
44219+ if (h->info_kbuf == NULL)
44220+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
44221+ return h->info_kbuf;
44222+ h->info_kbuf[0] = '\0';
44223+
44224+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
44225+ h->info_kbuf[size-1] = '\0';
44226
44227 return h->info_kbuf;
44228 }
44229diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
44230index b7d87cd..3fb36da 100644
44231--- a/drivers/message/i2o/i2o_proc.c
44232+++ b/drivers/message/i2o/i2o_proc.c
44233@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
44234 "Array Controller Device"
44235 };
44236
44237-static char *chtostr(char *tmp, u8 *chars, int n)
44238-{
44239- tmp[0] = 0;
44240- return strncat(tmp, (char *)chars, n);
44241-}
44242-
44243 static int i2o_report_query_status(struct seq_file *seq, int block_status,
44244 char *group)
44245 {
44246@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
44247 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
44248 {
44249 struct i2o_controller *c = (struct i2o_controller *)seq->private;
44250- static u32 work32[5];
44251- static u8 *work8 = (u8 *) work32;
44252- static u16 *work16 = (u16 *) work32;
44253+ u32 work32[5];
44254+ u8 *work8 = (u8 *) work32;
44255+ u16 *work16 = (u16 *) work32;
44256 int token;
44257 u32 hwcap;
44258
44259@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
44260 } *result;
44261
44262 i2o_exec_execute_ddm_table ddm_table;
44263- char tmp[28 + 1];
44264
44265 result = kmalloc(sizeof(*result), GFP_KERNEL);
44266 if (!result)
44267@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
44268
44269 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
44270 seq_printf(seq, "%-#8x", ddm_table.module_id);
44271- seq_printf(seq, "%-29s",
44272- chtostr(tmp, ddm_table.module_name_version, 28));
44273+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
44274 seq_printf(seq, "%9d ", ddm_table.data_size);
44275 seq_printf(seq, "%8d", ddm_table.code_size);
44276
44277@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
44278
44279 i2o_driver_result_table *result;
44280 i2o_driver_store_table *dst;
44281- char tmp[28 + 1];
44282
44283 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
44284 if (result == NULL)
44285@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
44286
44287 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
44288 seq_printf(seq, "%-#8x", dst->module_id);
44289- seq_printf(seq, "%-29s",
44290- chtostr(tmp, dst->module_name_version, 28));
44291- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
44292+ seq_printf(seq, "%-.28s", dst->module_name_version);
44293+ seq_printf(seq, "%-.8s", dst->date);
44294 seq_printf(seq, "%8d ", dst->module_size);
44295 seq_printf(seq, "%8d ", dst->mpb_size);
44296 seq_printf(seq, "0x%04x", dst->module_flags);
44297@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
44298 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
44299 {
44300 struct i2o_device *d = (struct i2o_device *)seq->private;
44301- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
44302+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
44303 // == (allow) 512d bytes (max)
44304- static u16 *work16 = (u16 *) work32;
44305+ u16 *work16 = (u16 *) work32;
44306 int token;
44307- char tmp[16 + 1];
44308
44309 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
44310
44311@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
44312 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
44313 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
44314 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
44315- seq_printf(seq, "Vendor info : %s\n",
44316- chtostr(tmp, (u8 *) (work32 + 2), 16));
44317- seq_printf(seq, "Product info : %s\n",
44318- chtostr(tmp, (u8 *) (work32 + 6), 16));
44319- seq_printf(seq, "Description : %s\n",
44320- chtostr(tmp, (u8 *) (work32 + 10), 16));
44321- seq_printf(seq, "Product rev. : %s\n",
44322- chtostr(tmp, (u8 *) (work32 + 14), 8));
44323+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
44324+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
44325+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
44326+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
44327
44328 seq_printf(seq, "Serial number : ");
44329 print_serial_number(seq, (u8 *) (work32 + 16),
44330@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
44331 u8 pad[256]; // allow up to 256 byte (max) serial number
44332 } result;
44333
44334- char tmp[24 + 1];
44335-
44336 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
44337
44338 if (token < 0) {
44339@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
44340 }
44341
44342 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
44343- seq_printf(seq, "Module name : %s\n",
44344- chtostr(tmp, result.module_name, 24));
44345- seq_printf(seq, "Module revision : %s\n",
44346- chtostr(tmp, result.module_rev, 8));
44347+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
44348+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
44349
44350 seq_printf(seq, "Serial number : ");
44351 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
44352@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44353 u8 instance_number[4];
44354 } result;
44355
44356- char tmp[64 + 1];
44357-
44358 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
44359
44360 if (token < 0) {
44361@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44362 return 0;
44363 }
44364
44365- seq_printf(seq, "Device name : %s\n",
44366- chtostr(tmp, result.device_name, 64));
44367- seq_printf(seq, "Service name : %s\n",
44368- chtostr(tmp, result.service_name, 64));
44369- seq_printf(seq, "Physical name : %s\n",
44370- chtostr(tmp, result.physical_location, 64));
44371- seq_printf(seq, "Instance number : %s\n",
44372- chtostr(tmp, result.instance_number, 4));
44373+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
44374+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
44375+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
44376+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
44377
44378 return 0;
44379 }
44380@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44381 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
44382 {
44383 struct i2o_device *d = (struct i2o_device *)seq->private;
44384- static u32 work32[12];
44385- static u16 *work16 = (u16 *) work32;
44386- static u8 *work8 = (u8 *) work32;
44387+ u32 work32[12];
44388+ u16 *work16 = (u16 *) work32;
44389+ u8 *work8 = (u8 *) work32;
44390 int token;
44391
44392 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
44393diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
44394index a8c08f3..155fe3d 100644
44395--- a/drivers/message/i2o/iop.c
44396+++ b/drivers/message/i2o/iop.c
44397@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
44398
44399 spin_lock_irqsave(&c->context_list_lock, flags);
44400
44401- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
44402- atomic_inc(&c->context_list_counter);
44403+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
44404+ atomic_inc_unchecked(&c->context_list_counter);
44405
44406- entry->context = atomic_read(&c->context_list_counter);
44407+ entry->context = atomic_read_unchecked(&c->context_list_counter);
44408
44409 list_add(&entry->list, &c->context_list);
44410
44411@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
44412
44413 #if BITS_PER_LONG == 64
44414 spin_lock_init(&c->context_list_lock);
44415- atomic_set(&c->context_list_counter, 0);
44416+ atomic_set_unchecked(&c->context_list_counter, 0);
44417 INIT_LIST_HEAD(&c->context_list);
44418 #endif
44419
44420diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
44421index fcbb2e9..2635e11 100644
44422--- a/drivers/mfd/janz-cmodio.c
44423+++ b/drivers/mfd/janz-cmodio.c
44424@@ -13,6 +13,7 @@
44425
44426 #include <linux/kernel.h>
44427 #include <linux/module.h>
44428+#include <linux/slab.h>
44429 #include <linux/init.h>
44430 #include <linux/pci.h>
44431 #include <linux/interrupt.h>
44432diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
44433index 176aa26..27811b2 100644
44434--- a/drivers/mfd/max8925-i2c.c
44435+++ b/drivers/mfd/max8925-i2c.c
44436@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
44437 const struct i2c_device_id *id)
44438 {
44439 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
44440- static struct max8925_chip *chip;
44441+ struct max8925_chip *chip;
44442 struct device_node *node = client->dev.of_node;
44443
44444 if (node && !pdata) {
44445diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
44446index c0f608e..286f8ec 100644
44447--- a/drivers/mfd/tps65910.c
44448+++ b/drivers/mfd/tps65910.c
44449@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
44450 struct tps65910_platform_data *pdata)
44451 {
44452 int ret = 0;
44453- static struct regmap_irq_chip *tps6591x_irqs_chip;
44454+ struct regmap_irq_chip *tps6591x_irqs_chip;
44455
44456 if (!irq) {
44457 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
44458diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
44459index 9aa6d1e..1631bfc 100644
44460--- a/drivers/mfd/twl4030-irq.c
44461+++ b/drivers/mfd/twl4030-irq.c
44462@@ -35,6 +35,7 @@
44463 #include <linux/of.h>
44464 #include <linux/irqdomain.h>
44465 #include <linux/i2c/twl.h>
44466+#include <asm/pgtable.h>
44467
44468 #include "twl-core.h"
44469
44470@@ -726,10 +727,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
44471 * Install an irq handler for each of the SIH modules;
44472 * clone dummy irq_chip since PIH can't *do* anything
44473 */
44474- twl4030_irq_chip = dummy_irq_chip;
44475- twl4030_irq_chip.name = "twl4030";
44476+ pax_open_kernel();
44477+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
44478+ *(const char **)&twl4030_irq_chip.name = "twl4030";
44479
44480- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
44481+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
44482+ pax_close_kernel();
44483
44484 for (i = irq_base; i < irq_end; i++) {
44485 irq_set_chip_and_handler(i, &twl4030_irq_chip,
44486diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
44487index 464419b..64bae8d 100644
44488--- a/drivers/misc/c2port/core.c
44489+++ b/drivers/misc/c2port/core.c
44490@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
44491 goto error_idr_alloc;
44492 c2dev->id = ret;
44493
44494- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
44495+ pax_open_kernel();
44496+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
44497+ pax_close_kernel();
44498
44499 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
44500 "c2port%d", c2dev->id);
44501diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
44502index 9c34e57..b981cda 100644
44503--- a/drivers/misc/eeprom/sunxi_sid.c
44504+++ b/drivers/misc/eeprom/sunxi_sid.c
44505@@ -127,7 +127,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
44506
44507 platform_set_drvdata(pdev, sid_data);
44508
44509- sid_bin_attr.size = sid_data->keysize;
44510+ pax_open_kernel();
44511+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
44512+ pax_close_kernel();
44513 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
44514 return -ENODEV;
44515
44516diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
44517index 36f5d52..32311c3 100644
44518--- a/drivers/misc/kgdbts.c
44519+++ b/drivers/misc/kgdbts.c
44520@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
44521 char before[BREAK_INSTR_SIZE];
44522 char after[BREAK_INSTR_SIZE];
44523
44524- probe_kernel_read(before, (char *)kgdbts_break_test,
44525+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
44526 BREAK_INSTR_SIZE);
44527 init_simple_test();
44528 ts.tst = plant_and_detach_test;
44529@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
44530 /* Activate test with initial breakpoint */
44531 if (!is_early)
44532 kgdb_breakpoint();
44533- probe_kernel_read(after, (char *)kgdbts_break_test,
44534+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
44535 BREAK_INSTR_SIZE);
44536 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
44537 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
44538diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
44539index 036effe..b3a6336 100644
44540--- a/drivers/misc/lis3lv02d/lis3lv02d.c
44541+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
44542@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
44543 * the lid is closed. This leads to interrupts as soon as a little move
44544 * is done.
44545 */
44546- atomic_inc(&lis3->count);
44547+ atomic_inc_unchecked(&lis3->count);
44548
44549 wake_up_interruptible(&lis3->misc_wait);
44550 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
44551@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
44552 if (lis3->pm_dev)
44553 pm_runtime_get_sync(lis3->pm_dev);
44554
44555- atomic_set(&lis3->count, 0);
44556+ atomic_set_unchecked(&lis3->count, 0);
44557 return 0;
44558 }
44559
44560@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
44561 add_wait_queue(&lis3->misc_wait, &wait);
44562 while (true) {
44563 set_current_state(TASK_INTERRUPTIBLE);
44564- data = atomic_xchg(&lis3->count, 0);
44565+ data = atomic_xchg_unchecked(&lis3->count, 0);
44566 if (data)
44567 break;
44568
44569@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
44570 struct lis3lv02d, miscdev);
44571
44572 poll_wait(file, &lis3->misc_wait, wait);
44573- if (atomic_read(&lis3->count))
44574+ if (atomic_read_unchecked(&lis3->count))
44575 return POLLIN | POLLRDNORM;
44576 return 0;
44577 }
44578diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
44579index c439c82..1f20f57 100644
44580--- a/drivers/misc/lis3lv02d/lis3lv02d.h
44581+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
44582@@ -297,7 +297,7 @@ struct lis3lv02d {
44583 struct input_polled_dev *idev; /* input device */
44584 struct platform_device *pdev; /* platform device */
44585 struct regulator_bulk_data regulators[2];
44586- atomic_t count; /* interrupt count after last read */
44587+ atomic_unchecked_t count; /* interrupt count after last read */
44588 union axis_conversion ac; /* hw -> logical axis */
44589 int mapped_btns[3];
44590
44591diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
44592index 2f30bad..c4c13d0 100644
44593--- a/drivers/misc/sgi-gru/gruhandles.c
44594+++ b/drivers/misc/sgi-gru/gruhandles.c
44595@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
44596 unsigned long nsec;
44597
44598 nsec = CLKS2NSEC(clks);
44599- atomic_long_inc(&mcs_op_statistics[op].count);
44600- atomic_long_add(nsec, &mcs_op_statistics[op].total);
44601+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
44602+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
44603 if (mcs_op_statistics[op].max < nsec)
44604 mcs_op_statistics[op].max = nsec;
44605 }
44606diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
44607index 4f76359..cdfcb2e 100644
44608--- a/drivers/misc/sgi-gru/gruprocfs.c
44609+++ b/drivers/misc/sgi-gru/gruprocfs.c
44610@@ -32,9 +32,9 @@
44611
44612 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
44613
44614-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
44615+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
44616 {
44617- unsigned long val = atomic_long_read(v);
44618+ unsigned long val = atomic_long_read_unchecked(v);
44619
44620 seq_printf(s, "%16lu %s\n", val, id);
44621 }
44622@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
44623
44624 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
44625 for (op = 0; op < mcsop_last; op++) {
44626- count = atomic_long_read(&mcs_op_statistics[op].count);
44627- total = atomic_long_read(&mcs_op_statistics[op].total);
44628+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
44629+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
44630 max = mcs_op_statistics[op].max;
44631 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
44632 count ? total / count : 0, max);
44633diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
44634index 5c3ce24..4915ccb 100644
44635--- a/drivers/misc/sgi-gru/grutables.h
44636+++ b/drivers/misc/sgi-gru/grutables.h
44637@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
44638 * GRU statistics.
44639 */
44640 struct gru_stats_s {
44641- atomic_long_t vdata_alloc;
44642- atomic_long_t vdata_free;
44643- atomic_long_t gts_alloc;
44644- atomic_long_t gts_free;
44645- atomic_long_t gms_alloc;
44646- atomic_long_t gms_free;
44647- atomic_long_t gts_double_allocate;
44648- atomic_long_t assign_context;
44649- atomic_long_t assign_context_failed;
44650- atomic_long_t free_context;
44651- atomic_long_t load_user_context;
44652- atomic_long_t load_kernel_context;
44653- atomic_long_t lock_kernel_context;
44654- atomic_long_t unlock_kernel_context;
44655- atomic_long_t steal_user_context;
44656- atomic_long_t steal_kernel_context;
44657- atomic_long_t steal_context_failed;
44658- atomic_long_t nopfn;
44659- atomic_long_t asid_new;
44660- atomic_long_t asid_next;
44661- atomic_long_t asid_wrap;
44662- atomic_long_t asid_reuse;
44663- atomic_long_t intr;
44664- atomic_long_t intr_cbr;
44665- atomic_long_t intr_tfh;
44666- atomic_long_t intr_spurious;
44667- atomic_long_t intr_mm_lock_failed;
44668- atomic_long_t call_os;
44669- atomic_long_t call_os_wait_queue;
44670- atomic_long_t user_flush_tlb;
44671- atomic_long_t user_unload_context;
44672- atomic_long_t user_exception;
44673- atomic_long_t set_context_option;
44674- atomic_long_t check_context_retarget_intr;
44675- atomic_long_t check_context_unload;
44676- atomic_long_t tlb_dropin;
44677- atomic_long_t tlb_preload_page;
44678- atomic_long_t tlb_dropin_fail_no_asid;
44679- atomic_long_t tlb_dropin_fail_upm;
44680- atomic_long_t tlb_dropin_fail_invalid;
44681- atomic_long_t tlb_dropin_fail_range_active;
44682- atomic_long_t tlb_dropin_fail_idle;
44683- atomic_long_t tlb_dropin_fail_fmm;
44684- atomic_long_t tlb_dropin_fail_no_exception;
44685- atomic_long_t tfh_stale_on_fault;
44686- atomic_long_t mmu_invalidate_range;
44687- atomic_long_t mmu_invalidate_page;
44688- atomic_long_t flush_tlb;
44689- atomic_long_t flush_tlb_gru;
44690- atomic_long_t flush_tlb_gru_tgh;
44691- atomic_long_t flush_tlb_gru_zero_asid;
44692+ atomic_long_unchecked_t vdata_alloc;
44693+ atomic_long_unchecked_t vdata_free;
44694+ atomic_long_unchecked_t gts_alloc;
44695+ atomic_long_unchecked_t gts_free;
44696+ atomic_long_unchecked_t gms_alloc;
44697+ atomic_long_unchecked_t gms_free;
44698+ atomic_long_unchecked_t gts_double_allocate;
44699+ atomic_long_unchecked_t assign_context;
44700+ atomic_long_unchecked_t assign_context_failed;
44701+ atomic_long_unchecked_t free_context;
44702+ atomic_long_unchecked_t load_user_context;
44703+ atomic_long_unchecked_t load_kernel_context;
44704+ atomic_long_unchecked_t lock_kernel_context;
44705+ atomic_long_unchecked_t unlock_kernel_context;
44706+ atomic_long_unchecked_t steal_user_context;
44707+ atomic_long_unchecked_t steal_kernel_context;
44708+ atomic_long_unchecked_t steal_context_failed;
44709+ atomic_long_unchecked_t nopfn;
44710+ atomic_long_unchecked_t asid_new;
44711+ atomic_long_unchecked_t asid_next;
44712+ atomic_long_unchecked_t asid_wrap;
44713+ atomic_long_unchecked_t asid_reuse;
44714+ atomic_long_unchecked_t intr;
44715+ atomic_long_unchecked_t intr_cbr;
44716+ atomic_long_unchecked_t intr_tfh;
44717+ atomic_long_unchecked_t intr_spurious;
44718+ atomic_long_unchecked_t intr_mm_lock_failed;
44719+ atomic_long_unchecked_t call_os;
44720+ atomic_long_unchecked_t call_os_wait_queue;
44721+ atomic_long_unchecked_t user_flush_tlb;
44722+ atomic_long_unchecked_t user_unload_context;
44723+ atomic_long_unchecked_t user_exception;
44724+ atomic_long_unchecked_t set_context_option;
44725+ atomic_long_unchecked_t check_context_retarget_intr;
44726+ atomic_long_unchecked_t check_context_unload;
44727+ atomic_long_unchecked_t tlb_dropin;
44728+ atomic_long_unchecked_t tlb_preload_page;
44729+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
44730+ atomic_long_unchecked_t tlb_dropin_fail_upm;
44731+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
44732+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
44733+ atomic_long_unchecked_t tlb_dropin_fail_idle;
44734+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
44735+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
44736+ atomic_long_unchecked_t tfh_stale_on_fault;
44737+ atomic_long_unchecked_t mmu_invalidate_range;
44738+ atomic_long_unchecked_t mmu_invalidate_page;
44739+ atomic_long_unchecked_t flush_tlb;
44740+ atomic_long_unchecked_t flush_tlb_gru;
44741+ atomic_long_unchecked_t flush_tlb_gru_tgh;
44742+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
44743
44744- atomic_long_t copy_gpa;
44745- atomic_long_t read_gpa;
44746+ atomic_long_unchecked_t copy_gpa;
44747+ atomic_long_unchecked_t read_gpa;
44748
44749- atomic_long_t mesq_receive;
44750- atomic_long_t mesq_receive_none;
44751- atomic_long_t mesq_send;
44752- atomic_long_t mesq_send_failed;
44753- atomic_long_t mesq_noop;
44754- atomic_long_t mesq_send_unexpected_error;
44755- atomic_long_t mesq_send_lb_overflow;
44756- atomic_long_t mesq_send_qlimit_reached;
44757- atomic_long_t mesq_send_amo_nacked;
44758- atomic_long_t mesq_send_put_nacked;
44759- atomic_long_t mesq_page_overflow;
44760- atomic_long_t mesq_qf_locked;
44761- atomic_long_t mesq_qf_noop_not_full;
44762- atomic_long_t mesq_qf_switch_head_failed;
44763- atomic_long_t mesq_qf_unexpected_error;
44764- atomic_long_t mesq_noop_unexpected_error;
44765- atomic_long_t mesq_noop_lb_overflow;
44766- atomic_long_t mesq_noop_qlimit_reached;
44767- atomic_long_t mesq_noop_amo_nacked;
44768- atomic_long_t mesq_noop_put_nacked;
44769- atomic_long_t mesq_noop_page_overflow;
44770+ atomic_long_unchecked_t mesq_receive;
44771+ atomic_long_unchecked_t mesq_receive_none;
44772+ atomic_long_unchecked_t mesq_send;
44773+ atomic_long_unchecked_t mesq_send_failed;
44774+ atomic_long_unchecked_t mesq_noop;
44775+ atomic_long_unchecked_t mesq_send_unexpected_error;
44776+ atomic_long_unchecked_t mesq_send_lb_overflow;
44777+ atomic_long_unchecked_t mesq_send_qlimit_reached;
44778+ atomic_long_unchecked_t mesq_send_amo_nacked;
44779+ atomic_long_unchecked_t mesq_send_put_nacked;
44780+ atomic_long_unchecked_t mesq_page_overflow;
44781+ atomic_long_unchecked_t mesq_qf_locked;
44782+ atomic_long_unchecked_t mesq_qf_noop_not_full;
44783+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
44784+ atomic_long_unchecked_t mesq_qf_unexpected_error;
44785+ atomic_long_unchecked_t mesq_noop_unexpected_error;
44786+ atomic_long_unchecked_t mesq_noop_lb_overflow;
44787+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
44788+ atomic_long_unchecked_t mesq_noop_amo_nacked;
44789+ atomic_long_unchecked_t mesq_noop_put_nacked;
44790+ atomic_long_unchecked_t mesq_noop_page_overflow;
44791
44792 };
44793
44794@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
44795 tghop_invalidate, mcsop_last};
44796
44797 struct mcs_op_statistic {
44798- atomic_long_t count;
44799- atomic_long_t total;
44800+ atomic_long_unchecked_t count;
44801+ atomic_long_unchecked_t total;
44802 unsigned long max;
44803 };
44804
44805@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
44806
44807 #define STAT(id) do { \
44808 if (gru_options & OPT_STATS) \
44809- atomic_long_inc(&gru_stats.id); \
44810+ atomic_long_inc_unchecked(&gru_stats.id); \
44811 } while (0)
44812
44813 #ifdef CONFIG_SGI_GRU_DEBUG
44814diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
44815index c862cd4..0d176fe 100644
44816--- a/drivers/misc/sgi-xp/xp.h
44817+++ b/drivers/misc/sgi-xp/xp.h
44818@@ -288,7 +288,7 @@ struct xpc_interface {
44819 xpc_notify_func, void *);
44820 void (*received) (short, int, void *);
44821 enum xp_retval (*partid_to_nasids) (short, void *);
44822-};
44823+} __no_const;
44824
44825 extern struct xpc_interface xpc_interface;
44826
44827diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
44828index 01be66d..e3a0c7e 100644
44829--- a/drivers/misc/sgi-xp/xp_main.c
44830+++ b/drivers/misc/sgi-xp/xp_main.c
44831@@ -78,13 +78,13 @@ xpc_notloaded(void)
44832 }
44833
44834 struct xpc_interface xpc_interface = {
44835- (void (*)(int))xpc_notloaded,
44836- (void (*)(int))xpc_notloaded,
44837- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
44838- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
44839+ .connect = (void (*)(int))xpc_notloaded,
44840+ .disconnect = (void (*)(int))xpc_notloaded,
44841+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
44842+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
44843 void *))xpc_notloaded,
44844- (void (*)(short, int, void *))xpc_notloaded,
44845- (enum xp_retval(*)(short, void *))xpc_notloaded
44846+ .received = (void (*)(short, int, void *))xpc_notloaded,
44847+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
44848 };
44849 EXPORT_SYMBOL_GPL(xpc_interface);
44850
44851diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
44852index b94d5f7..7f494c5 100644
44853--- a/drivers/misc/sgi-xp/xpc.h
44854+++ b/drivers/misc/sgi-xp/xpc.h
44855@@ -835,6 +835,7 @@ struct xpc_arch_operations {
44856 void (*received_payload) (struct xpc_channel *, void *);
44857 void (*notify_senders_of_disconnect) (struct xpc_channel *);
44858 };
44859+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
44860
44861 /* struct xpc_partition act_state values (for XPC HB) */
44862
44863@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
44864 /* found in xpc_main.c */
44865 extern struct device *xpc_part;
44866 extern struct device *xpc_chan;
44867-extern struct xpc_arch_operations xpc_arch_ops;
44868+extern xpc_arch_operations_no_const xpc_arch_ops;
44869 extern int xpc_disengage_timelimit;
44870 extern int xpc_disengage_timedout;
44871 extern int xpc_activate_IRQ_rcvd;
44872diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
44873index 82dc574..8539ab2 100644
44874--- a/drivers/misc/sgi-xp/xpc_main.c
44875+++ b/drivers/misc/sgi-xp/xpc_main.c
44876@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
44877 .notifier_call = xpc_system_die,
44878 };
44879
44880-struct xpc_arch_operations xpc_arch_ops;
44881+xpc_arch_operations_no_const xpc_arch_ops;
44882
44883 /*
44884 * Timer function to enforce the timelimit on the partition disengage.
44885@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
44886
44887 if (((die_args->trapnr == X86_TRAP_MF) ||
44888 (die_args->trapnr == X86_TRAP_XF)) &&
44889- !user_mode_vm(die_args->regs))
44890+ !user_mode(die_args->regs))
44891 xpc_die_deactivate();
44892
44893 break;
44894diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
44895index 29d5d98..fea356f 100644
44896--- a/drivers/mmc/card/block.c
44897+++ b/drivers/mmc/card/block.c
44898@@ -575,7 +575,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
44899 if (idata->ic.postsleep_min_us)
44900 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
44901
44902- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
44903+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
44904 err = -EFAULT;
44905 goto cmd_rel_host;
44906 }
44907diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
44908index e5b5eeb..7bf2212 100644
44909--- a/drivers/mmc/core/mmc_ops.c
44910+++ b/drivers/mmc/core/mmc_ops.c
44911@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
44912 void *data_buf;
44913 int is_on_stack;
44914
44915- is_on_stack = object_is_on_stack(buf);
44916+ is_on_stack = object_starts_on_stack(buf);
44917 if (is_on_stack) {
44918 /*
44919 * dma onto stack is unsafe/nonportable, but callers to this
44920diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
44921index 6bf24ab..13d0293b 100644
44922--- a/drivers/mmc/host/dw_mmc.h
44923+++ b/drivers/mmc/host/dw_mmc.h
44924@@ -258,5 +258,5 @@ struct dw_mci_drv_data {
44925 int (*parse_dt)(struct dw_mci *host);
44926 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
44927 struct dw_mci_tuning_data *tuning_data);
44928-};
44929+} __do_const;
44930 #endif /* _DW_MMC_H_ */
44931diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
44932index f320579..7b7ebac 100644
44933--- a/drivers/mmc/host/mmci.c
44934+++ b/drivers/mmc/host/mmci.c
44935@@ -1504,7 +1504,9 @@ static int mmci_probe(struct amba_device *dev,
44936 }
44937
44938 if (variant->busy_detect) {
44939- mmci_ops.card_busy = mmci_card_busy;
44940+ pax_open_kernel();
44941+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
44942+ pax_close_kernel();
44943 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
44944 }
44945
44946diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
44947index 1dcaf8a..025af25 100644
44948--- a/drivers/mmc/host/sdhci-esdhc-imx.c
44949+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
44950@@ -1009,9 +1009,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
44951 host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
44952 }
44953
44954- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
44955- sdhci_esdhc_ops.platform_execute_tuning =
44956+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
44957+ pax_open_kernel();
44958+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
44959 esdhc_executing_tuning;
44960+ pax_close_kernel();
44961+ }
44962 boarddata = &imx_data->boarddata;
44963 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
44964 if (!host->mmc->parent->platform_data) {
44965diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
44966index 6debda9..2ba7427 100644
44967--- a/drivers/mmc/host/sdhci-s3c.c
44968+++ b/drivers/mmc/host/sdhci-s3c.c
44969@@ -668,9 +668,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
44970 * we can use overriding functions instead of default.
44971 */
44972 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
44973- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
44974- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
44975- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
44976+ pax_open_kernel();
44977+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
44978+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
44979+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
44980+ pax_close_kernel();
44981 }
44982
44983 /* It supports additional host capabilities if needed */
44984diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
44985index 096993f..f02c23b 100644
44986--- a/drivers/mtd/chips/cfi_cmdset_0020.c
44987+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
44988@@ -669,7 +669,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
44989 size_t totlen = 0, thislen;
44990 int ret = 0;
44991 size_t buflen = 0;
44992- static char *buffer;
44993+ char *buffer;
44994
44995 if (!ECCBUF_SIZE) {
44996 /* We should fall back to a general writev implementation.
44997diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
44998index 370b9dd..1a1176b 100644
44999--- a/drivers/mtd/nand/denali.c
45000+++ b/drivers/mtd/nand/denali.c
45001@@ -24,6 +24,7 @@
45002 #include <linux/slab.h>
45003 #include <linux/mtd/mtd.h>
45004 #include <linux/module.h>
45005+#include <linux/slab.h>
45006
45007 #include "denali.h"
45008
45009diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
45010index 51b9d6a..52af9a7 100644
45011--- a/drivers/mtd/nftlmount.c
45012+++ b/drivers/mtd/nftlmount.c
45013@@ -24,6 +24,7 @@
45014 #include <asm/errno.h>
45015 #include <linux/delay.h>
45016 #include <linux/slab.h>
45017+#include <linux/sched.h>
45018 #include <linux/mtd/mtd.h>
45019 #include <linux/mtd/nand.h>
45020 #include <linux/mtd/nftl.h>
45021diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
45022index 4b8e895..6b3c498 100644
45023--- a/drivers/mtd/sm_ftl.c
45024+++ b/drivers/mtd/sm_ftl.c
45025@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
45026 #define SM_CIS_VENDOR_OFFSET 0x59
45027 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
45028 {
45029- struct attribute_group *attr_group;
45030+ attribute_group_no_const *attr_group;
45031 struct attribute **attributes;
45032 struct sm_sysfs_attribute *vendor_attribute;
45033
45034diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
45035index 4b8c58b..a200546 100644
45036--- a/drivers/net/bonding/bond_main.c
45037+++ b/drivers/net/bonding/bond_main.c
45038@@ -4527,6 +4527,7 @@ static void __exit bonding_exit(void)
45039
45040 bond_netlink_fini();
45041 unregister_pernet_subsys(&bond_net_ops);
45042+ rtnl_link_unregister(&bond_link_ops);
45043
45044 #ifdef CONFIG_NET_POLL_CONTROLLER
45045 /*
45046diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
45047index 40e7b1c..6a70fff 100644
45048--- a/drivers/net/bonding/bond_netlink.c
45049+++ b/drivers/net/bonding/bond_netlink.c
45050@@ -102,7 +102,7 @@ nla_put_failure:
45051 return -EMSGSIZE;
45052 }
45053
45054-struct rtnl_link_ops bond_link_ops __read_mostly = {
45055+struct rtnl_link_ops bond_link_ops = {
45056 .kind = "bond",
45057 .priv_size = sizeof(struct bonding),
45058 .setup = bond_setup,
45059diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
45060index 36fa577..a158806 100644
45061--- a/drivers/net/ethernet/8390/ax88796.c
45062+++ b/drivers/net/ethernet/8390/ax88796.c
45063@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
45064 if (ax->plat->reg_offsets)
45065 ei_local->reg_offset = ax->plat->reg_offsets;
45066 else {
45067+ resource_size_t _mem_size = mem_size;
45068+ do_div(_mem_size, 0x18);
45069 ei_local->reg_offset = ax->reg_offsets;
45070 for (ret = 0; ret < 0x18; ret++)
45071- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
45072+ ax->reg_offsets[ret] = _mem_size * ret;
45073 }
45074
45075 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
45076diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45077index 41f3ca5a..1ee5364 100644
45078--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45079+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45080@@ -1139,7 +1139,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
45081 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
45082 {
45083 /* RX_MODE controlling object */
45084- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
45085+ bnx2x_init_rx_mode_obj(bp);
45086
45087 /* multicast configuration controlling object */
45088 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
45089diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45090index 18438a5..c923b8e 100644
45091--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45092+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45093@@ -2591,15 +2591,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
45094 return rc;
45095 }
45096
45097-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
45098- struct bnx2x_rx_mode_obj *o)
45099+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
45100 {
45101 if (CHIP_IS_E1x(bp)) {
45102- o->wait_comp = bnx2x_empty_rx_mode_wait;
45103- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
45104+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
45105+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
45106 } else {
45107- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
45108- o->config_rx_mode = bnx2x_set_rx_mode_e2;
45109+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
45110+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
45111 }
45112 }
45113
45114diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45115index 6a53c15..6e7d1e7 100644
45116--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45117+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45118@@ -1332,8 +1332,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
45119
45120 /********************* RX MODE ****************/
45121
45122-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
45123- struct bnx2x_rx_mode_obj *o);
45124+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
45125
45126 /**
45127 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
45128diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
45129index 5c3835a..d18b952 100644
45130--- a/drivers/net/ethernet/broadcom/tg3.h
45131+++ b/drivers/net/ethernet/broadcom/tg3.h
45132@@ -150,6 +150,7 @@
45133 #define CHIPREV_ID_5750_A0 0x4000
45134 #define CHIPREV_ID_5750_A1 0x4001
45135 #define CHIPREV_ID_5750_A3 0x4003
45136+#define CHIPREV_ID_5750_C1 0x4201
45137 #define CHIPREV_ID_5750_C2 0x4202
45138 #define CHIPREV_ID_5752_A0_HW 0x5000
45139 #define CHIPREV_ID_5752_A0 0x6000
45140diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
45141index 3ca77fa..fcc015f 100644
45142--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
45143+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
45144@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
45145 }
45146
45147 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
45148- bna_cb_ioceth_enable,
45149- bna_cb_ioceth_disable,
45150- bna_cb_ioceth_hbfail,
45151- bna_cb_ioceth_reset
45152+ .enable_cbfn = bna_cb_ioceth_enable,
45153+ .disable_cbfn = bna_cb_ioceth_disable,
45154+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
45155+ .reset_cbfn = bna_cb_ioceth_reset
45156 };
45157
45158 static void bna_attr_init(struct bna_ioceth *ioceth)
45159diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45160index 8cffcdf..aadf043 100644
45161--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45162+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45163@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
45164 */
45165 struct l2t_skb_cb {
45166 arp_failure_handler_func arp_failure_handler;
45167-};
45168+} __no_const;
45169
45170 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
45171
45172diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45173index fff02ed..d421412 100644
45174--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45175+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45176@@ -2120,7 +2120,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
45177
45178 int i;
45179 struct adapter *ap = netdev2adap(dev);
45180- static const unsigned int *reg_ranges;
45181+ const unsigned int *reg_ranges;
45182 int arr_size = 0, buf_size = 0;
45183
45184 if (is_t4(ap->params.chip)) {
45185diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
45186index c05b66d..ed69872 100644
45187--- a/drivers/net/ethernet/dec/tulip/de4x5.c
45188+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
45189@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
45190 for (i=0; i<ETH_ALEN; i++) {
45191 tmp.addr[i] = dev->dev_addr[i];
45192 }
45193- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
45194+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
45195 break;
45196
45197 case DE4X5_SET_HWADDR: /* Set the hardware address */
45198@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
45199 spin_lock_irqsave(&lp->lock, flags);
45200 memcpy(&statbuf, &lp->pktStats, ioc->len);
45201 spin_unlock_irqrestore(&lp->lock, flags);
45202- if (copy_to_user(ioc->data, &statbuf, ioc->len))
45203+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
45204 return -EFAULT;
45205 break;
45206 }
45207diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
45208index a37039d..a51d7e8 100644
45209--- a/drivers/net/ethernet/emulex/benet/be_main.c
45210+++ b/drivers/net/ethernet/emulex/benet/be_main.c
45211@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
45212
45213 if (wrapped)
45214 newacc += 65536;
45215- ACCESS_ONCE(*acc) = newacc;
45216+ ACCESS_ONCE_RW(*acc) = newacc;
45217 }
45218
45219 static void populate_erx_stats(struct be_adapter *adapter,
45220diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
45221index 212f44b..fb69959 100644
45222--- a/drivers/net/ethernet/faraday/ftgmac100.c
45223+++ b/drivers/net/ethernet/faraday/ftgmac100.c
45224@@ -31,6 +31,8 @@
45225 #include <linux/netdevice.h>
45226 #include <linux/phy.h>
45227 #include <linux/platform_device.h>
45228+#include <linux/interrupt.h>
45229+#include <linux/irqreturn.h>
45230 #include <net/ip.h>
45231
45232 #include "ftgmac100.h"
45233diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
45234index 8be5b40..081bc1b 100644
45235--- a/drivers/net/ethernet/faraday/ftmac100.c
45236+++ b/drivers/net/ethernet/faraday/ftmac100.c
45237@@ -31,6 +31,8 @@
45238 #include <linux/module.h>
45239 #include <linux/netdevice.h>
45240 #include <linux/platform_device.h>
45241+#include <linux/interrupt.h>
45242+#include <linux/irqreturn.h>
45243
45244 #include "ftmac100.h"
45245
45246diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45247index 5184e2a..acb28c3 100644
45248--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45249+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45250@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
45251 }
45252
45253 /* update the base incval used to calculate frequency adjustment */
45254- ACCESS_ONCE(adapter->base_incval) = incval;
45255+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
45256 smp_mb();
45257
45258 /* need lock to prevent incorrect read while modifying cyclecounter */
45259diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
45260index fbe5363..266b4e3 100644
45261--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
45262+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
45263@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
45264 struct __vxge_hw_fifo *fifo;
45265 struct vxge_hw_fifo_config *config;
45266 u32 txdl_size, txdl_per_memblock;
45267- struct vxge_hw_mempool_cbs fifo_mp_callback;
45268+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
45269+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
45270+ };
45271+
45272 struct __vxge_hw_virtualpath *vpath;
45273
45274 if ((vp == NULL) || (attr == NULL)) {
45275@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
45276 goto exit;
45277 }
45278
45279- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
45280-
45281 fifo->mempool =
45282 __vxge_hw_mempool_create(vpath->hldev,
45283 fifo->config->memblock_size,
45284diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45285index 918e18d..4ca3650 100644
45286--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45287+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45288@@ -2086,7 +2086,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
45289 adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
45290 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
45291 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
45292- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
45293+ pax_open_kernel();
45294+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
45295+ pax_close_kernel();
45296 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45297 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
45298 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
45299diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45300index 734d286..b017bf5 100644
45301--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45302+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45303@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
45304 case QLCNIC_NON_PRIV_FUNC:
45305 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
45306 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45307- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
45308+ pax_open_kernel();
45309+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
45310+ pax_close_kernel();
45311 break;
45312 case QLCNIC_PRIV_FUNC:
45313 ahw->op_mode = QLCNIC_PRIV_FUNC;
45314 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
45315- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
45316+ pax_open_kernel();
45317+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
45318+ pax_close_kernel();
45319 break;
45320 case QLCNIC_MGMT_FUNC:
45321 ahw->op_mode = QLCNIC_MGMT_FUNC;
45322 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45323- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
45324+ pax_open_kernel();
45325+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
45326+ pax_close_kernel();
45327 break;
45328 default:
45329 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
45330diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45331index 7763962..c3499a7 100644
45332--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45333+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45334@@ -1108,7 +1108,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
45335 struct qlcnic_dump_entry *entry;
45336 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
45337 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
45338- static const struct qlcnic_dump_operations *fw_dump_ops;
45339+ const struct qlcnic_dump_operations *fw_dump_ops;
45340 struct device *dev = &adapter->pdev->dev;
45341 struct qlcnic_hardware_context *ahw;
45342 void *temp_buffer;
45343diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
45344index c737f0e..32b8682 100644
45345--- a/drivers/net/ethernet/realtek/r8169.c
45346+++ b/drivers/net/ethernet/realtek/r8169.c
45347@@ -759,22 +759,22 @@ struct rtl8169_private {
45348 struct mdio_ops {
45349 void (*write)(struct rtl8169_private *, int, int);
45350 int (*read)(struct rtl8169_private *, int);
45351- } mdio_ops;
45352+ } __no_const mdio_ops;
45353
45354 struct pll_power_ops {
45355 void (*down)(struct rtl8169_private *);
45356 void (*up)(struct rtl8169_private *);
45357- } pll_power_ops;
45358+ } __no_const pll_power_ops;
45359
45360 struct jumbo_ops {
45361 void (*enable)(struct rtl8169_private *);
45362 void (*disable)(struct rtl8169_private *);
45363- } jumbo_ops;
45364+ } __no_const jumbo_ops;
45365
45366 struct csi_ops {
45367 void (*write)(struct rtl8169_private *, int, int);
45368 u32 (*read)(struct rtl8169_private *, int);
45369- } csi_ops;
45370+ } __no_const csi_ops;
45371
45372 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
45373 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
45374diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
45375index 3dd39dc..85efa46 100644
45376--- a/drivers/net/ethernet/sfc/ptp.c
45377+++ b/drivers/net/ethernet/sfc/ptp.c
45378@@ -541,7 +541,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
45379 ptp->start.dma_addr);
45380
45381 /* Clear flag that signals MC ready */
45382- ACCESS_ONCE(*start) = 0;
45383+ ACCESS_ONCE_RW(*start) = 0;
45384 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
45385 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
45386 EFX_BUG_ON_PARANOID(rc);
45387diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
45388index 50617c5..b13724c 100644
45389--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
45390+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
45391@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
45392
45393 writel(value, ioaddr + MMC_CNTRL);
45394
45395- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
45396- MMC_CNTRL, value);
45397+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
45398+// MMC_CNTRL, value);
45399 }
45400
45401 /* To mask all all interrupts.*/
45402diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
45403index e6fe0d8..2b7d752 100644
45404--- a/drivers/net/hyperv/hyperv_net.h
45405+++ b/drivers/net/hyperv/hyperv_net.h
45406@@ -101,7 +101,7 @@ struct rndis_device {
45407
45408 enum rndis_device_state state;
45409 bool link_state;
45410- atomic_t new_req_id;
45411+ atomic_unchecked_t new_req_id;
45412
45413 spinlock_t request_lock;
45414 struct list_head req_list;
45415diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
45416index 0775f0a..d4fb316 100644
45417--- a/drivers/net/hyperv/rndis_filter.c
45418+++ b/drivers/net/hyperv/rndis_filter.c
45419@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
45420 * template
45421 */
45422 set = &rndis_msg->msg.set_req;
45423- set->req_id = atomic_inc_return(&dev->new_req_id);
45424+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
45425
45426 /* Add to the request list */
45427 spin_lock_irqsave(&dev->request_lock, flags);
45428@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
45429
45430 /* Setup the rndis set */
45431 halt = &request->request_msg.msg.halt_req;
45432- halt->req_id = atomic_inc_return(&dev->new_req_id);
45433+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
45434
45435 /* Ignore return since this msg is optional. */
45436 rndis_filter_send_request(dev, request);
45437diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
45438index bf0d55e..82bcfbd1 100644
45439--- a/drivers/net/ieee802154/fakehard.c
45440+++ b/drivers/net/ieee802154/fakehard.c
45441@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
45442 phy->transmit_power = 0xbf;
45443
45444 dev->netdev_ops = &fake_ops;
45445- dev->ml_priv = &fake_mlme;
45446+ dev->ml_priv = (void *)&fake_mlme;
45447
45448 priv = netdev_priv(dev);
45449 priv->phy = phy;
45450diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
45451index bc8faae..e51e25d 100644
45452--- a/drivers/net/macvlan.c
45453+++ b/drivers/net/macvlan.c
45454@@ -990,13 +990,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
45455 int macvlan_link_register(struct rtnl_link_ops *ops)
45456 {
45457 /* common fields */
45458- ops->priv_size = sizeof(struct macvlan_dev);
45459- ops->validate = macvlan_validate;
45460- ops->maxtype = IFLA_MACVLAN_MAX;
45461- ops->policy = macvlan_policy;
45462- ops->changelink = macvlan_changelink;
45463- ops->get_size = macvlan_get_size;
45464- ops->fill_info = macvlan_fill_info;
45465+ pax_open_kernel();
45466+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
45467+ *(void **)&ops->validate = macvlan_validate;
45468+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
45469+ *(const void **)&ops->policy = macvlan_policy;
45470+ *(void **)&ops->changelink = macvlan_changelink;
45471+ *(void **)&ops->get_size = macvlan_get_size;
45472+ *(void **)&ops->fill_info = macvlan_fill_info;
45473+ pax_close_kernel();
45474
45475 return rtnl_link_register(ops);
45476 };
45477@@ -1051,7 +1053,7 @@ static int macvlan_device_event(struct notifier_block *unused,
45478 return NOTIFY_DONE;
45479 }
45480
45481-static struct notifier_block macvlan_notifier_block __read_mostly = {
45482+static struct notifier_block macvlan_notifier_block = {
45483 .notifier_call = macvlan_device_event,
45484 };
45485
45486diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
45487index 2a89da0..c17fe1d 100644
45488--- a/drivers/net/macvtap.c
45489+++ b/drivers/net/macvtap.c
45490@@ -1012,7 +1012,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
45491 }
45492
45493 ret = 0;
45494- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
45495+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
45496 put_user(q->flags, &ifr->ifr_flags))
45497 ret = -EFAULT;
45498 macvtap_put_vlan(vlan);
45499@@ -1182,7 +1182,7 @@ static int macvtap_device_event(struct notifier_block *unused,
45500 return NOTIFY_DONE;
45501 }
45502
45503-static struct notifier_block macvtap_notifier_block __read_mostly = {
45504+static struct notifier_block macvtap_notifier_block = {
45505 .notifier_call = macvtap_device_event,
45506 };
45507
45508diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
45509index daec9b0..6428fcb 100644
45510--- a/drivers/net/phy/mdio-bitbang.c
45511+++ b/drivers/net/phy/mdio-bitbang.c
45512@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
45513 struct mdiobb_ctrl *ctrl = bus->priv;
45514
45515 module_put(ctrl->ops->owner);
45516+ mdiobus_unregister(bus);
45517 mdiobus_free(bus);
45518 }
45519 EXPORT_SYMBOL(free_mdio_bitbang);
45520diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
45521index 72ff14b..11d442d 100644
45522--- a/drivers/net/ppp/ppp_generic.c
45523+++ b/drivers/net/ppp/ppp_generic.c
45524@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
45525 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
45526 struct ppp_stats stats;
45527 struct ppp_comp_stats cstats;
45528- char *vers;
45529
45530 switch (cmd) {
45531 case SIOCGPPPSTATS:
45532@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
45533 break;
45534
45535 case SIOCGPPPVER:
45536- vers = PPP_VERSION;
45537- if (copy_to_user(addr, vers, strlen(vers) + 1))
45538+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
45539 break;
45540 err = 0;
45541 break;
45542diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
45543index 1252d9c..80e660b 100644
45544--- a/drivers/net/slip/slhc.c
45545+++ b/drivers/net/slip/slhc.c
45546@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
45547 register struct tcphdr *thp;
45548 register struct iphdr *ip;
45549 register struct cstate *cs;
45550- int len, hdrlen;
45551+ long len, hdrlen;
45552 unsigned char *cp = icp;
45553
45554 /* We've got a compressed packet; read the change byte */
45555diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
45556index b75ae5b..953c157 100644
45557--- a/drivers/net/team/team.c
45558+++ b/drivers/net/team/team.c
45559@@ -2865,7 +2865,7 @@ static int team_device_event(struct notifier_block *unused,
45560 return NOTIFY_DONE;
45561 }
45562
45563-static struct notifier_block team_notifier_block __read_mostly = {
45564+static struct notifier_block team_notifier_block = {
45565 .notifier_call = team_device_event,
45566 };
45567
45568diff --git a/drivers/net/tun.c b/drivers/net/tun.c
45569index ecec802..614f08f 100644
45570--- a/drivers/net/tun.c
45571+++ b/drivers/net/tun.c
45572@@ -1839,7 +1839,7 @@ unlock:
45573 }
45574
45575 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
45576- unsigned long arg, int ifreq_len)
45577+ unsigned long arg, size_t ifreq_len)
45578 {
45579 struct tun_file *tfile = file->private_data;
45580 struct tun_struct *tun;
45581@@ -1852,6 +1852,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
45582 unsigned int ifindex;
45583 int ret;
45584
45585+ if (ifreq_len > sizeof ifr)
45586+ return -EFAULT;
45587+
45588 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
45589 if (copy_from_user(&ifr, argp, ifreq_len))
45590 return -EFAULT;
45591diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
45592index 1a48234..a555339 100644
45593--- a/drivers/net/usb/hso.c
45594+++ b/drivers/net/usb/hso.c
45595@@ -71,7 +71,7 @@
45596 #include <asm/byteorder.h>
45597 #include <linux/serial_core.h>
45598 #include <linux/serial.h>
45599-
45600+#include <asm/local.h>
45601
45602 #define MOD_AUTHOR "Option Wireless"
45603 #define MOD_DESCRIPTION "USB High Speed Option driver"
45604@@ -1179,7 +1179,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
45605 struct urb *urb;
45606
45607 urb = serial->rx_urb[0];
45608- if (serial->port.count > 0) {
45609+ if (atomic_read(&serial->port.count) > 0) {
45610 count = put_rxbuf_data(urb, serial);
45611 if (count == -1)
45612 return;
45613@@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
45614 DUMP1(urb->transfer_buffer, urb->actual_length);
45615
45616 /* Anyone listening? */
45617- if (serial->port.count == 0)
45618+ if (atomic_read(&serial->port.count) == 0)
45619 return;
45620
45621 if (status == 0) {
45622@@ -1297,8 +1297,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
45623 tty_port_tty_set(&serial->port, tty);
45624
45625 /* check for port already opened, if not set the termios */
45626- serial->port.count++;
45627- if (serial->port.count == 1) {
45628+ if (atomic_inc_return(&serial->port.count) == 1) {
45629 serial->rx_state = RX_IDLE;
45630 /* Force default termio settings */
45631 _hso_serial_set_termios(tty, NULL);
45632@@ -1310,7 +1309,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
45633 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
45634 if (result) {
45635 hso_stop_serial_device(serial->parent);
45636- serial->port.count--;
45637+ atomic_dec(&serial->port.count);
45638 kref_put(&serial->parent->ref, hso_serial_ref_free);
45639 }
45640 } else {
45641@@ -1347,10 +1346,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
45642
45643 /* reset the rts and dtr */
45644 /* do the actual close */
45645- serial->port.count--;
45646+ atomic_dec(&serial->port.count);
45647
45648- if (serial->port.count <= 0) {
45649- serial->port.count = 0;
45650+ if (atomic_read(&serial->port.count) <= 0) {
45651+ atomic_set(&serial->port.count, 0);
45652 tty_port_tty_set(&serial->port, NULL);
45653 if (!usb_gone)
45654 hso_stop_serial_device(serial->parent);
45655@@ -1426,7 +1425,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
45656
45657 /* the actual setup */
45658 spin_lock_irqsave(&serial->serial_lock, flags);
45659- if (serial->port.count)
45660+ if (atomic_read(&serial->port.count))
45661 _hso_serial_set_termios(tty, old);
45662 else
45663 tty->termios = *old;
45664@@ -1895,7 +1894,7 @@ static void intr_callback(struct urb *urb)
45665 D1("Pending read interrupt on port %d\n", i);
45666 spin_lock(&serial->serial_lock);
45667 if (serial->rx_state == RX_IDLE &&
45668- serial->port.count > 0) {
45669+ atomic_read(&serial->port.count) > 0) {
45670 /* Setup and send a ctrl req read on
45671 * port i */
45672 if (!serial->rx_urb_filled[0]) {
45673@@ -3071,7 +3070,7 @@ static int hso_resume(struct usb_interface *iface)
45674 /* Start all serial ports */
45675 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
45676 if (serial_table[i] && (serial_table[i]->interface == iface)) {
45677- if (dev2ser(serial_table[i])->port.count) {
45678+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
45679 result =
45680 hso_start_serial_device(serial_table[i], GFP_NOIO);
45681 hso_kick_transmit(dev2ser(serial_table[i]));
45682diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
45683index a79e9d3..78cd4fa 100644
45684--- a/drivers/net/usb/sierra_net.c
45685+++ b/drivers/net/usb/sierra_net.c
45686@@ -52,7 +52,7 @@ static const char driver_name[] = "sierra_net";
45687 /* atomic counter partially included in MAC address to make sure 2 devices
45688 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
45689 */
45690-static atomic_t iface_counter = ATOMIC_INIT(0);
45691+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
45692
45693 /*
45694 * SYNC Timer Delay definition used to set the expiry time
45695@@ -698,7 +698,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
45696 dev->net->netdev_ops = &sierra_net_device_ops;
45697
45698 /* change MAC addr to include, ifacenum, and to be unique */
45699- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
45700+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
45701 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
45702
45703 /* we will have to manufacture ethernet headers, prepare template */
45704diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
45705index 0247973..088193a 100644
45706--- a/drivers/net/vxlan.c
45707+++ b/drivers/net/vxlan.c
45708@@ -2615,7 +2615,7 @@ nla_put_failure:
45709 return -EMSGSIZE;
45710 }
45711
45712-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
45713+static struct rtnl_link_ops vxlan_link_ops = {
45714 .kind = "vxlan",
45715 .maxtype = IFLA_VXLAN_MAX,
45716 .policy = vxlan_policy,
45717diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
45718index 5920c99..ff2e4a5 100644
45719--- a/drivers/net/wan/lmc/lmc_media.c
45720+++ b/drivers/net/wan/lmc/lmc_media.c
45721@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
45722 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
45723
45724 lmc_media_t lmc_ds3_media = {
45725- lmc_ds3_init, /* special media init stuff */
45726- lmc_ds3_default, /* reset to default state */
45727- lmc_ds3_set_status, /* reset status to state provided */
45728- lmc_dummy_set_1, /* set clock source */
45729- lmc_dummy_set2_1, /* set line speed */
45730- lmc_ds3_set_100ft, /* set cable length */
45731- lmc_ds3_set_scram, /* set scrambler */
45732- lmc_ds3_get_link_status, /* get link status */
45733- lmc_dummy_set_1, /* set link status */
45734- lmc_ds3_set_crc_length, /* set CRC length */
45735- lmc_dummy_set_1, /* set T1 or E1 circuit type */
45736- lmc_ds3_watchdog
45737+ .init = lmc_ds3_init, /* special media init stuff */
45738+ .defaults = lmc_ds3_default, /* reset to default state */
45739+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
45740+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
45741+ .set_speed = lmc_dummy_set2_1, /* set line speed */
45742+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
45743+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
45744+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
45745+ .set_link_status = lmc_dummy_set_1, /* set link status */
45746+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
45747+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
45748+ .watchdog = lmc_ds3_watchdog
45749 };
45750
45751 lmc_media_t lmc_hssi_media = {
45752- lmc_hssi_init, /* special media init stuff */
45753- lmc_hssi_default, /* reset to default state */
45754- lmc_hssi_set_status, /* reset status to state provided */
45755- lmc_hssi_set_clock, /* set clock source */
45756- lmc_dummy_set2_1, /* set line speed */
45757- lmc_dummy_set_1, /* set cable length */
45758- lmc_dummy_set_1, /* set scrambler */
45759- lmc_hssi_get_link_status, /* get link status */
45760- lmc_hssi_set_link_status, /* set link status */
45761- lmc_hssi_set_crc_length, /* set CRC length */
45762- lmc_dummy_set_1, /* set T1 or E1 circuit type */
45763- lmc_hssi_watchdog
45764+ .init = lmc_hssi_init, /* special media init stuff */
45765+ .defaults = lmc_hssi_default, /* reset to default state */
45766+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
45767+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
45768+ .set_speed = lmc_dummy_set2_1, /* set line speed */
45769+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
45770+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
45771+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
45772+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
45773+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
45774+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
45775+ .watchdog = lmc_hssi_watchdog
45776 };
45777
45778-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
45779- lmc_ssi_default, /* reset to default state */
45780- lmc_ssi_set_status, /* reset status to state provided */
45781- lmc_ssi_set_clock, /* set clock source */
45782- lmc_ssi_set_speed, /* set line speed */
45783- lmc_dummy_set_1, /* set cable length */
45784- lmc_dummy_set_1, /* set scrambler */
45785- lmc_ssi_get_link_status, /* get link status */
45786- lmc_ssi_set_link_status, /* set link status */
45787- lmc_ssi_set_crc_length, /* set CRC length */
45788- lmc_dummy_set_1, /* set T1 or E1 circuit type */
45789- lmc_ssi_watchdog
45790+lmc_media_t lmc_ssi_media = {
45791+ .init = lmc_ssi_init, /* special media init stuff */
45792+ .defaults = lmc_ssi_default, /* reset to default state */
45793+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
45794+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
45795+ .set_speed = lmc_ssi_set_speed, /* set line speed */
45796+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
45797+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
45798+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
45799+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
45800+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
45801+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
45802+ .watchdog = lmc_ssi_watchdog
45803 };
45804
45805 lmc_media_t lmc_t1_media = {
45806- lmc_t1_init, /* special media init stuff */
45807- lmc_t1_default, /* reset to default state */
45808- lmc_t1_set_status, /* reset status to state provided */
45809- lmc_t1_set_clock, /* set clock source */
45810- lmc_dummy_set2_1, /* set line speed */
45811- lmc_dummy_set_1, /* set cable length */
45812- lmc_dummy_set_1, /* set scrambler */
45813- lmc_t1_get_link_status, /* get link status */
45814- lmc_dummy_set_1, /* set link status */
45815- lmc_t1_set_crc_length, /* set CRC length */
45816- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
45817- lmc_t1_watchdog
45818+ .init = lmc_t1_init, /* special media init stuff */
45819+ .defaults = lmc_t1_default, /* reset to default state */
45820+ .set_status = lmc_t1_set_status, /* reset status to state provided */
45821+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
45822+ .set_speed = lmc_dummy_set2_1, /* set line speed */
45823+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
45824+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
45825+ .get_link_status = lmc_t1_get_link_status, /* get link status */
45826+ .set_link_status = lmc_dummy_set_1, /* set link status */
45827+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
45828+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
45829+ .watchdog = lmc_t1_watchdog
45830 };
45831
45832 static void
45833diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
45834index feacc3b..5bac0de 100644
45835--- a/drivers/net/wan/z85230.c
45836+++ b/drivers/net/wan/z85230.c
45837@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
45838
45839 struct z8530_irqhandler z8530_sync =
45840 {
45841- z8530_rx,
45842- z8530_tx,
45843- z8530_status
45844+ .rx = z8530_rx,
45845+ .tx = z8530_tx,
45846+ .status = z8530_status
45847 };
45848
45849 EXPORT_SYMBOL(z8530_sync);
45850@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
45851 }
45852
45853 static struct z8530_irqhandler z8530_dma_sync = {
45854- z8530_dma_rx,
45855- z8530_dma_tx,
45856- z8530_dma_status
45857+ .rx = z8530_dma_rx,
45858+ .tx = z8530_dma_tx,
45859+ .status = z8530_dma_status
45860 };
45861
45862 static struct z8530_irqhandler z8530_txdma_sync = {
45863- z8530_rx,
45864- z8530_dma_tx,
45865- z8530_dma_status
45866+ .rx = z8530_rx,
45867+ .tx = z8530_dma_tx,
45868+ .status = z8530_dma_status
45869 };
45870
45871 /**
45872@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
45873
45874 struct z8530_irqhandler z8530_nop=
45875 {
45876- z8530_rx_clear,
45877- z8530_tx_clear,
45878- z8530_status_clear
45879+ .rx = z8530_rx_clear,
45880+ .tx = z8530_tx_clear,
45881+ .status = z8530_status_clear
45882 };
45883
45884
45885diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
45886index 0b60295..b8bfa5b 100644
45887--- a/drivers/net/wimax/i2400m/rx.c
45888+++ b/drivers/net/wimax/i2400m/rx.c
45889@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
45890 if (i2400m->rx_roq == NULL)
45891 goto error_roq_alloc;
45892
45893- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
45894+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
45895 GFP_KERNEL);
45896 if (rd == NULL) {
45897 result = -ENOMEM;
45898diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
45899index edf4b57..68b51c0 100644
45900--- a/drivers/net/wireless/airo.c
45901+++ b/drivers/net/wireless/airo.c
45902@@ -7843,7 +7843,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
45903 struct airo_info *ai = dev->ml_priv;
45904 int ridcode;
45905 int enabled;
45906- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
45907+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
45908 unsigned char *iobuf;
45909
45910 /* Only super-user can write RIDs */
45911diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
45912index 34c8a33..3261fdc 100644
45913--- a/drivers/net/wireless/at76c50x-usb.c
45914+++ b/drivers/net/wireless/at76c50x-usb.c
45915@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
45916 }
45917
45918 /* Convert timeout from the DFU status to jiffies */
45919-static inline unsigned long at76_get_timeout(struct dfu_status *s)
45920+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
45921 {
45922 return msecs_to_jiffies((s->poll_timeout[2] << 16)
45923 | (s->poll_timeout[1] << 8)
45924diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
45925index edae50b..b24278c 100644
45926--- a/drivers/net/wireless/ath/ath10k/htc.c
45927+++ b/drivers/net/wireless/ath/ath10k/htc.c
45928@@ -842,7 +842,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
45929 /* registered target arrival callback from the HIF layer */
45930 int ath10k_htc_init(struct ath10k *ar)
45931 {
45932- struct ath10k_hif_cb htc_callbacks;
45933+ static struct ath10k_hif_cb htc_callbacks = {
45934+ .rx_completion = ath10k_htc_rx_completion_handler,
45935+ .tx_completion = ath10k_htc_tx_completion_handler,
45936+ };
45937 struct ath10k_htc_ep *ep = NULL;
45938 struct ath10k_htc *htc = &ar->htc;
45939
45940@@ -852,8 +855,6 @@ int ath10k_htc_init(struct ath10k *ar)
45941 ath10k_htc_reset_endpoint_states(htc);
45942
45943 /* setup HIF layer callbacks */
45944- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
45945- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
45946 htc->ar = ar;
45947
45948 /* Get HIF default pipe for HTC message exchange */
45949diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
45950index 4716d33..a688310 100644
45951--- a/drivers/net/wireless/ath/ath10k/htc.h
45952+++ b/drivers/net/wireless/ath/ath10k/htc.h
45953@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
45954
45955 struct ath10k_htc_ops {
45956 void (*target_send_suspend_complete)(struct ath10k *ar);
45957-};
45958+} __no_const;
45959
45960 struct ath10k_htc_ep_ops {
45961 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
45962 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
45963 void (*ep_tx_credits)(struct ath10k *);
45964-};
45965+} __no_const;
45966
45967 /* service connection information */
45968 struct ath10k_htc_svc_conn_req {
45969diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
45970index a366d6b..b6f28f8 100644
45971--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
45972+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
45973@@ -218,8 +218,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
45974 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
45975 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
45976
45977- ACCESS_ONCE(ads->ds_link) = i->link;
45978- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
45979+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
45980+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
45981
45982 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
45983 ctl6 = SM(i->keytype, AR_EncrType);
45984@@ -233,26 +233,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
45985
45986 if ((i->is_first || i->is_last) &&
45987 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
45988- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
45989+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
45990 | set11nTries(i->rates, 1)
45991 | set11nTries(i->rates, 2)
45992 | set11nTries(i->rates, 3)
45993 | (i->dur_update ? AR_DurUpdateEna : 0)
45994 | SM(0, AR_BurstDur);
45995
45996- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
45997+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
45998 | set11nRate(i->rates, 1)
45999 | set11nRate(i->rates, 2)
46000 | set11nRate(i->rates, 3);
46001 } else {
46002- ACCESS_ONCE(ads->ds_ctl2) = 0;
46003- ACCESS_ONCE(ads->ds_ctl3) = 0;
46004+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
46005+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
46006 }
46007
46008 if (!i->is_first) {
46009- ACCESS_ONCE(ads->ds_ctl0) = 0;
46010- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
46011- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
46012+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
46013+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
46014+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
46015 return;
46016 }
46017
46018@@ -277,7 +277,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46019 break;
46020 }
46021
46022- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
46023+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
46024 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
46025 | SM(i->txpower, AR_XmitPower)
46026 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
46027@@ -287,19 +287,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46028 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
46029 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
46030
46031- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
46032- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
46033+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
46034+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
46035
46036 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
46037 return;
46038
46039- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
46040+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
46041 | set11nPktDurRTSCTS(i->rates, 1);
46042
46043- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
46044+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
46045 | set11nPktDurRTSCTS(i->rates, 3);
46046
46047- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
46048+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
46049 | set11nRateFlags(i->rates, 1)
46050 | set11nRateFlags(i->rates, 2)
46051 | set11nRateFlags(i->rates, 3)
46052diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46053index f6c5c1b..6058354 100644
46054--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46055+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46056@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46057 (i->qcu << AR_TxQcuNum_S) | desc_len;
46058
46059 checksum += val;
46060- ACCESS_ONCE(ads->info) = val;
46061+ ACCESS_ONCE_RW(ads->info) = val;
46062
46063 checksum += i->link;
46064- ACCESS_ONCE(ads->link) = i->link;
46065+ ACCESS_ONCE_RW(ads->link) = i->link;
46066
46067 checksum += i->buf_addr[0];
46068- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
46069+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
46070 checksum += i->buf_addr[1];
46071- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
46072+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
46073 checksum += i->buf_addr[2];
46074- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
46075+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
46076 checksum += i->buf_addr[3];
46077- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
46078+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
46079
46080 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
46081- ACCESS_ONCE(ads->ctl3) = val;
46082+ ACCESS_ONCE_RW(ads->ctl3) = val;
46083 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
46084- ACCESS_ONCE(ads->ctl5) = val;
46085+ ACCESS_ONCE_RW(ads->ctl5) = val;
46086 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
46087- ACCESS_ONCE(ads->ctl7) = val;
46088+ ACCESS_ONCE_RW(ads->ctl7) = val;
46089 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
46090- ACCESS_ONCE(ads->ctl9) = val;
46091+ ACCESS_ONCE_RW(ads->ctl9) = val;
46092
46093 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
46094- ACCESS_ONCE(ads->ctl10) = checksum;
46095+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
46096
46097 if (i->is_first || i->is_last) {
46098- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
46099+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
46100 | set11nTries(i->rates, 1)
46101 | set11nTries(i->rates, 2)
46102 | set11nTries(i->rates, 3)
46103 | (i->dur_update ? AR_DurUpdateEna : 0)
46104 | SM(0, AR_BurstDur);
46105
46106- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
46107+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
46108 | set11nRate(i->rates, 1)
46109 | set11nRate(i->rates, 2)
46110 | set11nRate(i->rates, 3);
46111 } else {
46112- ACCESS_ONCE(ads->ctl13) = 0;
46113- ACCESS_ONCE(ads->ctl14) = 0;
46114+ ACCESS_ONCE_RW(ads->ctl13) = 0;
46115+ ACCESS_ONCE_RW(ads->ctl14) = 0;
46116 }
46117
46118 ads->ctl20 = 0;
46119@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46120
46121 ctl17 = SM(i->keytype, AR_EncrType);
46122 if (!i->is_first) {
46123- ACCESS_ONCE(ads->ctl11) = 0;
46124- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
46125- ACCESS_ONCE(ads->ctl15) = 0;
46126- ACCESS_ONCE(ads->ctl16) = 0;
46127- ACCESS_ONCE(ads->ctl17) = ctl17;
46128- ACCESS_ONCE(ads->ctl18) = 0;
46129- ACCESS_ONCE(ads->ctl19) = 0;
46130+ ACCESS_ONCE_RW(ads->ctl11) = 0;
46131+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
46132+ ACCESS_ONCE_RW(ads->ctl15) = 0;
46133+ ACCESS_ONCE_RW(ads->ctl16) = 0;
46134+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
46135+ ACCESS_ONCE_RW(ads->ctl18) = 0;
46136+ ACCESS_ONCE_RW(ads->ctl19) = 0;
46137 return;
46138 }
46139
46140- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
46141+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
46142 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
46143 | SM(i->txpower, AR_XmitPower)
46144 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
46145@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46146 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
46147 ctl12 |= SM(val, AR_PAPRDChainMask);
46148
46149- ACCESS_ONCE(ads->ctl12) = ctl12;
46150- ACCESS_ONCE(ads->ctl17) = ctl17;
46151+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
46152+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
46153
46154- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
46155+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
46156 | set11nPktDurRTSCTS(i->rates, 1);
46157
46158- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
46159+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
46160 | set11nPktDurRTSCTS(i->rates, 3);
46161
46162- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
46163+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
46164 | set11nRateFlags(i->rates, 1)
46165 | set11nRateFlags(i->rates, 2)
46166 | set11nRateFlags(i->rates, 3)
46167 | SM(i->rtscts_rate, AR_RTSCTSRate);
46168
46169- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
46170+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
46171 }
46172
46173 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
46174diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
46175index a2c9a5d..b52273e 100644
46176--- a/drivers/net/wireless/ath/ath9k/hw.h
46177+++ b/drivers/net/wireless/ath/ath9k/hw.h
46178@@ -635,7 +635,7 @@ struct ath_hw_private_ops {
46179
46180 /* ANI */
46181 void (*ani_cache_ini_regs)(struct ath_hw *ah);
46182-};
46183+} __no_const;
46184
46185 /**
46186 * struct ath_spec_scan - parameters for Atheros spectral scan
46187@@ -711,7 +711,7 @@ struct ath_hw_ops {
46188 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
46189 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
46190 #endif
46191-};
46192+} __no_const;
46193
46194 struct ath_nf_limits {
46195 s16 max;
46196diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
46197index 92190da..f3a4c4c 100644
46198--- a/drivers/net/wireless/b43/phy_lp.c
46199+++ b/drivers/net/wireless/b43/phy_lp.c
46200@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
46201 {
46202 struct ssb_bus *bus = dev->dev->sdev->bus;
46203
46204- static const struct b206x_channel *chandata = NULL;
46205+ const struct b206x_channel *chandata = NULL;
46206 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
46207 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
46208 u16 old_comm15, scale;
46209diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
46210index dea3b50..543db99 100644
46211--- a/drivers/net/wireless/iwlegacy/3945-mac.c
46212+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
46213@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
46214 */
46215 if (il3945_mod_params.disable_hw_scan) {
46216 D_INFO("Disabling hw_scan\n");
46217- il3945_mac_ops.hw_scan = NULL;
46218+ pax_open_kernel();
46219+ *(void **)&il3945_mac_ops.hw_scan = NULL;
46220+ pax_close_kernel();
46221 }
46222
46223 D_INFO("*** LOAD DRIVER ***\n");
46224diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46225index d94f8ab..5b568c8 100644
46226--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46227+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46228@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
46229 {
46230 struct iwl_priv *priv = file->private_data;
46231 char buf[64];
46232- int buf_size;
46233+ size_t buf_size;
46234 u32 offset, len;
46235
46236 memset(buf, 0, sizeof(buf));
46237@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
46238 struct iwl_priv *priv = file->private_data;
46239
46240 char buf[8];
46241- int buf_size;
46242+ size_t buf_size;
46243 u32 reset_flag;
46244
46245 memset(buf, 0, sizeof(buf));
46246@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
46247 {
46248 struct iwl_priv *priv = file->private_data;
46249 char buf[8];
46250- int buf_size;
46251+ size_t buf_size;
46252 int ht40;
46253
46254 memset(buf, 0, sizeof(buf));
46255@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
46256 {
46257 struct iwl_priv *priv = file->private_data;
46258 char buf[8];
46259- int buf_size;
46260+ size_t buf_size;
46261 int value;
46262
46263 memset(buf, 0, sizeof(buf));
46264@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
46265 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
46266 DEBUGFS_READ_FILE_OPS(current_sleep_command);
46267
46268-static const char *fmt_value = " %-30s %10u\n";
46269-static const char *fmt_hex = " %-30s 0x%02X\n";
46270-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
46271-static const char *fmt_header =
46272+static const char fmt_value[] = " %-30s %10u\n";
46273+static const char fmt_hex[] = " %-30s 0x%02X\n";
46274+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
46275+static const char fmt_header[] =
46276 "%-32s current cumulative delta max\n";
46277
46278 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
46279@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
46280 {
46281 struct iwl_priv *priv = file->private_data;
46282 char buf[8];
46283- int buf_size;
46284+ size_t buf_size;
46285 int clear;
46286
46287 memset(buf, 0, sizeof(buf));
46288@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
46289 {
46290 struct iwl_priv *priv = file->private_data;
46291 char buf[8];
46292- int buf_size;
46293+ size_t buf_size;
46294 int trace;
46295
46296 memset(buf, 0, sizeof(buf));
46297@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
46298 {
46299 struct iwl_priv *priv = file->private_data;
46300 char buf[8];
46301- int buf_size;
46302+ size_t buf_size;
46303 int missed;
46304
46305 memset(buf, 0, sizeof(buf));
46306@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
46307
46308 struct iwl_priv *priv = file->private_data;
46309 char buf[8];
46310- int buf_size;
46311+ size_t buf_size;
46312 int plcp;
46313
46314 memset(buf, 0, sizeof(buf));
46315@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
46316
46317 struct iwl_priv *priv = file->private_data;
46318 char buf[8];
46319- int buf_size;
46320+ size_t buf_size;
46321 int flush;
46322
46323 memset(buf, 0, sizeof(buf));
46324@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
46325
46326 struct iwl_priv *priv = file->private_data;
46327 char buf[8];
46328- int buf_size;
46329+ size_t buf_size;
46330 int rts;
46331
46332 if (!priv->cfg->ht_params)
46333@@ -2205,7 +2205,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
46334 {
46335 struct iwl_priv *priv = file->private_data;
46336 char buf[8];
46337- int buf_size;
46338+ size_t buf_size;
46339
46340 memset(buf, 0, sizeof(buf));
46341 buf_size = min(count, sizeof(buf) - 1);
46342@@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
46343 struct iwl_priv *priv = file->private_data;
46344 u32 event_log_flag;
46345 char buf[8];
46346- int buf_size;
46347+ size_t buf_size;
46348
46349 /* check that the interface is up */
46350 if (!iwl_is_ready(priv))
46351@@ -2293,7 +2293,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
46352 struct iwl_priv *priv = file->private_data;
46353 char buf[8];
46354 u32 calib_disabled;
46355- int buf_size;
46356+ size_t buf_size;
46357
46358 memset(buf, 0, sizeof(buf));
46359 buf_size = min(count, sizeof(buf) - 1);
46360diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
46361index 7aad766..06addb4 100644
46362--- a/drivers/net/wireless/iwlwifi/dvm/main.c
46363+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
46364@@ -1123,7 +1123,7 @@ static void iwl_option_config(struct iwl_priv *priv)
46365 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
46366 {
46367 struct iwl_nvm_data *data = priv->nvm_data;
46368- char *debug_msg;
46369+ static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
46370
46371 if (data->sku_cap_11n_enable &&
46372 !priv->cfg->ht_params) {
46373@@ -1137,7 +1137,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
46374 return -EINVAL;
46375 }
46376
46377- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
46378 IWL_DEBUG_INFO(priv, debug_msg,
46379 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
46380 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
46381diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
46382index f53ef83..5e34bcb 100644
46383--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
46384+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
46385@@ -1390,7 +1390,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
46386 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
46387
46388 char buf[8];
46389- int buf_size;
46390+ size_t buf_size;
46391 u32 reset_flag;
46392
46393 memset(buf, 0, sizeof(buf));
46394@@ -1411,7 +1411,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
46395 {
46396 struct iwl_trans *trans = file->private_data;
46397 char buf[8];
46398- int buf_size;
46399+ size_t buf_size;
46400 int csr;
46401
46402 memset(buf, 0, sizeof(buf));
46403diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
46404index a1b32ee..94b3c3d 100644
46405--- a/drivers/net/wireless/mac80211_hwsim.c
46406+++ b/drivers/net/wireless/mac80211_hwsim.c
46407@@ -2224,25 +2224,19 @@ static int __init init_mac80211_hwsim(void)
46408
46409 if (channels > 1) {
46410 hwsim_if_comb.num_different_channels = channels;
46411- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
46412- mac80211_hwsim_ops.cancel_hw_scan =
46413- mac80211_hwsim_cancel_hw_scan;
46414- mac80211_hwsim_ops.sw_scan_start = NULL;
46415- mac80211_hwsim_ops.sw_scan_complete = NULL;
46416- mac80211_hwsim_ops.remain_on_channel =
46417- mac80211_hwsim_roc;
46418- mac80211_hwsim_ops.cancel_remain_on_channel =
46419- mac80211_hwsim_croc;
46420- mac80211_hwsim_ops.add_chanctx =
46421- mac80211_hwsim_add_chanctx;
46422- mac80211_hwsim_ops.remove_chanctx =
46423- mac80211_hwsim_remove_chanctx;
46424- mac80211_hwsim_ops.change_chanctx =
46425- mac80211_hwsim_change_chanctx;
46426- mac80211_hwsim_ops.assign_vif_chanctx =
46427- mac80211_hwsim_assign_vif_chanctx;
46428- mac80211_hwsim_ops.unassign_vif_chanctx =
46429- mac80211_hwsim_unassign_vif_chanctx;
46430+ pax_open_kernel();
46431+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
46432+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
46433+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
46434+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
46435+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
46436+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
46437+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
46438+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
46439+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
46440+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
46441+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
46442+ pax_close_kernel();
46443 }
46444
46445 spin_lock_init(&hwsim_radio_lock);
46446diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
46447index 8169a85..7fa3b47 100644
46448--- a/drivers/net/wireless/rndis_wlan.c
46449+++ b/drivers/net/wireless/rndis_wlan.c
46450@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
46451
46452 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
46453
46454- if (rts_threshold < 0 || rts_threshold > 2347)
46455+ if (rts_threshold > 2347)
46456 rts_threshold = 2347;
46457
46458 tmp = cpu_to_le32(rts_threshold);
46459diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
46460index e4ba2ce..63d7417 100644
46461--- a/drivers/net/wireless/rt2x00/rt2x00.h
46462+++ b/drivers/net/wireless/rt2x00/rt2x00.h
46463@@ -377,7 +377,7 @@ struct rt2x00_intf {
46464 * for hardware which doesn't support hardware
46465 * sequence counting.
46466 */
46467- atomic_t seqno;
46468+ atomic_unchecked_t seqno;
46469 };
46470
46471 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
46472diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
46473index a5d38e8..d3c24ea 100644
46474--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
46475+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
46476@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
46477 * sequence counter given by mac80211.
46478 */
46479 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
46480- seqno = atomic_add_return(0x10, &intf->seqno);
46481+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
46482 else
46483- seqno = atomic_read(&intf->seqno);
46484+ seqno = atomic_read_unchecked(&intf->seqno);
46485
46486 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
46487 hdr->seq_ctrl |= cpu_to_le16(seqno);
46488diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
46489index e2b3d9c..67a5184 100644
46490--- a/drivers/net/wireless/ti/wl1251/sdio.c
46491+++ b/drivers/net/wireless/ti/wl1251/sdio.c
46492@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
46493
46494 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
46495
46496- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
46497- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
46498+ pax_open_kernel();
46499+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
46500+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
46501+ pax_close_kernel();
46502
46503 wl1251_info("using dedicated interrupt line");
46504 } else {
46505- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
46506- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
46507+ pax_open_kernel();
46508+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
46509+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
46510+ pax_close_kernel();
46511
46512 wl1251_info("using SDIO interrupt");
46513 }
46514diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
46515index be7129b..4161356 100644
46516--- a/drivers/net/wireless/ti/wl12xx/main.c
46517+++ b/drivers/net/wireless/ti/wl12xx/main.c
46518@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
46519 sizeof(wl->conf.mem));
46520
46521 /* read data preparation is only needed by wl127x */
46522- wl->ops->prepare_read = wl127x_prepare_read;
46523+ pax_open_kernel();
46524+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
46525+ pax_close_kernel();
46526
46527 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
46528 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
46529@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
46530 sizeof(wl->conf.mem));
46531
46532 /* read data preparation is only needed by wl127x */
46533- wl->ops->prepare_read = wl127x_prepare_read;
46534+ pax_open_kernel();
46535+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
46536+ pax_close_kernel();
46537
46538 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
46539 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
46540diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
46541index ec37b16..7e34d66 100644
46542--- a/drivers/net/wireless/ti/wl18xx/main.c
46543+++ b/drivers/net/wireless/ti/wl18xx/main.c
46544@@ -1823,8 +1823,10 @@ static int wl18xx_setup(struct wl1271 *wl)
46545 }
46546
46547 if (!checksum_param) {
46548- wl18xx_ops.set_rx_csum = NULL;
46549- wl18xx_ops.init_vif = NULL;
46550+ pax_open_kernel();
46551+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
46552+ *(void **)&wl18xx_ops.init_vif = NULL;
46553+ pax_close_kernel();
46554 }
46555
46556 /* Enable 11a Band only if we have 5G antennas */
46557diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
46558index 84d94f5..bd6c61c 100644
46559--- a/drivers/net/wireless/zd1211rw/zd_usb.c
46560+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
46561@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
46562 {
46563 struct zd_usb *usb = urb->context;
46564 struct zd_usb_interrupt *intr = &usb->intr;
46565- int len;
46566+ unsigned int len;
46567 u16 int_num;
46568
46569 ZD_ASSERT(in_interrupt());
46570diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
46571index 7130864..00e64de 100644
46572--- a/drivers/nfc/nfcwilink.c
46573+++ b/drivers/nfc/nfcwilink.c
46574@@ -498,7 +498,7 @@ static struct nci_ops nfcwilink_ops = {
46575
46576 static int nfcwilink_probe(struct platform_device *pdev)
46577 {
46578- static struct nfcwilink *drv;
46579+ struct nfcwilink *drv;
46580 int rc;
46581 __u32 protocols;
46582
46583diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
46584index d93b2b6..ae50401 100644
46585--- a/drivers/oprofile/buffer_sync.c
46586+++ b/drivers/oprofile/buffer_sync.c
46587@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
46588 if (cookie == NO_COOKIE)
46589 offset = pc;
46590 if (cookie == INVALID_COOKIE) {
46591- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
46592+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
46593 offset = pc;
46594 }
46595 if (cookie != last_cookie) {
46596@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
46597 /* add userspace sample */
46598
46599 if (!mm) {
46600- atomic_inc(&oprofile_stats.sample_lost_no_mm);
46601+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
46602 return 0;
46603 }
46604
46605 cookie = lookup_dcookie(mm, s->eip, &offset);
46606
46607 if (cookie == INVALID_COOKIE) {
46608- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
46609+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
46610 return 0;
46611 }
46612
46613@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
46614 /* ignore backtraces if failed to add a sample */
46615 if (state == sb_bt_start) {
46616 state = sb_bt_ignore;
46617- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
46618+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
46619 }
46620 }
46621 release_mm(mm);
46622diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
46623index c0cc4e7..44d4e54 100644
46624--- a/drivers/oprofile/event_buffer.c
46625+++ b/drivers/oprofile/event_buffer.c
46626@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
46627 }
46628
46629 if (buffer_pos == buffer_size) {
46630- atomic_inc(&oprofile_stats.event_lost_overflow);
46631+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
46632 return;
46633 }
46634
46635diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
46636index ed2c3ec..deda85a 100644
46637--- a/drivers/oprofile/oprof.c
46638+++ b/drivers/oprofile/oprof.c
46639@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
46640 if (oprofile_ops.switch_events())
46641 return;
46642
46643- atomic_inc(&oprofile_stats.multiplex_counter);
46644+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
46645 start_switch_worker();
46646 }
46647
46648diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
46649index ee2cfce..7f8f699 100644
46650--- a/drivers/oprofile/oprofile_files.c
46651+++ b/drivers/oprofile/oprofile_files.c
46652@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
46653
46654 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
46655
46656-static ssize_t timeout_read(struct file *file, char __user *buf,
46657+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
46658 size_t count, loff_t *offset)
46659 {
46660 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
46661diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
46662index 59659ce..6c860a0 100644
46663--- a/drivers/oprofile/oprofile_stats.c
46664+++ b/drivers/oprofile/oprofile_stats.c
46665@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
46666 cpu_buf->sample_invalid_eip = 0;
46667 }
46668
46669- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
46670- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
46671- atomic_set(&oprofile_stats.event_lost_overflow, 0);
46672- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
46673- atomic_set(&oprofile_stats.multiplex_counter, 0);
46674+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
46675+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
46676+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
46677+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
46678+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
46679 }
46680
46681
46682diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
46683index 1fc622b..8c48fc3 100644
46684--- a/drivers/oprofile/oprofile_stats.h
46685+++ b/drivers/oprofile/oprofile_stats.h
46686@@ -13,11 +13,11 @@
46687 #include <linux/atomic.h>
46688
46689 struct oprofile_stat_struct {
46690- atomic_t sample_lost_no_mm;
46691- atomic_t sample_lost_no_mapping;
46692- atomic_t bt_lost_no_mapping;
46693- atomic_t event_lost_overflow;
46694- atomic_t multiplex_counter;
46695+ atomic_unchecked_t sample_lost_no_mm;
46696+ atomic_unchecked_t sample_lost_no_mapping;
46697+ atomic_unchecked_t bt_lost_no_mapping;
46698+ atomic_unchecked_t event_lost_overflow;
46699+ atomic_unchecked_t multiplex_counter;
46700 };
46701
46702 extern struct oprofile_stat_struct oprofile_stats;
46703diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
46704index 3f49345..c750d0b 100644
46705--- a/drivers/oprofile/oprofilefs.c
46706+++ b/drivers/oprofile/oprofilefs.c
46707@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
46708
46709 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
46710 {
46711- atomic_t *val = file->private_data;
46712- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
46713+ atomic_unchecked_t *val = file->private_data;
46714+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
46715 }
46716
46717
46718@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
46719
46720
46721 int oprofilefs_create_ro_atomic(struct dentry *root,
46722- char const *name, atomic_t *val)
46723+ char const *name, atomic_unchecked_t *val)
46724 {
46725 return __oprofilefs_create_file(root, name,
46726 &atomic_ro_fops, 0444, val);
46727diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
46728index 61be1d9..dec05d7 100644
46729--- a/drivers/oprofile/timer_int.c
46730+++ b/drivers/oprofile/timer_int.c
46731@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
46732 return NOTIFY_OK;
46733 }
46734
46735-static struct notifier_block __refdata oprofile_cpu_notifier = {
46736+static struct notifier_block oprofile_cpu_notifier = {
46737 .notifier_call = oprofile_cpu_notify,
46738 };
46739
46740diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
46741index 92ed045..62d39bd7 100644
46742--- a/drivers/parport/procfs.c
46743+++ b/drivers/parport/procfs.c
46744@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
46745
46746 *ppos += len;
46747
46748- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
46749+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
46750 }
46751
46752 #ifdef CONFIG_PARPORT_1284
46753@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
46754
46755 *ppos += len;
46756
46757- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
46758+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
46759 }
46760 #endif /* IEEE1284.3 support. */
46761
46762diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
46763index ecfac7e..41be7028 100644
46764--- a/drivers/pci/hotplug/acpiphp_ibm.c
46765+++ b/drivers/pci/hotplug/acpiphp_ibm.c
46766@@ -453,7 +453,9 @@ static int __init ibm_acpiphp_init(void)
46767 goto init_cleanup;
46768 }
46769
46770- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
46771+ pax_open_kernel();
46772+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
46773+ pax_close_kernel();
46774 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
46775
46776 return retval;
46777diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
46778index 7536eef..52dc8fa 100644
46779--- a/drivers/pci/hotplug/cpcihp_generic.c
46780+++ b/drivers/pci/hotplug/cpcihp_generic.c
46781@@ -73,7 +73,6 @@ static u16 port;
46782 static unsigned int enum_bit;
46783 static u8 enum_mask;
46784
46785-static struct cpci_hp_controller_ops generic_hpc_ops;
46786 static struct cpci_hp_controller generic_hpc;
46787
46788 static int __init validate_parameters(void)
46789@@ -139,6 +138,10 @@ static int query_enum(void)
46790 return ((value & enum_mask) == enum_mask);
46791 }
46792
46793+static struct cpci_hp_controller_ops generic_hpc_ops = {
46794+ .query_enum = query_enum,
46795+};
46796+
46797 static int __init cpcihp_generic_init(void)
46798 {
46799 int status;
46800@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
46801 pci_dev_put(dev);
46802
46803 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
46804- generic_hpc_ops.query_enum = query_enum;
46805 generic_hpc.ops = &generic_hpc_ops;
46806
46807 status = cpci_hp_register_controller(&generic_hpc);
46808diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
46809index e8c4a7c..7046f5c 100644
46810--- a/drivers/pci/hotplug/cpcihp_zt5550.c
46811+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
46812@@ -59,7 +59,6 @@
46813 /* local variables */
46814 static bool debug;
46815 static bool poll;
46816-static struct cpci_hp_controller_ops zt5550_hpc_ops;
46817 static struct cpci_hp_controller zt5550_hpc;
46818
46819 /* Primary cPCI bus bridge device */
46820@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
46821 return 0;
46822 }
46823
46824+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
46825+ .query_enum = zt5550_hc_query_enum,
46826+};
46827+
46828 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
46829 {
46830 int status;
46831@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
46832 dbg("returned from zt5550_hc_config");
46833
46834 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
46835- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
46836 zt5550_hpc.ops = &zt5550_hpc_ops;
46837 if(!poll) {
46838 zt5550_hpc.irq = hc_dev->irq;
46839 zt5550_hpc.irq_flags = IRQF_SHARED;
46840 zt5550_hpc.dev_id = hc_dev;
46841
46842- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
46843- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
46844- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
46845+ pax_open_kernel();
46846+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
46847+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
46848+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
46849+ pax_open_kernel();
46850 } else {
46851 info("using ENUM# polling mode");
46852 }
46853diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
46854index 76ba8a1..20ca857 100644
46855--- a/drivers/pci/hotplug/cpqphp_nvram.c
46856+++ b/drivers/pci/hotplug/cpqphp_nvram.c
46857@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
46858
46859 void compaq_nvram_init (void __iomem *rom_start)
46860 {
46861+
46862+#ifndef CONFIG_PAX_KERNEXEC
46863 if (rom_start) {
46864 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
46865 }
46866+#endif
46867+
46868 dbg("int15 entry = %p\n", compaq_int15_entry_point);
46869
46870 /* initialize our int15 lock */
46871diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
46872index cfa92a9..29539c5 100644
46873--- a/drivers/pci/hotplug/pci_hotplug_core.c
46874+++ b/drivers/pci/hotplug/pci_hotplug_core.c
46875@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
46876 return -EINVAL;
46877 }
46878
46879- slot->ops->owner = owner;
46880- slot->ops->mod_name = mod_name;
46881+ pax_open_kernel();
46882+ *(struct module **)&slot->ops->owner = owner;
46883+ *(const char **)&slot->ops->mod_name = mod_name;
46884+ pax_close_kernel();
46885
46886 mutex_lock(&pci_hp_mutex);
46887 /*
46888diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
46889index bbd48bb..6907ef4 100644
46890--- a/drivers/pci/hotplug/pciehp_core.c
46891+++ b/drivers/pci/hotplug/pciehp_core.c
46892@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
46893 struct slot *slot = ctrl->slot;
46894 struct hotplug_slot *hotplug = NULL;
46895 struct hotplug_slot_info *info = NULL;
46896- struct hotplug_slot_ops *ops = NULL;
46897+ hotplug_slot_ops_no_const *ops = NULL;
46898 char name[SLOT_NAME_SIZE];
46899 int retval = -ENOMEM;
46900
46901diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
46902index c91e6c1..5c723ef 100644
46903--- a/drivers/pci/pci-sysfs.c
46904+++ b/drivers/pci/pci-sysfs.c
46905@@ -1117,7 +1117,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
46906 {
46907 /* allocate attribute structure, piggyback attribute name */
46908 int name_len = write_combine ? 13 : 10;
46909- struct bin_attribute *res_attr;
46910+ bin_attribute_no_const *res_attr;
46911 int retval;
46912
46913 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
46914@@ -1302,7 +1302,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
46915 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
46916 {
46917 int retval;
46918- struct bin_attribute *attr;
46919+ bin_attribute_no_const *attr;
46920
46921 /* If the device has VPD, try to expose it in sysfs. */
46922 if (dev->vpd) {
46923@@ -1349,7 +1349,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
46924 {
46925 int retval;
46926 int rom_size = 0;
46927- struct bin_attribute *attr;
46928+ bin_attribute_no_const *attr;
46929
46930 if (!sysfs_initialized)
46931 return -EACCES;
46932diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
46933index 9c91ecc..bda4796 100644
46934--- a/drivers/pci/pci.h
46935+++ b/drivers/pci/pci.h
46936@@ -95,7 +95,7 @@ struct pci_vpd_ops {
46937 struct pci_vpd {
46938 unsigned int len;
46939 const struct pci_vpd_ops *ops;
46940- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
46941+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
46942 };
46943
46944 int pci_vpd_pci22_init(struct pci_dev *dev);
46945diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
46946index f1272dc..e92a1ac 100644
46947--- a/drivers/pci/pcie/aspm.c
46948+++ b/drivers/pci/pcie/aspm.c
46949@@ -27,9 +27,9 @@
46950 #define MODULE_PARAM_PREFIX "pcie_aspm."
46951
46952 /* Note: those are not register definitions */
46953-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
46954-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
46955-#define ASPM_STATE_L1 (4) /* L1 state */
46956+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
46957+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
46958+#define ASPM_STATE_L1 (4U) /* L1 state */
46959 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
46960 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
46961
46962diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
46963index 38e403d..a2ce55a 100644
46964--- a/drivers/pci/probe.c
46965+++ b/drivers/pci/probe.c
46966@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
46967 struct pci_bus_region region, inverted_region;
46968 bool bar_too_big = false, bar_disabled = false;
46969
46970- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
46971+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
46972
46973 /* No printks while decoding is disabled! */
46974 if (!dev->mmio_always_on) {
46975diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
46976index 46d1378..30e452b 100644
46977--- a/drivers/pci/proc.c
46978+++ b/drivers/pci/proc.c
46979@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
46980 static int __init pci_proc_init(void)
46981 {
46982 struct pci_dev *dev = NULL;
46983+
46984+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46985+#ifdef CONFIG_GRKERNSEC_PROC_USER
46986+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
46987+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46988+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46989+#endif
46990+#else
46991 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
46992+#endif
46993 proc_create("devices", 0, proc_bus_pci_dir,
46994 &proc_bus_pci_dev_operations);
46995 proc_initialized = 1;
46996diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
46997index 3e5b4497..dcdfb70 100644
46998--- a/drivers/platform/chrome/chromeos_laptop.c
46999+++ b/drivers/platform/chrome/chromeos_laptop.c
47000@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
47001 return 0;
47002 }
47003
47004-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
47005+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
47006 {
47007 .ident = "Samsung Series 5 550 - Touchpad",
47008 .matches = {
47009diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
47010index 19c313b..ed28b38 100644
47011--- a/drivers/platform/x86/asus-wmi.c
47012+++ b/drivers/platform/x86/asus-wmi.c
47013@@ -1618,6 +1618,10 @@ static int show_dsts(struct seq_file *m, void *data)
47014 int err;
47015 u32 retval = -1;
47016
47017+#ifdef CONFIG_GRKERNSEC_KMEM
47018+ return -EPERM;
47019+#endif
47020+
47021 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
47022
47023 if (err < 0)
47024@@ -1634,6 +1638,10 @@ static int show_devs(struct seq_file *m, void *data)
47025 int err;
47026 u32 retval = -1;
47027
47028+#ifdef CONFIG_GRKERNSEC_KMEM
47029+ return -EPERM;
47030+#endif
47031+
47032 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
47033 &retval);
47034
47035@@ -1658,6 +1666,10 @@ static int show_call(struct seq_file *m, void *data)
47036 union acpi_object *obj;
47037 acpi_status status;
47038
47039+#ifdef CONFIG_GRKERNSEC_KMEM
47040+ return -EPERM;
47041+#endif
47042+
47043 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
47044 1, asus->debug.method_id,
47045 &input, &output);
47046diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
47047index 62f8030..c7f2a45 100644
47048--- a/drivers/platform/x86/msi-laptop.c
47049+++ b/drivers/platform/x86/msi-laptop.c
47050@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
47051
47052 if (!quirks->ec_read_only) {
47053 /* allow userland write sysfs file */
47054- dev_attr_bluetooth.store = store_bluetooth;
47055- dev_attr_wlan.store = store_wlan;
47056- dev_attr_threeg.store = store_threeg;
47057- dev_attr_bluetooth.attr.mode |= S_IWUSR;
47058- dev_attr_wlan.attr.mode |= S_IWUSR;
47059- dev_attr_threeg.attr.mode |= S_IWUSR;
47060+ pax_open_kernel();
47061+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
47062+ *(void **)&dev_attr_wlan.store = store_wlan;
47063+ *(void **)&dev_attr_threeg.store = store_threeg;
47064+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
47065+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
47066+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
47067+ pax_close_kernel();
47068 }
47069
47070 /* disable hardware control by fn key */
47071diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
47072index 70222f2..8c8ce66 100644
47073--- a/drivers/platform/x86/msi-wmi.c
47074+++ b/drivers/platform/x86/msi-wmi.c
47075@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
47076 static void msi_wmi_notify(u32 value, void *context)
47077 {
47078 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
47079- static struct key_entry *key;
47080+ struct key_entry *key;
47081 union acpi_object *obj;
47082 acpi_status status;
47083
47084diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
47085index fb233ae..23a325c 100644
47086--- a/drivers/platform/x86/sony-laptop.c
47087+++ b/drivers/platform/x86/sony-laptop.c
47088@@ -2453,7 +2453,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
47089 }
47090
47091 /* High speed charging function */
47092-static struct device_attribute *hsc_handle;
47093+static device_attribute_no_const *hsc_handle;
47094
47095 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
47096 struct device_attribute *attr,
47097diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
47098index 58b0274..6704626 100644
47099--- a/drivers/platform/x86/thinkpad_acpi.c
47100+++ b/drivers/platform/x86/thinkpad_acpi.c
47101@@ -2100,7 +2100,7 @@ static int hotkey_mask_get(void)
47102 return 0;
47103 }
47104
47105-void static hotkey_mask_warn_incomplete_mask(void)
47106+static void hotkey_mask_warn_incomplete_mask(void)
47107 {
47108 /* log only what the user can fix... */
47109 const u32 wantedmask = hotkey_driver_mask &
47110@@ -2327,11 +2327,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
47111 }
47112 }
47113
47114-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47115- struct tp_nvram_state *newn,
47116- const u32 event_mask)
47117-{
47118-
47119 #define TPACPI_COMPARE_KEY(__scancode, __member) \
47120 do { \
47121 if ((event_mask & (1 << __scancode)) && \
47122@@ -2345,36 +2340,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47123 tpacpi_hotkey_send_key(__scancode); \
47124 } while (0)
47125
47126- void issue_volchange(const unsigned int oldvol,
47127- const unsigned int newvol)
47128- {
47129- unsigned int i = oldvol;
47130+static void issue_volchange(const unsigned int oldvol,
47131+ const unsigned int newvol,
47132+ const u32 event_mask)
47133+{
47134+ unsigned int i = oldvol;
47135
47136- while (i > newvol) {
47137- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
47138- i--;
47139- }
47140- while (i < newvol) {
47141- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47142- i++;
47143- }
47144+ while (i > newvol) {
47145+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
47146+ i--;
47147 }
47148+ while (i < newvol) {
47149+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47150+ i++;
47151+ }
47152+}
47153
47154- void issue_brightnesschange(const unsigned int oldbrt,
47155- const unsigned int newbrt)
47156- {
47157- unsigned int i = oldbrt;
47158+static void issue_brightnesschange(const unsigned int oldbrt,
47159+ const unsigned int newbrt,
47160+ const u32 event_mask)
47161+{
47162+ unsigned int i = oldbrt;
47163
47164- while (i > newbrt) {
47165- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
47166- i--;
47167- }
47168- while (i < newbrt) {
47169- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47170- i++;
47171- }
47172+ while (i > newbrt) {
47173+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
47174+ i--;
47175+ }
47176+ while (i < newbrt) {
47177+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47178+ i++;
47179 }
47180+}
47181
47182+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47183+ struct tp_nvram_state *newn,
47184+ const u32 event_mask)
47185+{
47186 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
47187 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
47188 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
47189@@ -2408,7 +2409,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47190 oldn->volume_level != newn->volume_level) {
47191 /* recently muted, or repeated mute keypress, or
47192 * multiple presses ending in mute */
47193- issue_volchange(oldn->volume_level, newn->volume_level);
47194+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
47195 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
47196 }
47197 } else {
47198@@ -2418,7 +2419,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47199 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47200 }
47201 if (oldn->volume_level != newn->volume_level) {
47202- issue_volchange(oldn->volume_level, newn->volume_level);
47203+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
47204 } else if (oldn->volume_toggle != newn->volume_toggle) {
47205 /* repeated vol up/down keypress at end of scale ? */
47206 if (newn->volume_level == 0)
47207@@ -2431,7 +2432,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47208 /* handle brightness */
47209 if (oldn->brightness_level != newn->brightness_level) {
47210 issue_brightnesschange(oldn->brightness_level,
47211- newn->brightness_level);
47212+ newn->brightness_level,
47213+ event_mask);
47214 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
47215 /* repeated key presses that didn't change state */
47216 if (newn->brightness_level == 0)
47217@@ -2440,10 +2442,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47218 && !tp_features.bright_unkfw)
47219 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47220 }
47221+}
47222
47223 #undef TPACPI_COMPARE_KEY
47224 #undef TPACPI_MAY_SEND_KEY
47225-}
47226
47227 /*
47228 * Polling driver
47229diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
47230index 769d265..a3a05ca 100644
47231--- a/drivers/pnp/pnpbios/bioscalls.c
47232+++ b/drivers/pnp/pnpbios/bioscalls.c
47233@@ -58,7 +58,7 @@ do { \
47234 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
47235 } while(0)
47236
47237-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
47238+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
47239 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
47240
47241 /*
47242@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
47243
47244 cpu = get_cpu();
47245 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
47246+
47247+ pax_open_kernel();
47248 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
47249+ pax_close_kernel();
47250
47251 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
47252 spin_lock_irqsave(&pnp_bios_lock, flags);
47253@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
47254 :"memory");
47255 spin_unlock_irqrestore(&pnp_bios_lock, flags);
47256
47257+ pax_open_kernel();
47258 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
47259+ pax_close_kernel();
47260+
47261 put_cpu();
47262
47263 /* If we get here and this is set then the PnP BIOS faulted on us. */
47264@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
47265 return status;
47266 }
47267
47268-void pnpbios_calls_init(union pnp_bios_install_struct *header)
47269+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
47270 {
47271 int i;
47272
47273@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
47274 pnp_bios_callpoint.offset = header->fields.pm16offset;
47275 pnp_bios_callpoint.segment = PNP_CS16;
47276
47277+ pax_open_kernel();
47278+
47279 for_each_possible_cpu(i) {
47280 struct desc_struct *gdt = get_cpu_gdt_table(i);
47281 if (!gdt)
47282@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
47283 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
47284 (unsigned long)__va(header->fields.pm16dseg));
47285 }
47286+
47287+ pax_close_kernel();
47288 }
47289diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
47290index d95e101..67f0c3f 100644
47291--- a/drivers/pnp/resource.c
47292+++ b/drivers/pnp/resource.c
47293@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
47294 return 1;
47295
47296 /* check if the resource is valid */
47297- if (*irq < 0 || *irq > 15)
47298+ if (*irq > 15)
47299 return 0;
47300
47301 /* check if the resource is reserved */
47302@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
47303 return 1;
47304
47305 /* check if the resource is valid */
47306- if (*dma < 0 || *dma == 4 || *dma > 7)
47307+ if (*dma == 4 || *dma > 7)
47308 return 0;
47309
47310 /* check if the resource is reserved */
47311diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
47312index 0c52e2a..3421ab7 100644
47313--- a/drivers/power/pda_power.c
47314+++ b/drivers/power/pda_power.c
47315@@ -37,7 +37,11 @@ static int polling;
47316
47317 #if IS_ENABLED(CONFIG_USB_PHY)
47318 static struct usb_phy *transceiver;
47319-static struct notifier_block otg_nb;
47320+static int otg_handle_notification(struct notifier_block *nb,
47321+ unsigned long event, void *unused);
47322+static struct notifier_block otg_nb = {
47323+ .notifier_call = otg_handle_notification
47324+};
47325 #endif
47326
47327 static struct regulator *ac_draw;
47328@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
47329
47330 #if IS_ENABLED(CONFIG_USB_PHY)
47331 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
47332- otg_nb.notifier_call = otg_handle_notification;
47333 ret = usb_register_notifier(transceiver, &otg_nb);
47334 if (ret) {
47335 dev_err(dev, "failure to register otg notifier\n");
47336diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
47337index cc439fd..8fa30df 100644
47338--- a/drivers/power/power_supply.h
47339+++ b/drivers/power/power_supply.h
47340@@ -16,12 +16,12 @@ struct power_supply;
47341
47342 #ifdef CONFIG_SYSFS
47343
47344-extern void power_supply_init_attrs(struct device_type *dev_type);
47345+extern void power_supply_init_attrs(void);
47346 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
47347
47348 #else
47349
47350-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
47351+static inline void power_supply_init_attrs(void) {}
47352 #define power_supply_uevent NULL
47353
47354 #endif /* CONFIG_SYSFS */
47355diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
47356index 557af94..84dc1fe 100644
47357--- a/drivers/power/power_supply_core.c
47358+++ b/drivers/power/power_supply_core.c
47359@@ -24,7 +24,10 @@
47360 struct class *power_supply_class;
47361 EXPORT_SYMBOL_GPL(power_supply_class);
47362
47363-static struct device_type power_supply_dev_type;
47364+extern const struct attribute_group *power_supply_attr_groups[];
47365+static struct device_type power_supply_dev_type = {
47366+ .groups = power_supply_attr_groups,
47367+};
47368
47369 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
47370 struct power_supply *supply)
47371@@ -584,7 +587,7 @@ static int __init power_supply_class_init(void)
47372 return PTR_ERR(power_supply_class);
47373
47374 power_supply_class->dev_uevent = power_supply_uevent;
47375- power_supply_init_attrs(&power_supply_dev_type);
47376+ power_supply_init_attrs();
47377
47378 return 0;
47379 }
47380diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
47381index 44420d1..967126e 100644
47382--- a/drivers/power/power_supply_sysfs.c
47383+++ b/drivers/power/power_supply_sysfs.c
47384@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
47385 .is_visible = power_supply_attr_is_visible,
47386 };
47387
47388-static const struct attribute_group *power_supply_attr_groups[] = {
47389+const struct attribute_group *power_supply_attr_groups[] = {
47390 &power_supply_attr_group,
47391 NULL,
47392 };
47393
47394-void power_supply_init_attrs(struct device_type *dev_type)
47395+void power_supply_init_attrs(void)
47396 {
47397 int i;
47398
47399- dev_type->groups = power_supply_attr_groups;
47400-
47401 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
47402 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
47403 }
47404diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
47405index 84419af..268ede8 100644
47406--- a/drivers/powercap/powercap_sys.c
47407+++ b/drivers/powercap/powercap_sys.c
47408@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
47409 struct device_attribute name_attr;
47410 };
47411
47412+static ssize_t show_constraint_name(struct device *dev,
47413+ struct device_attribute *dev_attr,
47414+ char *buf);
47415+
47416 static struct powercap_constraint_attr
47417- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
47418+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
47419+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
47420+ .power_limit_attr = {
47421+ .attr = {
47422+ .name = NULL,
47423+ .mode = S_IWUSR | S_IRUGO
47424+ },
47425+ .show = show_constraint_power_limit_uw,
47426+ .store = store_constraint_power_limit_uw
47427+ },
47428+
47429+ .time_window_attr = {
47430+ .attr = {
47431+ .name = NULL,
47432+ .mode = S_IWUSR | S_IRUGO
47433+ },
47434+ .show = show_constraint_time_window_us,
47435+ .store = store_constraint_time_window_us
47436+ },
47437+
47438+ .max_power_attr = {
47439+ .attr = {
47440+ .name = NULL,
47441+ .mode = S_IRUGO
47442+ },
47443+ .show = show_constraint_max_power_uw,
47444+ .store = NULL
47445+ },
47446+
47447+ .min_power_attr = {
47448+ .attr = {
47449+ .name = NULL,
47450+ .mode = S_IRUGO
47451+ },
47452+ .show = show_constraint_min_power_uw,
47453+ .store = NULL
47454+ },
47455+
47456+ .max_time_window_attr = {
47457+ .attr = {
47458+ .name = NULL,
47459+ .mode = S_IRUGO
47460+ },
47461+ .show = show_constraint_max_time_window_us,
47462+ .store = NULL
47463+ },
47464+
47465+ .min_time_window_attr = {
47466+ .attr = {
47467+ .name = NULL,
47468+ .mode = S_IRUGO
47469+ },
47470+ .show = show_constraint_min_time_window_us,
47471+ .store = NULL
47472+ },
47473+
47474+ .name_attr = {
47475+ .attr = {
47476+ .name = NULL,
47477+ .mode = S_IRUGO
47478+ },
47479+ .show = show_constraint_name,
47480+ .store = NULL
47481+ }
47482+ }
47483+};
47484
47485 /* A list of powercap control_types */
47486 static LIST_HEAD(powercap_cntrl_list);
47487@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
47488 }
47489
47490 static int create_constraint_attribute(int id, const char *name,
47491- int mode,
47492- struct device_attribute *dev_attr,
47493- ssize_t (*show)(struct device *,
47494- struct device_attribute *, char *),
47495- ssize_t (*store)(struct device *,
47496- struct device_attribute *,
47497- const char *, size_t)
47498- )
47499+ struct device_attribute *dev_attr)
47500 {
47501+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
47502
47503- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
47504- id, name);
47505- if (!dev_attr->attr.name)
47506+ if (!name)
47507 return -ENOMEM;
47508- dev_attr->attr.mode = mode;
47509- dev_attr->show = show;
47510- dev_attr->store = store;
47511+
47512+ pax_open_kernel();
47513+ *(const char **)&dev_attr->attr.name = name;
47514+ pax_close_kernel();
47515
47516 return 0;
47517 }
47518@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
47519
47520 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
47521 ret = create_constraint_attribute(i, "power_limit_uw",
47522- S_IWUSR | S_IRUGO,
47523- &constraint_attrs[i].power_limit_attr,
47524- show_constraint_power_limit_uw,
47525- store_constraint_power_limit_uw);
47526+ &constraint_attrs[i].power_limit_attr);
47527 if (ret)
47528 goto err_alloc;
47529 ret = create_constraint_attribute(i, "time_window_us",
47530- S_IWUSR | S_IRUGO,
47531- &constraint_attrs[i].time_window_attr,
47532- show_constraint_time_window_us,
47533- store_constraint_time_window_us);
47534+ &constraint_attrs[i].time_window_attr);
47535 if (ret)
47536 goto err_alloc;
47537- ret = create_constraint_attribute(i, "name", S_IRUGO,
47538- &constraint_attrs[i].name_attr,
47539- show_constraint_name,
47540- NULL);
47541+ ret = create_constraint_attribute(i, "name",
47542+ &constraint_attrs[i].name_attr);
47543 if (ret)
47544 goto err_alloc;
47545- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
47546- &constraint_attrs[i].max_power_attr,
47547- show_constraint_max_power_uw,
47548- NULL);
47549+ ret = create_constraint_attribute(i, "max_power_uw",
47550+ &constraint_attrs[i].max_power_attr);
47551 if (ret)
47552 goto err_alloc;
47553- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
47554- &constraint_attrs[i].min_power_attr,
47555- show_constraint_min_power_uw,
47556- NULL);
47557+ ret = create_constraint_attribute(i, "min_power_uw",
47558+ &constraint_attrs[i].min_power_attr);
47559 if (ret)
47560 goto err_alloc;
47561 ret = create_constraint_attribute(i, "max_time_window_us",
47562- S_IRUGO,
47563- &constraint_attrs[i].max_time_window_attr,
47564- show_constraint_max_time_window_us,
47565- NULL);
47566+ &constraint_attrs[i].max_time_window_attr);
47567 if (ret)
47568 goto err_alloc;
47569 ret = create_constraint_attribute(i, "min_time_window_us",
47570- S_IRUGO,
47571- &constraint_attrs[i].min_time_window_attr,
47572- show_constraint_min_time_window_us,
47573- NULL);
47574+ &constraint_attrs[i].min_time_window_attr);
47575 if (ret)
47576 goto err_alloc;
47577
47578@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
47579 power_zone->zone_dev_attrs[count++] =
47580 &dev_attr_max_energy_range_uj.attr;
47581 if (power_zone->ops->get_energy_uj) {
47582+ pax_open_kernel();
47583 if (power_zone->ops->reset_energy_uj)
47584- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
47585+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
47586 else
47587- dev_attr_energy_uj.attr.mode = S_IRUGO;
47588+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
47589+ pax_close_kernel();
47590 power_zone->zone_dev_attrs[count++] =
47591 &dev_attr_energy_uj.attr;
47592 }
47593diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
47594index d85f313..ae857d0 100644
47595--- a/drivers/regulator/core.c
47596+++ b/drivers/regulator/core.c
47597@@ -3362,7 +3362,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
47598 {
47599 const struct regulation_constraints *constraints = NULL;
47600 const struct regulator_init_data *init_data;
47601- static atomic_t regulator_no = ATOMIC_INIT(0);
47602+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
47603 struct regulator_dev *rdev;
47604 struct device *dev;
47605 int ret, i;
47606@@ -3432,7 +3432,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
47607 rdev->dev.of_node = config->of_node;
47608 rdev->dev.parent = dev;
47609 dev_set_name(&rdev->dev, "regulator.%d",
47610- atomic_inc_return(&regulator_no) - 1);
47611+ atomic_inc_return_unchecked(&regulator_no) - 1);
47612 ret = device_register(&rdev->dev);
47613 if (ret != 0) {
47614 put_device(&rdev->dev);
47615diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
47616index 8d94d3d..653b623 100644
47617--- a/drivers/regulator/max8660.c
47618+++ b/drivers/regulator/max8660.c
47619@@ -420,8 +420,10 @@ static int max8660_probe(struct i2c_client *client,
47620 max8660->shadow_regs[MAX8660_OVER1] = 5;
47621 } else {
47622 /* Otherwise devices can be toggled via software */
47623- max8660_dcdc_ops.enable = max8660_dcdc_enable;
47624- max8660_dcdc_ops.disable = max8660_dcdc_disable;
47625+ pax_open_kernel();
47626+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
47627+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
47628+ pax_close_kernel();
47629 }
47630
47631 /*
47632diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
47633index 892aa1e..ebd1b9c 100644
47634--- a/drivers/regulator/max8973-regulator.c
47635+++ b/drivers/regulator/max8973-regulator.c
47636@@ -406,9 +406,11 @@ static int max8973_probe(struct i2c_client *client,
47637 if (!pdata || !pdata->enable_ext_control) {
47638 max->desc.enable_reg = MAX8973_VOUT;
47639 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
47640- max->ops.enable = regulator_enable_regmap;
47641- max->ops.disable = regulator_disable_regmap;
47642- max->ops.is_enabled = regulator_is_enabled_regmap;
47643+ pax_open_kernel();
47644+ *(void **)&max->ops.enable = regulator_enable_regmap;
47645+ *(void **)&max->ops.disable = regulator_disable_regmap;
47646+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
47647+ pax_close_kernel();
47648 }
47649
47650 if (pdata) {
47651diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
47652index 96c9f80..90974ca 100644
47653--- a/drivers/regulator/mc13892-regulator.c
47654+++ b/drivers/regulator/mc13892-regulator.c
47655@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
47656 }
47657 mc13xxx_unlock(mc13892);
47658
47659- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
47660+ pax_open_kernel();
47661+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
47662 = mc13892_vcam_set_mode;
47663- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
47664+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
47665 = mc13892_vcam_get_mode;
47666+ pax_close_kernel();
47667
47668 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
47669 ARRAY_SIZE(mc13892_regulators));
47670diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
47671index f148762..5a6d1e5 100644
47672--- a/drivers/rtc/rtc-cmos.c
47673+++ b/drivers/rtc/rtc-cmos.c
47674@@ -731,7 +731,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
47675 hpet_rtc_timer_init();
47676
47677 /* export at least the first block of NVRAM */
47678- nvram.size = address_space - NVRAM_OFFSET;
47679+ pax_open_kernel();
47680+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
47681+ pax_close_kernel();
47682 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
47683 if (retval < 0) {
47684 dev_dbg(dev, "can't create nvram file? %d\n", retval);
47685diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
47686index d049393..bb20be0 100644
47687--- a/drivers/rtc/rtc-dev.c
47688+++ b/drivers/rtc/rtc-dev.c
47689@@ -16,6 +16,7 @@
47690 #include <linux/module.h>
47691 #include <linux/rtc.h>
47692 #include <linux/sched.h>
47693+#include <linux/grsecurity.h>
47694 #include "rtc-core.h"
47695
47696 static dev_t rtc_devt;
47697@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
47698 if (copy_from_user(&tm, uarg, sizeof(tm)))
47699 return -EFAULT;
47700
47701+ gr_log_timechange();
47702+
47703 return rtc_set_time(rtc, &tm);
47704
47705 case RTC_PIE_ON:
47706diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
47707index 4e75345..09f8663 100644
47708--- a/drivers/rtc/rtc-ds1307.c
47709+++ b/drivers/rtc/rtc-ds1307.c
47710@@ -107,7 +107,7 @@ struct ds1307 {
47711 u8 offset; /* register's offset */
47712 u8 regs[11];
47713 u16 nvram_offset;
47714- struct bin_attribute *nvram;
47715+ bin_attribute_no_const *nvram;
47716 enum ds_type type;
47717 unsigned long flags;
47718 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
47719diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
47720index 11880c1..b823aa4 100644
47721--- a/drivers/rtc/rtc-m48t59.c
47722+++ b/drivers/rtc/rtc-m48t59.c
47723@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
47724 if (IS_ERR(m48t59->rtc))
47725 return PTR_ERR(m48t59->rtc);
47726
47727- m48t59_nvram_attr.size = pdata->offset;
47728+ pax_open_kernel();
47729+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
47730+ pax_close_kernel();
47731
47732 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
47733 if (ret)
47734diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
47735index 14b5f8d..cc9bd26 100644
47736--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
47737+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
47738@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
47739 for (bit = 0; bit < 8; bit++) {
47740
47741 if ((pci_status[i] & (0x1 << bit)) != 0) {
47742- static const char *s;
47743+ const char *s;
47744
47745 s = pci_status_strings[bit];
47746 if (i == 7/*TARG*/ && bit == 3)
47747@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
47748
47749 for (bit = 0; bit < 8; bit++) {
47750
47751- if ((split_status[i] & (0x1 << bit)) != 0) {
47752- static const char *s;
47753-
47754- s = split_status_strings[bit];
47755- printk(s, ahd_name(ahd),
47756+ if ((split_status[i] & (0x1 << bit)) != 0)
47757+ printk(split_status_strings[bit], ahd_name(ahd),
47758 split_status_source[i]);
47759- }
47760
47761 if (i > 1)
47762 continue;
47763
47764- if ((sg_split_status[i] & (0x1 << bit)) != 0) {
47765- static const char *s;
47766-
47767- s = split_status_strings[bit];
47768- printk(s, ahd_name(ahd), "SG");
47769- }
47770+ if ((sg_split_status[i] & (0x1 << bit)) != 0)
47771+ printk(split_status_strings[bit], ahd_name(ahd), "SG");
47772 }
47773 }
47774 /*
47775diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
47776index e693af6..2e525b6 100644
47777--- a/drivers/scsi/bfa/bfa_fcpim.h
47778+++ b/drivers/scsi/bfa/bfa_fcpim.h
47779@@ -36,7 +36,7 @@ struct bfa_iotag_s {
47780
47781 struct bfa_itn_s {
47782 bfa_isr_func_t isr;
47783-};
47784+} __no_const;
47785
47786 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
47787 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
47788diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
47789index a3ab5cc..8143622 100644
47790--- a/drivers/scsi/bfa/bfa_fcs.c
47791+++ b/drivers/scsi/bfa/bfa_fcs.c
47792@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
47793 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
47794
47795 static struct bfa_fcs_mod_s fcs_modules[] = {
47796- { bfa_fcs_port_attach, NULL, NULL },
47797- { bfa_fcs_uf_attach, NULL, NULL },
47798- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
47799- bfa_fcs_fabric_modexit },
47800+ {
47801+ .attach = bfa_fcs_port_attach,
47802+ .modinit = NULL,
47803+ .modexit = NULL
47804+ },
47805+ {
47806+ .attach = bfa_fcs_uf_attach,
47807+ .modinit = NULL,
47808+ .modexit = NULL
47809+ },
47810+ {
47811+ .attach = bfa_fcs_fabric_attach,
47812+ .modinit = bfa_fcs_fabric_modinit,
47813+ .modexit = bfa_fcs_fabric_modexit
47814+ },
47815 };
47816
47817 /*
47818diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
47819index f5e4e61..a0acaf6 100644
47820--- a/drivers/scsi/bfa/bfa_fcs_lport.c
47821+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
47822@@ -89,15 +89,26 @@ static struct {
47823 void (*offline) (struct bfa_fcs_lport_s *port);
47824 } __port_action[] = {
47825 {
47826- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
47827- bfa_fcs_lport_unknown_offline}, {
47828- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
47829- bfa_fcs_lport_fab_offline}, {
47830- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
47831- bfa_fcs_lport_n2n_offline}, {
47832- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
47833- bfa_fcs_lport_loop_offline},
47834- };
47835+ .init = bfa_fcs_lport_unknown_init,
47836+ .online = bfa_fcs_lport_unknown_online,
47837+ .offline = bfa_fcs_lport_unknown_offline
47838+ },
47839+ {
47840+ .init = bfa_fcs_lport_fab_init,
47841+ .online = bfa_fcs_lport_fab_online,
47842+ .offline = bfa_fcs_lport_fab_offline
47843+ },
47844+ {
47845+ .init = bfa_fcs_lport_n2n_init,
47846+ .online = bfa_fcs_lport_n2n_online,
47847+ .offline = bfa_fcs_lport_n2n_offline
47848+ },
47849+ {
47850+ .init = bfa_fcs_lport_loop_init,
47851+ .online = bfa_fcs_lport_loop_online,
47852+ .offline = bfa_fcs_lport_loop_offline
47853+ },
47854+};
47855
47856 /*
47857 * fcs_port_sm FCS logical port state machine
47858diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
47859index 90814fe..4384138 100644
47860--- a/drivers/scsi/bfa/bfa_ioc.h
47861+++ b/drivers/scsi/bfa/bfa_ioc.h
47862@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
47863 bfa_ioc_disable_cbfn_t disable_cbfn;
47864 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
47865 bfa_ioc_reset_cbfn_t reset_cbfn;
47866-};
47867+} __no_const;
47868
47869 /*
47870 * IOC event notification mechanism.
47871@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
47872 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
47873 enum bfi_ioc_state fwstate);
47874 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
47875-};
47876+} __no_const;
47877
47878 /*
47879 * Queue element to wait for room in request queue. FIFO order is
47880diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
47881index a14c784..6de6790 100644
47882--- a/drivers/scsi/bfa/bfa_modules.h
47883+++ b/drivers/scsi/bfa/bfa_modules.h
47884@@ -78,12 +78,12 @@ enum {
47885 \
47886 extern struct bfa_module_s hal_mod_ ## __mod; \
47887 struct bfa_module_s hal_mod_ ## __mod = { \
47888- bfa_ ## __mod ## _meminfo, \
47889- bfa_ ## __mod ## _attach, \
47890- bfa_ ## __mod ## _detach, \
47891- bfa_ ## __mod ## _start, \
47892- bfa_ ## __mod ## _stop, \
47893- bfa_ ## __mod ## _iocdisable, \
47894+ .meminfo = bfa_ ## __mod ## _meminfo, \
47895+ .attach = bfa_ ## __mod ## _attach, \
47896+ .detach = bfa_ ## __mod ## _detach, \
47897+ .start = bfa_ ## __mod ## _start, \
47898+ .stop = bfa_ ## __mod ## _stop, \
47899+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
47900 }
47901
47902 #define BFA_CACHELINE_SZ (256)
47903diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
47904index 045c4e1..13de803 100644
47905--- a/drivers/scsi/fcoe/fcoe_sysfs.c
47906+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
47907@@ -33,8 +33,8 @@
47908 */
47909 #include "libfcoe.h"
47910
47911-static atomic_t ctlr_num;
47912-static atomic_t fcf_num;
47913+static atomic_unchecked_t ctlr_num;
47914+static atomic_unchecked_t fcf_num;
47915
47916 /*
47917 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
47918@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
47919 if (!ctlr)
47920 goto out;
47921
47922- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
47923+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
47924 ctlr->f = f;
47925 ctlr->mode = FIP_CONN_TYPE_FABRIC;
47926 INIT_LIST_HEAD(&ctlr->fcfs);
47927@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
47928 fcf->dev.parent = &ctlr->dev;
47929 fcf->dev.bus = &fcoe_bus_type;
47930 fcf->dev.type = &fcoe_fcf_device_type;
47931- fcf->id = atomic_inc_return(&fcf_num) - 1;
47932+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
47933 fcf->state = FCOE_FCF_STATE_UNKNOWN;
47934
47935 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
47936@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
47937 {
47938 int error;
47939
47940- atomic_set(&ctlr_num, 0);
47941- atomic_set(&fcf_num, 0);
47942+ atomic_set_unchecked(&ctlr_num, 0);
47943+ atomic_set_unchecked(&fcf_num, 0);
47944
47945 error = bus_register(&fcoe_bus_type);
47946 if (error)
47947diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
47948index f2c5005..db36c02 100644
47949--- a/drivers/scsi/hosts.c
47950+++ b/drivers/scsi/hosts.c
47951@@ -42,7 +42,7 @@
47952 #include "scsi_logging.h"
47953
47954
47955-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
47956+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
47957
47958
47959 static void scsi_host_cls_release(struct device *dev)
47960@@ -367,7 +367,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
47961 * subtract one because we increment first then return, but we need to
47962 * know what the next host number was before increment
47963 */
47964- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
47965+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
47966 shost->dma_channel = 0xff;
47967
47968 /* These three are default values which can be overridden */
47969diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
47970index 20a5e6e..8b23cea 100644
47971--- a/drivers/scsi/hpsa.c
47972+++ b/drivers/scsi/hpsa.c
47973@@ -578,7 +578,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
47974 unsigned long flags;
47975
47976 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
47977- return h->access.command_completed(h, q);
47978+ return h->access->command_completed(h, q);
47979
47980 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
47981 a = rq->head[rq->current_entry];
47982@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
47983 while (!list_empty(&h->reqQ)) {
47984 c = list_entry(h->reqQ.next, struct CommandList, list);
47985 /* can't do anything if fifo is full */
47986- if ((h->access.fifo_full(h))) {
47987+ if ((h->access->fifo_full(h))) {
47988 dev_warn(&h->pdev->dev, "fifo full\n");
47989 break;
47990 }
47991@@ -3466,7 +3466,7 @@ static void start_io(struct ctlr_info *h)
47992
47993 /* Tell the controller execute command */
47994 spin_unlock_irqrestore(&h->lock, flags);
47995- h->access.submit_command(h, c);
47996+ h->access->submit_command(h, c);
47997 spin_lock_irqsave(&h->lock, flags);
47998 }
47999 spin_unlock_irqrestore(&h->lock, flags);
48000@@ -3474,17 +3474,17 @@ static void start_io(struct ctlr_info *h)
48001
48002 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
48003 {
48004- return h->access.command_completed(h, q);
48005+ return h->access->command_completed(h, q);
48006 }
48007
48008 static inline bool interrupt_pending(struct ctlr_info *h)
48009 {
48010- return h->access.intr_pending(h);
48011+ return h->access->intr_pending(h);
48012 }
48013
48014 static inline long interrupt_not_for_us(struct ctlr_info *h)
48015 {
48016- return (h->access.intr_pending(h) == 0) ||
48017+ return (h->access->intr_pending(h) == 0) ||
48018 (h->interrupts_enabled == 0);
48019 }
48020
48021@@ -4386,7 +4386,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
48022 if (prod_index < 0)
48023 return -ENODEV;
48024 h->product_name = products[prod_index].product_name;
48025- h->access = *(products[prod_index].access);
48026+ h->access = products[prod_index].access;
48027
48028 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
48029 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
48030@@ -4668,7 +4668,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
48031
48032 assert_spin_locked(&lockup_detector_lock);
48033 remove_ctlr_from_lockup_detector_list(h);
48034- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48035+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48036 spin_lock_irqsave(&h->lock, flags);
48037 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
48038 spin_unlock_irqrestore(&h->lock, flags);
48039@@ -4845,7 +4845,7 @@ reinit_after_soft_reset:
48040 }
48041
48042 /* make sure the board interrupts are off */
48043- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48044+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48045
48046 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
48047 goto clean2;
48048@@ -4879,7 +4879,7 @@ reinit_after_soft_reset:
48049 * fake ones to scoop up any residual completions.
48050 */
48051 spin_lock_irqsave(&h->lock, flags);
48052- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48053+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48054 spin_unlock_irqrestore(&h->lock, flags);
48055 free_irqs(h);
48056 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
48057@@ -4898,9 +4898,9 @@ reinit_after_soft_reset:
48058 dev_info(&h->pdev->dev, "Board READY.\n");
48059 dev_info(&h->pdev->dev,
48060 "Waiting for stale completions to drain.\n");
48061- h->access.set_intr_mask(h, HPSA_INTR_ON);
48062+ h->access->set_intr_mask(h, HPSA_INTR_ON);
48063 msleep(10000);
48064- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48065+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48066
48067 rc = controller_reset_failed(h->cfgtable);
48068 if (rc)
48069@@ -4921,7 +4921,7 @@ reinit_after_soft_reset:
48070 }
48071
48072 /* Turn the interrupts on so we can service requests */
48073- h->access.set_intr_mask(h, HPSA_INTR_ON);
48074+ h->access->set_intr_mask(h, HPSA_INTR_ON);
48075
48076 hpsa_hba_inquiry(h);
48077 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
48078@@ -4976,7 +4976,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
48079 * To write all data in the battery backed cache to disks
48080 */
48081 hpsa_flush_cache(h);
48082- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48083+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48084 hpsa_free_irqs_and_disable_msix(h);
48085 }
48086
48087@@ -5143,7 +5143,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
48088 return;
48089 }
48090 /* Change the access methods to the performant access methods */
48091- h->access = SA5_performant_access;
48092+ h->access = &SA5_performant_access;
48093 h->transMethod = CFGTBL_Trans_Performant;
48094 }
48095
48096diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
48097index bc85e72..d463049 100644
48098--- a/drivers/scsi/hpsa.h
48099+++ b/drivers/scsi/hpsa.h
48100@@ -79,7 +79,7 @@ struct ctlr_info {
48101 unsigned int msix_vector;
48102 unsigned int msi_vector;
48103 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
48104- struct access_method access;
48105+ struct access_method *access;
48106
48107 /* queue and queue Info */
48108 struct list_head reqQ;
48109@@ -381,19 +381,19 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
48110 }
48111
48112 static struct access_method SA5_access = {
48113- SA5_submit_command,
48114- SA5_intr_mask,
48115- SA5_fifo_full,
48116- SA5_intr_pending,
48117- SA5_completed,
48118+ .submit_command = SA5_submit_command,
48119+ .set_intr_mask = SA5_intr_mask,
48120+ .fifo_full = SA5_fifo_full,
48121+ .intr_pending = SA5_intr_pending,
48122+ .command_completed = SA5_completed,
48123 };
48124
48125 static struct access_method SA5_performant_access = {
48126- SA5_submit_command,
48127- SA5_performant_intr_mask,
48128- SA5_fifo_full,
48129- SA5_performant_intr_pending,
48130- SA5_performant_completed,
48131+ .submit_command = SA5_submit_command,
48132+ .set_intr_mask = SA5_performant_intr_mask,
48133+ .fifo_full = SA5_fifo_full,
48134+ .intr_pending = SA5_performant_intr_pending,
48135+ .command_completed = SA5_performant_completed,
48136 };
48137
48138 struct board_type {
48139diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
48140index 1b3a094..068e683 100644
48141--- a/drivers/scsi/libfc/fc_exch.c
48142+++ b/drivers/scsi/libfc/fc_exch.c
48143@@ -101,12 +101,12 @@ struct fc_exch_mgr {
48144 u16 pool_max_index;
48145
48146 struct {
48147- atomic_t no_free_exch;
48148- atomic_t no_free_exch_xid;
48149- atomic_t xid_not_found;
48150- atomic_t xid_busy;
48151- atomic_t seq_not_found;
48152- atomic_t non_bls_resp;
48153+ atomic_unchecked_t no_free_exch;
48154+ atomic_unchecked_t no_free_exch_xid;
48155+ atomic_unchecked_t xid_not_found;
48156+ atomic_unchecked_t xid_busy;
48157+ atomic_unchecked_t seq_not_found;
48158+ atomic_unchecked_t non_bls_resp;
48159 } stats;
48160 };
48161
48162@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
48163 /* allocate memory for exchange */
48164 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
48165 if (!ep) {
48166- atomic_inc(&mp->stats.no_free_exch);
48167+ atomic_inc_unchecked(&mp->stats.no_free_exch);
48168 goto out;
48169 }
48170 memset(ep, 0, sizeof(*ep));
48171@@ -874,7 +874,7 @@ out:
48172 return ep;
48173 err:
48174 spin_unlock_bh(&pool->lock);
48175- atomic_inc(&mp->stats.no_free_exch_xid);
48176+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
48177 mempool_free(ep, mp->ep_pool);
48178 return NULL;
48179 }
48180@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48181 xid = ntohs(fh->fh_ox_id); /* we originated exch */
48182 ep = fc_exch_find(mp, xid);
48183 if (!ep) {
48184- atomic_inc(&mp->stats.xid_not_found);
48185+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48186 reject = FC_RJT_OX_ID;
48187 goto out;
48188 }
48189@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48190 ep = fc_exch_find(mp, xid);
48191 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
48192 if (ep) {
48193- atomic_inc(&mp->stats.xid_busy);
48194+ atomic_inc_unchecked(&mp->stats.xid_busy);
48195 reject = FC_RJT_RX_ID;
48196 goto rel;
48197 }
48198@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48199 }
48200 xid = ep->xid; /* get our XID */
48201 } else if (!ep) {
48202- atomic_inc(&mp->stats.xid_not_found);
48203+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48204 reject = FC_RJT_RX_ID; /* XID not found */
48205 goto out;
48206 }
48207@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48208 } else {
48209 sp = &ep->seq;
48210 if (sp->id != fh->fh_seq_id) {
48211- atomic_inc(&mp->stats.seq_not_found);
48212+ atomic_inc_unchecked(&mp->stats.seq_not_found);
48213 if (f_ctl & FC_FC_END_SEQ) {
48214 /*
48215 * Update sequence_id based on incoming last
48216@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48217
48218 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
48219 if (!ep) {
48220- atomic_inc(&mp->stats.xid_not_found);
48221+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48222 goto out;
48223 }
48224 if (ep->esb_stat & ESB_ST_COMPLETE) {
48225- atomic_inc(&mp->stats.xid_not_found);
48226+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48227 goto rel;
48228 }
48229 if (ep->rxid == FC_XID_UNKNOWN)
48230 ep->rxid = ntohs(fh->fh_rx_id);
48231 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
48232- atomic_inc(&mp->stats.xid_not_found);
48233+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48234 goto rel;
48235 }
48236 if (ep->did != ntoh24(fh->fh_s_id) &&
48237 ep->did != FC_FID_FLOGI) {
48238- atomic_inc(&mp->stats.xid_not_found);
48239+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48240 goto rel;
48241 }
48242 sof = fr_sof(fp);
48243@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48244 sp->ssb_stat |= SSB_ST_RESP;
48245 sp->id = fh->fh_seq_id;
48246 } else if (sp->id != fh->fh_seq_id) {
48247- atomic_inc(&mp->stats.seq_not_found);
48248+ atomic_inc_unchecked(&mp->stats.seq_not_found);
48249 goto rel;
48250 }
48251
48252@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48253 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
48254
48255 if (!sp)
48256- atomic_inc(&mp->stats.xid_not_found);
48257+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48258 else
48259- atomic_inc(&mp->stats.non_bls_resp);
48260+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
48261
48262 fc_frame_free(fp);
48263 }
48264@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
48265
48266 list_for_each_entry(ema, &lport->ema_list, ema_list) {
48267 mp = ema->mp;
48268- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
48269+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
48270 st->fc_no_free_exch_xid +=
48271- atomic_read(&mp->stats.no_free_exch_xid);
48272- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
48273- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
48274- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
48275- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
48276+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
48277+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
48278+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
48279+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
48280+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
48281 }
48282 }
48283 EXPORT_SYMBOL(fc_exch_update_stats);
48284diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
48285index d289583..b745eec 100644
48286--- a/drivers/scsi/libsas/sas_ata.c
48287+++ b/drivers/scsi/libsas/sas_ata.c
48288@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
48289 .postreset = ata_std_postreset,
48290 .error_handler = ata_std_error_handler,
48291 .post_internal_cmd = sas_ata_post_internal,
48292- .qc_defer = ata_std_qc_defer,
48293+ .qc_defer = ata_std_qc_defer,
48294 .qc_prep = ata_noop_qc_prep,
48295 .qc_issue = sas_ata_qc_issue,
48296 .qc_fill_rtf = sas_ata_qc_fill_rtf,
48297diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
48298index 4e1b75c..0bbdfa9 100644
48299--- a/drivers/scsi/lpfc/lpfc.h
48300+++ b/drivers/scsi/lpfc/lpfc.h
48301@@ -432,7 +432,7 @@ struct lpfc_vport {
48302 struct dentry *debug_nodelist;
48303 struct dentry *vport_debugfs_root;
48304 struct lpfc_debugfs_trc *disc_trc;
48305- atomic_t disc_trc_cnt;
48306+ atomic_unchecked_t disc_trc_cnt;
48307 #endif
48308 uint8_t stat_data_enabled;
48309 uint8_t stat_data_blocked;
48310@@ -865,8 +865,8 @@ struct lpfc_hba {
48311 struct timer_list fabric_block_timer;
48312 unsigned long bit_flags;
48313 #define FABRIC_COMANDS_BLOCKED 0
48314- atomic_t num_rsrc_err;
48315- atomic_t num_cmd_success;
48316+ atomic_unchecked_t num_rsrc_err;
48317+ atomic_unchecked_t num_cmd_success;
48318 unsigned long last_rsrc_error_time;
48319 unsigned long last_ramp_down_time;
48320 unsigned long last_ramp_up_time;
48321@@ -902,7 +902,7 @@ struct lpfc_hba {
48322
48323 struct dentry *debug_slow_ring_trc;
48324 struct lpfc_debugfs_trc *slow_ring_trc;
48325- atomic_t slow_ring_trc_cnt;
48326+ atomic_unchecked_t slow_ring_trc_cnt;
48327 /* iDiag debugfs sub-directory */
48328 struct dentry *idiag_root;
48329 struct dentry *idiag_pci_cfg;
48330diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
48331index 60084e6..0e2e700 100644
48332--- a/drivers/scsi/lpfc/lpfc_debugfs.c
48333+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
48334@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
48335
48336 #include <linux/debugfs.h>
48337
48338-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
48339+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
48340 static unsigned long lpfc_debugfs_start_time = 0L;
48341
48342 /* iDiag */
48343@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
48344 lpfc_debugfs_enable = 0;
48345
48346 len = 0;
48347- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
48348+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
48349 (lpfc_debugfs_max_disc_trc - 1);
48350 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
48351 dtp = vport->disc_trc + i;
48352@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
48353 lpfc_debugfs_enable = 0;
48354
48355 len = 0;
48356- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
48357+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
48358 (lpfc_debugfs_max_slow_ring_trc - 1);
48359 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
48360 dtp = phba->slow_ring_trc + i;
48361@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
48362 !vport || !vport->disc_trc)
48363 return;
48364
48365- index = atomic_inc_return(&vport->disc_trc_cnt) &
48366+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
48367 (lpfc_debugfs_max_disc_trc - 1);
48368 dtp = vport->disc_trc + index;
48369 dtp->fmt = fmt;
48370 dtp->data1 = data1;
48371 dtp->data2 = data2;
48372 dtp->data3 = data3;
48373- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
48374+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
48375 dtp->jif = jiffies;
48376 #endif
48377 return;
48378@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
48379 !phba || !phba->slow_ring_trc)
48380 return;
48381
48382- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
48383+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
48384 (lpfc_debugfs_max_slow_ring_trc - 1);
48385 dtp = phba->slow_ring_trc + index;
48386 dtp->fmt = fmt;
48387 dtp->data1 = data1;
48388 dtp->data2 = data2;
48389 dtp->data3 = data3;
48390- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
48391+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
48392 dtp->jif = jiffies;
48393 #endif
48394 return;
48395@@ -4168,7 +4168,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
48396 "slow_ring buffer\n");
48397 goto debug_failed;
48398 }
48399- atomic_set(&phba->slow_ring_trc_cnt, 0);
48400+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
48401 memset(phba->slow_ring_trc, 0,
48402 (sizeof(struct lpfc_debugfs_trc) *
48403 lpfc_debugfs_max_slow_ring_trc));
48404@@ -4214,7 +4214,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
48405 "buffer\n");
48406 goto debug_failed;
48407 }
48408- atomic_set(&vport->disc_trc_cnt, 0);
48409+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
48410
48411 snprintf(name, sizeof(name), "discovery_trace");
48412 vport->debug_disc_trc =
48413diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
48414index 68c94cc..8c27be5 100644
48415--- a/drivers/scsi/lpfc/lpfc_init.c
48416+++ b/drivers/scsi/lpfc/lpfc_init.c
48417@@ -10949,8 +10949,10 @@ lpfc_init(void)
48418 "misc_register returned with status %d", error);
48419
48420 if (lpfc_enable_npiv) {
48421- lpfc_transport_functions.vport_create = lpfc_vport_create;
48422- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
48423+ pax_open_kernel();
48424+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
48425+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
48426+ pax_close_kernel();
48427 }
48428 lpfc_transport_template =
48429 fc_attach_transport(&lpfc_transport_functions);
48430diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
48431index b2ede05..aaf482ca 100644
48432--- a/drivers/scsi/lpfc/lpfc_scsi.c
48433+++ b/drivers/scsi/lpfc/lpfc_scsi.c
48434@@ -353,7 +353,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
48435 uint32_t evt_posted;
48436
48437 spin_lock_irqsave(&phba->hbalock, flags);
48438- atomic_inc(&phba->num_rsrc_err);
48439+ atomic_inc_unchecked(&phba->num_rsrc_err);
48440 phba->last_rsrc_error_time = jiffies;
48441
48442 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
48443@@ -394,7 +394,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
48444 unsigned long flags;
48445 struct lpfc_hba *phba = vport->phba;
48446 uint32_t evt_posted;
48447- atomic_inc(&phba->num_cmd_success);
48448+ atomic_inc_unchecked(&phba->num_cmd_success);
48449
48450 if (vport->cfg_lun_queue_depth <= queue_depth)
48451 return;
48452@@ -438,8 +438,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
48453 unsigned long num_rsrc_err, num_cmd_success;
48454 int i;
48455
48456- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
48457- num_cmd_success = atomic_read(&phba->num_cmd_success);
48458+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
48459+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
48460
48461 /*
48462 * The error and success command counters are global per
48463@@ -467,8 +467,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
48464 }
48465 }
48466 lpfc_destroy_vport_work_array(phba, vports);
48467- atomic_set(&phba->num_rsrc_err, 0);
48468- atomic_set(&phba->num_cmd_success, 0);
48469+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
48470+ atomic_set_unchecked(&phba->num_cmd_success, 0);
48471 }
48472
48473 /**
48474@@ -502,8 +502,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
48475 }
48476 }
48477 lpfc_destroy_vport_work_array(phba, vports);
48478- atomic_set(&phba->num_rsrc_err, 0);
48479- atomic_set(&phba->num_cmd_success, 0);
48480+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
48481+ atomic_set_unchecked(&phba->num_cmd_success, 0);
48482 }
48483
48484 /**
48485diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
48486index 7f0af4f..193ac3e 100644
48487--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
48488+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
48489@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
48490 {
48491 struct scsi_device *sdev = to_scsi_device(dev);
48492 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
48493- static struct _raid_device *raid_device;
48494+ struct _raid_device *raid_device;
48495 unsigned long flags;
48496 Mpi2RaidVolPage0_t vol_pg0;
48497 Mpi2ConfigReply_t mpi_reply;
48498@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
48499 {
48500 struct scsi_device *sdev = to_scsi_device(dev);
48501 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
48502- static struct _raid_device *raid_device;
48503+ struct _raid_device *raid_device;
48504 unsigned long flags;
48505 Mpi2RaidVolPage0_t vol_pg0;
48506 Mpi2ConfigReply_t mpi_reply;
48507@@ -6637,7 +6637,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
48508 struct fw_event_work *fw_event)
48509 {
48510 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
48511- static struct _raid_device *raid_device;
48512+ struct _raid_device *raid_device;
48513 unsigned long flags;
48514 u16 handle;
48515
48516@@ -7108,7 +7108,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
48517 u64 sas_address;
48518 struct _sas_device *sas_device;
48519 struct _sas_node *expander_device;
48520- static struct _raid_device *raid_device;
48521+ struct _raid_device *raid_device;
48522 u8 retry_count;
48523 unsigned long flags;
48524
48525diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
48526index be8ce54..94ed33a 100644
48527--- a/drivers/scsi/pmcraid.c
48528+++ b/drivers/scsi/pmcraid.c
48529@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
48530 res->scsi_dev = scsi_dev;
48531 scsi_dev->hostdata = res;
48532 res->change_detected = 0;
48533- atomic_set(&res->read_failures, 0);
48534- atomic_set(&res->write_failures, 0);
48535+ atomic_set_unchecked(&res->read_failures, 0);
48536+ atomic_set_unchecked(&res->write_failures, 0);
48537 rc = 0;
48538 }
48539 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
48540@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
48541
48542 /* If this was a SCSI read/write command keep count of errors */
48543 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
48544- atomic_inc(&res->read_failures);
48545+ atomic_inc_unchecked(&res->read_failures);
48546 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
48547- atomic_inc(&res->write_failures);
48548+ atomic_inc_unchecked(&res->write_failures);
48549
48550 if (!RES_IS_GSCSI(res->cfg_entry) &&
48551 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
48552@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
48553 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
48554 * hrrq_id assigned here in queuecommand
48555 */
48556- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
48557+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
48558 pinstance->num_hrrq;
48559 cmd->cmd_done = pmcraid_io_done;
48560
48561@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
48562 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
48563 * hrrq_id assigned here in queuecommand
48564 */
48565- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
48566+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
48567 pinstance->num_hrrq;
48568
48569 if (request_size) {
48570@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
48571
48572 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
48573 /* add resources only after host is added into system */
48574- if (!atomic_read(&pinstance->expose_resources))
48575+ if (!atomic_read_unchecked(&pinstance->expose_resources))
48576 return;
48577
48578 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
48579@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
48580 init_waitqueue_head(&pinstance->reset_wait_q);
48581
48582 atomic_set(&pinstance->outstanding_cmds, 0);
48583- atomic_set(&pinstance->last_message_id, 0);
48584- atomic_set(&pinstance->expose_resources, 0);
48585+ atomic_set_unchecked(&pinstance->last_message_id, 0);
48586+ atomic_set_unchecked(&pinstance->expose_resources, 0);
48587
48588 INIT_LIST_HEAD(&pinstance->free_res_q);
48589 INIT_LIST_HEAD(&pinstance->used_res_q);
48590@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
48591 /* Schedule worker thread to handle CCN and take care of adding and
48592 * removing devices to OS
48593 */
48594- atomic_set(&pinstance->expose_resources, 1);
48595+ atomic_set_unchecked(&pinstance->expose_resources, 1);
48596 schedule_work(&pinstance->worker_q);
48597 return rc;
48598
48599diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
48600index e1d150f..6c6df44 100644
48601--- a/drivers/scsi/pmcraid.h
48602+++ b/drivers/scsi/pmcraid.h
48603@@ -748,7 +748,7 @@ struct pmcraid_instance {
48604 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
48605
48606 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
48607- atomic_t last_message_id;
48608+ atomic_unchecked_t last_message_id;
48609
48610 /* configuration table */
48611 struct pmcraid_config_table *cfg_table;
48612@@ -777,7 +777,7 @@ struct pmcraid_instance {
48613 atomic_t outstanding_cmds;
48614
48615 /* should add/delete resources to mid-layer now ?*/
48616- atomic_t expose_resources;
48617+ atomic_unchecked_t expose_resources;
48618
48619
48620
48621@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
48622 struct pmcraid_config_table_entry_ext cfg_entry_ext;
48623 };
48624 struct scsi_device *scsi_dev; /* Link scsi_device structure */
48625- atomic_t read_failures; /* count of failed READ commands */
48626- atomic_t write_failures; /* count of failed WRITE commands */
48627+ atomic_unchecked_t read_failures; /* count of failed READ commands */
48628+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
48629
48630 /* To indicate add/delete/modify during CCN */
48631 u8 change_detected;
48632diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
48633index 5f174b8..98d32b0 100644
48634--- a/drivers/scsi/qla2xxx/qla_attr.c
48635+++ b/drivers/scsi/qla2xxx/qla_attr.c
48636@@ -2040,7 +2040,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
48637 return 0;
48638 }
48639
48640-struct fc_function_template qla2xxx_transport_functions = {
48641+fc_function_template_no_const qla2xxx_transport_functions = {
48642
48643 .show_host_node_name = 1,
48644 .show_host_port_name = 1,
48645@@ -2088,7 +2088,7 @@ struct fc_function_template qla2xxx_transport_functions = {
48646 .bsg_timeout = qla24xx_bsg_timeout,
48647 };
48648
48649-struct fc_function_template qla2xxx_transport_vport_functions = {
48650+fc_function_template_no_const qla2xxx_transport_vport_functions = {
48651
48652 .show_host_node_name = 1,
48653 .show_host_port_name = 1,
48654diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
48655index 4446bf5..9a3574d 100644
48656--- a/drivers/scsi/qla2xxx/qla_gbl.h
48657+++ b/drivers/scsi/qla2xxx/qla_gbl.h
48658@@ -538,8 +538,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
48659 struct device_attribute;
48660 extern struct device_attribute *qla2x00_host_attrs[];
48661 struct fc_function_template;
48662-extern struct fc_function_template qla2xxx_transport_functions;
48663-extern struct fc_function_template qla2xxx_transport_vport_functions;
48664+extern fc_function_template_no_const qla2xxx_transport_functions;
48665+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
48666 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
48667 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
48668 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
48669diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
48670index 52be35e..b933907 100644
48671--- a/drivers/scsi/qla2xxx/qla_os.c
48672+++ b/drivers/scsi/qla2xxx/qla_os.c
48673@@ -1568,8 +1568,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
48674 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
48675 /* Ok, a 64bit DMA mask is applicable. */
48676 ha->flags.enable_64bit_addressing = 1;
48677- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
48678- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
48679+ pax_open_kernel();
48680+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
48681+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
48682+ pax_close_kernel();
48683 return;
48684 }
48685 }
48686diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
48687index 084d1fd..9f939eb 100644
48688--- a/drivers/scsi/qla4xxx/ql4_def.h
48689+++ b/drivers/scsi/qla4xxx/ql4_def.h
48690@@ -296,7 +296,7 @@ struct ddb_entry {
48691 * (4000 only) */
48692 atomic_t relogin_timer; /* Max Time to wait for
48693 * relogin to complete */
48694- atomic_t relogin_retry_count; /* Num of times relogin has been
48695+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
48696 * retried */
48697 uint32_t default_time2wait; /* Default Min time between
48698 * relogins (+aens) */
48699diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
48700index cf174a4..128a420 100644
48701--- a/drivers/scsi/qla4xxx/ql4_os.c
48702+++ b/drivers/scsi/qla4xxx/ql4_os.c
48703@@ -3311,12 +3311,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
48704 */
48705 if (!iscsi_is_session_online(cls_sess)) {
48706 /* Reset retry relogin timer */
48707- atomic_inc(&ddb_entry->relogin_retry_count);
48708+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
48709 DEBUG2(ql4_printk(KERN_INFO, ha,
48710 "%s: index[%d] relogin timed out-retrying"
48711 " relogin (%d), retry (%d)\n", __func__,
48712 ddb_entry->fw_ddb_index,
48713- atomic_read(&ddb_entry->relogin_retry_count),
48714+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
48715 ddb_entry->default_time2wait + 4));
48716 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
48717 atomic_set(&ddb_entry->retry_relogin_timer,
48718@@ -5458,7 +5458,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
48719
48720 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
48721 atomic_set(&ddb_entry->relogin_timer, 0);
48722- atomic_set(&ddb_entry->relogin_retry_count, 0);
48723+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
48724 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
48725 ddb_entry->default_relogin_timeout =
48726 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
48727diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
48728index fe0bcb1..c9255be 100644
48729--- a/drivers/scsi/scsi.c
48730+++ b/drivers/scsi/scsi.c
48731@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
48732 struct Scsi_Host *host = cmd->device->host;
48733 int rtn = 0;
48734
48735- atomic_inc(&cmd->device->iorequest_cnt);
48736+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
48737
48738 /* check if the device is still usable */
48739 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
48740diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
48741index 7bd7f0d..44147bf 100644
48742--- a/drivers/scsi/scsi_lib.c
48743+++ b/drivers/scsi/scsi_lib.c
48744@@ -1474,7 +1474,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
48745 shost = sdev->host;
48746 scsi_init_cmd_errh(cmd);
48747 cmd->result = DID_NO_CONNECT << 16;
48748- atomic_inc(&cmd->device->iorequest_cnt);
48749+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
48750
48751 /*
48752 * SCSI request completion path will do scsi_device_unbusy(),
48753@@ -1500,9 +1500,9 @@ static void scsi_softirq_done(struct request *rq)
48754
48755 INIT_LIST_HEAD(&cmd->eh_entry);
48756
48757- atomic_inc(&cmd->device->iodone_cnt);
48758+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
48759 if (cmd->result)
48760- atomic_inc(&cmd->device->ioerr_cnt);
48761+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
48762
48763 disposition = scsi_decide_disposition(cmd);
48764 if (disposition != SUCCESS &&
48765diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
48766index 8ff62c2..693b6f7 100644
48767--- a/drivers/scsi/scsi_sysfs.c
48768+++ b/drivers/scsi/scsi_sysfs.c
48769@@ -725,7 +725,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
48770 char *buf) \
48771 { \
48772 struct scsi_device *sdev = to_scsi_device(dev); \
48773- unsigned long long count = atomic_read(&sdev->field); \
48774+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
48775 return snprintf(buf, 20, "0x%llx\n", count); \
48776 } \
48777 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
48778diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
48779index 84a1fdf..693b0d6 100644
48780--- a/drivers/scsi/scsi_tgt_lib.c
48781+++ b/drivers/scsi/scsi_tgt_lib.c
48782@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
48783 int err;
48784
48785 dprintk("%lx %u\n", uaddr, len);
48786- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
48787+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
48788 if (err) {
48789 /*
48790 * TODO: need to fixup sg_tablesize, max_segment_size,
48791diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
48792index 4628fd5..a94a1c2 100644
48793--- a/drivers/scsi/scsi_transport_fc.c
48794+++ b/drivers/scsi/scsi_transport_fc.c
48795@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
48796 * Netlink Infrastructure
48797 */
48798
48799-static atomic_t fc_event_seq;
48800+static atomic_unchecked_t fc_event_seq;
48801
48802 /**
48803 * fc_get_event_number - Obtain the next sequential FC event number
48804@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
48805 u32
48806 fc_get_event_number(void)
48807 {
48808- return atomic_add_return(1, &fc_event_seq);
48809+ return atomic_add_return_unchecked(1, &fc_event_seq);
48810 }
48811 EXPORT_SYMBOL(fc_get_event_number);
48812
48813@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
48814 {
48815 int error;
48816
48817- atomic_set(&fc_event_seq, 0);
48818+ atomic_set_unchecked(&fc_event_seq, 0);
48819
48820 error = transport_class_register(&fc_host_class);
48821 if (error)
48822@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
48823 char *cp;
48824
48825 *val = simple_strtoul(buf, &cp, 0);
48826- if ((*cp && (*cp != '\n')) || (*val < 0))
48827+ if (*cp && (*cp != '\n'))
48828 return -EINVAL;
48829 /*
48830 * Check for overflow; dev_loss_tmo is u32
48831diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
48832index 63a6ca4..5d5cadd 100644
48833--- a/drivers/scsi/scsi_transport_iscsi.c
48834+++ b/drivers/scsi/scsi_transport_iscsi.c
48835@@ -79,7 +79,7 @@ struct iscsi_internal {
48836 struct transport_container session_cont;
48837 };
48838
48839-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
48840+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
48841 static struct workqueue_struct *iscsi_eh_timer_workq;
48842
48843 static DEFINE_IDA(iscsi_sess_ida);
48844@@ -1737,7 +1737,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
48845 int err;
48846
48847 ihost = shost->shost_data;
48848- session->sid = atomic_add_return(1, &iscsi_session_nr);
48849+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
48850
48851 if (target_id == ISCSI_MAX_TARGET) {
48852 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
48853@@ -4103,7 +4103,7 @@ static __init int iscsi_transport_init(void)
48854 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
48855 ISCSI_TRANSPORT_VERSION);
48856
48857- atomic_set(&iscsi_session_nr, 0);
48858+ atomic_set_unchecked(&iscsi_session_nr, 0);
48859
48860 err = class_register(&iscsi_transport_class);
48861 if (err)
48862diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
48863index 2700a5a..752ec38 100644
48864--- a/drivers/scsi/scsi_transport_srp.c
48865+++ b/drivers/scsi/scsi_transport_srp.c
48866@@ -36,7 +36,7 @@
48867 #include "scsi_transport_srp_internal.h"
48868
48869 struct srp_host_attrs {
48870- atomic_t next_port_id;
48871+ atomic_unchecked_t next_port_id;
48872 };
48873 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
48874
48875@@ -94,7 +94,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
48876 struct Scsi_Host *shost = dev_to_shost(dev);
48877 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
48878
48879- atomic_set(&srp_host->next_port_id, 0);
48880+ atomic_set_unchecked(&srp_host->next_port_id, 0);
48881 return 0;
48882 }
48883
48884@@ -730,7 +730,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
48885 rport_fast_io_fail_timedout);
48886 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
48887
48888- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
48889+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
48890 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
48891
48892 transport_setup_device(&rport->dev);
48893diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
48894index 69725f7..03aaee1 100644
48895--- a/drivers/scsi/sd.c
48896+++ b/drivers/scsi/sd.c
48897@@ -2964,7 +2964,7 @@ static int sd_probe(struct device *dev)
48898 sdkp->disk = gd;
48899 sdkp->index = index;
48900 atomic_set(&sdkp->openers, 0);
48901- atomic_set(&sdkp->device->ioerr_cnt, 0);
48902+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
48903
48904 if (!sdp->request_queue->rq_timeout) {
48905 if (sdp->type != TYPE_MOD)
48906diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
48907index df5e961..df6b97f 100644
48908--- a/drivers/scsi/sg.c
48909+++ b/drivers/scsi/sg.c
48910@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
48911 sdp->disk->disk_name,
48912 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
48913 NULL,
48914- (char *)arg);
48915+ (char __user *)arg);
48916 case BLKTRACESTART:
48917 return blk_trace_startstop(sdp->device->request_queue, 1);
48918 case BLKTRACESTOP:
48919diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
48920index 349ebba..ff2a249 100644
48921--- a/drivers/spi/spi.c
48922+++ b/drivers/spi/spi.c
48923@@ -1945,7 +1945,7 @@ int spi_bus_unlock(struct spi_master *master)
48924 EXPORT_SYMBOL_GPL(spi_bus_unlock);
48925
48926 /* portable code must never pass more than 32 bytes */
48927-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
48928+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
48929
48930 static u8 *buf;
48931
48932diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
48933index 2c61783..4d49e4e 100644
48934--- a/drivers/staging/android/timed_output.c
48935+++ b/drivers/staging/android/timed_output.c
48936@@ -25,7 +25,7 @@
48937 #include "timed_output.h"
48938
48939 static struct class *timed_output_class;
48940-static atomic_t device_count;
48941+static atomic_unchecked_t device_count;
48942
48943 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
48944 char *buf)
48945@@ -63,7 +63,7 @@ static int create_timed_output_class(void)
48946 timed_output_class = class_create(THIS_MODULE, "timed_output");
48947 if (IS_ERR(timed_output_class))
48948 return PTR_ERR(timed_output_class);
48949- atomic_set(&device_count, 0);
48950+ atomic_set_unchecked(&device_count, 0);
48951 timed_output_class->dev_groups = timed_output_groups;
48952 }
48953
48954@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
48955 if (ret < 0)
48956 return ret;
48957
48958- tdev->index = atomic_inc_return(&device_count);
48959+ tdev->index = atomic_inc_return_unchecked(&device_count);
48960 tdev->dev = device_create(timed_output_class, NULL,
48961 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
48962 if (IS_ERR(tdev->dev))
48963diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
48964index c0f7cd7..5424212 100644
48965--- a/drivers/staging/gdm724x/gdm_tty.c
48966+++ b/drivers/staging/gdm724x/gdm_tty.c
48967@@ -45,7 +45,7 @@
48968 #define gdm_tty_send_control(n, r, v, d, l) (\
48969 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
48970
48971-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
48972+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
48973
48974 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
48975 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
48976diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
48977index b7613c8..c302392 100644
48978--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
48979+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
48980@@ -487,13 +487,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
48981 return 0;
48982 }
48983
48984-sfw_test_client_ops_t brw_test_client;
48985-void brw_init_test_client(void)
48986-{
48987- brw_test_client.tso_init = brw_client_init;
48988- brw_test_client.tso_fini = brw_client_fini;
48989- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
48990- brw_test_client.tso_done_rpc = brw_client_done_rpc;
48991+sfw_test_client_ops_t brw_test_client = {
48992+ .tso_init = brw_client_init,
48993+ .tso_fini = brw_client_fini,
48994+ .tso_prep_rpc = brw_client_prep_rpc,
48995+ .tso_done_rpc = brw_client_done_rpc,
48996 };
48997
48998 srpc_service_t brw_test_service;
48999diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
49000index 483c785..e1a2a7b 100644
49001--- a/drivers/staging/lustre/lnet/selftest/framework.c
49002+++ b/drivers/staging/lustre/lnet/selftest/framework.c
49003@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
49004
49005 extern sfw_test_client_ops_t ping_test_client;
49006 extern srpc_service_t ping_test_service;
49007-extern void ping_init_test_client(void);
49008 extern void ping_init_test_service(void);
49009
49010 extern sfw_test_client_ops_t brw_test_client;
49011 extern srpc_service_t brw_test_service;
49012-extern void brw_init_test_client(void);
49013 extern void brw_init_test_service(void);
49014
49015
49016@@ -1684,12 +1682,10 @@ sfw_startup (void)
49017 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
49018 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
49019
49020- brw_init_test_client();
49021 brw_init_test_service();
49022 rc = sfw_register_test(&brw_test_service, &brw_test_client);
49023 LASSERT (rc == 0);
49024
49025- ping_init_test_client();
49026 ping_init_test_service();
49027 rc = sfw_register_test(&ping_test_service, &ping_test_client);
49028 LASSERT (rc == 0);
49029diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
49030index f0f9194..b589047 100644
49031--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
49032+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
49033@@ -210,14 +210,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
49034 return 0;
49035 }
49036
49037-sfw_test_client_ops_t ping_test_client;
49038-void ping_init_test_client(void)
49039-{
49040- ping_test_client.tso_init = ping_client_init;
49041- ping_test_client.tso_fini = ping_client_fini;
49042- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
49043- ping_test_client.tso_done_rpc = ping_client_done_rpc;
49044-}
49045+sfw_test_client_ops_t ping_test_client = {
49046+ .tso_init = ping_client_init,
49047+ .tso_fini = ping_client_fini,
49048+ .tso_prep_rpc = ping_client_prep_rpc,
49049+ .tso_done_rpc = ping_client_done_rpc,
49050+};
49051
49052 srpc_service_t ping_test_service;
49053 void ping_init_test_service(void)
49054diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
49055index bc2b82f..67fd598 100644
49056--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
49057+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
49058@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
49059 ldlm_completion_callback lcs_completion;
49060 ldlm_blocking_callback lcs_blocking;
49061 ldlm_glimpse_callback lcs_glimpse;
49062-};
49063+} __no_const;
49064
49065 /* ldlm_lockd.c */
49066 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
49067diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
49068index d0aea15..7af68e1 100644
49069--- a/drivers/staging/lustre/lustre/include/obd.h
49070+++ b/drivers/staging/lustre/lustre/include/obd.h
49071@@ -1417,7 +1417,7 @@ struct md_ops {
49072 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
49073 * wrapper function in include/linux/obd_class.h.
49074 */
49075-};
49076+} __no_const;
49077
49078 struct lsm_operations {
49079 void (*lsm_free)(struct lov_stripe_md *);
49080diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49081index 39fcdac..222780f 100644
49082--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49083+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49084@@ -249,7 +249,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
49085 int added = (mode == LCK_NL);
49086 int overlaps = 0;
49087 int splitted = 0;
49088- const struct ldlm_callback_suite null_cbs = { NULL };
49089+ const struct ldlm_callback_suite null_cbs = { };
49090 int rc;
49091
49092 CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
49093diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49094index fc6c977..df1f956 100644
49095--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49096+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49097@@ -219,7 +219,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
49098 int LL_PROC_PROTO(proc_console_max_delay_cs)
49099 {
49100 int rc, max_delay_cs;
49101- ctl_table_t dummy = *table;
49102+ ctl_table_no_const dummy = *table;
49103 cfs_duration_t d;
49104
49105 dummy.data = &max_delay_cs;
49106@@ -250,7 +250,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
49107 int LL_PROC_PROTO(proc_console_min_delay_cs)
49108 {
49109 int rc, min_delay_cs;
49110- ctl_table_t dummy = *table;
49111+ ctl_table_no_const dummy = *table;
49112 cfs_duration_t d;
49113
49114 dummy.data = &min_delay_cs;
49115@@ -281,7 +281,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
49116 int LL_PROC_PROTO(proc_console_backoff)
49117 {
49118 int rc, backoff;
49119- ctl_table_t dummy = *table;
49120+ ctl_table_no_const dummy = *table;
49121
49122 dummy.data = &backoff;
49123 dummy.proc_handler = &proc_dointvec;
49124diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
49125index f3108c7..cd4f9da 100644
49126--- a/drivers/staging/lustre/lustre/libcfs/module.c
49127+++ b/drivers/staging/lustre/lustre/libcfs/module.c
49128@@ -348,11 +348,11 @@ out:
49129
49130
49131 struct cfs_psdev_ops libcfs_psdev_ops = {
49132- libcfs_psdev_open,
49133- libcfs_psdev_release,
49134- NULL,
49135- NULL,
49136- libcfs_ioctl
49137+ .p_open = libcfs_psdev_open,
49138+ .p_close = libcfs_psdev_release,
49139+ .p_read = NULL,
49140+ .p_write = NULL,
49141+ .p_ioctl = libcfs_ioctl
49142 };
49143
49144 extern int insert_proc(void);
49145diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
49146index 3675020..e80d92c 100644
49147--- a/drivers/staging/media/solo6x10/solo6x10-core.c
49148+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
49149@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
49150
49151 static int solo_sysfs_init(struct solo_dev *solo_dev)
49152 {
49153- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
49154+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
49155 struct device *dev = &solo_dev->dev;
49156 const char *driver;
49157 int i;
49158diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
49159index 1db18c7..35e6afc 100644
49160--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
49161+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
49162@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
49163
49164 int solo_g723_init(struct solo_dev *solo_dev)
49165 {
49166- static struct snd_device_ops ops = { NULL };
49167+ static struct snd_device_ops ops = { };
49168 struct snd_card *card;
49169 struct snd_kcontrol_new kctl;
49170 char name[32];
49171diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
49172index 7f2f247..d999137 100644
49173--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
49174+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
49175@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
49176
49177 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
49178 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
49179- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
49180+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
49181 if (p2m_id < 0)
49182 p2m_id = -p2m_id;
49183 }
49184diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
49185index f1bbb8c..a73eaba 100644
49186--- a/drivers/staging/media/solo6x10/solo6x10.h
49187+++ b/drivers/staging/media/solo6x10/solo6x10.h
49188@@ -237,7 +237,7 @@ struct solo_dev {
49189
49190 /* P2M DMA Engine */
49191 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
49192- atomic_t p2m_count;
49193+ atomic_unchecked_t p2m_count;
49194 int p2m_jiffies;
49195 unsigned int p2m_timeouts;
49196
49197diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
49198index 0315f60..2ecae10 100644
49199--- a/drivers/staging/octeon/ethernet-rx.c
49200+++ b/drivers/staging/octeon/ethernet-rx.c
49201@@ -418,11 +418,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
49202 /* Increment RX stats for virtual ports */
49203 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
49204 #ifdef CONFIG_64BIT
49205- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
49206- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
49207+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
49208+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
49209 #else
49210- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
49211- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
49212+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
49213+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
49214 #endif
49215 }
49216 netif_receive_skb(skb);
49217@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
49218 dev->name);
49219 */
49220 #ifdef CONFIG_64BIT
49221- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
49222+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
49223 #else
49224- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
49225+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
49226 #endif
49227 dev_kfree_skb_irq(skb);
49228 }
49229diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
49230index bd6ca71..8f0961e 100644
49231--- a/drivers/staging/octeon/ethernet.c
49232+++ b/drivers/staging/octeon/ethernet.c
49233@@ -254,11 +254,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
49234 * since the RX tasklet also increments it.
49235 */
49236 #ifdef CONFIG_64BIT
49237- atomic64_add(rx_status.dropped_packets,
49238- (atomic64_t *)&priv->stats.rx_dropped);
49239+ atomic64_add_unchecked(rx_status.dropped_packets,
49240+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
49241 #else
49242- atomic_add(rx_status.dropped_packets,
49243- (atomic_t *)&priv->stats.rx_dropped);
49244+ atomic_add_unchecked(rx_status.dropped_packets,
49245+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
49246 #endif
49247 }
49248
49249diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
49250index 439c3c9..2d74293 100644
49251--- a/drivers/staging/rtl8188eu/include/hal_intf.h
49252+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
49253@@ -271,7 +271,7 @@ struct hal_ops {
49254 s32 (*c2h_handler)(struct adapter *padapter,
49255 struct c2h_evt_hdr *c2h_evt);
49256 c2h_id_filter c2h_id_filter_ccx;
49257-};
49258+} __no_const;
49259
49260 enum rt_eeprom_type {
49261 EEPROM_93C46,
49262diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
49263index eb6f0e5..e6a0958 100644
49264--- a/drivers/staging/rtl8188eu/include/rtw_io.h
49265+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
49266@@ -126,7 +126,7 @@ struct _io_ops {
49267 u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
49268 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
49269 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
49270-};
49271+} __no_const;
49272
49273 struct io_req {
49274 struct list_head list;
49275diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
49276index dc23395..cf7e9b1 100644
49277--- a/drivers/staging/rtl8712/rtl871x_io.h
49278+++ b/drivers/staging/rtl8712/rtl871x_io.h
49279@@ -108,7 +108,7 @@ struct _io_ops {
49280 u8 *pmem);
49281 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
49282 u8 *pmem);
49283-};
49284+} __no_const;
49285
49286 struct io_req {
49287 struct list_head list;
49288diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
49289index 1f5088b..0e59820 100644
49290--- a/drivers/staging/sbe-2t3e3/netdev.c
49291+++ b/drivers/staging/sbe-2t3e3/netdev.c
49292@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49293 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
49294
49295 if (rlen)
49296- if (copy_to_user(data, &resp, rlen))
49297+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
49298 return -EFAULT;
49299
49300 return 0;
49301diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
49302index a863a98..d272795 100644
49303--- a/drivers/staging/usbip/vhci.h
49304+++ b/drivers/staging/usbip/vhci.h
49305@@ -83,7 +83,7 @@ struct vhci_hcd {
49306 unsigned resuming:1;
49307 unsigned long re_timeout;
49308
49309- atomic_t seqnum;
49310+ atomic_unchecked_t seqnum;
49311
49312 /*
49313 * NOTE:
49314diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
49315index e810ad5..931336f 100644
49316--- a/drivers/staging/usbip/vhci_hcd.c
49317+++ b/drivers/staging/usbip/vhci_hcd.c
49318@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
49319
49320 spin_lock(&vdev->priv_lock);
49321
49322- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
49323+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
49324 if (priv->seqnum == 0xffff)
49325 dev_info(&urb->dev->dev, "seqnum max\n");
49326
49327@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
49328 return -ENOMEM;
49329 }
49330
49331- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
49332+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
49333 if (unlink->seqnum == 0xffff)
49334 pr_info("seqnum max\n");
49335
49336@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
49337 vdev->rhport = rhport;
49338 }
49339
49340- atomic_set(&vhci->seqnum, 0);
49341+ atomic_set_unchecked(&vhci->seqnum, 0);
49342 spin_lock_init(&vhci->lock);
49343
49344 hcd->power_budget = 0; /* no limit */
49345diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
49346index d07fcb5..358e1e1 100644
49347--- a/drivers/staging/usbip/vhci_rx.c
49348+++ b/drivers/staging/usbip/vhci_rx.c
49349@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
49350 if (!urb) {
49351 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
49352 pr_info("max seqnum %d\n",
49353- atomic_read(&the_controller->seqnum));
49354+ atomic_read_unchecked(&the_controller->seqnum));
49355 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
49356 return;
49357 }
49358diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
49359index ab8b2ba..99184aa 100644
49360--- a/drivers/staging/vt6655/hostap.c
49361+++ b/drivers/staging/vt6655/hostap.c
49362@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
49363 *
49364 */
49365
49366+static net_device_ops_no_const apdev_netdev_ops;
49367+
49368 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
49369 {
49370 PSDevice apdev_priv;
49371 struct net_device *dev = pDevice->dev;
49372 int ret;
49373- const struct net_device_ops apdev_netdev_ops = {
49374- .ndo_start_xmit = pDevice->tx_80211,
49375- };
49376
49377 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
49378
49379@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
49380 *apdev_priv = *pDevice;
49381 eth_hw_addr_inherit(pDevice->apdev, dev);
49382
49383+ /* only half broken now */
49384+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
49385 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
49386
49387 pDevice->apdev->type = ARPHRD_IEEE80211;
49388diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
49389index 67ba48b..24e602f 100644
49390--- a/drivers/staging/vt6656/hostap.c
49391+++ b/drivers/staging/vt6656/hostap.c
49392@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
49393 *
49394 */
49395
49396+static net_device_ops_no_const apdev_netdev_ops;
49397+
49398 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
49399 {
49400 struct vnt_private *apdev_priv;
49401 struct net_device *dev = pDevice->dev;
49402 int ret;
49403- const struct net_device_ops apdev_netdev_ops = {
49404- .ndo_start_xmit = pDevice->tx_80211,
49405- };
49406
49407 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
49408
49409@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
49410 *apdev_priv = *pDevice;
49411 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
49412
49413+ /* only half broken now */
49414+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
49415 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
49416
49417 pDevice->apdev->type = ARPHRD_IEEE80211;
49418diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
49419index 24884ca..26c8220 100644
49420--- a/drivers/target/sbp/sbp_target.c
49421+++ b/drivers/target/sbp/sbp_target.c
49422@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
49423
49424 #define SESSION_MAINTENANCE_INTERVAL HZ
49425
49426-static atomic_t login_id = ATOMIC_INIT(0);
49427+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
49428
49429 static void session_maintenance_work(struct work_struct *);
49430 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
49431@@ -444,7 +444,7 @@ static void sbp_management_request_login(
49432 login->lun = se_lun;
49433 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
49434 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
49435- login->login_id = atomic_inc_return(&login_id);
49436+ login->login_id = atomic_inc_return_unchecked(&login_id);
49437
49438 login->tgt_agt = sbp_target_agent_register(login);
49439 if (IS_ERR(login->tgt_agt)) {
49440diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
49441index d06de84..fd38c9b 100644
49442--- a/drivers/target/target_core_device.c
49443+++ b/drivers/target/target_core_device.c
49444@@ -1435,7 +1435,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
49445 spin_lock_init(&dev->se_tmr_lock);
49446 spin_lock_init(&dev->qf_cmd_lock);
49447 sema_init(&dev->caw_sem, 1);
49448- atomic_set(&dev->dev_ordered_id, 0);
49449+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
49450 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
49451 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
49452 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
49453diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
49454index 91953da..a842b90 100644
49455--- a/drivers/target/target_core_transport.c
49456+++ b/drivers/target/target_core_transport.c
49457@@ -1112,7 +1112,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
49458 * Used to determine when ORDERED commands should go from
49459 * Dormant to Active status.
49460 */
49461- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
49462+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
49463 smp_mb__after_atomic_inc();
49464 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
49465 cmd->se_ordered_id, cmd->sam_task_attr,
49466diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
49467index 33f83fe..d80f8e1 100644
49468--- a/drivers/tty/cyclades.c
49469+++ b/drivers/tty/cyclades.c
49470@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
49471 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
49472 info->port.count);
49473 #endif
49474- info->port.count++;
49475+ atomic_inc(&info->port.count);
49476 #ifdef CY_DEBUG_COUNT
49477 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
49478- current->pid, info->port.count);
49479+ current->pid, atomic_read(&info->port.count));
49480 #endif
49481
49482 /*
49483@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
49484 for (j = 0; j < cy_card[i].nports; j++) {
49485 info = &cy_card[i].ports[j];
49486
49487- if (info->port.count) {
49488+ if (atomic_read(&info->port.count)) {
49489 /* XXX is the ldisc num worth this? */
49490 struct tty_struct *tty;
49491 struct tty_ldisc *ld;
49492diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
49493index 9eba119..5070303 100644
49494--- a/drivers/tty/hvc/hvc_console.c
49495+++ b/drivers/tty/hvc/hvc_console.c
49496@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
49497
49498 spin_lock_irqsave(&hp->port.lock, flags);
49499 /* Check and then increment for fast path open. */
49500- if (hp->port.count++ > 0) {
49501+ if (atomic_inc_return(&hp->port.count) > 1) {
49502 spin_unlock_irqrestore(&hp->port.lock, flags);
49503 hvc_kick();
49504 return 0;
49505@@ -393,7 +393,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
49506
49507 spin_lock_irqsave(&hp->port.lock, flags);
49508
49509- if (--hp->port.count == 0) {
49510+ if (atomic_dec_return(&hp->port.count) == 0) {
49511 spin_unlock_irqrestore(&hp->port.lock, flags);
49512 /* We are done with the tty pointer now. */
49513 tty_port_tty_set(&hp->port, NULL);
49514@@ -415,9 +415,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
49515 */
49516 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
49517 } else {
49518- if (hp->port.count < 0)
49519+ if (atomic_read(&hp->port.count) < 0)
49520 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
49521- hp->vtermno, hp->port.count);
49522+ hp->vtermno, atomic_read(&hp->port.count));
49523 spin_unlock_irqrestore(&hp->port.lock, flags);
49524 }
49525 }
49526@@ -447,12 +447,12 @@ static void hvc_hangup(struct tty_struct *tty)
49527 * open->hangup case this can be called after the final close so prevent
49528 * that from happening for now.
49529 */
49530- if (hp->port.count <= 0) {
49531+ if (atomic_read(&hp->port.count) <= 0) {
49532 spin_unlock_irqrestore(&hp->port.lock, flags);
49533 return;
49534 }
49535
49536- hp->port.count = 0;
49537+ atomic_set(&hp->port.count, 0);
49538 spin_unlock_irqrestore(&hp->port.lock, flags);
49539 tty_port_tty_set(&hp->port, NULL);
49540
49541@@ -500,7 +500,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
49542 return -EPIPE;
49543
49544 /* FIXME what's this (unprotected) check for? */
49545- if (hp->port.count <= 0)
49546+ if (atomic_read(&hp->port.count) <= 0)
49547 return -EIO;
49548
49549 spin_lock_irqsave(&hp->lock, flags);
49550diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
49551index 81e939e..95ead10 100644
49552--- a/drivers/tty/hvc/hvcs.c
49553+++ b/drivers/tty/hvc/hvcs.c
49554@@ -83,6 +83,7 @@
49555 #include <asm/hvcserver.h>
49556 #include <asm/uaccess.h>
49557 #include <asm/vio.h>
49558+#include <asm/local.h>
49559
49560 /*
49561 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
49562@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
49563
49564 spin_lock_irqsave(&hvcsd->lock, flags);
49565
49566- if (hvcsd->port.count > 0) {
49567+ if (atomic_read(&hvcsd->port.count) > 0) {
49568 spin_unlock_irqrestore(&hvcsd->lock, flags);
49569 printk(KERN_INFO "HVCS: vterm state unchanged. "
49570 "The hvcs device node is still in use.\n");
49571@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
49572 }
49573 }
49574
49575- hvcsd->port.count = 0;
49576+ atomic_set(&hvcsd->port.count, 0);
49577 hvcsd->port.tty = tty;
49578 tty->driver_data = hvcsd;
49579
49580@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
49581 unsigned long flags;
49582
49583 spin_lock_irqsave(&hvcsd->lock, flags);
49584- hvcsd->port.count++;
49585+ atomic_inc(&hvcsd->port.count);
49586 hvcsd->todo_mask |= HVCS_SCHED_READ;
49587 spin_unlock_irqrestore(&hvcsd->lock, flags);
49588
49589@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
49590 hvcsd = tty->driver_data;
49591
49592 spin_lock_irqsave(&hvcsd->lock, flags);
49593- if (--hvcsd->port.count == 0) {
49594+ if (atomic_dec_and_test(&hvcsd->port.count)) {
49595
49596 vio_disable_interrupts(hvcsd->vdev);
49597
49598@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
49599
49600 free_irq(irq, hvcsd);
49601 return;
49602- } else if (hvcsd->port.count < 0) {
49603+ } else if (atomic_read(&hvcsd->port.count) < 0) {
49604 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
49605 " is missmanaged.\n",
49606- hvcsd->vdev->unit_address, hvcsd->port.count);
49607+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
49608 }
49609
49610 spin_unlock_irqrestore(&hvcsd->lock, flags);
49611@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
49612
49613 spin_lock_irqsave(&hvcsd->lock, flags);
49614 /* Preserve this so that we know how many kref refs to put */
49615- temp_open_count = hvcsd->port.count;
49616+ temp_open_count = atomic_read(&hvcsd->port.count);
49617
49618 /*
49619 * Don't kref put inside the spinlock because the destruction
49620@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
49621 tty->driver_data = NULL;
49622 hvcsd->port.tty = NULL;
49623
49624- hvcsd->port.count = 0;
49625+ atomic_set(&hvcsd->port.count, 0);
49626
49627 /* This will drop any buffered data on the floor which is OK in a hangup
49628 * scenario. */
49629@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
49630 * the middle of a write operation? This is a crummy place to do this
49631 * but we want to keep it all in the spinlock.
49632 */
49633- if (hvcsd->port.count <= 0) {
49634+ if (atomic_read(&hvcsd->port.count) <= 0) {
49635 spin_unlock_irqrestore(&hvcsd->lock, flags);
49636 return -ENODEV;
49637 }
49638@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
49639 {
49640 struct hvcs_struct *hvcsd = tty->driver_data;
49641
49642- if (!hvcsd || hvcsd->port.count <= 0)
49643+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
49644 return 0;
49645
49646 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
49647diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
49648index 4190199..48f2920 100644
49649--- a/drivers/tty/hvc/hvsi.c
49650+++ b/drivers/tty/hvc/hvsi.c
49651@@ -85,7 +85,7 @@ struct hvsi_struct {
49652 int n_outbuf;
49653 uint32_t vtermno;
49654 uint32_t virq;
49655- atomic_t seqno; /* HVSI packet sequence number */
49656+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
49657 uint16_t mctrl;
49658 uint8_t state; /* HVSI protocol state */
49659 uint8_t flags;
49660@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
49661
49662 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
49663 packet.hdr.len = sizeof(struct hvsi_query_response);
49664- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49665+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49666 packet.verb = VSV_SEND_VERSION_NUMBER;
49667 packet.u.version = HVSI_VERSION;
49668 packet.query_seqno = query_seqno+1;
49669@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
49670
49671 packet.hdr.type = VS_QUERY_PACKET_HEADER;
49672 packet.hdr.len = sizeof(struct hvsi_query);
49673- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49674+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49675 packet.verb = verb;
49676
49677 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
49678@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
49679 int wrote;
49680
49681 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
49682- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49683+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49684 packet.hdr.len = sizeof(struct hvsi_control);
49685 packet.verb = VSV_SET_MODEM_CTL;
49686 packet.mask = HVSI_TSDTR;
49687@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
49688 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
49689
49690 packet.hdr.type = VS_DATA_PACKET_HEADER;
49691- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49692+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49693 packet.hdr.len = count + sizeof(struct hvsi_header);
49694 memcpy(&packet.data, buf, count);
49695
49696@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
49697 struct hvsi_control packet __ALIGNED__;
49698
49699 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
49700- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
49701+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
49702 packet.hdr.len = 6;
49703 packet.verb = VSV_CLOSE_PROTOCOL;
49704
49705diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
49706index 347050e..14f8fbf 100644
49707--- a/drivers/tty/hvc/hvsi_lib.c
49708+++ b/drivers/tty/hvc/hvsi_lib.c
49709@@ -9,7 +9,7 @@
49710
49711 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
49712 {
49713- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
49714+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
49715
49716 /* Assumes that always succeeds, works in practice */
49717 return pv->put_chars(pv->termno, (char *)packet, packet->len);
49718@@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
49719
49720 /* Reset state */
49721 pv->established = 0;
49722- atomic_set(&pv->seqno, 0);
49723+ atomic_set_unchecked(&pv->seqno, 0);
49724
49725 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
49726
49727diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
49728index 8fd72ff..34a0bed 100644
49729--- a/drivers/tty/ipwireless/tty.c
49730+++ b/drivers/tty/ipwireless/tty.c
49731@@ -29,6 +29,7 @@
49732 #include <linux/tty_driver.h>
49733 #include <linux/tty_flip.h>
49734 #include <linux/uaccess.h>
49735+#include <asm/local.h>
49736
49737 #include "tty.h"
49738 #include "network.h"
49739@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
49740 mutex_unlock(&tty->ipw_tty_mutex);
49741 return -ENODEV;
49742 }
49743- if (tty->port.count == 0)
49744+ if (atomic_read(&tty->port.count) == 0)
49745 tty->tx_bytes_queued = 0;
49746
49747- tty->port.count++;
49748+ atomic_inc(&tty->port.count);
49749
49750 tty->port.tty = linux_tty;
49751 linux_tty->driver_data = tty;
49752@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
49753
49754 static void do_ipw_close(struct ipw_tty *tty)
49755 {
49756- tty->port.count--;
49757-
49758- if (tty->port.count == 0) {
49759+ if (atomic_dec_return(&tty->port.count) == 0) {
49760 struct tty_struct *linux_tty = tty->port.tty;
49761
49762 if (linux_tty != NULL) {
49763@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
49764 return;
49765
49766 mutex_lock(&tty->ipw_tty_mutex);
49767- if (tty->port.count == 0) {
49768+ if (atomic_read(&tty->port.count) == 0) {
49769 mutex_unlock(&tty->ipw_tty_mutex);
49770 return;
49771 }
49772@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
49773
49774 mutex_lock(&tty->ipw_tty_mutex);
49775
49776- if (!tty->port.count) {
49777+ if (!atomic_read(&tty->port.count)) {
49778 mutex_unlock(&tty->ipw_tty_mutex);
49779 return;
49780 }
49781@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
49782 return -ENODEV;
49783
49784 mutex_lock(&tty->ipw_tty_mutex);
49785- if (!tty->port.count) {
49786+ if (!atomic_read(&tty->port.count)) {
49787 mutex_unlock(&tty->ipw_tty_mutex);
49788 return -EINVAL;
49789 }
49790@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
49791 if (!tty)
49792 return -ENODEV;
49793
49794- if (!tty->port.count)
49795+ if (!atomic_read(&tty->port.count))
49796 return -EINVAL;
49797
49798 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
49799@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
49800 if (!tty)
49801 return 0;
49802
49803- if (!tty->port.count)
49804+ if (!atomic_read(&tty->port.count))
49805 return 0;
49806
49807 return tty->tx_bytes_queued;
49808@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
49809 if (!tty)
49810 return -ENODEV;
49811
49812- if (!tty->port.count)
49813+ if (!atomic_read(&tty->port.count))
49814 return -EINVAL;
49815
49816 return get_control_lines(tty);
49817@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
49818 if (!tty)
49819 return -ENODEV;
49820
49821- if (!tty->port.count)
49822+ if (!atomic_read(&tty->port.count))
49823 return -EINVAL;
49824
49825 return set_control_lines(tty, set, clear);
49826@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
49827 if (!tty)
49828 return -ENODEV;
49829
49830- if (!tty->port.count)
49831+ if (!atomic_read(&tty->port.count))
49832 return -EINVAL;
49833
49834 /* FIXME: Exactly how is the tty object locked here .. */
49835@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
49836 * are gone */
49837 mutex_lock(&ttyj->ipw_tty_mutex);
49838 }
49839- while (ttyj->port.count)
49840+ while (atomic_read(&ttyj->port.count))
49841 do_ipw_close(ttyj);
49842 ipwireless_disassociate_network_ttys(network,
49843 ttyj->channel_idx);
49844diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
49845index 1deaca4..c8582d4 100644
49846--- a/drivers/tty/moxa.c
49847+++ b/drivers/tty/moxa.c
49848@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
49849 }
49850
49851 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
49852- ch->port.count++;
49853+ atomic_inc(&ch->port.count);
49854 tty->driver_data = ch;
49855 tty_port_tty_set(&ch->port, tty);
49856 mutex_lock(&ch->port.mutex);
49857diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
49858index c0f76da..d974c32 100644
49859--- a/drivers/tty/n_gsm.c
49860+++ b/drivers/tty/n_gsm.c
49861@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
49862 spin_lock_init(&dlci->lock);
49863 mutex_init(&dlci->mutex);
49864 dlci->fifo = &dlci->_fifo;
49865- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
49866+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
49867 kfree(dlci);
49868 return NULL;
49869 }
49870@@ -2935,7 +2935,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
49871 struct gsm_dlci *dlci = tty->driver_data;
49872 struct tty_port *port = &dlci->port;
49873
49874- port->count++;
49875+ atomic_inc(&port->count);
49876 dlci_get(dlci);
49877 dlci_get(dlci->gsm->dlci[0]);
49878 mux_get(dlci->gsm);
49879diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
49880index 34aacaa..dad073b 100644
49881--- a/drivers/tty/n_tty.c
49882+++ b/drivers/tty/n_tty.c
49883@@ -114,7 +114,7 @@ struct n_tty_data {
49884 int minimum_to_wake;
49885
49886 /* consumer-published */
49887- size_t read_tail;
49888+ size_t read_tail __intentional_overflow(-1);
49889 size_t line_start;
49890
49891 /* protected by output lock */
49892@@ -2502,6 +2502,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
49893 {
49894 *ops = tty_ldisc_N_TTY;
49895 ops->owner = NULL;
49896- ops->refcount = ops->flags = 0;
49897+ atomic_set(&ops->refcount, 0);
49898+ ops->flags = 0;
49899 }
49900 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
49901diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
49902index 25c9bc7..24077b7 100644
49903--- a/drivers/tty/pty.c
49904+++ b/drivers/tty/pty.c
49905@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
49906 panic("Couldn't register Unix98 pts driver");
49907
49908 /* Now create the /dev/ptmx special device */
49909+ pax_open_kernel();
49910 tty_default_fops(&ptmx_fops);
49911- ptmx_fops.open = ptmx_open;
49912+ *(void **)&ptmx_fops.open = ptmx_open;
49913+ pax_close_kernel();
49914
49915 cdev_init(&ptmx_cdev, &ptmx_fops);
49916 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
49917diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
49918index 354564e..fe50d9a 100644
49919--- a/drivers/tty/rocket.c
49920+++ b/drivers/tty/rocket.c
49921@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
49922 tty->driver_data = info;
49923 tty_port_tty_set(port, tty);
49924
49925- if (port->count++ == 0) {
49926+ if (atomic_inc_return(&port->count) == 1) {
49927 atomic_inc(&rp_num_ports_open);
49928
49929 #ifdef ROCKET_DEBUG_OPEN
49930@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
49931 #endif
49932 }
49933 #ifdef ROCKET_DEBUG_OPEN
49934- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
49935+	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
49936 #endif
49937
49938 /*
49939@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
49940 spin_unlock_irqrestore(&info->port.lock, flags);
49941 return;
49942 }
49943- if (info->port.count)
49944+ if (atomic_read(&info->port.count))
49945 atomic_dec(&rp_num_ports_open);
49946 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
49947 spin_unlock_irqrestore(&info->port.lock, flags);
49948diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
49949index 1274499..f541382 100644
49950--- a/drivers/tty/serial/ioc4_serial.c
49951+++ b/drivers/tty/serial/ioc4_serial.c
49952@@ -437,7 +437,7 @@ struct ioc4_soft {
49953 } is_intr_info[MAX_IOC4_INTR_ENTS];
49954
49955 /* Number of entries active in the above array */
49956- atomic_t is_num_intrs;
49957+ atomic_unchecked_t is_num_intrs;
49958 } is_intr_type[IOC4_NUM_INTR_TYPES];
49959
49960 /* is_ir_lock must be held while
49961@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
49962 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
49963 || (type == IOC4_OTHER_INTR_TYPE)));
49964
49965- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
49966+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
49967 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
49968
49969 /* Save off the lower level interrupt handler */
49970@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
49971
49972 soft = arg;
49973 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
49974- num_intrs = (int)atomic_read(
49975+ num_intrs = (int)atomic_read_unchecked(
49976 &soft->is_intr_type[intr_type].is_num_intrs);
49977
49978 this_mir = this_ir = pending_intrs(soft, intr_type);
49979diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
49980index a260cde..6b2b5ce 100644
49981--- a/drivers/tty/serial/kgdboc.c
49982+++ b/drivers/tty/serial/kgdboc.c
49983@@ -24,8 +24,9 @@
49984 #define MAX_CONFIG_LEN 40
49985
49986 static struct kgdb_io kgdboc_io_ops;
49987+static struct kgdb_io kgdboc_io_ops_console;
49988
49989-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
49990+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
49991 static int configured = -1;
49992
49993 static char config[MAX_CONFIG_LEN];
49994@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
49995 kgdboc_unregister_kbd();
49996 if (configured == 1)
49997 kgdb_unregister_io_module(&kgdboc_io_ops);
49998+ else if (configured == 2)
49999+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
50000 }
50001
50002 static int configure_kgdboc(void)
50003@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
50004 int err;
50005 char *cptr = config;
50006 struct console *cons;
50007+ int is_console = 0;
50008
50009 err = kgdboc_option_setup(config);
50010 if (err || !strlen(config) || isspace(config[0]))
50011 goto noconfig;
50012
50013 err = -ENODEV;
50014- kgdboc_io_ops.is_console = 0;
50015 kgdb_tty_driver = NULL;
50016
50017 kgdboc_use_kms = 0;
50018@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
50019 int idx;
50020 if (cons->device && cons->device(cons, &idx) == p &&
50021 idx == tty_line) {
50022- kgdboc_io_ops.is_console = 1;
50023+ is_console = 1;
50024 break;
50025 }
50026 cons = cons->next;
50027@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
50028 kgdb_tty_line = tty_line;
50029
50030 do_register:
50031- err = kgdb_register_io_module(&kgdboc_io_ops);
50032+ if (is_console) {
50033+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
50034+ configured = 2;
50035+ } else {
50036+ err = kgdb_register_io_module(&kgdboc_io_ops);
50037+ configured = 1;
50038+ }
50039 if (err)
50040 goto noconfig;
50041
50042@@ -205,8 +214,6 @@ do_register:
50043 if (err)
50044 goto nmi_con_failed;
50045
50046- configured = 1;
50047-
50048 return 0;
50049
50050 nmi_con_failed:
50051@@ -223,7 +230,7 @@ noconfig:
50052 static int __init init_kgdboc(void)
50053 {
50054 /* Already configured? */
50055- if (configured == 1)
50056+ if (configured >= 1)
50057 return 0;
50058
50059 return configure_kgdboc();
50060@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
50061 if (config[len - 1] == '\n')
50062 config[len - 1] = '\0';
50063
50064- if (configured == 1)
50065+ if (configured >= 1)
50066 cleanup_kgdboc();
50067
50068 /* Go and configure with the new params. */
50069@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
50070 .post_exception = kgdboc_post_exp_handler,
50071 };
50072
50073+static struct kgdb_io kgdboc_io_ops_console = {
50074+ .name = "kgdboc",
50075+ .read_char = kgdboc_get_char,
50076+ .write_char = kgdboc_put_char,
50077+ .pre_exception = kgdboc_pre_exp_handler,
50078+ .post_exception = kgdboc_post_exp_handler,
50079+ .is_console = 1
50080+};
50081+
50082 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
50083 /* This is only available if kgdboc is a built in for early debugging */
50084 static int __init kgdboc_early_init(char *opt)
50085diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
50086index b5d779c..3622cfe 100644
50087--- a/drivers/tty/serial/msm_serial.c
50088+++ b/drivers/tty/serial/msm_serial.c
50089@@ -897,7 +897,7 @@ static struct uart_driver msm_uart_driver = {
50090 .cons = MSM_CONSOLE,
50091 };
50092
50093-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
50094+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
50095
50096 static const struct of_device_id msm_uartdm_table[] = {
50097 { .compatible = "qcom,msm-uartdm" },
50098@@ -912,7 +912,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
50099 int irq;
50100
50101 if (pdev->id == -1)
50102- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
50103+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
50104
50105 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
50106 return -ENXIO;
50107diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
50108index c1af04d..0815c8a 100644
50109--- a/drivers/tty/serial/samsung.c
50110+++ b/drivers/tty/serial/samsung.c
50111@@ -463,11 +463,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
50112 }
50113 }
50114
50115+static int s3c64xx_serial_startup(struct uart_port *port);
50116 static int s3c24xx_serial_startup(struct uart_port *port)
50117 {
50118 struct s3c24xx_uart_port *ourport = to_ourport(port);
50119 int ret;
50120
50121+ /* Startup sequence is different for s3c64xx and higher SoC's */
50122+ if (s3c24xx_serial_has_interrupt_mask(port))
50123+ return s3c64xx_serial_startup(port);
50124+
50125 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
50126 port->mapbase, port->membase);
50127
50128@@ -1141,10 +1146,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
50129 /* setup info for port */
50130 port->dev = &platdev->dev;
50131
50132- /* Startup sequence is different for s3c64xx and higher SoC's */
50133- if (s3c24xx_serial_has_interrupt_mask(port))
50134- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
50135-
50136 port->uartclk = 1;
50137
50138 if (cfg->uart_flags & UPF_CONS_FLOW) {
50139diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
50140index 0f02351..07c59c5 100644
50141--- a/drivers/tty/serial/serial_core.c
50142+++ b/drivers/tty/serial/serial_core.c
50143@@ -1448,7 +1448,7 @@ static void uart_hangup(struct tty_struct *tty)
50144 uart_flush_buffer(tty);
50145 uart_shutdown(tty, state);
50146 spin_lock_irqsave(&port->lock, flags);
50147- port->count = 0;
50148+ atomic_set(&port->count, 0);
50149 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
50150 spin_unlock_irqrestore(&port->lock, flags);
50151 tty_port_tty_set(port, NULL);
50152@@ -1544,7 +1544,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50153 goto end;
50154 }
50155
50156- port->count++;
50157+ atomic_inc(&port->count);
50158 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
50159 retval = -ENXIO;
50160 goto err_dec_count;
50161@@ -1572,7 +1572,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50162 /*
50163 * Make sure the device is in D0 state.
50164 */
50165- if (port->count == 1)
50166+ if (atomic_read(&port->count) == 1)
50167 uart_change_pm(state, UART_PM_STATE_ON);
50168
50169 /*
50170@@ -1590,7 +1590,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50171 end:
50172 return retval;
50173 err_dec_count:
50174- port->count--;
50175+	atomic_dec(&port->count);
50176 mutex_unlock(&port->mutex);
50177 goto end;
50178 }
50179diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
50180index e1ce141..6d4ed80 100644
50181--- a/drivers/tty/synclink.c
50182+++ b/drivers/tty/synclink.c
50183@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
50184
50185 if (debug_level >= DEBUG_LEVEL_INFO)
50186 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
50187- __FILE__,__LINE__, info->device_name, info->port.count);
50188+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
50189
50190 if (tty_port_close_start(&info->port, tty, filp) == 0)
50191 goto cleanup;
50192@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
50193 cleanup:
50194 if (debug_level >= DEBUG_LEVEL_INFO)
50195 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
50196- tty->driver->name, info->port.count);
50197+ tty->driver->name, atomic_read(&info->port.count));
50198
50199 } /* end of mgsl_close() */
50200
50201@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
50202
50203 mgsl_flush_buffer(tty);
50204 shutdown(info);
50205-
50206- info->port.count = 0;
50207+
50208+ atomic_set(&info->port.count, 0);
50209 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
50210 info->port.tty = NULL;
50211
50212@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50213
50214 if (debug_level >= DEBUG_LEVEL_INFO)
50215 printk("%s(%d):block_til_ready before block on %s count=%d\n",
50216- __FILE__,__LINE__, tty->driver->name, port->count );
50217+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50218
50219 spin_lock_irqsave(&info->irq_spinlock, flags);
50220 if (!tty_hung_up_p(filp)) {
50221 extra_count = true;
50222- port->count--;
50223+ atomic_dec(&port->count);
50224 }
50225 spin_unlock_irqrestore(&info->irq_spinlock, flags);
50226 port->blocked_open++;
50227@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50228
50229 if (debug_level >= DEBUG_LEVEL_INFO)
50230 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
50231- __FILE__,__LINE__, tty->driver->name, port->count );
50232+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50233
50234 tty_unlock(tty);
50235 schedule();
50236@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50237
50238 /* FIXME: Racy on hangup during close wait */
50239 if (extra_count)
50240- port->count++;
50241+ atomic_inc(&port->count);
50242 port->blocked_open--;
50243
50244 if (debug_level >= DEBUG_LEVEL_INFO)
50245 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
50246- __FILE__,__LINE__, tty->driver->name, port->count );
50247+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50248
50249 if (!retval)
50250 port->flags |= ASYNC_NORMAL_ACTIVE;
50251@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
50252
50253 if (debug_level >= DEBUG_LEVEL_INFO)
50254 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
50255- __FILE__,__LINE__,tty->driver->name, info->port.count);
50256+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
50257
50258 /* If port is closing, signal caller to try again */
50259 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
50260@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
50261 spin_unlock_irqrestore(&info->netlock, flags);
50262 goto cleanup;
50263 }
50264- info->port.count++;
50265+ atomic_inc(&info->port.count);
50266 spin_unlock_irqrestore(&info->netlock, flags);
50267
50268- if (info->port.count == 1) {
50269+ if (atomic_read(&info->port.count) == 1) {
50270 /* 1st open on this device, init hardware */
50271 retval = startup(info);
50272 if (retval < 0)
50273@@ -3446,8 +3446,8 @@ cleanup:
50274 if (retval) {
50275 if (tty->count == 1)
50276 info->port.tty = NULL; /* tty layer will release tty struct */
50277- if(info->port.count)
50278- info->port.count--;
50279+ if (atomic_read(&info->port.count))
50280+ atomic_dec(&info->port.count);
50281 }
50282
50283 return retval;
50284@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
50285 unsigned short new_crctype;
50286
50287 /* return error if TTY interface open */
50288- if (info->port.count)
50289+ if (atomic_read(&info->port.count))
50290 return -EBUSY;
50291
50292 switch (encoding)
50293@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
50294
50295 /* arbitrate between network and tty opens */
50296 spin_lock_irqsave(&info->netlock, flags);
50297- if (info->port.count != 0 || info->netcount != 0) {
50298+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
50299 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
50300 spin_unlock_irqrestore(&info->netlock, flags);
50301 return -EBUSY;
50302@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
50303 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
50304
50305 /* return error if TTY interface open */
50306- if (info->port.count)
50307+ if (atomic_read(&info->port.count))
50308 return -EBUSY;
50309
50310 if (cmd != SIOCWANDEV)
50311diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
50312index 1abf946..1ee34fc 100644
50313--- a/drivers/tty/synclink_gt.c
50314+++ b/drivers/tty/synclink_gt.c
50315@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
50316 tty->driver_data = info;
50317 info->port.tty = tty;
50318
50319- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
50320+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
50321
50322 /* If port is closing, signal caller to try again */
50323 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
50324@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
50325 mutex_unlock(&info->port.mutex);
50326 goto cleanup;
50327 }
50328- info->port.count++;
50329+ atomic_inc(&info->port.count);
50330 spin_unlock_irqrestore(&info->netlock, flags);
50331
50332- if (info->port.count == 1) {
50333+ if (atomic_read(&info->port.count) == 1) {
50334 /* 1st open on this device, init hardware */
50335 retval = startup(info);
50336 if (retval < 0) {
50337@@ -715,8 +715,8 @@ cleanup:
50338 if (retval) {
50339 if (tty->count == 1)
50340 info->port.tty = NULL; /* tty layer will release tty struct */
50341- if(info->port.count)
50342- info->port.count--;
50343+ if(atomic_read(&info->port.count))
50344+ atomic_dec(&info->port.count);
50345 }
50346
50347 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
50348@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
50349
50350 if (sanity_check(info, tty->name, "close"))
50351 return;
50352- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
50353+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
50354
50355 if (tty_port_close_start(&info->port, tty, filp) == 0)
50356 goto cleanup;
50357@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
50358 tty_port_close_end(&info->port, tty);
50359 info->port.tty = NULL;
50360 cleanup:
50361- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
50362+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
50363 }
50364
50365 static void hangup(struct tty_struct *tty)
50366@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
50367 shutdown(info);
50368
50369 spin_lock_irqsave(&info->port.lock, flags);
50370- info->port.count = 0;
50371+ atomic_set(&info->port.count, 0);
50372 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
50373 info->port.tty = NULL;
50374 spin_unlock_irqrestore(&info->port.lock, flags);
50375@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
50376 unsigned short new_crctype;
50377
50378 /* return error if TTY interface open */
50379- if (info->port.count)
50380+ if (atomic_read(&info->port.count))
50381 return -EBUSY;
50382
50383 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
50384@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
50385
50386 /* arbitrate between network and tty opens */
50387 spin_lock_irqsave(&info->netlock, flags);
50388- if (info->port.count != 0 || info->netcount != 0) {
50389+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
50390 DBGINFO(("%s hdlc_open busy\n", dev->name));
50391 spin_unlock_irqrestore(&info->netlock, flags);
50392 return -EBUSY;
50393@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
50394 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
50395
50396 /* return error if TTY interface open */
50397- if (info->port.count)
50398+ if (atomic_read(&info->port.count))
50399 return -EBUSY;
50400
50401 if (cmd != SIOCWANDEV)
50402@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
50403 if (port == NULL)
50404 continue;
50405 spin_lock(&port->lock);
50406- if ((port->port.count || port->netcount) &&
50407+ if ((atomic_read(&port->port.count) || port->netcount) &&
50408 port->pending_bh && !port->bh_running &&
50409 !port->bh_requested) {
50410 DBGISR(("%s bh queued\n", port->device_name));
50411@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50412 spin_lock_irqsave(&info->lock, flags);
50413 if (!tty_hung_up_p(filp)) {
50414 extra_count = true;
50415- port->count--;
50416+ atomic_dec(&port->count);
50417 }
50418 spin_unlock_irqrestore(&info->lock, flags);
50419 port->blocked_open++;
50420@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50421 remove_wait_queue(&port->open_wait, &wait);
50422
50423 if (extra_count)
50424- port->count++;
50425+ atomic_inc(&port->count);
50426 port->blocked_open--;
50427
50428 if (!retval)
50429diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
50430index dc6e969..5dc8786 100644
50431--- a/drivers/tty/synclinkmp.c
50432+++ b/drivers/tty/synclinkmp.c
50433@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
50434
50435 if (debug_level >= DEBUG_LEVEL_INFO)
50436 printk("%s(%d):%s open(), old ref count = %d\n",
50437- __FILE__,__LINE__,tty->driver->name, info->port.count);
50438+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
50439
50440 /* If port is closing, signal caller to try again */
50441 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
50442@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
50443 spin_unlock_irqrestore(&info->netlock, flags);
50444 goto cleanup;
50445 }
50446- info->port.count++;
50447+ atomic_inc(&info->port.count);
50448 spin_unlock_irqrestore(&info->netlock, flags);
50449
50450- if (info->port.count == 1) {
50451+ if (atomic_read(&info->port.count) == 1) {
50452 /* 1st open on this device, init hardware */
50453 retval = startup(info);
50454 if (retval < 0)
50455@@ -796,8 +796,8 @@ cleanup:
50456 if (retval) {
50457 if (tty->count == 1)
50458 info->port.tty = NULL; /* tty layer will release tty struct */
50459- if(info->port.count)
50460- info->port.count--;
50461+ if(atomic_read(&info->port.count))
50462+ atomic_dec(&info->port.count);
50463 }
50464
50465 return retval;
50466@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
50467
50468 if (debug_level >= DEBUG_LEVEL_INFO)
50469 printk("%s(%d):%s close() entry, count=%d\n",
50470- __FILE__,__LINE__, info->device_name, info->port.count);
50471+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
50472
50473 if (tty_port_close_start(&info->port, tty, filp) == 0)
50474 goto cleanup;
50475@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
50476 cleanup:
50477 if (debug_level >= DEBUG_LEVEL_INFO)
50478 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
50479- tty->driver->name, info->port.count);
50480+ tty->driver->name, atomic_read(&info->port.count));
50481 }
50482
50483 /* Called by tty_hangup() when a hangup is signaled.
50484@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
50485 shutdown(info);
50486
50487 spin_lock_irqsave(&info->port.lock, flags);
50488- info->port.count = 0;
50489+ atomic_set(&info->port.count, 0);
50490 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
50491 info->port.tty = NULL;
50492 spin_unlock_irqrestore(&info->port.lock, flags);
50493@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
50494 unsigned short new_crctype;
50495
50496 /* return error if TTY interface open */
50497- if (info->port.count)
50498+ if (atomic_read(&info->port.count))
50499 return -EBUSY;
50500
50501 switch (encoding)
50502@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
50503
50504 /* arbitrate between network and tty opens */
50505 spin_lock_irqsave(&info->netlock, flags);
50506- if (info->port.count != 0 || info->netcount != 0) {
50507+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
50508 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
50509 spin_unlock_irqrestore(&info->netlock, flags);
50510 return -EBUSY;
50511@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
50512 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
50513
50514 /* return error if TTY interface open */
50515- if (info->port.count)
50516+ if (atomic_read(&info->port.count))
50517 return -EBUSY;
50518
50519 if (cmd != SIOCWANDEV)
50520@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
50521 * do not request bottom half processing if the
50522 * device is not open in a normal mode.
50523 */
50524- if ( port && (port->port.count || port->netcount) &&
50525+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
50526 port->pending_bh && !port->bh_running &&
50527 !port->bh_requested ) {
50528 if ( debug_level >= DEBUG_LEVEL_ISR )
50529@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50530
50531 if (debug_level >= DEBUG_LEVEL_INFO)
50532 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
50533- __FILE__,__LINE__, tty->driver->name, port->count );
50534+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50535
50536 spin_lock_irqsave(&info->lock, flags);
50537 if (!tty_hung_up_p(filp)) {
50538 extra_count = true;
50539- port->count--;
50540+ atomic_dec(&port->count);
50541 }
50542 spin_unlock_irqrestore(&info->lock, flags);
50543 port->blocked_open++;
50544@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50545
50546 if (debug_level >= DEBUG_LEVEL_INFO)
50547 printk("%s(%d):%s block_til_ready() count=%d\n",
50548- __FILE__,__LINE__, tty->driver->name, port->count );
50549+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50550
50551 tty_unlock(tty);
50552 schedule();
50553@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
50554 remove_wait_queue(&port->open_wait, &wait);
50555
50556 if (extra_count)
50557- port->count++;
50558+ atomic_inc(&port->count);
50559 port->blocked_open--;
50560
50561 if (debug_level >= DEBUG_LEVEL_INFO)
50562 printk("%s(%d):%s block_til_ready() after, count=%d\n",
50563- __FILE__,__LINE__, tty->driver->name, port->count );
50564+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50565
50566 if (!retval)
50567 port->flags |= ASYNC_NORMAL_ACTIVE;
50568diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
50569index ce396ec..04a37be 100644
50570--- a/drivers/tty/sysrq.c
50571+++ b/drivers/tty/sysrq.c
50572@@ -1075,7 +1075,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
50573 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
50574 size_t count, loff_t *ppos)
50575 {
50576- if (count) {
50577+ if (count && capable(CAP_SYS_ADMIN)) {
50578 char c;
50579
50580 if (get_user(c, buf))
50581diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
50582index c74a00a..02cf211a 100644
50583--- a/drivers/tty/tty_io.c
50584+++ b/drivers/tty/tty_io.c
50585@@ -3474,7 +3474,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
50586
50587 void tty_default_fops(struct file_operations *fops)
50588 {
50589- *fops = tty_fops;
50590+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
50591 }
50592
50593 /*
50594diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
50595index 6458e11..6cfc218 100644
50596--- a/drivers/tty/tty_ldisc.c
50597+++ b/drivers/tty/tty_ldisc.c
50598@@ -72,7 +72,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
50599 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
50600 tty_ldiscs[disc] = new_ldisc;
50601 new_ldisc->num = disc;
50602- new_ldisc->refcount = 0;
50603+ atomic_set(&new_ldisc->refcount, 0);
50604 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
50605
50606 return ret;
50607@@ -100,7 +100,7 @@ int tty_unregister_ldisc(int disc)
50608 return -EINVAL;
50609
50610 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
50611- if (tty_ldiscs[disc]->refcount)
50612+ if (atomic_read(&tty_ldiscs[disc]->refcount))
50613 ret = -EBUSY;
50614 else
50615 tty_ldiscs[disc] = NULL;
50616@@ -121,7 +121,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
50617 if (ldops) {
50618 ret = ERR_PTR(-EAGAIN);
50619 if (try_module_get(ldops->owner)) {
50620- ldops->refcount++;
50621+ atomic_inc(&ldops->refcount);
50622 ret = ldops;
50623 }
50624 }
50625@@ -134,7 +134,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
50626 unsigned long flags;
50627
50628 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
50629- ldops->refcount--;
50630+ atomic_dec(&ldops->refcount);
50631 module_put(ldops->owner);
50632 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
50633 }
50634diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
50635index c94d234..8210f2d 100644
50636--- a/drivers/tty/tty_port.c
50637+++ b/drivers/tty/tty_port.c
50638@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
50639 unsigned long flags;
50640
50641 spin_lock_irqsave(&port->lock, flags);
50642- port->count = 0;
50643+ atomic_set(&port->count, 0);
50644 port->flags &= ~ASYNC_NORMAL_ACTIVE;
50645 tty = port->tty;
50646 if (tty)
50647@@ -394,7 +394,7 @@ int tty_port_block_til_ready(struct tty_port *port,
50648 /* The port lock protects the port counts */
50649 spin_lock_irqsave(&port->lock, flags);
50650 if (!tty_hung_up_p(filp))
50651- port->count--;
50652+ atomic_dec(&port->count);
50653 port->blocked_open++;
50654 spin_unlock_irqrestore(&port->lock, flags);
50655
50656@@ -436,7 +436,7 @@ int tty_port_block_til_ready(struct tty_port *port,
50657 we must not mess that up further */
50658 spin_lock_irqsave(&port->lock, flags);
50659 if (!tty_hung_up_p(filp))
50660- port->count++;
50661+ atomic_inc(&port->count);
50662 port->blocked_open--;
50663 if (retval == 0)
50664 port->flags |= ASYNC_NORMAL_ACTIVE;
50665@@ -470,19 +470,19 @@ int tty_port_close_start(struct tty_port *port,
50666 return 0;
50667 }
50668
50669- if (tty->count == 1 && port->count != 1) {
50670+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
50671 printk(KERN_WARNING
50672 "tty_port_close_start: tty->count = 1 port count = %d.\n",
50673- port->count);
50674- port->count = 1;
50675+ atomic_read(&port->count));
50676+ atomic_set(&port->count, 1);
50677 }
50678- if (--port->count < 0) {
50679+ if (atomic_dec_return(&port->count) < 0) {
50680 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
50681- port->count);
50682- port->count = 0;
50683+ atomic_read(&port->count));
50684+ atomic_set(&port->count, 0);
50685 }
50686
50687- if (port->count) {
50688+ if (atomic_read(&port->count)) {
50689 spin_unlock_irqrestore(&port->lock, flags);
50690 return 0;
50691 }
50692@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
50693 {
50694 spin_lock_irq(&port->lock);
50695 if (!tty_hung_up_p(filp))
50696- ++port->count;
50697+ atomic_inc(&port->count);
50698 spin_unlock_irq(&port->lock);
50699 tty_port_tty_set(port, tty);
50700
50701diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
50702index d0e3a44..5f8b754 100644
50703--- a/drivers/tty/vt/keyboard.c
50704+++ b/drivers/tty/vt/keyboard.c
50705@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
50706 kbd->kbdmode == VC_OFF) &&
50707 value != KVAL(K_SAK))
50708 return; /* SAK is allowed even in raw mode */
50709+
50710+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
50711+ {
50712+ void *func = fn_handler[value];
50713+ if (func == fn_show_state || func == fn_show_ptregs ||
50714+ func == fn_show_mem)
50715+ return;
50716+ }
50717+#endif
50718+
50719 fn_handler[value](vc);
50720 }
50721
50722@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
50723 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
50724 return -EFAULT;
50725
50726- if (!capable(CAP_SYS_TTY_CONFIG))
50727- perm = 0;
50728-
50729 switch (cmd) {
50730 case KDGKBENT:
50731 /* Ensure another thread doesn't free it under us */
50732@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
50733 spin_unlock_irqrestore(&kbd_event_lock, flags);
50734 return put_user(val, &user_kbe->kb_value);
50735 case KDSKBENT:
50736+ if (!capable(CAP_SYS_TTY_CONFIG))
50737+ perm = 0;
50738+
50739 if (!perm)
50740 return -EPERM;
50741 if (!i && v == K_NOSUCHMAP) {
50742@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
50743 int i, j, k;
50744 int ret;
50745
50746- if (!capable(CAP_SYS_TTY_CONFIG))
50747- perm = 0;
50748-
50749 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
50750 if (!kbs) {
50751 ret = -ENOMEM;
50752@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
50753 kfree(kbs);
50754 return ((p && *p) ? -EOVERFLOW : 0);
50755 case KDSKBSENT:
50756+ if (!capable(CAP_SYS_TTY_CONFIG))
50757+ perm = 0;
50758+
50759 if (!perm) {
50760 ret = -EPERM;
50761 goto reterr;
50762diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
50763index a673e5b..36e5d32 100644
50764--- a/drivers/uio/uio.c
50765+++ b/drivers/uio/uio.c
50766@@ -25,6 +25,7 @@
50767 #include <linux/kobject.h>
50768 #include <linux/cdev.h>
50769 #include <linux/uio_driver.h>
50770+#include <asm/local.h>
50771
50772 #define UIO_MAX_DEVICES (1U << MINORBITS)
50773
50774@@ -32,7 +33,7 @@ struct uio_device {
50775 struct module *owner;
50776 struct device *dev;
50777 int minor;
50778- atomic_t event;
50779+ atomic_unchecked_t event;
50780 struct fasync_struct *async_queue;
50781 wait_queue_head_t wait;
50782 struct uio_info *info;
50783@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
50784 struct device_attribute *attr, char *buf)
50785 {
50786 struct uio_device *idev = dev_get_drvdata(dev);
50787- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
50788+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
50789 }
50790 static DEVICE_ATTR_RO(event);
50791
50792@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
50793 {
50794 struct uio_device *idev = info->uio_dev;
50795
50796- atomic_inc(&idev->event);
50797+ atomic_inc_unchecked(&idev->event);
50798 wake_up_interruptible(&idev->wait);
50799 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
50800 }
50801@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
50802 }
50803
50804 listener->dev = idev;
50805- listener->event_count = atomic_read(&idev->event);
50806+ listener->event_count = atomic_read_unchecked(&idev->event);
50807 filep->private_data = listener;
50808
50809 if (idev->info->open) {
50810@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
50811 return -EIO;
50812
50813 poll_wait(filep, &idev->wait, wait);
50814- if (listener->event_count != atomic_read(&idev->event))
50815+ if (listener->event_count != atomic_read_unchecked(&idev->event))
50816 return POLLIN | POLLRDNORM;
50817 return 0;
50818 }
50819@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
50820 do {
50821 set_current_state(TASK_INTERRUPTIBLE);
50822
50823- event_count = atomic_read(&idev->event);
50824+ event_count = atomic_read_unchecked(&idev->event);
50825 if (event_count != listener->event_count) {
50826 if (copy_to_user(buf, &event_count, count))
50827 retval = -EFAULT;
50828@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
50829 static int uio_find_mem_index(struct vm_area_struct *vma)
50830 {
50831 struct uio_device *idev = vma->vm_private_data;
50832+ unsigned long size;
50833
50834 if (vma->vm_pgoff < MAX_UIO_MAPS) {
50835- if (idev->info->mem[vma->vm_pgoff].size == 0)
50836+ size = idev->info->mem[vma->vm_pgoff].size;
50837+ if (size == 0)
50838+ return -1;
50839+ if (vma->vm_end - vma->vm_start > size)
50840 return -1;
50841 return (int)vma->vm_pgoff;
50842 }
50843@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
50844 idev->owner = owner;
50845 idev->info = info;
50846 init_waitqueue_head(&idev->wait);
50847- atomic_set(&idev->event, 0);
50848+ atomic_set_unchecked(&idev->event, 0);
50849
50850 ret = uio_get_minor(idev);
50851 if (ret)
50852diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
50853index 8a7eb77..c00402f 100644
50854--- a/drivers/usb/atm/cxacru.c
50855+++ b/drivers/usb/atm/cxacru.c
50856@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
50857 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
50858 if (ret < 2)
50859 return -EINVAL;
50860- if (index < 0 || index > 0x7f)
50861+ if (index > 0x7f)
50862 return -EINVAL;
50863 pos += tmp;
50864
50865diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
50866index 25a7bfc..57f3cf5 100644
50867--- a/drivers/usb/atm/usbatm.c
50868+++ b/drivers/usb/atm/usbatm.c
50869@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50870 if (printk_ratelimit())
50871 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
50872 __func__, vpi, vci);
50873- atomic_inc(&vcc->stats->rx_err);
50874+ atomic_inc_unchecked(&vcc->stats->rx_err);
50875 return;
50876 }
50877
50878@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50879 if (length > ATM_MAX_AAL5_PDU) {
50880 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
50881 __func__, length, vcc);
50882- atomic_inc(&vcc->stats->rx_err);
50883+ atomic_inc_unchecked(&vcc->stats->rx_err);
50884 goto out;
50885 }
50886
50887@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50888 if (sarb->len < pdu_length) {
50889 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
50890 __func__, pdu_length, sarb->len, vcc);
50891- atomic_inc(&vcc->stats->rx_err);
50892+ atomic_inc_unchecked(&vcc->stats->rx_err);
50893 goto out;
50894 }
50895
50896 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
50897 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
50898 __func__, vcc);
50899- atomic_inc(&vcc->stats->rx_err);
50900+ atomic_inc_unchecked(&vcc->stats->rx_err);
50901 goto out;
50902 }
50903
50904@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50905 if (printk_ratelimit())
50906 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
50907 __func__, length);
50908- atomic_inc(&vcc->stats->rx_drop);
50909+ atomic_inc_unchecked(&vcc->stats->rx_drop);
50910 goto out;
50911 }
50912
50913@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
50914
50915 vcc->push(vcc, skb);
50916
50917- atomic_inc(&vcc->stats->rx);
50918+ atomic_inc_unchecked(&vcc->stats->rx);
50919 out:
50920 skb_trim(sarb, 0);
50921 }
50922@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
50923 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
50924
50925 usbatm_pop(vcc, skb);
50926- atomic_inc(&vcc->stats->tx);
50927+ atomic_inc_unchecked(&vcc->stats->tx);
50928
50929 skb = skb_dequeue(&instance->sndqueue);
50930 }
50931@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
50932 if (!left--)
50933 return sprintf(page,
50934 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
50935- atomic_read(&atm_dev->stats.aal5.tx),
50936- atomic_read(&atm_dev->stats.aal5.tx_err),
50937- atomic_read(&atm_dev->stats.aal5.rx),
50938- atomic_read(&atm_dev->stats.aal5.rx_err),
50939- atomic_read(&atm_dev->stats.aal5.rx_drop));
50940+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
50941+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
50942+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
50943+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
50944+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
50945
50946 if (!left--) {
50947 if (instance->disconnected)
50948diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
50949index 2a3bbdf..91d72cf 100644
50950--- a/drivers/usb/core/devices.c
50951+++ b/drivers/usb/core/devices.c
50952@@ -126,7 +126,7 @@ static const char format_endpt[] =
50953 * time it gets called.
50954 */
50955 static struct device_connect_event {
50956- atomic_t count;
50957+ atomic_unchecked_t count;
50958 wait_queue_head_t wait;
50959 } device_event = {
50960 .count = ATOMIC_INIT(1),
50961@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
50962
50963 void usbfs_conn_disc_event(void)
50964 {
50965- atomic_add(2, &device_event.count);
50966+ atomic_add_unchecked(2, &device_event.count);
50967 wake_up(&device_event.wait);
50968 }
50969
50970@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
50971
50972 poll_wait(file, &device_event.wait, wait);
50973
50974- event_count = atomic_read(&device_event.count);
50975+ event_count = atomic_read_unchecked(&device_event.count);
50976 if (file->f_version != event_count) {
50977 file->f_version = event_count;
50978 return POLLIN | POLLRDNORM;
50979diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
50980index 967152a..16fa2e5 100644
50981--- a/drivers/usb/core/devio.c
50982+++ b/drivers/usb/core/devio.c
50983@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
50984 struct dev_state *ps = file->private_data;
50985 struct usb_device *dev = ps->dev;
50986 ssize_t ret = 0;
50987- unsigned len;
50988+ size_t len;
50989 loff_t pos;
50990 int i;
50991
50992@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
50993 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
50994 struct usb_config_descriptor *config =
50995 (struct usb_config_descriptor *)dev->rawdescriptors[i];
50996- unsigned int length = le16_to_cpu(config->wTotalLength);
50997+ size_t length = le16_to_cpu(config->wTotalLength);
50998
50999 if (*ppos < pos + length) {
51000
51001 /* The descriptor may claim to be longer than it
51002 * really is. Here is the actual allocated length. */
51003- unsigned alloclen =
51004+ size_t alloclen =
51005 le16_to_cpu(dev->config[i].desc.wTotalLength);
51006
51007- len = length - (*ppos - pos);
51008+ len = length + pos - *ppos;
51009 if (len > nbytes)
51010 len = nbytes;
51011
51012 /* Simply don't write (skip over) unallocated parts */
51013 if (alloclen > (*ppos - pos)) {
51014- alloclen -= (*ppos - pos);
51015+ alloclen = alloclen + pos - *ppos;
51016 if (copy_to_user(buf,
51017 dev->rawdescriptors[i] + (*ppos - pos),
51018 min(len, alloclen))) {
51019diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
51020index 6bffb8c..b404e8b 100644
51021--- a/drivers/usb/core/hcd.c
51022+++ b/drivers/usb/core/hcd.c
51023@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
51024 */
51025 usb_get_urb(urb);
51026 atomic_inc(&urb->use_count);
51027- atomic_inc(&urb->dev->urbnum);
51028+ atomic_inc_unchecked(&urb->dev->urbnum);
51029 usbmon_urb_submit(&hcd->self, urb);
51030
51031 /* NOTE requirements on root-hub callers (usbfs and the hub
51032@@ -1577,7 +1577,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
51033 urb->hcpriv = NULL;
51034 INIT_LIST_HEAD(&urb->urb_list);
51035 atomic_dec(&urb->use_count);
51036- atomic_dec(&urb->dev->urbnum);
51037+ atomic_dec_unchecked(&urb->dev->urbnum);
51038 if (atomic_read(&urb->reject))
51039 wake_up(&usb_kill_urb_queue);
51040 usb_put_urb(urb);
51041diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
51042index 07e6654..6420edf 100644
51043--- a/drivers/usb/core/hub.c
51044+++ b/drivers/usb/core/hub.c
51045@@ -27,6 +27,7 @@
51046 #include <linux/freezer.h>
51047 #include <linux/random.h>
51048 #include <linux/pm_qos.h>
51049+#include <linux/grsecurity.h>
51050
51051 #include <asm/uaccess.h>
51052 #include <asm/byteorder.h>
51053@@ -4442,6 +4443,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
51054 goto done;
51055 return;
51056 }
51057+
51058+ if (gr_handle_new_usb())
51059+ goto done;
51060+
51061 if (hub_is_superspeed(hub->hdev))
51062 unit_load = 150;
51063 else
51064diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
51065index bb31597..6c5ef8b 100644
51066--- a/drivers/usb/core/message.c
51067+++ b/drivers/usb/core/message.c
51068@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
51069 * Return: If successful, the number of bytes transferred. Otherwise, a negative
51070 * error number.
51071 */
51072-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
51073+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
51074 __u8 requesttype, __u16 value, __u16 index, void *data,
51075 __u16 size, int timeout)
51076 {
51077@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
51078 * If successful, 0. Otherwise a negative error number. The number of actual
51079 * bytes transferred will be stored in the @actual_length paramater.
51080 */
51081-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
51082+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
51083 void *data, int len, int *actual_length, int timeout)
51084 {
51085 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
51086@@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
51087 * bytes transferred will be stored in the @actual_length paramater.
51088 *
51089 */
51090-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
51091+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
51092 void *data, int len, int *actual_length, int timeout)
51093 {
51094 struct urb *urb;
51095diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
51096index 52a97ad..e73330f 100644
51097--- a/drivers/usb/core/sysfs.c
51098+++ b/drivers/usb/core/sysfs.c
51099@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
51100 struct usb_device *udev;
51101
51102 udev = to_usb_device(dev);
51103- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
51104+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
51105 }
51106 static DEVICE_ATTR_RO(urbnum);
51107
51108diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
51109index 4d11449..f4ccabf 100644
51110--- a/drivers/usb/core/usb.c
51111+++ b/drivers/usb/core/usb.c
51112@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
51113 set_dev_node(&dev->dev, dev_to_node(bus->controller));
51114 dev->state = USB_STATE_ATTACHED;
51115 dev->lpm_disable_count = 1;
51116- atomic_set(&dev->urbnum, 0);
51117+ atomic_set_unchecked(&dev->urbnum, 0);
51118
51119 INIT_LIST_HEAD(&dev->ep0.urb_list);
51120 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
51121diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
51122index 02e44fc..3c4fe64 100644
51123--- a/drivers/usb/dwc3/gadget.c
51124+++ b/drivers/usb/dwc3/gadget.c
51125@@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
51126 if (!usb_endpoint_xfer_isoc(desc))
51127 return 0;
51128
51129- memset(&trb_link, 0, sizeof(trb_link));
51130-
51131 /* Link TRB for ISOC. The HWO bit is never reset */
51132 trb_st_hw = &dep->trb_pool[0];
51133
51134diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
51135index 8cfc319..4868255 100644
51136--- a/drivers/usb/early/ehci-dbgp.c
51137+++ b/drivers/usb/early/ehci-dbgp.c
51138@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
51139
51140 #ifdef CONFIG_KGDB
51141 static struct kgdb_io kgdbdbgp_io_ops;
51142-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
51143+static struct kgdb_io kgdbdbgp_io_ops_console;
51144+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
51145 #else
51146 #define dbgp_kgdb_mode (0)
51147 #endif
51148@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
51149 .write_char = kgdbdbgp_write_char,
51150 };
51151
51152+static struct kgdb_io kgdbdbgp_io_ops_console = {
51153+ .name = "kgdbdbgp",
51154+ .read_char = kgdbdbgp_read_char,
51155+ .write_char = kgdbdbgp_write_char,
51156+ .is_console = 1
51157+};
51158+
51159 static int kgdbdbgp_wait_time;
51160
51161 static int __init kgdbdbgp_parse_config(char *str)
51162@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
51163 ptr++;
51164 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
51165 }
51166- kgdb_register_io_module(&kgdbdbgp_io_ops);
51167- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
51168+ if (early_dbgp_console.index != -1)
51169+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
51170+ else
51171+ kgdb_register_io_module(&kgdbdbgp_io_ops);
51172
51173 return 0;
51174 }
51175diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
51176index b369292..9f3ba40 100644
51177--- a/drivers/usb/gadget/u_serial.c
51178+++ b/drivers/usb/gadget/u_serial.c
51179@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
51180 spin_lock_irq(&port->port_lock);
51181
51182 /* already open? Great. */
51183- if (port->port.count) {
51184+ if (atomic_read(&port->port.count)) {
51185 status = 0;
51186- port->port.count++;
51187+ atomic_inc(&port->port.count);
51188
51189 /* currently opening/closing? wait ... */
51190 } else if (port->openclose) {
51191@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
51192 tty->driver_data = port;
51193 port->port.tty = tty;
51194
51195- port->port.count = 1;
51196+ atomic_set(&port->port.count, 1);
51197 port->openclose = false;
51198
51199 /* if connected, start the I/O stream */
51200@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
51201
51202 spin_lock_irq(&port->port_lock);
51203
51204- if (port->port.count != 1) {
51205- if (port->port.count == 0)
51206+ if (atomic_read(&port->port.count) != 1) {
51207+ if (atomic_read(&port->port.count) == 0)
51208 WARN_ON(1);
51209 else
51210- --port->port.count;
51211+ atomic_dec(&port->port.count);
51212 goto exit;
51213 }
51214
51215@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
51216 * and sleep if necessary
51217 */
51218 port->openclose = true;
51219- port->port.count = 0;
51220+ atomic_set(&port->port.count, 0);
51221
51222 gser = port->port_usb;
51223 if (gser && gser->disconnect)
51224@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
51225 int cond;
51226
51227 spin_lock_irq(&port->port_lock);
51228- cond = (port->port.count == 0) && !port->openclose;
51229+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
51230 spin_unlock_irq(&port->port_lock);
51231 return cond;
51232 }
51233@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
51234 /* if it's already open, start I/O ... and notify the serial
51235 * protocol about open/close status (connect/disconnect).
51236 */
51237- if (port->port.count) {
51238+ if (atomic_read(&port->port.count)) {
51239 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
51240 gs_start_io(port);
51241 if (gser->connect)
51242@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
51243
51244 port->port_usb = NULL;
51245 gser->ioport = NULL;
51246- if (port->port.count > 0 || port->openclose) {
51247+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
51248 wake_up_interruptible(&port->drain_wait);
51249 if (port->port.tty)
51250 tty_hangup(port->port.tty);
51251@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
51252
51253 /* finally, free any unused/unusable I/O buffers */
51254 spin_lock_irqsave(&port->port_lock, flags);
51255- if (port->port.count == 0 && !port->openclose)
51256+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
51257 gs_buf_free(&port->port_write_buf);
51258 gs_free_requests(gser->out, &port->read_pool, NULL);
51259 gs_free_requests(gser->out, &port->read_queue, NULL);
51260diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
51261index 835fc08..f8b22bf 100644
51262--- a/drivers/usb/host/ehci-hub.c
51263+++ b/drivers/usb/host/ehci-hub.c
51264@@ -762,7 +762,7 @@ static struct urb *request_single_step_set_feature_urb(
51265 urb->transfer_flags = URB_DIR_IN;
51266 usb_get_urb(urb);
51267 atomic_inc(&urb->use_count);
51268- atomic_inc(&urb->dev->urbnum);
51269+ atomic_inc_unchecked(&urb->dev->urbnum);
51270 urb->setup_dma = dma_map_single(
51271 hcd->self.controller,
51272 urb->setup_packet,
51273@@ -829,7 +829,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
51274 urb->status = -EINPROGRESS;
51275 usb_get_urb(urb);
51276 atomic_inc(&urb->use_count);
51277- atomic_inc(&urb->dev->urbnum);
51278+ atomic_inc_unchecked(&urb->dev->urbnum);
51279 retval = submit_single_step_set_feature(hcd, urb, 0);
51280 if (!retval && !wait_for_completion_timeout(&done,
51281 msecs_to_jiffies(2000))) {
51282diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
51283index ba6a5d6..f88f7f3 100644
51284--- a/drivers/usb/misc/appledisplay.c
51285+++ b/drivers/usb/misc/appledisplay.c
51286@@ -83,7 +83,7 @@ struct appledisplay {
51287 spinlock_t lock;
51288 };
51289
51290-static atomic_t count_displays = ATOMIC_INIT(0);
51291+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
51292 static struct workqueue_struct *wq;
51293
51294 static void appledisplay_complete(struct urb *urb)
51295@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
51296
51297 /* Register backlight device */
51298 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
51299- atomic_inc_return(&count_displays) - 1);
51300+ atomic_inc_return_unchecked(&count_displays) - 1);
51301 memset(&props, 0, sizeof(struct backlight_properties));
51302 props.type = BACKLIGHT_RAW;
51303 props.max_brightness = 0xff;
51304diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
51305index c69bb50..215ef37 100644
51306--- a/drivers/usb/serial/console.c
51307+++ b/drivers/usb/serial/console.c
51308@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
51309
51310 info->port = port;
51311
51312- ++port->port.count;
51313+ atomic_inc(&port->port.count);
51314 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
51315 if (serial->type->set_termios) {
51316 /*
51317@@ -170,7 +170,7 @@ static int usb_console_setup(struct console *co, char *options)
51318 }
51319 /* Now that any required fake tty operations are completed restore
51320 * the tty port count */
51321- --port->port.count;
51322+ atomic_dec(&port->port.count);
51323 /* The console is special in terms of closing the device so
51324 * indicate this port is now acting as a system console. */
51325 port->port.console = 1;
51326@@ -183,7 +183,7 @@ static int usb_console_setup(struct console *co, char *options)
51327 free_tty:
51328 kfree(tty);
51329 reset_open_count:
51330- port->port.count = 0;
51331+ atomic_set(&port->port.count, 0);
51332 usb_autopm_put_interface(serial->interface);
51333 error_get_interface:
51334 usb_serial_put(serial);
51335@@ -194,7 +194,7 @@ static int usb_console_setup(struct console *co, char *options)
51336 static void usb_console_write(struct console *co,
51337 const char *buf, unsigned count)
51338 {
51339- static struct usbcons_info *info = &usbcons_info;
51340+ struct usbcons_info *info = &usbcons_info;
51341 struct usb_serial_port *port = info->port;
51342 struct usb_serial *serial;
51343 int retval = -ENODEV;
51344diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
51345index 75f70f0..d467e1a 100644
51346--- a/drivers/usb/storage/usb.h
51347+++ b/drivers/usb/storage/usb.h
51348@@ -63,7 +63,7 @@ struct us_unusual_dev {
51349 __u8 useProtocol;
51350 __u8 useTransport;
51351 int (*initFunction)(struct us_data *);
51352-};
51353+} __do_const;
51354
51355
51356 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
51357diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
51358index e614f02..3fd60e2 100644
51359--- a/drivers/usb/wusbcore/wa-hc.h
51360+++ b/drivers/usb/wusbcore/wa-hc.h
51361@@ -225,7 +225,7 @@ struct wahc {
51362 spinlock_t xfer_list_lock;
51363 struct work_struct xfer_enqueue_work;
51364 struct work_struct xfer_error_work;
51365- atomic_t xfer_id_count;
51366+ atomic_unchecked_t xfer_id_count;
51367
51368 kernel_ulong_t quirks;
51369 };
51370@@ -287,7 +287,7 @@ static inline void wa_init(struct wahc *wa)
51371 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
51372 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
51373 wa->dto_in_use = 0;
51374- atomic_set(&wa->xfer_id_count, 1);
51375+ atomic_set_unchecked(&wa->xfer_id_count, 1);
51376 }
51377
51378 /**
51379diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
51380index ed5abe8..7036400 100644
51381--- a/drivers/usb/wusbcore/wa-xfer.c
51382+++ b/drivers/usb/wusbcore/wa-xfer.c
51383@@ -312,7 +312,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
51384 */
51385 static void wa_xfer_id_init(struct wa_xfer *xfer)
51386 {
51387- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
51388+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
51389 }
51390
51391 /* Return the xfer's ID. */
51392diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
51393index 1eab4ac..e21efc9 100644
51394--- a/drivers/vfio/vfio.c
51395+++ b/drivers/vfio/vfio.c
51396@@ -488,7 +488,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
51397 return 0;
51398
51399 /* TODO Prevent device auto probing */
51400- WARN("Device %s added to live group %d!\n", dev_name(dev),
51401+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
51402 iommu_group_id(group->iommu_group));
51403
51404 return 0;
51405diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
51406index 5174eba..451e6bc 100644
51407--- a/drivers/vhost/vringh.c
51408+++ b/drivers/vhost/vringh.c
51409@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
51410 /* Userspace access helpers: in this case, addresses are really userspace. */
51411 static inline int getu16_user(u16 *val, const u16 *p)
51412 {
51413- return get_user(*val, (__force u16 __user *)p);
51414+ return get_user(*val, (u16 __force_user *)p);
51415 }
51416
51417 static inline int putu16_user(u16 *p, u16 val)
51418 {
51419- return put_user(val, (__force u16 __user *)p);
51420+ return put_user(val, (u16 __force_user *)p);
51421 }
51422
51423 static inline int copydesc_user(void *dst, const void *src, size_t len)
51424 {
51425- return copy_from_user(dst, (__force void __user *)src, len) ?
51426+ return copy_from_user(dst, (void __force_user *)src, len) ?
51427 -EFAULT : 0;
51428 }
51429
51430@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
51431 const struct vring_used_elem *src,
51432 unsigned int num)
51433 {
51434- return copy_to_user((__force void __user *)dst, src,
51435+ return copy_to_user((void __force_user *)dst, src,
51436 sizeof(*dst) * num) ? -EFAULT : 0;
51437 }
51438
51439 static inline int xfer_from_user(void *src, void *dst, size_t len)
51440 {
51441- return copy_from_user(dst, (__force void __user *)src, len) ?
51442+ return copy_from_user(dst, (void __force_user *)src, len) ?
51443 -EFAULT : 0;
51444 }
51445
51446 static inline int xfer_to_user(void *dst, void *src, size_t len)
51447 {
51448- return copy_to_user((__force void __user *)dst, src, len) ?
51449+ return copy_to_user((void __force_user *)dst, src, len) ?
51450 -EFAULT : 0;
51451 }
51452
51453@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
51454 vrh->last_used_idx = 0;
51455 vrh->vring.num = num;
51456 /* vring expects kernel addresses, but only used via accessors. */
51457- vrh->vring.desc = (__force struct vring_desc *)desc;
51458- vrh->vring.avail = (__force struct vring_avail *)avail;
51459- vrh->vring.used = (__force struct vring_used *)used;
51460+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
51461+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
51462+ vrh->vring.used = (__force_kernel struct vring_used *)used;
51463 return 0;
51464 }
51465 EXPORT_SYMBOL(vringh_init_user);
51466@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
51467
51468 static inline int putu16_kern(u16 *p, u16 val)
51469 {
51470- ACCESS_ONCE(*p) = val;
51471+ ACCESS_ONCE_RW(*p) = val;
51472 return 0;
51473 }
51474
51475diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
51476index 1b0b233..6f34c2c 100644
51477--- a/drivers/video/arcfb.c
51478+++ b/drivers/video/arcfb.c
51479@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
51480 return -ENOSPC;
51481
51482 err = 0;
51483- if ((count + p) > fbmemlength) {
51484+ if (count > (fbmemlength - p)) {
51485 count = fbmemlength - p;
51486 err = -ENOSPC;
51487 }
51488diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
51489index 12ca031..84a8a74 100644
51490--- a/drivers/video/aty/aty128fb.c
51491+++ b/drivers/video/aty/aty128fb.c
51492@@ -149,7 +149,7 @@ enum {
51493 };
51494
51495 /* Must match above enum */
51496-static char * const r128_family[] = {
51497+static const char * const r128_family[] = {
51498 "AGP",
51499 "PCI",
51500 "PRO AGP",
51501diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
51502index 28fafbf..ae91651 100644
51503--- a/drivers/video/aty/atyfb_base.c
51504+++ b/drivers/video/aty/atyfb_base.c
51505@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
51506 par->accel_flags = var->accel_flags; /* hack */
51507
51508 if (var->accel_flags) {
51509- info->fbops->fb_sync = atyfb_sync;
51510+ pax_open_kernel();
51511+ *(void **)&info->fbops->fb_sync = atyfb_sync;
51512+ pax_close_kernel();
51513 info->flags &= ~FBINFO_HWACCEL_DISABLED;
51514 } else {
51515- info->fbops->fb_sync = NULL;
51516+ pax_open_kernel();
51517+ *(void **)&info->fbops->fb_sync = NULL;
51518+ pax_close_kernel();
51519 info->flags |= FBINFO_HWACCEL_DISABLED;
51520 }
51521
51522diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
51523index 95ec042..e6affdd 100644
51524--- a/drivers/video/aty/mach64_cursor.c
51525+++ b/drivers/video/aty/mach64_cursor.c
51526@@ -7,6 +7,7 @@
51527 #include <linux/string.h>
51528
51529 #include <asm/io.h>
51530+#include <asm/pgtable.h>
51531
51532 #ifdef __sparc__
51533 #include <asm/fbio.h>
51534@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
51535 info->sprite.buf_align = 16; /* and 64 lines tall. */
51536 info->sprite.flags = FB_PIXMAP_IO;
51537
51538- info->fbops->fb_cursor = atyfb_cursor;
51539+ pax_open_kernel();
51540+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
51541+ pax_close_kernel();
51542
51543 return 0;
51544 }
51545diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
51546index 7592cc2..92feb56 100644
51547--- a/drivers/video/backlight/kb3886_bl.c
51548+++ b/drivers/video/backlight/kb3886_bl.c
51549@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
51550 static unsigned long kb3886bl_flags;
51551 #define KB3886BL_SUSPENDED 0x01
51552
51553-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
51554+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
51555 {
51556 .ident = "Sahara Touch-iT",
51557 .matches = {
51558diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
51559index 900aa4e..6d49418 100644
51560--- a/drivers/video/fb_defio.c
51561+++ b/drivers/video/fb_defio.c
51562@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
51563
51564 BUG_ON(!fbdefio);
51565 mutex_init(&fbdefio->lock);
51566- info->fbops->fb_mmap = fb_deferred_io_mmap;
51567+ pax_open_kernel();
51568+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
51569+ pax_close_kernel();
51570 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
51571 INIT_LIST_HEAD(&fbdefio->pagelist);
51572 if (fbdefio->delay == 0) /* set a default of 1 s */
51573@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
51574 page->mapping = NULL;
51575 }
51576
51577- info->fbops->fb_mmap = NULL;
51578+ *(void **)&info->fbops->fb_mmap = NULL;
51579 mutex_destroy(&fbdefio->lock);
51580 }
51581 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
51582diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
51583index 010d191..7b8235a 100644
51584--- a/drivers/video/fbmem.c
51585+++ b/drivers/video/fbmem.c
51586@@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
51587 image->dx += image->width + 8;
51588 }
51589 } else if (rotate == FB_ROTATE_UD) {
51590- for (x = 0; x < num && image->dx >= 0; x++) {
51591+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
51592 info->fbops->fb_imageblit(info, image);
51593 image->dx -= image->width + 8;
51594 }
51595@@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
51596 image->dy += image->height + 8;
51597 }
51598 } else if (rotate == FB_ROTATE_CCW) {
51599- for (x = 0; x < num && image->dy >= 0; x++) {
51600+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
51601 info->fbops->fb_imageblit(info, image);
51602 image->dy -= image->height + 8;
51603 }
51604@@ -1179,7 +1179,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
51605 return -EFAULT;
51606 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
51607 return -EINVAL;
51608- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
51609+ if (con2fb.framebuffer >= FB_MAX)
51610 return -EINVAL;
51611 if (!registered_fb[con2fb.framebuffer])
51612 request_module("fb%d", con2fb.framebuffer);
51613@@ -1300,7 +1300,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
51614 __u32 data;
51615 int err;
51616
51617- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
51618+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
51619
51620 data = (__u32) (unsigned long) fix->smem_start;
51621 err |= put_user(data, &fix32->smem_start);
51622diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
51623index 130708f..cdac1a9 100644
51624--- a/drivers/video/hyperv_fb.c
51625+++ b/drivers/video/hyperv_fb.c
51626@@ -233,7 +233,7 @@ static uint screen_fb_size;
51627 static inline int synthvid_send(struct hv_device *hdev,
51628 struct synthvid_msg *msg)
51629 {
51630- static atomic64_t request_id = ATOMIC64_INIT(0);
51631+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
51632 int ret;
51633
51634 msg->pipe_hdr.type = PIPE_MSG_DATA;
51635@@ -241,7 +241,7 @@ static inline int synthvid_send(struct hv_device *hdev,
51636
51637 ret = vmbus_sendpacket(hdev->channel, msg,
51638 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
51639- atomic64_inc_return(&request_id),
51640+ atomic64_inc_return_unchecked(&request_id),
51641 VM_PKT_DATA_INBAND, 0);
51642
51643 if (ret)
51644diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
51645index 7672d2e..b56437f 100644
51646--- a/drivers/video/i810/i810_accel.c
51647+++ b/drivers/video/i810/i810_accel.c
51648@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
51649 }
51650 }
51651 printk("ringbuffer lockup!!!\n");
51652+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
51653 i810_report_error(mmio);
51654 par->dev_flags |= LOCKUP;
51655 info->pixmap.scan_align = 1;
51656diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
51657index 3c14e43..2630570 100644
51658--- a/drivers/video/logo/logo_linux_clut224.ppm
51659+++ b/drivers/video/logo/logo_linux_clut224.ppm
51660@@ -2,1603 +2,1123 @@ P3
51661 # Standard 224-color Linux logo
51662 80 80
51663 255
51664- 0 0 0 0 0 0 0 0 0 0 0 0
51665- 0 0 0 0 0 0 0 0 0 0 0 0
51666- 0 0 0 0 0 0 0 0 0 0 0 0
51667- 0 0 0 0 0 0 0 0 0 0 0 0
51668- 0 0 0 0 0 0 0 0 0 0 0 0
51669- 0 0 0 0 0 0 0 0 0 0 0 0
51670- 0 0 0 0 0 0 0 0 0 0 0 0
51671- 0 0 0 0 0 0 0 0 0 0 0 0
51672- 0 0 0 0 0 0 0 0 0 0 0 0
51673- 6 6 6 6 6 6 10 10 10 10 10 10
51674- 10 10 10 6 6 6 6 6 6 6 6 6
51675- 0 0 0 0 0 0 0 0 0 0 0 0
51676- 0 0 0 0 0 0 0 0 0 0 0 0
51677- 0 0 0 0 0 0 0 0 0 0 0 0
51678- 0 0 0 0 0 0 0 0 0 0 0 0
51679- 0 0 0 0 0 0 0 0 0 0 0 0
51680- 0 0 0 0 0 0 0 0 0 0 0 0
51681- 0 0 0 0 0 0 0 0 0 0 0 0
51682- 0 0 0 0 0 0 0 0 0 0 0 0
51683- 0 0 0 0 0 0 0 0 0 0 0 0
51684- 0 0 0 0 0 0 0 0 0 0 0 0
51685- 0 0 0 0 0 0 0 0 0 0 0 0
51686- 0 0 0 0 0 0 0 0 0 0 0 0
51687- 0 0 0 0 0 0 0 0 0 0 0 0
51688- 0 0 0 0 0 0 0 0 0 0 0 0
51689- 0 0 0 0 0 0 0 0 0 0 0 0
51690- 0 0 0 0 0 0 0 0 0 0 0 0
51691- 0 0 0 0 0 0 0 0 0 0 0 0
51692- 0 0 0 6 6 6 10 10 10 14 14 14
51693- 22 22 22 26 26 26 30 30 30 34 34 34
51694- 30 30 30 30 30 30 26 26 26 18 18 18
51695- 14 14 14 10 10 10 6 6 6 0 0 0
51696- 0 0 0 0 0 0 0 0 0 0 0 0
51697- 0 0 0 0 0 0 0 0 0 0 0 0
51698- 0 0 0 0 0 0 0 0 0 0 0 0
51699- 0 0 0 0 0 0 0 0 0 0 0 0
51700- 0 0 0 0 0 0 0 0 0 0 0 0
51701- 0 0 0 0 0 0 0 0 0 0 0 0
51702- 0 0 0 0 0 0 0 0 0 0 0 0
51703- 0 0 0 0 0 0 0 0 0 0 0 0
51704- 0 0 0 0 0 0 0 0 0 0 0 0
51705- 0 0 0 0 0 1 0 0 1 0 0 0
51706- 0 0 0 0 0 0 0 0 0 0 0 0
51707- 0 0 0 0 0 0 0 0 0 0 0 0
51708- 0 0 0 0 0 0 0 0 0 0 0 0
51709- 0 0 0 0 0 0 0 0 0 0 0 0
51710- 0 0 0 0 0 0 0 0 0 0 0 0
51711- 0 0 0 0 0 0 0 0 0 0 0 0
51712- 6 6 6 14 14 14 26 26 26 42 42 42
51713- 54 54 54 66 66 66 78 78 78 78 78 78
51714- 78 78 78 74 74 74 66 66 66 54 54 54
51715- 42 42 42 26 26 26 18 18 18 10 10 10
51716- 6 6 6 0 0 0 0 0 0 0 0 0
51717- 0 0 0 0 0 0 0 0 0 0 0 0
51718- 0 0 0 0 0 0 0 0 0 0 0 0
51719- 0 0 0 0 0 0 0 0 0 0 0 0
51720- 0 0 0 0 0 0 0 0 0 0 0 0
51721- 0 0 0 0 0 0 0 0 0 0 0 0
51722- 0 0 0 0 0 0 0 0 0 0 0 0
51723- 0 0 0 0 0 0 0 0 0 0 0 0
51724- 0 0 0 0 0 0 0 0 0 0 0 0
51725- 0 0 1 0 0 0 0 0 0 0 0 0
51726- 0 0 0 0 0 0 0 0 0 0 0 0
51727- 0 0 0 0 0 0 0 0 0 0 0 0
51728- 0 0 0 0 0 0 0 0 0 0 0 0
51729- 0 0 0 0 0 0 0 0 0 0 0 0
51730- 0 0 0 0 0 0 0 0 0 0 0 0
51731- 0 0 0 0 0 0 0 0 0 10 10 10
51732- 22 22 22 42 42 42 66 66 66 86 86 86
51733- 66 66 66 38 38 38 38 38 38 22 22 22
51734- 26 26 26 34 34 34 54 54 54 66 66 66
51735- 86 86 86 70 70 70 46 46 46 26 26 26
51736- 14 14 14 6 6 6 0 0 0 0 0 0
51737- 0 0 0 0 0 0 0 0 0 0 0 0
51738- 0 0 0 0 0 0 0 0 0 0 0 0
51739- 0 0 0 0 0 0 0 0 0 0 0 0
51740- 0 0 0 0 0 0 0 0 0 0 0 0
51741- 0 0 0 0 0 0 0 0 0 0 0 0
51742- 0 0 0 0 0 0 0 0 0 0 0 0
51743- 0 0 0 0 0 0 0 0 0 0 0 0
51744- 0 0 0 0 0 0 0 0 0 0 0 0
51745- 0 0 1 0 0 1 0 0 1 0 0 0
51746- 0 0 0 0 0 0 0 0 0 0 0 0
51747- 0 0 0 0 0 0 0 0 0 0 0 0
51748- 0 0 0 0 0 0 0 0 0 0 0 0
51749- 0 0 0 0 0 0 0 0 0 0 0 0
51750- 0 0 0 0 0 0 0 0 0 0 0 0
51751- 0 0 0 0 0 0 10 10 10 26 26 26
51752- 50 50 50 82 82 82 58 58 58 6 6 6
51753- 2 2 6 2 2 6 2 2 6 2 2 6
51754- 2 2 6 2 2 6 2 2 6 2 2 6
51755- 6 6 6 54 54 54 86 86 86 66 66 66
51756- 38 38 38 18 18 18 6 6 6 0 0 0
51757- 0 0 0 0 0 0 0 0 0 0 0 0
51758- 0 0 0 0 0 0 0 0 0 0 0 0
51759- 0 0 0 0 0 0 0 0 0 0 0 0
51760- 0 0 0 0 0 0 0 0 0 0 0 0
51761- 0 0 0 0 0 0 0 0 0 0 0 0
51762- 0 0 0 0 0 0 0 0 0 0 0 0
51763- 0 0 0 0 0 0 0 0 0 0 0 0
51764- 0 0 0 0 0 0 0 0 0 0 0 0
51765- 0 0 0 0 0 0 0 0 0 0 0 0
51766- 0 0 0 0 0 0 0 0 0 0 0 0
51767- 0 0 0 0 0 0 0 0 0 0 0 0
51768- 0 0 0 0 0 0 0 0 0 0 0 0
51769- 0 0 0 0 0 0 0 0 0 0 0 0
51770- 0 0 0 0 0 0 0 0 0 0 0 0
51771- 0 0 0 6 6 6 22 22 22 50 50 50
51772- 78 78 78 34 34 34 2 2 6 2 2 6
51773- 2 2 6 2 2 6 2 2 6 2 2 6
51774- 2 2 6 2 2 6 2 2 6 2 2 6
51775- 2 2 6 2 2 6 6 6 6 70 70 70
51776- 78 78 78 46 46 46 22 22 22 6 6 6
51777- 0 0 0 0 0 0 0 0 0 0 0 0
51778- 0 0 0 0 0 0 0 0 0 0 0 0
51779- 0 0 0 0 0 0 0 0 0 0 0 0
51780- 0 0 0 0 0 0 0 0 0 0 0 0
51781- 0 0 0 0 0 0 0 0 0 0 0 0
51782- 0 0 0 0 0 0 0 0 0 0 0 0
51783- 0 0 0 0 0 0 0 0 0 0 0 0
51784- 0 0 0 0 0 0 0 0 0 0 0 0
51785- 0 0 1 0 0 1 0 0 1 0 0 0
51786- 0 0 0 0 0 0 0 0 0 0 0 0
51787- 0 0 0 0 0 0 0 0 0 0 0 0
51788- 0 0 0 0 0 0 0 0 0 0 0 0
51789- 0 0 0 0 0 0 0 0 0 0 0 0
51790- 0 0 0 0 0 0 0 0 0 0 0 0
51791- 6 6 6 18 18 18 42 42 42 82 82 82
51792- 26 26 26 2 2 6 2 2 6 2 2 6
51793- 2 2 6 2 2 6 2 2 6 2 2 6
51794- 2 2 6 2 2 6 2 2 6 14 14 14
51795- 46 46 46 34 34 34 6 6 6 2 2 6
51796- 42 42 42 78 78 78 42 42 42 18 18 18
51797- 6 6 6 0 0 0 0 0 0 0 0 0
51798- 0 0 0 0 0 0 0 0 0 0 0 0
51799- 0 0 0 0 0 0 0 0 0 0 0 0
51800- 0 0 0 0 0 0 0 0 0 0 0 0
51801- 0 0 0 0 0 0 0 0 0 0 0 0
51802- 0 0 0 0 0 0 0 0 0 0 0 0
51803- 0 0 0 0 0 0 0 0 0 0 0 0
51804- 0 0 0 0 0 0 0 0 0 0 0 0
51805- 0 0 1 0 0 0 0 0 1 0 0 0
51806- 0 0 0 0 0 0 0 0 0 0 0 0
51807- 0 0 0 0 0 0 0 0 0 0 0 0
51808- 0 0 0 0 0 0 0 0 0 0 0 0
51809- 0 0 0 0 0 0 0 0 0 0 0 0
51810- 0 0 0 0 0 0 0 0 0 0 0 0
51811- 10 10 10 30 30 30 66 66 66 58 58 58
51812- 2 2 6 2 2 6 2 2 6 2 2 6
51813- 2 2 6 2 2 6 2 2 6 2 2 6
51814- 2 2 6 2 2 6 2 2 6 26 26 26
51815- 86 86 86 101 101 101 46 46 46 10 10 10
51816- 2 2 6 58 58 58 70 70 70 34 34 34
51817- 10 10 10 0 0 0 0 0 0 0 0 0
51818- 0 0 0 0 0 0 0 0 0 0 0 0
51819- 0 0 0 0 0 0 0 0 0 0 0 0
51820- 0 0 0 0 0 0 0 0 0 0 0 0
51821- 0 0 0 0 0 0 0 0 0 0 0 0
51822- 0 0 0 0 0 0 0 0 0 0 0 0
51823- 0 0 0 0 0 0 0 0 0 0 0 0
51824- 0 0 0 0 0 0 0 0 0 0 0 0
51825- 0 0 1 0 0 1 0 0 1 0 0 0
51826- 0 0 0 0 0 0 0 0 0 0 0 0
51827- 0 0 0 0 0 0 0 0 0 0 0 0
51828- 0 0 0 0 0 0 0 0 0 0 0 0
51829- 0 0 0 0 0 0 0 0 0 0 0 0
51830- 0 0 0 0 0 0 0 0 0 0 0 0
51831- 14 14 14 42 42 42 86 86 86 10 10 10
51832- 2 2 6 2 2 6 2 2 6 2 2 6
51833- 2 2 6 2 2 6 2 2 6 2 2 6
51834- 2 2 6 2 2 6 2 2 6 30 30 30
51835- 94 94 94 94 94 94 58 58 58 26 26 26
51836- 2 2 6 6 6 6 78 78 78 54 54 54
51837- 22 22 22 6 6 6 0 0 0 0 0 0
51838- 0 0 0 0 0 0 0 0 0 0 0 0
51839- 0 0 0 0 0 0 0 0 0 0 0 0
51840- 0 0 0 0 0 0 0 0 0 0 0 0
51841- 0 0 0 0 0 0 0 0 0 0 0 0
51842- 0 0 0 0 0 0 0 0 0 0 0 0
51843- 0 0 0 0 0 0 0 0 0 0 0 0
51844- 0 0 0 0 0 0 0 0 0 0 0 0
51845- 0 0 0 0 0 0 0 0 0 0 0 0
51846- 0 0 0 0 0 0 0 0 0 0 0 0
51847- 0 0 0 0 0 0 0 0 0 0 0 0
51848- 0 0 0 0 0 0 0 0 0 0 0 0
51849- 0 0 0 0 0 0 0 0 0 0 0 0
51850- 0 0 0 0 0 0 0 0 0 6 6 6
51851- 22 22 22 62 62 62 62 62 62 2 2 6
51852- 2 2 6 2 2 6 2 2 6 2 2 6
51853- 2 2 6 2 2 6 2 2 6 2 2 6
51854- 2 2 6 2 2 6 2 2 6 26 26 26
51855- 54 54 54 38 38 38 18 18 18 10 10 10
51856- 2 2 6 2 2 6 34 34 34 82 82 82
51857- 38 38 38 14 14 14 0 0 0 0 0 0
51858- 0 0 0 0 0 0 0 0 0 0 0 0
51859- 0 0 0 0 0 0 0 0 0 0 0 0
51860- 0 0 0 0 0 0 0 0 0 0 0 0
51861- 0 0 0 0 0 0 0 0 0 0 0 0
51862- 0 0 0 0 0 0 0 0 0 0 0 0
51863- 0 0 0 0 0 0 0 0 0 0 0 0
51864- 0 0 0 0 0 0 0 0 0 0 0 0
51865- 0 0 0 0 0 1 0 0 1 0 0 0
51866- 0 0 0 0 0 0 0 0 0 0 0 0
51867- 0 0 0 0 0 0 0 0 0 0 0 0
51868- 0 0 0 0 0 0 0 0 0 0 0 0
51869- 0 0 0 0 0 0 0 0 0 0 0 0
51870- 0 0 0 0 0 0 0 0 0 6 6 6
51871- 30 30 30 78 78 78 30 30 30 2 2 6
51872- 2 2 6 2 2 6 2 2 6 2 2 6
51873- 2 2 6 2 2 6 2 2 6 2 2 6
51874- 2 2 6 2 2 6 2 2 6 10 10 10
51875- 10 10 10 2 2 6 2 2 6 2 2 6
51876- 2 2 6 2 2 6 2 2 6 78 78 78
51877- 50 50 50 18 18 18 6 6 6 0 0 0
51878- 0 0 0 0 0 0 0 0 0 0 0 0
51879- 0 0 0 0 0 0 0 0 0 0 0 0
51880- 0 0 0 0 0 0 0 0 0 0 0 0
51881- 0 0 0 0 0 0 0 0 0 0 0 0
51882- 0 0 0 0 0 0 0 0 0 0 0 0
51883- 0 0 0 0 0 0 0 0 0 0 0 0
51884- 0 0 0 0 0 0 0 0 0 0 0 0
51885- 0 0 1 0 0 0 0 0 0 0 0 0
51886- 0 0 0 0 0 0 0 0 0 0 0 0
51887- 0 0 0 0 0 0 0 0 0 0 0 0
51888- 0 0 0 0 0 0 0 0 0 0 0 0
51889- 0 0 0 0 0 0 0 0 0 0 0 0
51890- 0 0 0 0 0 0 0 0 0 10 10 10
51891- 38 38 38 86 86 86 14 14 14 2 2 6
51892- 2 2 6 2 2 6 2 2 6 2 2 6
51893- 2 2 6 2 2 6 2 2 6 2 2 6
51894- 2 2 6 2 2 6 2 2 6 2 2 6
51895- 2 2 6 2 2 6 2 2 6 2 2 6
51896- 2 2 6 2 2 6 2 2 6 54 54 54
51897- 66 66 66 26 26 26 6 6 6 0 0 0
51898- 0 0 0 0 0 0 0 0 0 0 0 0
51899- 0 0 0 0 0 0 0 0 0 0 0 0
51900- 0 0 0 0 0 0 0 0 0 0 0 0
51901- 0 0 0 0 0 0 0 0 0 0 0 0
51902- 0 0 0 0 0 0 0 0 0 0 0 0
51903- 0 0 0 0 0 0 0 0 0 0 0 0
51904- 0 0 0 0 0 0 0 0 0 0 0 0
51905- 0 0 0 0 0 1 0 0 1 0 0 0
51906- 0 0 0 0 0 0 0 0 0 0 0 0
51907- 0 0 0 0 0 0 0 0 0 0 0 0
51908- 0 0 0 0 0 0 0 0 0 0 0 0
51909- 0 0 0 0 0 0 0 0 0 0 0 0
51910- 0 0 0 0 0 0 0 0 0 14 14 14
51911- 42 42 42 82 82 82 2 2 6 2 2 6
51912- 2 2 6 6 6 6 10 10 10 2 2 6
51913- 2 2 6 2 2 6 2 2 6 2 2 6
51914- 2 2 6 2 2 6 2 2 6 6 6 6
51915- 14 14 14 10 10 10 2 2 6 2 2 6
51916- 2 2 6 2 2 6 2 2 6 18 18 18
51917- 82 82 82 34 34 34 10 10 10 0 0 0
51918- 0 0 0 0 0 0 0 0 0 0 0 0
51919- 0 0 0 0 0 0 0 0 0 0 0 0
51920- 0 0 0 0 0 0 0 0 0 0 0 0
51921- 0 0 0 0 0 0 0 0 0 0 0 0
51922- 0 0 0 0 0 0 0 0 0 0 0 0
51923- 0 0 0 0 0 0 0 0 0 0 0 0
51924- 0 0 0 0 0 0 0 0 0 0 0 0
51925- 0 0 1 0 0 0 0 0 0 0 0 0
51926- 0 0 0 0 0 0 0 0 0 0 0 0
51927- 0 0 0 0 0 0 0 0 0 0 0 0
51928- 0 0 0 0 0 0 0 0 0 0 0 0
51929- 0 0 0 0 0 0 0 0 0 0 0 0
51930- 0 0 0 0 0 0 0 0 0 14 14 14
51931- 46 46 46 86 86 86 2 2 6 2 2 6
51932- 6 6 6 6 6 6 22 22 22 34 34 34
51933- 6 6 6 2 2 6 2 2 6 2 2 6
51934- 2 2 6 2 2 6 18 18 18 34 34 34
51935- 10 10 10 50 50 50 22 22 22 2 2 6
51936- 2 2 6 2 2 6 2 2 6 10 10 10
51937- 86 86 86 42 42 42 14 14 14 0 0 0
51938- 0 0 0 0 0 0 0 0 0 0 0 0
51939- 0 0 0 0 0 0 0 0 0 0 0 0
51940- 0 0 0 0 0 0 0 0 0 0 0 0
51941- 0 0 0 0 0 0 0 0 0 0 0 0
51942- 0 0 0 0 0 0 0 0 0 0 0 0
51943- 0 0 0 0 0 0 0 0 0 0 0 0
51944- 0 0 0 0 0 0 0 0 0 0 0 0
51945- 0 0 1 0 0 1 0 0 1 0 0 0
51946- 0 0 0 0 0 0 0 0 0 0 0 0
51947- 0 0 0 0 0 0 0 0 0 0 0 0
51948- 0 0 0 0 0 0 0 0 0 0 0 0
51949- 0 0 0 0 0 0 0 0 0 0 0 0
51950- 0 0 0 0 0 0 0 0 0 14 14 14
51951- 46 46 46 86 86 86 2 2 6 2 2 6
51952- 38 38 38 116 116 116 94 94 94 22 22 22
51953- 22 22 22 2 2 6 2 2 6 2 2 6
51954- 14 14 14 86 86 86 138 138 138 162 162 162
51955-154 154 154 38 38 38 26 26 26 6 6 6
51956- 2 2 6 2 2 6 2 2 6 2 2 6
51957- 86 86 86 46 46 46 14 14 14 0 0 0
51958- 0 0 0 0 0 0 0 0 0 0 0 0
51959- 0 0 0 0 0 0 0 0 0 0 0 0
51960- 0 0 0 0 0 0 0 0 0 0 0 0
51961- 0 0 0 0 0 0 0 0 0 0 0 0
51962- 0 0 0 0 0 0 0 0 0 0 0 0
51963- 0 0 0 0 0 0 0 0 0 0 0 0
51964- 0 0 0 0 0 0 0 0 0 0 0 0
51965- 0 0 0 0 0 0 0 0 0 0 0 0
51966- 0 0 0 0 0 0 0 0 0 0 0 0
51967- 0 0 0 0 0 0 0 0 0 0 0 0
51968- 0 0 0 0 0 0 0 0 0 0 0 0
51969- 0 0 0 0 0 0 0 0 0 0 0 0
51970- 0 0 0 0 0 0 0 0 0 14 14 14
51971- 46 46 46 86 86 86 2 2 6 14 14 14
51972-134 134 134 198 198 198 195 195 195 116 116 116
51973- 10 10 10 2 2 6 2 2 6 6 6 6
51974-101 98 89 187 187 187 210 210 210 218 218 218
51975-214 214 214 134 134 134 14 14 14 6 6 6
51976- 2 2 6 2 2 6 2 2 6 2 2 6
51977- 86 86 86 50 50 50 18 18 18 6 6 6
51978- 0 0 0 0 0 0 0 0 0 0 0 0
51979- 0 0 0 0 0 0 0 0 0 0 0 0
51980- 0 0 0 0 0 0 0 0 0 0 0 0
51981- 0 0 0 0 0 0 0 0 0 0 0 0
51982- 0 0 0 0 0 0 0 0 0 0 0 0
51983- 0 0 0 0 0 0 0 0 0 0 0 0
51984- 0 0 0 0 0 0 0 0 1 0 0 0
51985- 0 0 1 0 0 1 0 0 1 0 0 0
51986- 0 0 0 0 0 0 0 0 0 0 0 0
51987- 0 0 0 0 0 0 0 0 0 0 0 0
51988- 0 0 0 0 0 0 0 0 0 0 0 0
51989- 0 0 0 0 0 0 0 0 0 0 0 0
51990- 0 0 0 0 0 0 0 0 0 14 14 14
51991- 46 46 46 86 86 86 2 2 6 54 54 54
51992-218 218 218 195 195 195 226 226 226 246 246 246
51993- 58 58 58 2 2 6 2 2 6 30 30 30
51994-210 210 210 253 253 253 174 174 174 123 123 123
51995-221 221 221 234 234 234 74 74 74 2 2 6
51996- 2 2 6 2 2 6 2 2 6 2 2 6
51997- 70 70 70 58 58 58 22 22 22 6 6 6
51998- 0 0 0 0 0 0 0 0 0 0 0 0
51999- 0 0 0 0 0 0 0 0 0 0 0 0
52000- 0 0 0 0 0 0 0 0 0 0 0 0
52001- 0 0 0 0 0 0 0 0 0 0 0 0
52002- 0 0 0 0 0 0 0 0 0 0 0 0
52003- 0 0 0 0 0 0 0 0 0 0 0 0
52004- 0 0 0 0 0 0 0 0 0 0 0 0
52005- 0 0 0 0 0 0 0 0 0 0 0 0
52006- 0 0 0 0 0 0 0 0 0 0 0 0
52007- 0 0 0 0 0 0 0 0 0 0 0 0
52008- 0 0 0 0 0 0 0 0 0 0 0 0
52009- 0 0 0 0 0 0 0 0 0 0 0 0
52010- 0 0 0 0 0 0 0 0 0 14 14 14
52011- 46 46 46 82 82 82 2 2 6 106 106 106
52012-170 170 170 26 26 26 86 86 86 226 226 226
52013-123 123 123 10 10 10 14 14 14 46 46 46
52014-231 231 231 190 190 190 6 6 6 70 70 70
52015- 90 90 90 238 238 238 158 158 158 2 2 6
52016- 2 2 6 2 2 6 2 2 6 2 2 6
52017- 70 70 70 58 58 58 22 22 22 6 6 6
52018- 0 0 0 0 0 0 0 0 0 0 0 0
52019- 0 0 0 0 0 0 0 0 0 0 0 0
52020- 0 0 0 0 0 0 0 0 0 0 0 0
52021- 0 0 0 0 0 0 0 0 0 0 0 0
52022- 0 0 0 0 0 0 0 0 0 0 0 0
52023- 0 0 0 0 0 0 0 0 0 0 0 0
52024- 0 0 0 0 0 0 0 0 1 0 0 0
52025- 0 0 1 0 0 1 0 0 1 0 0 0
52026- 0 0 0 0 0 0 0 0 0 0 0 0
52027- 0 0 0 0 0 0 0 0 0 0 0 0
52028- 0 0 0 0 0 0 0 0 0 0 0 0
52029- 0 0 0 0 0 0 0 0 0 0 0 0
52030- 0 0 0 0 0 0 0 0 0 14 14 14
52031- 42 42 42 86 86 86 6 6 6 116 116 116
52032-106 106 106 6 6 6 70 70 70 149 149 149
52033-128 128 128 18 18 18 38 38 38 54 54 54
52034-221 221 221 106 106 106 2 2 6 14 14 14
52035- 46 46 46 190 190 190 198 198 198 2 2 6
52036- 2 2 6 2 2 6 2 2 6 2 2 6
52037- 74 74 74 62 62 62 22 22 22 6 6 6
52038- 0 0 0 0 0 0 0 0 0 0 0 0
52039- 0 0 0 0 0 0 0 0 0 0 0 0
52040- 0 0 0 0 0 0 0 0 0 0 0 0
52041- 0 0 0 0 0 0 0 0 0 0 0 0
52042- 0 0 0 0 0 0 0 0 0 0 0 0
52043- 0 0 0 0 0 0 0 0 0 0 0 0
52044- 0 0 0 0 0 0 0 0 1 0 0 0
52045- 0 0 1 0 0 0 0 0 1 0 0 0
52046- 0 0 0 0 0 0 0 0 0 0 0 0
52047- 0 0 0 0 0 0 0 0 0 0 0 0
52048- 0 0 0 0 0 0 0 0 0 0 0 0
52049- 0 0 0 0 0 0 0 0 0 0 0 0
52050- 0 0 0 0 0 0 0 0 0 14 14 14
52051- 42 42 42 94 94 94 14 14 14 101 101 101
52052-128 128 128 2 2 6 18 18 18 116 116 116
52053-118 98 46 121 92 8 121 92 8 98 78 10
52054-162 162 162 106 106 106 2 2 6 2 2 6
52055- 2 2 6 195 195 195 195 195 195 6 6 6
52056- 2 2 6 2 2 6 2 2 6 2 2 6
52057- 74 74 74 62 62 62 22 22 22 6 6 6
52058- 0 0 0 0 0 0 0 0 0 0 0 0
52059- 0 0 0 0 0 0 0 0 0 0 0 0
52060- 0 0 0 0 0 0 0 0 0 0 0 0
52061- 0 0 0 0 0 0 0 0 0 0 0 0
52062- 0 0 0 0 0 0 0 0 0 0 0 0
52063- 0 0 0 0 0 0 0 0 0 0 0 0
52064- 0 0 0 0 0 0 0 0 1 0 0 1
52065- 0 0 1 0 0 0 0 0 1 0 0 0
52066- 0 0 0 0 0 0 0 0 0 0 0 0
52067- 0 0 0 0 0 0 0 0 0 0 0 0
52068- 0 0 0 0 0 0 0 0 0 0 0 0
52069- 0 0 0 0 0 0 0 0 0 0 0 0
52070- 0 0 0 0 0 0 0 0 0 10 10 10
52071- 38 38 38 90 90 90 14 14 14 58 58 58
52072-210 210 210 26 26 26 54 38 6 154 114 10
52073-226 170 11 236 186 11 225 175 15 184 144 12
52074-215 174 15 175 146 61 37 26 9 2 2 6
52075- 70 70 70 246 246 246 138 138 138 2 2 6
52076- 2 2 6 2 2 6 2 2 6 2 2 6
52077- 70 70 70 66 66 66 26 26 26 6 6 6
52078- 0 0 0 0 0 0 0 0 0 0 0 0
52079- 0 0 0 0 0 0 0 0 0 0 0 0
52080- 0 0 0 0 0 0 0 0 0 0 0 0
52081- 0 0 0 0 0 0 0 0 0 0 0 0
52082- 0 0 0 0 0 0 0 0 0 0 0 0
52083- 0 0 0 0 0 0 0 0 0 0 0 0
52084- 0 0 0 0 0 0 0 0 0 0 0 0
52085- 0 0 0 0 0 0 0 0 0 0 0 0
52086- 0 0 0 0 0 0 0 0 0 0 0 0
52087- 0 0 0 0 0 0 0 0 0 0 0 0
52088- 0 0 0 0 0 0 0 0 0 0 0 0
52089- 0 0 0 0 0 0 0 0 0 0 0 0
52090- 0 0 0 0 0 0 0 0 0 10 10 10
52091- 38 38 38 86 86 86 14 14 14 10 10 10
52092-195 195 195 188 164 115 192 133 9 225 175 15
52093-239 182 13 234 190 10 232 195 16 232 200 30
52094-245 207 45 241 208 19 232 195 16 184 144 12
52095-218 194 134 211 206 186 42 42 42 2 2 6
52096- 2 2 6 2 2 6 2 2 6 2 2 6
52097- 50 50 50 74 74 74 30 30 30 6 6 6
52098- 0 0 0 0 0 0 0 0 0 0 0 0
52099- 0 0 0 0 0 0 0 0 0 0 0 0
52100- 0 0 0 0 0 0 0 0 0 0 0 0
52101- 0 0 0 0 0 0 0 0 0 0 0 0
52102- 0 0 0 0 0 0 0 0 0 0 0 0
52103- 0 0 0 0 0 0 0 0 0 0 0 0
52104- 0 0 0 0 0 0 0 0 0 0 0 0
52105- 0 0 0 0 0 0 0 0 0 0 0 0
52106- 0 0 0 0 0 0 0 0 0 0 0 0
52107- 0 0 0 0 0 0 0 0 0 0 0 0
52108- 0 0 0 0 0 0 0 0 0 0 0 0
52109- 0 0 0 0 0 0 0 0 0 0 0 0
52110- 0 0 0 0 0 0 0 0 0 10 10 10
52111- 34 34 34 86 86 86 14 14 14 2 2 6
52112-121 87 25 192 133 9 219 162 10 239 182 13
52113-236 186 11 232 195 16 241 208 19 244 214 54
52114-246 218 60 246 218 38 246 215 20 241 208 19
52115-241 208 19 226 184 13 121 87 25 2 2 6
52116- 2 2 6 2 2 6 2 2 6 2 2 6
52117- 50 50 50 82 82 82 34 34 34 10 10 10
52118- 0 0 0 0 0 0 0 0 0 0 0 0
52119- 0 0 0 0 0 0 0 0 0 0 0 0
52120- 0 0 0 0 0 0 0 0 0 0 0 0
52121- 0 0 0 0 0 0 0 0 0 0 0 0
52122- 0 0 0 0 0 0 0 0 0 0 0 0
52123- 0 0 0 0 0 0 0 0 0 0 0 0
52124- 0 0 0 0 0 0 0 0 0 0 0 0
52125- 0 0 0 0 0 0 0 0 0 0 0 0
52126- 0 0 0 0 0 0 0 0 0 0 0 0
52127- 0 0 0 0 0 0 0 0 0 0 0 0
52128- 0 0 0 0 0 0 0 0 0 0 0 0
52129- 0 0 0 0 0 0 0 0 0 0 0 0
52130- 0 0 0 0 0 0 0 0 0 10 10 10
52131- 34 34 34 82 82 82 30 30 30 61 42 6
52132-180 123 7 206 145 10 230 174 11 239 182 13
52133-234 190 10 238 202 15 241 208 19 246 218 74
52134-246 218 38 246 215 20 246 215 20 246 215 20
52135-226 184 13 215 174 15 184 144 12 6 6 6
52136- 2 2 6 2 2 6 2 2 6 2 2 6
52137- 26 26 26 94 94 94 42 42 42 14 14 14
52138- 0 0 0 0 0 0 0 0 0 0 0 0
52139- 0 0 0 0 0 0 0 0 0 0 0 0
52140- 0 0 0 0 0 0 0 0 0 0 0 0
52141- 0 0 0 0 0 0 0 0 0 0 0 0
52142- 0 0 0 0 0 0 0 0 0 0 0 0
52143- 0 0 0 0 0 0 0 0 0 0 0 0
52144- 0 0 0 0 0 0 0 0 0 0 0 0
52145- 0 0 0 0 0 0 0 0 0 0 0 0
52146- 0 0 0 0 0 0 0 0 0 0 0 0
52147- 0 0 0 0 0 0 0 0 0 0 0 0
52148- 0 0 0 0 0 0 0 0 0 0 0 0
52149- 0 0 0 0 0 0 0 0 0 0 0 0
52150- 0 0 0 0 0 0 0 0 0 10 10 10
52151- 30 30 30 78 78 78 50 50 50 104 69 6
52152-192 133 9 216 158 10 236 178 12 236 186 11
52153-232 195 16 241 208 19 244 214 54 245 215 43
52154-246 215 20 246 215 20 241 208 19 198 155 10
52155-200 144 11 216 158 10 156 118 10 2 2 6
52156- 2 2 6 2 2 6 2 2 6 2 2 6
52157- 6 6 6 90 90 90 54 54 54 18 18 18
52158- 6 6 6 0 0 0 0 0 0 0 0 0
52159- 0 0 0 0 0 0 0 0 0 0 0 0
52160- 0 0 0 0 0 0 0 0 0 0 0 0
52161- 0 0 0 0 0 0 0 0 0 0 0 0
52162- 0 0 0 0 0 0 0 0 0 0 0 0
52163- 0 0 0 0 0 0 0 0 0 0 0 0
52164- 0 0 0 0 0 0 0 0 0 0 0 0
52165- 0 0 0 0 0 0 0 0 0 0 0 0
52166- 0 0 0 0 0 0 0 0 0 0 0 0
52167- 0 0 0 0 0 0 0 0 0 0 0 0
52168- 0 0 0 0 0 0 0 0 0 0 0 0
52169- 0 0 0 0 0 0 0 0 0 0 0 0
52170- 0 0 0 0 0 0 0 0 0 10 10 10
52171- 30 30 30 78 78 78 46 46 46 22 22 22
52172-137 92 6 210 162 10 239 182 13 238 190 10
52173-238 202 15 241 208 19 246 215 20 246 215 20
52174-241 208 19 203 166 17 185 133 11 210 150 10
52175-216 158 10 210 150 10 102 78 10 2 2 6
52176- 6 6 6 54 54 54 14 14 14 2 2 6
52177- 2 2 6 62 62 62 74 74 74 30 30 30
52178- 10 10 10 0 0 0 0 0 0 0 0 0
52179- 0 0 0 0 0 0 0 0 0 0 0 0
52180- 0 0 0 0 0 0 0 0 0 0 0 0
52181- 0 0 0 0 0 0 0 0 0 0 0 0
52182- 0 0 0 0 0 0 0 0 0 0 0 0
52183- 0 0 0 0 0 0 0 0 0 0 0 0
52184- 0 0 0 0 0 0 0 0 0 0 0 0
52185- 0 0 0 0 0 0 0 0 0 0 0 0
52186- 0 0 0 0 0 0 0 0 0 0 0 0
52187- 0 0 0 0 0 0 0 0 0 0 0 0
52188- 0 0 0 0 0 0 0 0 0 0 0 0
52189- 0 0 0 0 0 0 0 0 0 0 0 0
52190- 0 0 0 0 0 0 0 0 0 10 10 10
52191- 34 34 34 78 78 78 50 50 50 6 6 6
52192- 94 70 30 139 102 15 190 146 13 226 184 13
52193-232 200 30 232 195 16 215 174 15 190 146 13
52194-168 122 10 192 133 9 210 150 10 213 154 11
52195-202 150 34 182 157 106 101 98 89 2 2 6
52196- 2 2 6 78 78 78 116 116 116 58 58 58
52197- 2 2 6 22 22 22 90 90 90 46 46 46
52198- 18 18 18 6 6 6 0 0 0 0 0 0
52199- 0 0 0 0 0 0 0 0 0 0 0 0
52200- 0 0 0 0 0 0 0 0 0 0 0 0
52201- 0 0 0 0 0 0 0 0 0 0 0 0
52202- 0 0 0 0 0 0 0 0 0 0 0 0
52203- 0 0 0 0 0 0 0 0 0 0 0 0
52204- 0 0 0 0 0 0 0 0 0 0 0 0
52205- 0 0 0 0 0 0 0 0 0 0 0 0
52206- 0 0 0 0 0 0 0 0 0 0 0 0
52207- 0 0 0 0 0 0 0 0 0 0 0 0
52208- 0 0 0 0 0 0 0 0 0 0 0 0
52209- 0 0 0 0 0 0 0 0 0 0 0 0
52210- 0 0 0 0 0 0 0 0 0 10 10 10
52211- 38 38 38 86 86 86 50 50 50 6 6 6
52212-128 128 128 174 154 114 156 107 11 168 122 10
52213-198 155 10 184 144 12 197 138 11 200 144 11
52214-206 145 10 206 145 10 197 138 11 188 164 115
52215-195 195 195 198 198 198 174 174 174 14 14 14
52216- 2 2 6 22 22 22 116 116 116 116 116 116
52217- 22 22 22 2 2 6 74 74 74 70 70 70
52218- 30 30 30 10 10 10 0 0 0 0 0 0
52219- 0 0 0 0 0 0 0 0 0 0 0 0
52220- 0 0 0 0 0 0 0 0 0 0 0 0
52221- 0 0 0 0 0 0 0 0 0 0 0 0
52222- 0 0 0 0 0 0 0 0 0 0 0 0
52223- 0 0 0 0 0 0 0 0 0 0 0 0
52224- 0 0 0 0 0 0 0 0 0 0 0 0
52225- 0 0 0 0 0 0 0 0 0 0 0 0
52226- 0 0 0 0 0 0 0 0 0 0 0 0
52227- 0 0 0 0 0 0 0 0 0 0 0 0
52228- 0 0 0 0 0 0 0 0 0 0 0 0
52229- 0 0 0 0 0 0 0 0 0 0 0 0
52230- 0 0 0 0 0 0 6 6 6 18 18 18
52231- 50 50 50 101 101 101 26 26 26 10 10 10
52232-138 138 138 190 190 190 174 154 114 156 107 11
52233-197 138 11 200 144 11 197 138 11 192 133 9
52234-180 123 7 190 142 34 190 178 144 187 187 187
52235-202 202 202 221 221 221 214 214 214 66 66 66
52236- 2 2 6 2 2 6 50 50 50 62 62 62
52237- 6 6 6 2 2 6 10 10 10 90 90 90
52238- 50 50 50 18 18 18 6 6 6 0 0 0
52239- 0 0 0 0 0 0 0 0 0 0 0 0
52240- 0 0 0 0 0 0 0 0 0 0 0 0
52241- 0 0 0 0 0 0 0 0 0 0 0 0
52242- 0 0 0 0 0 0 0 0 0 0 0 0
52243- 0 0 0 0 0 0 0 0 0 0 0 0
52244- 0 0 0 0 0 0 0 0 0 0 0 0
52245- 0 0 0 0 0 0 0 0 0 0 0 0
52246- 0 0 0 0 0 0 0 0 0 0 0 0
52247- 0 0 0 0 0 0 0 0 0 0 0 0
52248- 0 0 0 0 0 0 0 0 0 0 0 0
52249- 0 0 0 0 0 0 0 0 0 0 0 0
52250- 0 0 0 0 0 0 10 10 10 34 34 34
52251- 74 74 74 74 74 74 2 2 6 6 6 6
52252-144 144 144 198 198 198 190 190 190 178 166 146
52253-154 121 60 156 107 11 156 107 11 168 124 44
52254-174 154 114 187 187 187 190 190 190 210 210 210
52255-246 246 246 253 253 253 253 253 253 182 182 182
52256- 6 6 6 2 2 6 2 2 6 2 2 6
52257- 2 2 6 2 2 6 2 2 6 62 62 62
52258- 74 74 74 34 34 34 14 14 14 0 0 0
52259- 0 0 0 0 0 0 0 0 0 0 0 0
52260- 0 0 0 0 0 0 0 0 0 0 0 0
52261- 0 0 0 0 0 0 0 0 0 0 0 0
52262- 0 0 0 0 0 0 0 0 0 0 0 0
52263- 0 0 0 0 0 0 0 0 0 0 0 0
52264- 0 0 0 0 0 0 0 0 0 0 0 0
52265- 0 0 0 0 0 0 0 0 0 0 0 0
52266- 0 0 0 0 0 0 0 0 0 0 0 0
52267- 0 0 0 0 0 0 0 0 0 0 0 0
52268- 0 0 0 0 0 0 0 0 0 0 0 0
52269- 0 0 0 0 0 0 0 0 0 0 0 0
52270- 0 0 0 10 10 10 22 22 22 54 54 54
52271- 94 94 94 18 18 18 2 2 6 46 46 46
52272-234 234 234 221 221 221 190 190 190 190 190 190
52273-190 190 190 187 187 187 187 187 187 190 190 190
52274-190 190 190 195 195 195 214 214 214 242 242 242
52275-253 253 253 253 253 253 253 253 253 253 253 253
52276- 82 82 82 2 2 6 2 2 6 2 2 6
52277- 2 2 6 2 2 6 2 2 6 14 14 14
52278- 86 86 86 54 54 54 22 22 22 6 6 6
52279- 0 0 0 0 0 0 0 0 0 0 0 0
52280- 0 0 0 0 0 0 0 0 0 0 0 0
52281- 0 0 0 0 0 0 0 0 0 0 0 0
52282- 0 0 0 0 0 0 0 0 0 0 0 0
52283- 0 0 0 0 0 0 0 0 0 0 0 0
52284- 0 0 0 0 0 0 0 0 0 0 0 0
52285- 0 0 0 0 0 0 0 0 0 0 0 0
52286- 0 0 0 0 0 0 0 0 0 0 0 0
52287- 0 0 0 0 0 0 0 0 0 0 0 0
52288- 0 0 0 0 0 0 0 0 0 0 0 0
52289- 0 0 0 0 0 0 0 0 0 0 0 0
52290- 6 6 6 18 18 18 46 46 46 90 90 90
52291- 46 46 46 18 18 18 6 6 6 182 182 182
52292-253 253 253 246 246 246 206 206 206 190 190 190
52293-190 190 190 190 190 190 190 190 190 190 190 190
52294-206 206 206 231 231 231 250 250 250 253 253 253
52295-253 253 253 253 253 253 253 253 253 253 253 253
52296-202 202 202 14 14 14 2 2 6 2 2 6
52297- 2 2 6 2 2 6 2 2 6 2 2 6
52298- 42 42 42 86 86 86 42 42 42 18 18 18
52299- 6 6 6 0 0 0 0 0 0 0 0 0
52300- 0 0 0 0 0 0 0 0 0 0 0 0
52301- 0 0 0 0 0 0 0 0 0 0 0 0
52302- 0 0 0 0 0 0 0 0 0 0 0 0
52303- 0 0 0 0 0 0 0 0 0 0 0 0
52304- 0 0 0 0 0 0 0 0 0 0 0 0
52305- 0 0 0 0 0 0 0 0 0 0 0 0
52306- 0 0 0 0 0 0 0 0 0 0 0 0
52307- 0 0 0 0 0 0 0 0 0 0 0 0
52308- 0 0 0 0 0 0 0 0 0 0 0 0
52309- 0 0 0 0 0 0 0 0 0 6 6 6
52310- 14 14 14 38 38 38 74 74 74 66 66 66
52311- 2 2 6 6 6 6 90 90 90 250 250 250
52312-253 253 253 253 253 253 238 238 238 198 198 198
52313-190 190 190 190 190 190 195 195 195 221 221 221
52314-246 246 246 253 253 253 253 253 253 253 253 253
52315-253 253 253 253 253 253 253 253 253 253 253 253
52316-253 253 253 82 82 82 2 2 6 2 2 6
52317- 2 2 6 2 2 6 2 2 6 2 2 6
52318- 2 2 6 78 78 78 70 70 70 34 34 34
52319- 14 14 14 6 6 6 0 0 0 0 0 0
52320- 0 0 0 0 0 0 0 0 0 0 0 0
52321- 0 0 0 0 0 0 0 0 0 0 0 0
52322- 0 0 0 0 0 0 0 0 0 0 0 0
52323- 0 0 0 0 0 0 0 0 0 0 0 0
52324- 0 0 0 0 0 0 0 0 0 0 0 0
52325- 0 0 0 0 0 0 0 0 0 0 0 0
52326- 0 0 0 0 0 0 0 0 0 0 0 0
52327- 0 0 0 0 0 0 0 0 0 0 0 0
52328- 0 0 0 0 0 0 0 0 0 0 0 0
52329- 0 0 0 0 0 0 0 0 0 14 14 14
52330- 34 34 34 66 66 66 78 78 78 6 6 6
52331- 2 2 6 18 18 18 218 218 218 253 253 253
52332-253 253 253 253 253 253 253 253 253 246 246 246
52333-226 226 226 231 231 231 246 246 246 253 253 253
52334-253 253 253 253 253 253 253 253 253 253 253 253
52335-253 253 253 253 253 253 253 253 253 253 253 253
52336-253 253 253 178 178 178 2 2 6 2 2 6
52337- 2 2 6 2 2 6 2 2 6 2 2 6
52338- 2 2 6 18 18 18 90 90 90 62 62 62
52339- 30 30 30 10 10 10 0 0 0 0 0 0
52340- 0 0 0 0 0 0 0 0 0 0 0 0
52341- 0 0 0 0 0 0 0 0 0 0 0 0
52342- 0 0 0 0 0 0 0 0 0 0 0 0
52343- 0 0 0 0 0 0 0 0 0 0 0 0
52344- 0 0 0 0 0 0 0 0 0 0 0 0
52345- 0 0 0 0 0 0 0 0 0 0 0 0
52346- 0 0 0 0 0 0 0 0 0 0 0 0
52347- 0 0 0 0 0 0 0 0 0 0 0 0
52348- 0 0 0 0 0 0 0 0 0 0 0 0
52349- 0 0 0 0 0 0 10 10 10 26 26 26
52350- 58 58 58 90 90 90 18 18 18 2 2 6
52351- 2 2 6 110 110 110 253 253 253 253 253 253
52352-253 253 253 253 253 253 253 253 253 253 253 253
52353-250 250 250 253 253 253 253 253 253 253 253 253
52354-253 253 253 253 253 253 253 253 253 253 253 253
52355-253 253 253 253 253 253 253 253 253 253 253 253
52356-253 253 253 231 231 231 18 18 18 2 2 6
52357- 2 2 6 2 2 6 2 2 6 2 2 6
52358- 2 2 6 2 2 6 18 18 18 94 94 94
52359- 54 54 54 26 26 26 10 10 10 0 0 0
52360- 0 0 0 0 0 0 0 0 0 0 0 0
52361- 0 0 0 0 0 0 0 0 0 0 0 0
52362- 0 0 0 0 0 0 0 0 0 0 0 0
52363- 0 0 0 0 0 0 0 0 0 0 0 0
52364- 0 0 0 0 0 0 0 0 0 0 0 0
52365- 0 0 0 0 0 0 0 0 0 0 0 0
52366- 0 0 0 0 0 0 0 0 0 0 0 0
52367- 0 0 0 0 0 0 0 0 0 0 0 0
52368- 0 0 0 0 0 0 0 0 0 0 0 0
52369- 0 0 0 6 6 6 22 22 22 50 50 50
52370- 90 90 90 26 26 26 2 2 6 2 2 6
52371- 14 14 14 195 195 195 250 250 250 253 253 253
52372-253 253 253 253 253 253 253 253 253 253 253 253
52373-253 253 253 253 253 253 253 253 253 253 253 253
52374-253 253 253 253 253 253 253 253 253 253 253 253
52375-253 253 253 253 253 253 253 253 253 253 253 253
52376-250 250 250 242 242 242 54 54 54 2 2 6
52377- 2 2 6 2 2 6 2 2 6 2 2 6
52378- 2 2 6 2 2 6 2 2 6 38 38 38
52379- 86 86 86 50 50 50 22 22 22 6 6 6
52380- 0 0 0 0 0 0 0 0 0 0 0 0
52381- 0 0 0 0 0 0 0 0 0 0 0 0
52382- 0 0 0 0 0 0 0 0 0 0 0 0
52383- 0 0 0 0 0 0 0 0 0 0 0 0
52384- 0 0 0 0 0 0 0 0 0 0 0 0
52385- 0 0 0 0 0 0 0 0 0 0 0 0
52386- 0 0 0 0 0 0 0 0 0 0 0 0
52387- 0 0 0 0 0 0 0 0 0 0 0 0
52388- 0 0 0 0 0 0 0 0 0 0 0 0
52389- 6 6 6 14 14 14 38 38 38 82 82 82
52390- 34 34 34 2 2 6 2 2 6 2 2 6
52391- 42 42 42 195 195 195 246 246 246 253 253 253
52392-253 253 253 253 253 253 253 253 253 250 250 250
52393-242 242 242 242 242 242 250 250 250 253 253 253
52394-253 253 253 253 253 253 253 253 253 253 253 253
52395-253 253 253 250 250 250 246 246 246 238 238 238
52396-226 226 226 231 231 231 101 101 101 6 6 6
52397- 2 2 6 2 2 6 2 2 6 2 2 6
52398- 2 2 6 2 2 6 2 2 6 2 2 6
52399- 38 38 38 82 82 82 42 42 42 14 14 14
52400- 6 6 6 0 0 0 0 0 0 0 0 0
52401- 0 0 0 0 0 0 0 0 0 0 0 0
52402- 0 0 0 0 0 0 0 0 0 0 0 0
52403- 0 0 0 0 0 0 0 0 0 0 0 0
52404- 0 0 0 0 0 0 0 0 0 0 0 0
52405- 0 0 0 0 0 0 0 0 0 0 0 0
52406- 0 0 0 0 0 0 0 0 0 0 0 0
52407- 0 0 0 0 0 0 0 0 0 0 0 0
52408- 0 0 0 0 0 0 0 0 0 0 0 0
52409- 10 10 10 26 26 26 62 62 62 66 66 66
52410- 2 2 6 2 2 6 2 2 6 6 6 6
52411- 70 70 70 170 170 170 206 206 206 234 234 234
52412-246 246 246 250 250 250 250 250 250 238 238 238
52413-226 226 226 231 231 231 238 238 238 250 250 250
52414-250 250 250 250 250 250 246 246 246 231 231 231
52415-214 214 214 206 206 206 202 202 202 202 202 202
52416-198 198 198 202 202 202 182 182 182 18 18 18
52417- 2 2 6 2 2 6 2 2 6 2 2 6
52418- 2 2 6 2 2 6 2 2 6 2 2 6
52419- 2 2 6 62 62 62 66 66 66 30 30 30
52420- 10 10 10 0 0 0 0 0 0 0 0 0
52421- 0 0 0 0 0 0 0 0 0 0 0 0
52422- 0 0 0 0 0 0 0 0 0 0 0 0
52423- 0 0 0 0 0 0 0 0 0 0 0 0
52424- 0 0 0 0 0 0 0 0 0 0 0 0
52425- 0 0 0 0 0 0 0 0 0 0 0 0
52426- 0 0 0 0 0 0 0 0 0 0 0 0
52427- 0 0 0 0 0 0 0 0 0 0 0 0
52428- 0 0 0 0 0 0 0 0 0 0 0 0
52429- 14 14 14 42 42 42 82 82 82 18 18 18
52430- 2 2 6 2 2 6 2 2 6 10 10 10
52431- 94 94 94 182 182 182 218 218 218 242 242 242
52432-250 250 250 253 253 253 253 253 253 250 250 250
52433-234 234 234 253 253 253 253 253 253 253 253 253
52434-253 253 253 253 253 253 253 253 253 246 246 246
52435-238 238 238 226 226 226 210 210 210 202 202 202
52436-195 195 195 195 195 195 210 210 210 158 158 158
52437- 6 6 6 14 14 14 50 50 50 14 14 14
52438- 2 2 6 2 2 6 2 2 6 2 2 6
52439- 2 2 6 6 6 6 86 86 86 46 46 46
52440- 18 18 18 6 6 6 0 0 0 0 0 0
52441- 0 0 0 0 0 0 0 0 0 0 0 0
52442- 0 0 0 0 0 0 0 0 0 0 0 0
52443- 0 0 0 0 0 0 0 0 0 0 0 0
52444- 0 0 0 0 0 0 0 0 0 0 0 0
52445- 0 0 0 0 0 0 0 0 0 0 0 0
52446- 0 0 0 0 0 0 0 0 0 0 0 0
52447- 0 0 0 0 0 0 0 0 0 0 0 0
52448- 0 0 0 0 0 0 0 0 0 6 6 6
52449- 22 22 22 54 54 54 70 70 70 2 2 6
52450- 2 2 6 10 10 10 2 2 6 22 22 22
52451-166 166 166 231 231 231 250 250 250 253 253 253
52452-253 253 253 253 253 253 253 253 253 250 250 250
52453-242 242 242 253 253 253 253 253 253 253 253 253
52454-253 253 253 253 253 253 253 253 253 253 253 253
52455-253 253 253 253 253 253 253 253 253 246 246 246
52456-231 231 231 206 206 206 198 198 198 226 226 226
52457- 94 94 94 2 2 6 6 6 6 38 38 38
52458- 30 30 30 2 2 6 2 2 6 2 2 6
52459- 2 2 6 2 2 6 62 62 62 66 66 66
52460- 26 26 26 10 10 10 0 0 0 0 0 0
52461- 0 0 0 0 0 0 0 0 0 0 0 0
52462- 0 0 0 0 0 0 0 0 0 0 0 0
52463- 0 0 0 0 0 0 0 0 0 0 0 0
52464- 0 0 0 0 0 0 0 0 0 0 0 0
52465- 0 0 0 0 0 0 0 0 0 0 0 0
52466- 0 0 0 0 0 0 0 0 0 0 0 0
52467- 0 0 0 0 0 0 0 0 0 0 0 0
52468- 0 0 0 0 0 0 0 0 0 10 10 10
52469- 30 30 30 74 74 74 50 50 50 2 2 6
52470- 26 26 26 26 26 26 2 2 6 106 106 106
52471-238 238 238 253 253 253 253 253 253 253 253 253
52472-253 253 253 253 253 253 253 253 253 253 253 253
52473-253 253 253 253 253 253 253 253 253 253 253 253
52474-253 253 253 253 253 253 253 253 253 253 253 253
52475-253 253 253 253 253 253 253 253 253 253 253 253
52476-253 253 253 246 246 246 218 218 218 202 202 202
52477-210 210 210 14 14 14 2 2 6 2 2 6
52478- 30 30 30 22 22 22 2 2 6 2 2 6
52479- 2 2 6 2 2 6 18 18 18 86 86 86
52480- 42 42 42 14 14 14 0 0 0 0 0 0
52481- 0 0 0 0 0 0 0 0 0 0 0 0
52482- 0 0 0 0 0 0 0 0 0 0 0 0
52483- 0 0 0 0 0 0 0 0 0 0 0 0
52484- 0 0 0 0 0 0 0 0 0 0 0 0
52485- 0 0 0 0 0 0 0 0 0 0 0 0
52486- 0 0 0 0 0 0 0 0 0 0 0 0
52487- 0 0 0 0 0 0 0 0 0 0 0 0
52488- 0 0 0 0 0 0 0 0 0 14 14 14
52489- 42 42 42 90 90 90 22 22 22 2 2 6
52490- 42 42 42 2 2 6 18 18 18 218 218 218
52491-253 253 253 253 253 253 253 253 253 253 253 253
52492-253 253 253 253 253 253 253 253 253 253 253 253
52493-253 253 253 253 253 253 253 253 253 253 253 253
52494-253 253 253 253 253 253 253 253 253 253 253 253
52495-253 253 253 253 253 253 253 253 253 253 253 253
52496-253 253 253 253 253 253 250 250 250 221 221 221
52497-218 218 218 101 101 101 2 2 6 14 14 14
52498- 18 18 18 38 38 38 10 10 10 2 2 6
52499- 2 2 6 2 2 6 2 2 6 78 78 78
52500- 58 58 58 22 22 22 6 6 6 0 0 0
52501- 0 0 0 0 0 0 0 0 0 0 0 0
52502- 0 0 0 0 0 0 0 0 0 0 0 0
52503- 0 0 0 0 0 0 0 0 0 0 0 0
52504- 0 0 0 0 0 0 0 0 0 0 0 0
52505- 0 0 0 0 0 0 0 0 0 0 0 0
52506- 0 0 0 0 0 0 0 0 0 0 0 0
52507- 0 0 0 0 0 0 0 0 0 0 0 0
52508- 0 0 0 0 0 0 6 6 6 18 18 18
52509- 54 54 54 82 82 82 2 2 6 26 26 26
52510- 22 22 22 2 2 6 123 123 123 253 253 253
52511-253 253 253 253 253 253 253 253 253 253 253 253
52512-253 253 253 253 253 253 253 253 253 253 253 253
52513-253 253 253 253 253 253 253 253 253 253 253 253
52514-253 253 253 253 253 253 253 253 253 253 253 253
52515-253 253 253 253 253 253 253 253 253 253 253 253
52516-253 253 253 253 253 253 253 253 253 250 250 250
52517-238 238 238 198 198 198 6 6 6 38 38 38
52518- 58 58 58 26 26 26 38 38 38 2 2 6
52519- 2 2 6 2 2 6 2 2 6 46 46 46
52520- 78 78 78 30 30 30 10 10 10 0 0 0
52521- 0 0 0 0 0 0 0 0 0 0 0 0
52522- 0 0 0 0 0 0 0 0 0 0 0 0
52523- 0 0 0 0 0 0 0 0 0 0 0 0
52524- 0 0 0 0 0 0 0 0 0 0 0 0
52525- 0 0 0 0 0 0 0 0 0 0 0 0
52526- 0 0 0 0 0 0 0 0 0 0 0 0
52527- 0 0 0 0 0 0 0 0 0 0 0 0
52528- 0 0 0 0 0 0 10 10 10 30 30 30
52529- 74 74 74 58 58 58 2 2 6 42 42 42
52530- 2 2 6 22 22 22 231 231 231 253 253 253
52531-253 253 253 253 253 253 253 253 253 253 253 253
52532-253 253 253 253 253 253 253 253 253 250 250 250
52533-253 253 253 253 253 253 253 253 253 253 253 253
52534-253 253 253 253 253 253 253 253 253 253 253 253
52535-253 253 253 253 253 253 253 253 253 253 253 253
52536-253 253 253 253 253 253 253 253 253 253 253 253
52537-253 253 253 246 246 246 46 46 46 38 38 38
52538- 42 42 42 14 14 14 38 38 38 14 14 14
52539- 2 2 6 2 2 6 2 2 6 6 6 6
52540- 86 86 86 46 46 46 14 14 14 0 0 0
52541- 0 0 0 0 0 0 0 0 0 0 0 0
52542- 0 0 0 0 0 0 0 0 0 0 0 0
52543- 0 0 0 0 0 0 0 0 0 0 0 0
52544- 0 0 0 0 0 0 0 0 0 0 0 0
52545- 0 0 0 0 0 0 0 0 0 0 0 0
52546- 0 0 0 0 0 0 0 0 0 0 0 0
52547- 0 0 0 0 0 0 0 0 0 0 0 0
52548- 0 0 0 6 6 6 14 14 14 42 42 42
52549- 90 90 90 18 18 18 18 18 18 26 26 26
52550- 2 2 6 116 116 116 253 253 253 253 253 253
52551-253 253 253 253 253 253 253 253 253 253 253 253
52552-253 253 253 253 253 253 250 250 250 238 238 238
52553-253 253 253 253 253 253 253 253 253 253 253 253
52554-253 253 253 253 253 253 253 253 253 253 253 253
52555-253 253 253 253 253 253 253 253 253 253 253 253
52556-253 253 253 253 253 253 253 253 253 253 253 253
52557-253 253 253 253 253 253 94 94 94 6 6 6
52558- 2 2 6 2 2 6 10 10 10 34 34 34
52559- 2 2 6 2 2 6 2 2 6 2 2 6
52560- 74 74 74 58 58 58 22 22 22 6 6 6
52561- 0 0 0 0 0 0 0 0 0 0 0 0
52562- 0 0 0 0 0 0 0 0 0 0 0 0
52563- 0 0 0 0 0 0 0 0 0 0 0 0
52564- 0 0 0 0 0 0 0 0 0 0 0 0
52565- 0 0 0 0 0 0 0 0 0 0 0 0
52566- 0 0 0 0 0 0 0 0 0 0 0 0
52567- 0 0 0 0 0 0 0 0 0 0 0 0
52568- 0 0 0 10 10 10 26 26 26 66 66 66
52569- 82 82 82 2 2 6 38 38 38 6 6 6
52570- 14 14 14 210 210 210 253 253 253 253 253 253
52571-253 253 253 253 253 253 253 253 253 253 253 253
52572-253 253 253 253 253 253 246 246 246 242 242 242
52573-253 253 253 253 253 253 253 253 253 253 253 253
52574-253 253 253 253 253 253 253 253 253 253 253 253
52575-253 253 253 253 253 253 253 253 253 253 253 253
52576-253 253 253 253 253 253 253 253 253 253 253 253
52577-253 253 253 253 253 253 144 144 144 2 2 6
52578- 2 2 6 2 2 6 2 2 6 46 46 46
52579- 2 2 6 2 2 6 2 2 6 2 2 6
52580- 42 42 42 74 74 74 30 30 30 10 10 10
52581- 0 0 0 0 0 0 0 0 0 0 0 0
52582- 0 0 0 0 0 0 0 0 0 0 0 0
52583- 0 0 0 0 0 0 0 0 0 0 0 0
52584- 0 0 0 0 0 0 0 0 0 0 0 0
52585- 0 0 0 0 0 0 0 0 0 0 0 0
52586- 0 0 0 0 0 0 0 0 0 0 0 0
52587- 0 0 0 0 0 0 0 0 0 0 0 0
52588- 6 6 6 14 14 14 42 42 42 90 90 90
52589- 26 26 26 6 6 6 42 42 42 2 2 6
52590- 74 74 74 250 250 250 253 253 253 253 253 253
52591-253 253 253 253 253 253 253 253 253 253 253 253
52592-253 253 253 253 253 253 242 242 242 242 242 242
52593-253 253 253 253 253 253 253 253 253 253 253 253
52594-253 253 253 253 253 253 253 253 253 253 253 253
52595-253 253 253 253 253 253 253 253 253 253 253 253
52596-253 253 253 253 253 253 253 253 253 253 253 253
52597-253 253 253 253 253 253 182 182 182 2 2 6
52598- 2 2 6 2 2 6 2 2 6 46 46 46
52599- 2 2 6 2 2 6 2 2 6 2 2 6
52600- 10 10 10 86 86 86 38 38 38 10 10 10
52601- 0 0 0 0 0 0 0 0 0 0 0 0
52602- 0 0 0 0 0 0 0 0 0 0 0 0
52603- 0 0 0 0 0 0 0 0 0 0 0 0
52604- 0 0 0 0 0 0 0 0 0 0 0 0
52605- 0 0 0 0 0 0 0 0 0 0 0 0
52606- 0 0 0 0 0 0 0 0 0 0 0 0
52607- 0 0 0 0 0 0 0 0 0 0 0 0
52608- 10 10 10 26 26 26 66 66 66 82 82 82
52609- 2 2 6 22 22 22 18 18 18 2 2 6
52610-149 149 149 253 253 253 253 253 253 253 253 253
52611-253 253 253 253 253 253 253 253 253 253 253 253
52612-253 253 253 253 253 253 234 234 234 242 242 242
52613-253 253 253 253 253 253 253 253 253 253 253 253
52614-253 253 253 253 253 253 253 253 253 253 253 253
52615-253 253 253 253 253 253 253 253 253 253 253 253
52616-253 253 253 253 253 253 253 253 253 253 253 253
52617-253 253 253 253 253 253 206 206 206 2 2 6
52618- 2 2 6 2 2 6 2 2 6 38 38 38
52619- 2 2 6 2 2 6 2 2 6 2 2 6
52620- 6 6 6 86 86 86 46 46 46 14 14 14
52621- 0 0 0 0 0 0 0 0 0 0 0 0
52622- 0 0 0 0 0 0 0 0 0 0 0 0
52623- 0 0 0 0 0 0 0 0 0 0 0 0
52624- 0 0 0 0 0 0 0 0 0 0 0 0
52625- 0 0 0 0 0 0 0 0 0 0 0 0
52626- 0 0 0 0 0 0 0 0 0 0 0 0
52627- 0 0 0 0 0 0 0 0 0 6 6 6
52628- 18 18 18 46 46 46 86 86 86 18 18 18
52629- 2 2 6 34 34 34 10 10 10 6 6 6
52630-210 210 210 253 253 253 253 253 253 253 253 253
52631-253 253 253 253 253 253 253 253 253 253 253 253
52632-253 253 253 253 253 253 234 234 234 242 242 242
52633-253 253 253 253 253 253 253 253 253 253 253 253
52634-253 253 253 253 253 253 253 253 253 253 253 253
52635-253 253 253 253 253 253 253 253 253 253 253 253
52636-253 253 253 253 253 253 253 253 253 253 253 253
52637-253 253 253 253 253 253 221 221 221 6 6 6
52638- 2 2 6 2 2 6 6 6 6 30 30 30
52639- 2 2 6 2 2 6 2 2 6 2 2 6
52640- 2 2 6 82 82 82 54 54 54 18 18 18
52641- 6 6 6 0 0 0 0 0 0 0 0 0
52642- 0 0 0 0 0 0 0 0 0 0 0 0
52643- 0 0 0 0 0 0 0 0 0 0 0 0
52644- 0 0 0 0 0 0 0 0 0 0 0 0
52645- 0 0 0 0 0 0 0 0 0 0 0 0
52646- 0 0 0 0 0 0 0 0 0 0 0 0
52647- 0 0 0 0 0 0 0 0 0 10 10 10
52648- 26 26 26 66 66 66 62 62 62 2 2 6
52649- 2 2 6 38 38 38 10 10 10 26 26 26
52650-238 238 238 253 253 253 253 253 253 253 253 253
52651-253 253 253 253 253 253 253 253 253 253 253 253
52652-253 253 253 253 253 253 231 231 231 238 238 238
52653-253 253 253 253 253 253 253 253 253 253 253 253
52654-253 253 253 253 253 253 253 253 253 253 253 253
52655-253 253 253 253 253 253 253 253 253 253 253 253
52656-253 253 253 253 253 253 253 253 253 253 253 253
52657-253 253 253 253 253 253 231 231 231 6 6 6
52658- 2 2 6 2 2 6 10 10 10 30 30 30
52659- 2 2 6 2 2 6 2 2 6 2 2 6
52660- 2 2 6 66 66 66 58 58 58 22 22 22
52661- 6 6 6 0 0 0 0 0 0 0 0 0
52662- 0 0 0 0 0 0 0 0 0 0 0 0
52663- 0 0 0 0 0 0 0 0 0 0 0 0
52664- 0 0 0 0 0 0 0 0 0 0 0 0
52665- 0 0 0 0 0 0 0 0 0 0 0 0
52666- 0 0 0 0 0 0 0 0 0 0 0 0
52667- 0 0 0 0 0 0 0 0 0 10 10 10
52668- 38 38 38 78 78 78 6 6 6 2 2 6
52669- 2 2 6 46 46 46 14 14 14 42 42 42
52670-246 246 246 253 253 253 253 253 253 253 253 253
52671-253 253 253 253 253 253 253 253 253 253 253 253
52672-253 253 253 253 253 253 231 231 231 242 242 242
52673-253 253 253 253 253 253 253 253 253 253 253 253
52674-253 253 253 253 253 253 253 253 253 253 253 253
52675-253 253 253 253 253 253 253 253 253 253 253 253
52676-253 253 253 253 253 253 253 253 253 253 253 253
52677-253 253 253 253 253 253 234 234 234 10 10 10
52678- 2 2 6 2 2 6 22 22 22 14 14 14
52679- 2 2 6 2 2 6 2 2 6 2 2 6
52680- 2 2 6 66 66 66 62 62 62 22 22 22
52681- 6 6 6 0 0 0 0 0 0 0 0 0
52682- 0 0 0 0 0 0 0 0 0 0 0 0
52683- 0 0 0 0 0 0 0 0 0 0 0 0
52684- 0 0 0 0 0 0 0 0 0 0 0 0
52685- 0 0 0 0 0 0 0 0 0 0 0 0
52686- 0 0 0 0 0 0 0 0 0 0 0 0
52687- 0 0 0 0 0 0 6 6 6 18 18 18
52688- 50 50 50 74 74 74 2 2 6 2 2 6
52689- 14 14 14 70 70 70 34 34 34 62 62 62
52690-250 250 250 253 253 253 253 253 253 253 253 253
52691-253 253 253 253 253 253 253 253 253 253 253 253
52692-253 253 253 253 253 253 231 231 231 246 246 246
52693-253 253 253 253 253 253 253 253 253 253 253 253
52694-253 253 253 253 253 253 253 253 253 253 253 253
52695-253 253 253 253 253 253 253 253 253 253 253 253
52696-253 253 253 253 253 253 253 253 253 253 253 253
52697-253 253 253 253 253 253 234 234 234 14 14 14
52698- 2 2 6 2 2 6 30 30 30 2 2 6
52699- 2 2 6 2 2 6 2 2 6 2 2 6
52700- 2 2 6 66 66 66 62 62 62 22 22 22
52701- 6 6 6 0 0 0 0 0 0 0 0 0
52702- 0 0 0 0 0 0 0 0 0 0 0 0
52703- 0 0 0 0 0 0 0 0 0 0 0 0
52704- 0 0 0 0 0 0 0 0 0 0 0 0
52705- 0 0 0 0 0 0 0 0 0 0 0 0
52706- 0 0 0 0 0 0 0 0 0 0 0 0
52707- 0 0 0 0 0 0 6 6 6 18 18 18
52708- 54 54 54 62 62 62 2 2 6 2 2 6
52709- 2 2 6 30 30 30 46 46 46 70 70 70
52710-250 250 250 253 253 253 253 253 253 253 253 253
52711-253 253 253 253 253 253 253 253 253 253 253 253
52712-253 253 253 253 253 253 231 231 231 246 246 246
52713-253 253 253 253 253 253 253 253 253 253 253 253
52714-253 253 253 253 253 253 253 253 253 253 253 253
52715-253 253 253 253 253 253 253 253 253 253 253 253
52716-253 253 253 253 253 253 253 253 253 253 253 253
52717-253 253 253 253 253 253 226 226 226 10 10 10
52718- 2 2 6 6 6 6 30 30 30 2 2 6
52719- 2 2 6 2 2 6 2 2 6 2 2 6
52720- 2 2 6 66 66 66 58 58 58 22 22 22
52721- 6 6 6 0 0 0 0 0 0 0 0 0
52722- 0 0 0 0 0 0 0 0 0 0 0 0
52723- 0 0 0 0 0 0 0 0 0 0 0 0
52724- 0 0 0 0 0 0 0 0 0 0 0 0
52725- 0 0 0 0 0 0 0 0 0 0 0 0
52726- 0 0 0 0 0 0 0 0 0 0 0 0
52727- 0 0 0 0 0 0 6 6 6 22 22 22
52728- 58 58 58 62 62 62 2 2 6 2 2 6
52729- 2 2 6 2 2 6 30 30 30 78 78 78
52730-250 250 250 253 253 253 253 253 253 253 253 253
52731-253 253 253 253 253 253 253 253 253 253 253 253
52732-253 253 253 253 253 253 231 231 231 246 246 246
52733-253 253 253 253 253 253 253 253 253 253 253 253
52734-253 253 253 253 253 253 253 253 253 253 253 253
52735-253 253 253 253 253 253 253 253 253 253 253 253
52736-253 253 253 253 253 253 253 253 253 253 253 253
52737-253 253 253 253 253 253 206 206 206 2 2 6
52738- 22 22 22 34 34 34 18 14 6 22 22 22
52739- 26 26 26 18 18 18 6 6 6 2 2 6
52740- 2 2 6 82 82 82 54 54 54 18 18 18
52741- 6 6 6 0 0 0 0 0 0 0 0 0
52742- 0 0 0 0 0 0 0 0 0 0 0 0
52743- 0 0 0 0 0 0 0 0 0 0 0 0
52744- 0 0 0 0 0 0 0 0 0 0 0 0
52745- 0 0 0 0 0 0 0 0 0 0 0 0
52746- 0 0 0 0 0 0 0 0 0 0 0 0
52747- 0 0 0 0 0 0 6 6 6 26 26 26
52748- 62 62 62 106 106 106 74 54 14 185 133 11
52749-210 162 10 121 92 8 6 6 6 62 62 62
52750-238 238 238 253 253 253 253 253 253 253 253 253
52751-253 253 253 253 253 253 253 253 253 253 253 253
52752-253 253 253 253 253 253 231 231 231 246 246 246
52753-253 253 253 253 253 253 253 253 253 253 253 253
52754-253 253 253 253 253 253 253 253 253 253 253 253
52755-253 253 253 253 253 253 253 253 253 253 253 253
52756-253 253 253 253 253 253 253 253 253 253 253 253
52757-253 253 253 253 253 253 158 158 158 18 18 18
52758- 14 14 14 2 2 6 2 2 6 2 2 6
52759- 6 6 6 18 18 18 66 66 66 38 38 38
52760- 6 6 6 94 94 94 50 50 50 18 18 18
52761- 6 6 6 0 0 0 0 0 0 0 0 0
52762- 0 0 0 0 0 0 0 0 0 0 0 0
52763- 0 0 0 0 0 0 0 0 0 0 0 0
52764- 0 0 0 0 0 0 0 0 0 0 0 0
52765- 0 0 0 0 0 0 0 0 0 0 0 0
52766- 0 0 0 0 0 0 0 0 0 6 6 6
52767- 10 10 10 10 10 10 18 18 18 38 38 38
52768- 78 78 78 142 134 106 216 158 10 242 186 14
52769-246 190 14 246 190 14 156 118 10 10 10 10
52770- 90 90 90 238 238 238 253 253 253 253 253 253
52771-253 253 253 253 253 253 253 253 253 253 253 253
52772-253 253 253 253 253 253 231 231 231 250 250 250
52773-253 253 253 253 253 253 253 253 253 253 253 253
52774-253 253 253 253 253 253 253 253 253 253 253 253
52775-253 253 253 253 253 253 253 253 253 253 253 253
52776-253 253 253 253 253 253 253 253 253 246 230 190
52777-238 204 91 238 204 91 181 142 44 37 26 9
52778- 2 2 6 2 2 6 2 2 6 2 2 6
52779- 2 2 6 2 2 6 38 38 38 46 46 46
52780- 26 26 26 106 106 106 54 54 54 18 18 18
52781- 6 6 6 0 0 0 0 0 0 0 0 0
52782- 0 0 0 0 0 0 0 0 0 0 0 0
52783- 0 0 0 0 0 0 0 0 0 0 0 0
52784- 0 0 0 0 0 0 0 0 0 0 0 0
52785- 0 0 0 0 0 0 0 0 0 0 0 0
52786- 0 0 0 6 6 6 14 14 14 22 22 22
52787- 30 30 30 38 38 38 50 50 50 70 70 70
52788-106 106 106 190 142 34 226 170 11 242 186 14
52789-246 190 14 246 190 14 246 190 14 154 114 10
52790- 6 6 6 74 74 74 226 226 226 253 253 253
52791-253 253 253 253 253 253 253 253 253 253 253 253
52792-253 253 253 253 253 253 231 231 231 250 250 250
52793-253 253 253 253 253 253 253 253 253 253 253 253
52794-253 253 253 253 253 253 253 253 253 253 253 253
52795-253 253 253 253 253 253 253 253 253 253 253 253
52796-253 253 253 253 253 253 253 253 253 228 184 62
52797-241 196 14 241 208 19 232 195 16 38 30 10
52798- 2 2 6 2 2 6 2 2 6 2 2 6
52799- 2 2 6 6 6 6 30 30 30 26 26 26
52800-203 166 17 154 142 90 66 66 66 26 26 26
52801- 6 6 6 0 0 0 0 0 0 0 0 0
52802- 0 0 0 0 0 0 0 0 0 0 0 0
52803- 0 0 0 0 0 0 0 0 0 0 0 0
52804- 0 0 0 0 0 0 0 0 0 0 0 0
52805- 0 0 0 0 0 0 0 0 0 0 0 0
52806- 6 6 6 18 18 18 38 38 38 58 58 58
52807- 78 78 78 86 86 86 101 101 101 123 123 123
52808-175 146 61 210 150 10 234 174 13 246 186 14
52809-246 190 14 246 190 14 246 190 14 238 190 10
52810-102 78 10 2 2 6 46 46 46 198 198 198
52811-253 253 253 253 253 253 253 253 253 253 253 253
52812-253 253 253 253 253 253 234 234 234 242 242 242
52813-253 253 253 253 253 253 253 253 253 253 253 253
52814-253 253 253 253 253 253 253 253 253 253 253 253
52815-253 253 253 253 253 253 253 253 253 253 253 253
52816-253 253 253 253 253 253 253 253 253 224 178 62
52817-242 186 14 241 196 14 210 166 10 22 18 6
52818- 2 2 6 2 2 6 2 2 6 2 2 6
52819- 2 2 6 2 2 6 6 6 6 121 92 8
52820-238 202 15 232 195 16 82 82 82 34 34 34
52821- 10 10 10 0 0 0 0 0 0 0 0 0
52822- 0 0 0 0 0 0 0 0 0 0 0 0
52823- 0 0 0 0 0 0 0 0 0 0 0 0
52824- 0 0 0 0 0 0 0 0 0 0 0 0
52825- 0 0 0 0 0 0 0 0 0 0 0 0
52826- 14 14 14 38 38 38 70 70 70 154 122 46
52827-190 142 34 200 144 11 197 138 11 197 138 11
52828-213 154 11 226 170 11 242 186 14 246 190 14
52829-246 190 14 246 190 14 246 190 14 246 190 14
52830-225 175 15 46 32 6 2 2 6 22 22 22
52831-158 158 158 250 250 250 253 253 253 253 253 253
52832-253 253 253 253 253 253 253 253 253 253 253 253
52833-253 253 253 253 253 253 253 253 253 253 253 253
52834-253 253 253 253 253 253 253 253 253 253 253 253
52835-253 253 253 253 253 253 253 253 253 253 253 253
52836-253 253 253 250 250 250 242 242 242 224 178 62
52837-239 182 13 236 186 11 213 154 11 46 32 6
52838- 2 2 6 2 2 6 2 2 6 2 2 6
52839- 2 2 6 2 2 6 61 42 6 225 175 15
52840-238 190 10 236 186 11 112 100 78 42 42 42
52841- 14 14 14 0 0 0 0 0 0 0 0 0
52842- 0 0 0 0 0 0 0 0 0 0 0 0
52843- 0 0 0 0 0 0 0 0 0 0 0 0
52844- 0 0 0 0 0 0 0 0 0 0 0 0
52845- 0 0 0 0 0 0 0 0 0 6 6 6
52846- 22 22 22 54 54 54 154 122 46 213 154 11
52847-226 170 11 230 174 11 226 170 11 226 170 11
52848-236 178 12 242 186 14 246 190 14 246 190 14
52849-246 190 14 246 190 14 246 190 14 246 190 14
52850-241 196 14 184 144 12 10 10 10 2 2 6
52851- 6 6 6 116 116 116 242 242 242 253 253 253
52852-253 253 253 253 253 253 253 253 253 253 253 253
52853-253 253 253 253 253 253 253 253 253 253 253 253
52854-253 253 253 253 253 253 253 253 253 253 253 253
52855-253 253 253 253 253 253 253 253 253 253 253 253
52856-253 253 253 231 231 231 198 198 198 214 170 54
52857-236 178 12 236 178 12 210 150 10 137 92 6
52858- 18 14 6 2 2 6 2 2 6 2 2 6
52859- 6 6 6 70 47 6 200 144 11 236 178 12
52860-239 182 13 239 182 13 124 112 88 58 58 58
52861- 22 22 22 6 6 6 0 0 0 0 0 0
52862- 0 0 0 0 0 0 0 0 0 0 0 0
52863- 0 0 0 0 0 0 0 0 0 0 0 0
52864- 0 0 0 0 0 0 0 0 0 0 0 0
52865- 0 0 0 0 0 0 0 0 0 10 10 10
52866- 30 30 30 70 70 70 180 133 36 226 170 11
52867-239 182 13 242 186 14 242 186 14 246 186 14
52868-246 190 14 246 190 14 246 190 14 246 190 14
52869-246 190 14 246 190 14 246 190 14 246 190 14
52870-246 190 14 232 195 16 98 70 6 2 2 6
52871- 2 2 6 2 2 6 66 66 66 221 221 221
52872-253 253 253 253 253 253 253 253 253 253 253 253
52873-253 253 253 253 253 253 253 253 253 253 253 253
52874-253 253 253 253 253 253 253 253 253 253 253 253
52875-253 253 253 253 253 253 253 253 253 253 253 253
52876-253 253 253 206 206 206 198 198 198 214 166 58
52877-230 174 11 230 174 11 216 158 10 192 133 9
52878-163 110 8 116 81 8 102 78 10 116 81 8
52879-167 114 7 197 138 11 226 170 11 239 182 13
52880-242 186 14 242 186 14 162 146 94 78 78 78
52881- 34 34 34 14 14 14 6 6 6 0 0 0
52882- 0 0 0 0 0 0 0 0 0 0 0 0
52883- 0 0 0 0 0 0 0 0 0 0 0 0
52884- 0 0 0 0 0 0 0 0 0 0 0 0
52885- 0 0 0 0 0 0 0 0 0 6 6 6
52886- 30 30 30 78 78 78 190 142 34 226 170 11
52887-239 182 13 246 190 14 246 190 14 246 190 14
52888-246 190 14 246 190 14 246 190 14 246 190 14
52889-246 190 14 246 190 14 246 190 14 246 190 14
52890-246 190 14 241 196 14 203 166 17 22 18 6
52891- 2 2 6 2 2 6 2 2 6 38 38 38
52892-218 218 218 253 253 253 253 253 253 253 253 253
52893-253 253 253 253 253 253 253 253 253 253 253 253
52894-253 253 253 253 253 253 253 253 253 253 253 253
52895-253 253 253 253 253 253 253 253 253 253 253 253
52896-250 250 250 206 206 206 198 198 198 202 162 69
52897-226 170 11 236 178 12 224 166 10 210 150 10
52898-200 144 11 197 138 11 192 133 9 197 138 11
52899-210 150 10 226 170 11 242 186 14 246 190 14
52900-246 190 14 246 186 14 225 175 15 124 112 88
52901- 62 62 62 30 30 30 14 14 14 6 6 6
52902- 0 0 0 0 0 0 0 0 0 0 0 0
52903- 0 0 0 0 0 0 0 0 0 0 0 0
52904- 0 0 0 0 0 0 0 0 0 0 0 0
52905- 0 0 0 0 0 0 0 0 0 10 10 10
52906- 30 30 30 78 78 78 174 135 50 224 166 10
52907-239 182 13 246 190 14 246 190 14 246 190 14
52908-246 190 14 246 190 14 246 190 14 246 190 14
52909-246 190 14 246 190 14 246 190 14 246 190 14
52910-246 190 14 246 190 14 241 196 14 139 102 15
52911- 2 2 6 2 2 6 2 2 6 2 2 6
52912- 78 78 78 250 250 250 253 253 253 253 253 253
52913-253 253 253 253 253 253 253 253 253 253 253 253
52914-253 253 253 253 253 253 253 253 253 253 253 253
52915-253 253 253 253 253 253 253 253 253 253 253 253
52916-250 250 250 214 214 214 198 198 198 190 150 46
52917-219 162 10 236 178 12 234 174 13 224 166 10
52918-216 158 10 213 154 11 213 154 11 216 158 10
52919-226 170 11 239 182 13 246 190 14 246 190 14
52920-246 190 14 246 190 14 242 186 14 206 162 42
52921-101 101 101 58 58 58 30 30 30 14 14 14
52922- 6 6 6 0 0 0 0 0 0 0 0 0
52923- 0 0 0 0 0 0 0 0 0 0 0 0
52924- 0 0 0 0 0 0 0 0 0 0 0 0
52925- 0 0 0 0 0 0 0 0 0 10 10 10
52926- 30 30 30 74 74 74 174 135 50 216 158 10
52927-236 178 12 246 190 14 246 190 14 246 190 14
52928-246 190 14 246 190 14 246 190 14 246 190 14
52929-246 190 14 246 190 14 246 190 14 246 190 14
52930-246 190 14 246 190 14 241 196 14 226 184 13
52931- 61 42 6 2 2 6 2 2 6 2 2 6
52932- 22 22 22 238 238 238 253 253 253 253 253 253
52933-253 253 253 253 253 253 253 253 253 253 253 253
52934-253 253 253 253 253 253 253 253 253 253 253 253
52935-253 253 253 253 253 253 253 253 253 253 253 253
52936-253 253 253 226 226 226 187 187 187 180 133 36
52937-216 158 10 236 178 12 239 182 13 236 178 12
52938-230 174 11 226 170 11 226 170 11 230 174 11
52939-236 178 12 242 186 14 246 190 14 246 190 14
52940-246 190 14 246 190 14 246 186 14 239 182 13
52941-206 162 42 106 106 106 66 66 66 34 34 34
52942- 14 14 14 6 6 6 0 0 0 0 0 0
52943- 0 0 0 0 0 0 0 0 0 0 0 0
52944- 0 0 0 0 0 0 0 0 0 0 0 0
52945- 0 0 0 0 0 0 0 0 0 6 6 6
52946- 26 26 26 70 70 70 163 133 67 213 154 11
52947-236 178 12 246 190 14 246 190 14 246 190 14
52948-246 190 14 246 190 14 246 190 14 246 190 14
52949-246 190 14 246 190 14 246 190 14 246 190 14
52950-246 190 14 246 190 14 246 190 14 241 196 14
52951-190 146 13 18 14 6 2 2 6 2 2 6
52952- 46 46 46 246 246 246 253 253 253 253 253 253
52953-253 253 253 253 253 253 253 253 253 253 253 253
52954-253 253 253 253 253 253 253 253 253 253 253 253
52955-253 253 253 253 253 253 253 253 253 253 253 253
52956-253 253 253 221 221 221 86 86 86 156 107 11
52957-216 158 10 236 178 12 242 186 14 246 186 14
52958-242 186 14 239 182 13 239 182 13 242 186 14
52959-242 186 14 246 186 14 246 190 14 246 190 14
52960-246 190 14 246 190 14 246 190 14 246 190 14
52961-242 186 14 225 175 15 142 122 72 66 66 66
52962- 30 30 30 10 10 10 0 0 0 0 0 0
52963- 0 0 0 0 0 0 0 0 0 0 0 0
52964- 0 0 0 0 0 0 0 0 0 0 0 0
52965- 0 0 0 0 0 0 0 0 0 6 6 6
52966- 26 26 26 70 70 70 163 133 67 210 150 10
52967-236 178 12 246 190 14 246 190 14 246 190 14
52968-246 190 14 246 190 14 246 190 14 246 190 14
52969-246 190 14 246 190 14 246 190 14 246 190 14
52970-246 190 14 246 190 14 246 190 14 246 190 14
52971-232 195 16 121 92 8 34 34 34 106 106 106
52972-221 221 221 253 253 253 253 253 253 253 253 253
52973-253 253 253 253 253 253 253 253 253 253 253 253
52974-253 253 253 253 253 253 253 253 253 253 253 253
52975-253 253 253 253 253 253 253 253 253 253 253 253
52976-242 242 242 82 82 82 18 14 6 163 110 8
52977-216 158 10 236 178 12 242 186 14 246 190 14
52978-246 190 14 246 190 14 246 190 14 246 190 14
52979-246 190 14 246 190 14 246 190 14 246 190 14
52980-246 190 14 246 190 14 246 190 14 246 190 14
52981-246 190 14 246 190 14 242 186 14 163 133 67
52982- 46 46 46 18 18 18 6 6 6 0 0 0
52983- 0 0 0 0 0 0 0 0 0 0 0 0
52984- 0 0 0 0 0 0 0 0 0 0 0 0
52985- 0 0 0 0 0 0 0 0 0 10 10 10
52986- 30 30 30 78 78 78 163 133 67 210 150 10
52987-236 178 12 246 186 14 246 190 14 246 190 14
52988-246 190 14 246 190 14 246 190 14 246 190 14
52989-246 190 14 246 190 14 246 190 14 246 190 14
52990-246 190 14 246 190 14 246 190 14 246 190 14
52991-241 196 14 215 174 15 190 178 144 253 253 253
52992-253 253 253 253 253 253 253 253 253 253 253 253
52993-253 253 253 253 253 253 253 253 253 253 253 253
52994-253 253 253 253 253 253 253 253 253 253 253 253
52995-253 253 253 253 253 253 253 253 253 218 218 218
52996- 58 58 58 2 2 6 22 18 6 167 114 7
52997-216 158 10 236 178 12 246 186 14 246 190 14
52998-246 190 14 246 190 14 246 190 14 246 190 14
52999-246 190 14 246 190 14 246 190 14 246 190 14
53000-246 190 14 246 190 14 246 190 14 246 190 14
53001-246 190 14 246 186 14 242 186 14 190 150 46
53002- 54 54 54 22 22 22 6 6 6 0 0 0
53003- 0 0 0 0 0 0 0 0 0 0 0 0
53004- 0 0 0 0 0 0 0 0 0 0 0 0
53005- 0 0 0 0 0 0 0 0 0 14 14 14
53006- 38 38 38 86 86 86 180 133 36 213 154 11
53007-236 178 12 246 186 14 246 190 14 246 190 14
53008-246 190 14 246 190 14 246 190 14 246 190 14
53009-246 190 14 246 190 14 246 190 14 246 190 14
53010-246 190 14 246 190 14 246 190 14 246 190 14
53011-246 190 14 232 195 16 190 146 13 214 214 214
53012-253 253 253 253 253 253 253 253 253 253 253 253
53013-253 253 253 253 253 253 253 253 253 253 253 253
53014-253 253 253 253 253 253 253 253 253 253 253 253
53015-253 253 253 250 250 250 170 170 170 26 26 26
53016- 2 2 6 2 2 6 37 26 9 163 110 8
53017-219 162 10 239 182 13 246 186 14 246 190 14
53018-246 190 14 246 190 14 246 190 14 246 190 14
53019-246 190 14 246 190 14 246 190 14 246 190 14
53020-246 190 14 246 190 14 246 190 14 246 190 14
53021-246 186 14 236 178 12 224 166 10 142 122 72
53022- 46 46 46 18 18 18 6 6 6 0 0 0
53023- 0 0 0 0 0 0 0 0 0 0 0 0
53024- 0 0 0 0 0 0 0 0 0 0 0 0
53025- 0 0 0 0 0 0 6 6 6 18 18 18
53026- 50 50 50 109 106 95 192 133 9 224 166 10
53027-242 186 14 246 190 14 246 190 14 246 190 14
53028-246 190 14 246 190 14 246 190 14 246 190 14
53029-246 190 14 246 190 14 246 190 14 246 190 14
53030-246 190 14 246 190 14 246 190 14 246 190 14
53031-242 186 14 226 184 13 210 162 10 142 110 46
53032-226 226 226 253 253 253 253 253 253 253 253 253
53033-253 253 253 253 253 253 253 253 253 253 253 253
53034-253 253 253 253 253 253 253 253 253 253 253 253
53035-198 198 198 66 66 66 2 2 6 2 2 6
53036- 2 2 6 2 2 6 50 34 6 156 107 11
53037-219 162 10 239 182 13 246 186 14 246 190 14
53038-246 190 14 246 190 14 246 190 14 246 190 14
53039-246 190 14 246 190 14 246 190 14 246 190 14
53040-246 190 14 246 190 14 246 190 14 242 186 14
53041-234 174 13 213 154 11 154 122 46 66 66 66
53042- 30 30 30 10 10 10 0 0 0 0 0 0
53043- 0 0 0 0 0 0 0 0 0 0 0 0
53044- 0 0 0 0 0 0 0 0 0 0 0 0
53045- 0 0 0 0 0 0 6 6 6 22 22 22
53046- 58 58 58 154 121 60 206 145 10 234 174 13
53047-242 186 14 246 186 14 246 190 14 246 190 14
53048-246 190 14 246 190 14 246 190 14 246 190 14
53049-246 190 14 246 190 14 246 190 14 246 190 14
53050-246 190 14 246 190 14 246 190 14 246 190 14
53051-246 186 14 236 178 12 210 162 10 163 110 8
53052- 61 42 6 138 138 138 218 218 218 250 250 250
53053-253 253 253 253 253 253 253 253 253 250 250 250
53054-242 242 242 210 210 210 144 144 144 66 66 66
53055- 6 6 6 2 2 6 2 2 6 2 2 6
53056- 2 2 6 2 2 6 61 42 6 163 110 8
53057-216 158 10 236 178 12 246 190 14 246 190 14
53058-246 190 14 246 190 14 246 190 14 246 190 14
53059-246 190 14 246 190 14 246 190 14 246 190 14
53060-246 190 14 239 182 13 230 174 11 216 158 10
53061-190 142 34 124 112 88 70 70 70 38 38 38
53062- 18 18 18 6 6 6 0 0 0 0 0 0
53063- 0 0 0 0 0 0 0 0 0 0 0 0
53064- 0 0 0 0 0 0 0 0 0 0 0 0
53065- 0 0 0 0 0 0 6 6 6 22 22 22
53066- 62 62 62 168 124 44 206 145 10 224 166 10
53067-236 178 12 239 182 13 242 186 14 242 186 14
53068-246 186 14 246 190 14 246 190 14 246 190 14
53069-246 190 14 246 190 14 246 190 14 246 190 14
53070-246 190 14 246 190 14 246 190 14 246 190 14
53071-246 190 14 236 178 12 216 158 10 175 118 6
53072- 80 54 7 2 2 6 6 6 6 30 30 30
53073- 54 54 54 62 62 62 50 50 50 38 38 38
53074- 14 14 14 2 2 6 2 2 6 2 2 6
53075- 2 2 6 2 2 6 2 2 6 2 2 6
53076- 2 2 6 6 6 6 80 54 7 167 114 7
53077-213 154 11 236 178 12 246 190 14 246 190 14
53078-246 190 14 246 190 14 246 190 14 246 190 14
53079-246 190 14 242 186 14 239 182 13 239 182 13
53080-230 174 11 210 150 10 174 135 50 124 112 88
53081- 82 82 82 54 54 54 34 34 34 18 18 18
53082- 6 6 6 0 0 0 0 0 0 0 0 0
53083- 0 0 0 0 0 0 0 0 0 0 0 0
53084- 0 0 0 0 0 0 0 0 0 0 0 0
53085- 0 0 0 0 0 0 6 6 6 18 18 18
53086- 50 50 50 158 118 36 192 133 9 200 144 11
53087-216 158 10 219 162 10 224 166 10 226 170 11
53088-230 174 11 236 178 12 239 182 13 239 182 13
53089-242 186 14 246 186 14 246 190 14 246 190 14
53090-246 190 14 246 190 14 246 190 14 246 190 14
53091-246 186 14 230 174 11 210 150 10 163 110 8
53092-104 69 6 10 10 10 2 2 6 2 2 6
53093- 2 2 6 2 2 6 2 2 6 2 2 6
53094- 2 2 6 2 2 6 2 2 6 2 2 6
53095- 2 2 6 2 2 6 2 2 6 2 2 6
53096- 2 2 6 6 6 6 91 60 6 167 114 7
53097-206 145 10 230 174 11 242 186 14 246 190 14
53098-246 190 14 246 190 14 246 186 14 242 186 14
53099-239 182 13 230 174 11 224 166 10 213 154 11
53100-180 133 36 124 112 88 86 86 86 58 58 58
53101- 38 38 38 22 22 22 10 10 10 6 6 6
53102- 0 0 0 0 0 0 0 0 0 0 0 0
53103- 0 0 0 0 0 0 0 0 0 0 0 0
53104- 0 0 0 0 0 0 0 0 0 0 0 0
53105- 0 0 0 0 0 0 0 0 0 14 14 14
53106- 34 34 34 70 70 70 138 110 50 158 118 36
53107-167 114 7 180 123 7 192 133 9 197 138 11
53108-200 144 11 206 145 10 213 154 11 219 162 10
53109-224 166 10 230 174 11 239 182 13 242 186 14
53110-246 186 14 246 186 14 246 186 14 246 186 14
53111-239 182 13 216 158 10 185 133 11 152 99 6
53112-104 69 6 18 14 6 2 2 6 2 2 6
53113- 2 2 6 2 2 6 2 2 6 2 2 6
53114- 2 2 6 2 2 6 2 2 6 2 2 6
53115- 2 2 6 2 2 6 2 2 6 2 2 6
53116- 2 2 6 6 6 6 80 54 7 152 99 6
53117-192 133 9 219 162 10 236 178 12 239 182 13
53118-246 186 14 242 186 14 239 182 13 236 178 12
53119-224 166 10 206 145 10 192 133 9 154 121 60
53120- 94 94 94 62 62 62 42 42 42 22 22 22
53121- 14 14 14 6 6 6 0 0 0 0 0 0
53122- 0 0 0 0 0 0 0 0 0 0 0 0
53123- 0 0 0 0 0 0 0 0 0 0 0 0
53124- 0 0 0 0 0 0 0 0 0 0 0 0
53125- 0 0 0 0 0 0 0 0 0 6 6 6
53126- 18 18 18 34 34 34 58 58 58 78 78 78
53127-101 98 89 124 112 88 142 110 46 156 107 11
53128-163 110 8 167 114 7 175 118 6 180 123 7
53129-185 133 11 197 138 11 210 150 10 219 162 10
53130-226 170 11 236 178 12 236 178 12 234 174 13
53131-219 162 10 197 138 11 163 110 8 130 83 6
53132- 91 60 6 10 10 10 2 2 6 2 2 6
53133- 18 18 18 38 38 38 38 38 38 38 38 38
53134- 38 38 38 38 38 38 38 38 38 38 38 38
53135- 38 38 38 38 38 38 26 26 26 2 2 6
53136- 2 2 6 6 6 6 70 47 6 137 92 6
53137-175 118 6 200 144 11 219 162 10 230 174 11
53138-234 174 13 230 174 11 219 162 10 210 150 10
53139-192 133 9 163 110 8 124 112 88 82 82 82
53140- 50 50 50 30 30 30 14 14 14 6 6 6
53141- 0 0 0 0 0 0 0 0 0 0 0 0
53142- 0 0 0 0 0 0 0 0 0 0 0 0
53143- 0 0 0 0 0 0 0 0 0 0 0 0
53144- 0 0 0 0 0 0 0 0 0 0 0 0
53145- 0 0 0 0 0 0 0 0 0 0 0 0
53146- 6 6 6 14 14 14 22 22 22 34 34 34
53147- 42 42 42 58 58 58 74 74 74 86 86 86
53148-101 98 89 122 102 70 130 98 46 121 87 25
53149-137 92 6 152 99 6 163 110 8 180 123 7
53150-185 133 11 197 138 11 206 145 10 200 144 11
53151-180 123 7 156 107 11 130 83 6 104 69 6
53152- 50 34 6 54 54 54 110 110 110 101 98 89
53153- 86 86 86 82 82 82 78 78 78 78 78 78
53154- 78 78 78 78 78 78 78 78 78 78 78 78
53155- 78 78 78 82 82 82 86 86 86 94 94 94
53156-106 106 106 101 101 101 86 66 34 124 80 6
53157-156 107 11 180 123 7 192 133 9 200 144 11
53158-206 145 10 200 144 11 192 133 9 175 118 6
53159-139 102 15 109 106 95 70 70 70 42 42 42
53160- 22 22 22 10 10 10 0 0 0 0 0 0
53161- 0 0 0 0 0 0 0 0 0 0 0 0
53162- 0 0 0 0 0 0 0 0 0 0 0 0
53163- 0 0 0 0 0 0 0 0 0 0 0 0
53164- 0 0 0 0 0 0 0 0 0 0 0 0
53165- 0 0 0 0 0 0 0 0 0 0 0 0
53166- 0 0 0 0 0 0 6 6 6 10 10 10
53167- 14 14 14 22 22 22 30 30 30 38 38 38
53168- 50 50 50 62 62 62 74 74 74 90 90 90
53169-101 98 89 112 100 78 121 87 25 124 80 6
53170-137 92 6 152 99 6 152 99 6 152 99 6
53171-138 86 6 124 80 6 98 70 6 86 66 30
53172-101 98 89 82 82 82 58 58 58 46 46 46
53173- 38 38 38 34 34 34 34 34 34 34 34 34
53174- 34 34 34 34 34 34 34 34 34 34 34 34
53175- 34 34 34 34 34 34 38 38 38 42 42 42
53176- 54 54 54 82 82 82 94 86 76 91 60 6
53177-134 86 6 156 107 11 167 114 7 175 118 6
53178-175 118 6 167 114 7 152 99 6 121 87 25
53179-101 98 89 62 62 62 34 34 34 18 18 18
53180- 6 6 6 0 0 0 0 0 0 0 0 0
53181- 0 0 0 0 0 0 0 0 0 0 0 0
53182- 0 0 0 0 0 0 0 0 0 0 0 0
53183- 0 0 0 0 0 0 0 0 0 0 0 0
53184- 0 0 0 0 0 0 0 0 0 0 0 0
53185- 0 0 0 0 0 0 0 0 0 0 0 0
53186- 0 0 0 0 0 0 0 0 0 0 0 0
53187- 0 0 0 6 6 6 6 6 6 10 10 10
53188- 18 18 18 22 22 22 30 30 30 42 42 42
53189- 50 50 50 66 66 66 86 86 86 101 98 89
53190-106 86 58 98 70 6 104 69 6 104 69 6
53191-104 69 6 91 60 6 82 62 34 90 90 90
53192- 62 62 62 38 38 38 22 22 22 14 14 14
53193- 10 10 10 10 10 10 10 10 10 10 10 10
53194- 10 10 10 10 10 10 6 6 6 10 10 10
53195- 10 10 10 10 10 10 10 10 10 14 14 14
53196- 22 22 22 42 42 42 70 70 70 89 81 66
53197- 80 54 7 104 69 6 124 80 6 137 92 6
53198-134 86 6 116 81 8 100 82 52 86 86 86
53199- 58 58 58 30 30 30 14 14 14 6 6 6
53200- 0 0 0 0 0 0 0 0 0 0 0 0
53201- 0 0 0 0 0 0 0 0 0 0 0 0
53202- 0 0 0 0 0 0 0 0 0 0 0 0
53203- 0 0 0 0 0 0 0 0 0 0 0 0
53204- 0 0 0 0 0 0 0 0 0 0 0 0
53205- 0 0 0 0 0 0 0 0 0 0 0 0
53206- 0 0 0 0 0 0 0 0 0 0 0 0
53207- 0 0 0 0 0 0 0 0 0 0 0 0
53208- 0 0 0 6 6 6 10 10 10 14 14 14
53209- 18 18 18 26 26 26 38 38 38 54 54 54
53210- 70 70 70 86 86 86 94 86 76 89 81 66
53211- 89 81 66 86 86 86 74 74 74 50 50 50
53212- 30 30 30 14 14 14 6 6 6 0 0 0
53213- 0 0 0 0 0 0 0 0 0 0 0 0
53214- 0 0 0 0 0 0 0 0 0 0 0 0
53215- 0 0 0 0 0 0 0 0 0 0 0 0
53216- 6 6 6 18 18 18 34 34 34 58 58 58
53217- 82 82 82 89 81 66 89 81 66 89 81 66
53218- 94 86 66 94 86 76 74 74 74 50 50 50
53219- 26 26 26 14 14 14 6 6 6 0 0 0
53220- 0 0 0 0 0 0 0 0 0 0 0 0
53221- 0 0 0 0 0 0 0 0 0 0 0 0
53222- 0 0 0 0 0 0 0 0 0 0 0 0
53223- 0 0 0 0 0 0 0 0 0 0 0 0
53224- 0 0 0 0 0 0 0 0 0 0 0 0
53225- 0 0 0 0 0 0 0 0 0 0 0 0
53226- 0 0 0 0 0 0 0 0 0 0 0 0
53227- 0 0 0 0 0 0 0 0 0 0 0 0
53228- 0 0 0 0 0 0 0 0 0 0 0 0
53229- 6 6 6 6 6 6 14 14 14 18 18 18
53230- 30 30 30 38 38 38 46 46 46 54 54 54
53231- 50 50 50 42 42 42 30 30 30 18 18 18
53232- 10 10 10 0 0 0 0 0 0 0 0 0
53233- 0 0 0 0 0 0 0 0 0 0 0 0
53234- 0 0 0 0 0 0 0 0 0 0 0 0
53235- 0 0 0 0 0 0 0 0 0 0 0 0
53236- 0 0 0 6 6 6 14 14 14 26 26 26
53237- 38 38 38 50 50 50 58 58 58 58 58 58
53238- 54 54 54 42 42 42 30 30 30 18 18 18
53239- 10 10 10 0 0 0 0 0 0 0 0 0
53240- 0 0 0 0 0 0 0 0 0 0 0 0
53241- 0 0 0 0 0 0 0 0 0 0 0 0
53242- 0 0 0 0 0 0 0 0 0 0 0 0
53243- 0 0 0 0 0 0 0 0 0 0 0 0
53244- 0 0 0 0 0 0 0 0 0 0 0 0
53245- 0 0 0 0 0 0 0 0 0 0 0 0
53246- 0 0 0 0 0 0 0 0 0 0 0 0
53247- 0 0 0 0 0 0 0 0 0 0 0 0
53248- 0 0 0 0 0 0 0 0 0 0 0 0
53249- 0 0 0 0 0 0 0 0 0 6 6 6
53250- 6 6 6 10 10 10 14 14 14 18 18 18
53251- 18 18 18 14 14 14 10 10 10 6 6 6
53252- 0 0 0 0 0 0 0 0 0 0 0 0
53253- 0 0 0 0 0 0 0 0 0 0 0 0
53254- 0 0 0 0 0 0 0 0 0 0 0 0
53255- 0 0 0 0 0 0 0 0 0 0 0 0
53256- 0 0 0 0 0 0 0 0 0 6 6 6
53257- 14 14 14 18 18 18 22 22 22 22 22 22
53258- 18 18 18 14 14 14 10 10 10 6 6 6
53259- 0 0 0 0 0 0 0 0 0 0 0 0
53260- 0 0 0 0 0 0 0 0 0 0 0 0
53261- 0 0 0 0 0 0 0 0 0 0 0 0
53262- 0 0 0 0 0 0 0 0 0 0 0 0
53263- 0 0 0 0 0 0 0 0 0 0 0 0
53264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53277+4 4 4 4 4 4
53278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53291+4 4 4 4 4 4
53292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53305+4 4 4 4 4 4
53306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53319+4 4 4 4 4 4
53320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53333+4 4 4 4 4 4
53334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53347+4 4 4 4 4 4
53348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53352+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
53353+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
53354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53357+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
53358+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
53359+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
53360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53361+4 4 4 4 4 4
53362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53366+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
53367+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
53368+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53369+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53371+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
53372+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
53373+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
53374+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53375+4 4 4 4 4 4
53376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53380+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
53381+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
53382+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
53383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53385+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
53386+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
53387+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
53388+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
53389+4 4 4 4 4 4
53390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53393+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
53394+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
53395+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
53396+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
53397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53398+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
53399+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
53400+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
53401+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
53402+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
53403+4 4 4 4 4 4
53404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53407+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
53408+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
53409+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
53410+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
53411+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
53412+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
53413+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
53414+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
53415+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
53416+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
53417+4 4 4 4 4 4
53418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
53421+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
53422+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
53423+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
53424+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
53425+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
53426+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
53427+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
53428+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
53429+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
53430+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
53431+4 4 4 4 4 4
53432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53434+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
53435+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
53436+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
53437+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
53438+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
53439+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
53440+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
53441+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
53442+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
53443+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
53444+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
53445+4 4 4 4 4 4
53446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53448+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
53449+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
53450+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
53451+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
53452+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
53453+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
53454+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
53455+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
53456+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
53457+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
53458+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
53459+4 4 4 4 4 4
53460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53462+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
53463+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
53464+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
53465+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
53466+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
53467+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
53468+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
53469+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
53470+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
53471+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
53472+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
53473+4 4 4 4 4 4
53474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53476+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
53477+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
53478+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
53479+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
53480+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
53481+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
53482+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
53483+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
53484+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
53485+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
53486+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
53487+4 4 4 4 4 4
53488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53489+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
53490+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
53491+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
53492+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
53493+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
53494+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
53495+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
53496+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
53497+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
53498+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
53499+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
53500+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
53501+4 4 4 4 4 4
53502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53503+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
53504+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
53505+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
53506+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
53507+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
53508+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
53509+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
53510+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
53511+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
53512+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
53513+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
53514+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
53515+0 0 0 4 4 4
53516+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
53517+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
53518+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
53519+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
53520+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
53521+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
53522+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
53523+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
53524+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
53525+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
53526+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
53527+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
53528+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
53529+2 0 0 0 0 0
53530+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
53531+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
53532+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
53533+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
53534+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
53535+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
53536+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
53537+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
53538+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
53539+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
53540+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
53541+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
53542+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
53543+37 38 37 0 0 0
53544+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
53545+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
53546+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
53547+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
53548+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
53549+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
53550+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
53551+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
53552+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
53553+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
53554+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
53555+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
53556+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
53557+85 115 134 4 0 0
53558+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
53559+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
53560+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
53561+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
53562+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
53563+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
53564+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
53565+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
53566+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
53567+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
53568+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
53569+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
53570+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
53571+60 73 81 4 0 0
53572+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
53573+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
53574+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
53575+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
53576+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
53577+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
53578+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
53579+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
53580+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
53581+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
53582+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
53583+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
53584+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
53585+16 19 21 4 0 0
53586+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
53587+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
53588+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
53589+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
53590+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
53591+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
53592+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
53593+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
53594+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
53595+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
53596+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
53597+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
53598+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
53599+4 0 0 4 3 3
53600+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
53601+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
53602+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
53603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
53604+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
53605+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
53606+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
53607+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
53608+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
53609+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
53610+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
53611+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
53612+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
53613+3 2 2 4 4 4
53614+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
53615+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
53616+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
53617+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
53618+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
53619+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
53620+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
53621+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
53622+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
53623+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
53624+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
53625+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
53626+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
53627+4 4 4 4 4 4
53628+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
53629+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
53630+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
53631+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
53632+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
53633+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
53634+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
53635+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
53636+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
53637+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
53638+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
53639+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
53640+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
53641+4 4 4 4 4 4
53642+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
53643+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
53644+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
53645+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
53646+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
53647+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
53648+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
53649+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
53650+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
53651+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
53652+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
53653+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
53654+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
53655+5 5 5 5 5 5
53656+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
53657+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
53658+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
53659+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
53660+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
53661+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53662+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
53663+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
53664+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
53665+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
53666+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
53667+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
53668+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
53669+5 5 5 4 4 4
53670+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
53671+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
53672+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
53673+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
53674+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
53675+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
53676+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
53677+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
53678+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
53679+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
53680+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
53681+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
53682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53683+4 4 4 4 4 4
53684+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
53685+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
53686+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
53687+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
53688+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
53689+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53690+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53691+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
53692+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
53693+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
53694+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
53695+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
53696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53697+4 4 4 4 4 4
53698+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
53699+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
53700+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
53701+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
53702+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
53703+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
53704+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
53705+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
53706+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
53707+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
53708+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
53709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53711+4 4 4 4 4 4
53712+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
53713+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
53714+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
53715+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
53716+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
53717+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53718+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
53719+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
53720+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
53721+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
53722+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
53723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53725+4 4 4 4 4 4
53726+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
53727+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
53728+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
53729+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
53730+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
53731+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
53732+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
53733+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
53734+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
53735+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
53736+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53739+4 4 4 4 4 4
53740+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
53741+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
53742+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
53743+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
53744+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
53745+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
53746+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
53747+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
53748+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
53749+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
53750+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
53751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53753+4 4 4 4 4 4
53754+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
53755+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
53756+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
53757+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
53758+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
53759+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
53760+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
53761+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
53762+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
53763+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
53764+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
53765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53767+4 4 4 4 4 4
53768+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
53769+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
53770+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
53771+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
53772+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
53773+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
53774+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
53775+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
53776+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
53777+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
53778+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53781+4 4 4 4 4 4
53782+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
53783+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
53784+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
53785+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
53786+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53787+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
53788+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
53789+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
53790+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
53791+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
53792+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53795+4 4 4 4 4 4
53796+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
53797+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
53798+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
53799+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
53800+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53801+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
53802+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
53803+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
53804+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
53805+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
53806+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53809+4 4 4 4 4 4
53810+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
53811+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
53812+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
53813+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
53814+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53815+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
53816+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
53817+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
53818+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
53819+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53820+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53823+4 4 4 4 4 4
53824+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
53825+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
53826+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
53827+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
53828+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
53829+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
53830+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
53831+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
53832+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53833+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53834+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53837+4 4 4 4 4 4
53838+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
53839+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
53840+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
53841+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
53842+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53843+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
53844+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
53845+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
53846+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
53847+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53848+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53851+4 4 4 4 4 4
53852+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
53853+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
53854+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
53855+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
53856+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
53857+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
53858+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
53859+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
53860+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53861+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53862+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53865+4 4 4 4 4 4
53866+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
53867+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
53868+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
53869+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
53870+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
53871+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
53872+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
53873+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
53874+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
53875+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53876+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53879+4 4 4 4 4 4
53880+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
53881+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
53882+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
53883+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
53884+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
53885+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
53886+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
53887+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
53888+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53889+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53890+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53893+4 4 4 4 4 4
53894+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
53895+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
53896+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
53897+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
53898+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
53899+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
53900+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
53901+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
53902+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
53903+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53904+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53907+4 4 4 4 4 4
53908+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
53909+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
53910+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
53911+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
53912+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
53913+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
53914+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
53915+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
53916+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53917+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53918+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53921+4 4 4 4 4 4
53922+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
53923+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
53924+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
53925+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
53926+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
53927+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
53928+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
53929+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
53930+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
53931+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53932+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53935+4 4 4 4 4 4
53936+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
53937+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
53938+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
53939+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
53940+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
53941+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
53942+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53943+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
53944+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
53945+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53946+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53949+4 4 4 4 4 4
53950+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
53951+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
53952+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
53953+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
53954+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
53955+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
53956+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
53957+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
53958+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
53959+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53960+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53963+4 4 4 4 4 4
53964+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
53965+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
53966+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
53967+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
53968+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
53969+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
53970+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
53971+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
53972+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
53973+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53974+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53977+4 4 4 4 4 4
53978+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
53979+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
53980+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
53981+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
53982+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
53983+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
53984+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
53985+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
53986+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
53987+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
53988+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
53989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53991+4 4 4 4 4 4
53992+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
53993+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
53994+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
53995+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
53996+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
53997+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
53998+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
53999+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
54000+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
54001+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54002+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54005+4 4 4 4 4 4
54006+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
54007+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
54008+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
54009+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
54010+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
54011+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
54012+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
54013+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
54014+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
54015+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
54016+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54019+4 4 4 4 4 4
54020+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
54021+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
54022+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
54023+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
54024+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
54025+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
54026+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
54027+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
54028+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
54029+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
54030+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54033+4 4 4 4 4 4
54034+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
54035+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
54036+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
54037+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
54038+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
54039+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
54040+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
54041+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
54042+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
54043+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
54044+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54047+4 4 4 4 4 4
54048+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
54049+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
54050+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
54051+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
54052+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
54053+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
54054+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54055+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
54056+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
54057+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
54058+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54061+4 4 4 4 4 4
54062+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
54063+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
54064+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
54065+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
54066+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
54067+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
54068+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
54069+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
54070+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
54071+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
54072+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54075+4 4 4 4 4 4
54076+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
54077+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
54078+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
54079+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
54080+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
54081+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
54082+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
54083+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
54084+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
54085+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
54086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54089+4 4 4 4 4 4
54090+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
54091+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
54092+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
54093+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
54094+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
54095+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
54096+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
54097+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
54098+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
54099+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
54100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54103+4 4 4 4 4 4
54104+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
54105+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
54106+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
54107+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
54108+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
54109+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
54110+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
54111+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
54112+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
54113+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
54114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54117+4 4 4 4 4 4
54118+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
54119+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
54120+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
54121+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
54122+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
54123+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
54124+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
54125+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
54126+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
54127+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54131+4 4 4 4 4 4
54132+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
54133+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
54134+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
54135+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
54136+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
54137+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
54138+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
54139+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
54140+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
54141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54145+4 4 4 4 4 4
54146+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
54147+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
54148+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
54149+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
54150+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
54151+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
54152+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
54153+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
54154+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
54155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54159+4 4 4 4 4 4
54160+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
54161+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
54162+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
54163+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
54164+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
54165+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
54166+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
54167+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
54168+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54173+4 4 4 4 4 4
54174+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
54175+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
54176+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
54177+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
54178+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
54179+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
54180+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
54181+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
54182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54187+4 4 4 4 4 4
54188+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
54189+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
54190+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
54191+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
54192+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
54193+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
54194+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
54195+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
54196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54201+4 4 4 4 4 4
54202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
54203+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
54204+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
54205+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
54206+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
54207+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
54208+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
54209+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
54210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54215+4 4 4 4 4 4
54216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54217+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
54218+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
54219+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
54220+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
54221+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
54222+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
54223+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
54224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54229+4 4 4 4 4 4
54230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54231+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
54232+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
54233+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
54234+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
54235+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
54236+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
54237+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54243+4 4 4 4 4 4
54244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54246+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
54247+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
54248+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
54249+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
54250+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
54251+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54257+4 4 4 4 4 4
54258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
54261+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
54262+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
54263+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
54264+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
54265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54271+4 4 4 4 4 4
54272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54275+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
54276+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
54277+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
54278+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
54279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54285+4 4 4 4 4 4
54286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54289+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
54290+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
54291+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
54292+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
54293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54299+4 4 4 4 4 4
54300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54303+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
54304+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
54305+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
54306+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
54307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54313+4 4 4 4 4 4
54314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
54318+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
54319+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
54320+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
54321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54327+4 4 4 4 4 4
54328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54332+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
54333+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
54334+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
54335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54341+4 4 4 4 4 4
54342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54346+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
54347+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
54348+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54353+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54354+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54355+4 4 4 4 4 4
54356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54360+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
54361+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
54362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54368+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54369+4 4 4 4 4 4
54370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54374+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
54375+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
54376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54383+4 4 4 4 4 4
54384diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
54385index a01147f..5d896f8 100644
54386--- a/drivers/video/matrox/matroxfb_DAC1064.c
54387+++ b/drivers/video/matrox/matroxfb_DAC1064.c
54388@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54389
54390 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54391 struct matrox_switch matrox_mystique = {
54392- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54393+ .preinit = MGA1064_preinit,
54394+ .reset = MGA1064_reset,
54395+ .init = MGA1064_init,
54396+ .restore = MGA1064_restore,
54397 };
54398 EXPORT_SYMBOL(matrox_mystique);
54399 #endif
54400
54401 #ifdef CONFIG_FB_MATROX_G
54402 struct matrox_switch matrox_G100 = {
54403- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54404+ .preinit = MGAG100_preinit,
54405+ .reset = MGAG100_reset,
54406+ .init = MGAG100_init,
54407+ .restore = MGAG100_restore,
54408 };
54409 EXPORT_SYMBOL(matrox_G100);
54410 #endif
54411diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
54412index 195ad7c..09743fc 100644
54413--- a/drivers/video/matrox/matroxfb_Ti3026.c
54414+++ b/drivers/video/matrox/matroxfb_Ti3026.c
54415@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54416 }
54417
54418 struct matrox_switch matrox_millennium = {
54419- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54420+ .preinit = Ti3026_preinit,
54421+ .reset = Ti3026_reset,
54422+ .init = Ti3026_init,
54423+ .restore = Ti3026_restore
54424 };
54425 EXPORT_SYMBOL(matrox_millennium);
54426 #endif
54427diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
54428index fe92eed..106e085 100644
54429--- a/drivers/video/mb862xx/mb862xxfb_accel.c
54430+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
54431@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54432 struct mb862xxfb_par *par = info->par;
54433
54434 if (info->var.bits_per_pixel == 32) {
54435- info->fbops->fb_fillrect = cfb_fillrect;
54436- info->fbops->fb_copyarea = cfb_copyarea;
54437- info->fbops->fb_imageblit = cfb_imageblit;
54438+ pax_open_kernel();
54439+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54440+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54441+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54442+ pax_close_kernel();
54443 } else {
54444 outreg(disp, GC_L0EM, 3);
54445- info->fbops->fb_fillrect = mb86290fb_fillrect;
54446- info->fbops->fb_copyarea = mb86290fb_copyarea;
54447- info->fbops->fb_imageblit = mb86290fb_imageblit;
54448+ pax_open_kernel();
54449+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54450+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54451+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54452+ pax_close_kernel();
54453 }
54454 outreg(draw, GDC_REG_DRAW_BASE, 0);
54455 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
54456diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
54457index ff22871..b129bed 100644
54458--- a/drivers/video/nvidia/nvidia.c
54459+++ b/drivers/video/nvidia/nvidia.c
54460@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54461 info->fix.line_length = (info->var.xres_virtual *
54462 info->var.bits_per_pixel) >> 3;
54463 if (info->var.accel_flags) {
54464- info->fbops->fb_imageblit = nvidiafb_imageblit;
54465- info->fbops->fb_fillrect = nvidiafb_fillrect;
54466- info->fbops->fb_copyarea = nvidiafb_copyarea;
54467- info->fbops->fb_sync = nvidiafb_sync;
54468+ pax_open_kernel();
54469+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54470+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54471+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54472+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54473+ pax_close_kernel();
54474 info->pixmap.scan_align = 4;
54475 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54476 info->flags |= FBINFO_READS_FAST;
54477 NVResetGraphics(info);
54478 } else {
54479- info->fbops->fb_imageblit = cfb_imageblit;
54480- info->fbops->fb_fillrect = cfb_fillrect;
54481- info->fbops->fb_copyarea = cfb_copyarea;
54482- info->fbops->fb_sync = NULL;
54483+ pax_open_kernel();
54484+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54485+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54486+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54487+ *(void **)&info->fbops->fb_sync = NULL;
54488+ pax_close_kernel();
54489 info->pixmap.scan_align = 1;
54490 info->flags |= FBINFO_HWACCEL_DISABLED;
54491 info->flags &= ~FBINFO_READS_FAST;
54492@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54493 info->pixmap.size = 8 * 1024;
54494 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54495
54496- if (!hwcur)
54497- info->fbops->fb_cursor = NULL;
54498+ if (!hwcur) {
54499+ pax_open_kernel();
54500+ *(void **)&info->fbops->fb_cursor = NULL;
54501+ pax_close_kernel();
54502+ }
54503
54504 info->var.accel_flags = (!noaccel);
54505
54506diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
54507index 669a81f..e216d76 100644
54508--- a/drivers/video/omap2/dss/display.c
54509+++ b/drivers/video/omap2/dss/display.c
54510@@ -137,12 +137,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54511 snprintf(dssdev->alias, sizeof(dssdev->alias),
54512 "display%d", disp_num_counter++);
54513
54514+ pax_open_kernel();
54515 if (drv && drv->get_resolution == NULL)
54516- drv->get_resolution = omapdss_default_get_resolution;
54517+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54518 if (drv && drv->get_recommended_bpp == NULL)
54519- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54520+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54521 if (drv && drv->get_timings == NULL)
54522- drv->get_timings = omapdss_default_get_timings;
54523+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54524+ pax_close_kernel();
54525
54526 mutex_lock(&panel_list_mutex);
54527 list_add_tail(&dssdev->panel_list, &panel_list);
54528diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
54529index 83433cb..71e9b98 100644
54530--- a/drivers/video/s1d13xxxfb.c
54531+++ b/drivers/video/s1d13xxxfb.c
54532@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
54533
54534 switch(prod_id) {
54535 case S1D13506_PROD_ID: /* activate acceleration */
54536- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54537- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54538+ pax_open_kernel();
54539+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54540+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54541+ pax_close_kernel();
54542 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
54543 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
54544 break;
54545diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
54546index d513ed6..90b0de9 100644
54547--- a/drivers/video/smscufx.c
54548+++ b/drivers/video/smscufx.c
54549@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
54550 fb_deferred_io_cleanup(info);
54551 kfree(info->fbdefio);
54552 info->fbdefio = NULL;
54553- info->fbops->fb_mmap = ufx_ops_mmap;
54554+ pax_open_kernel();
54555+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
54556+ pax_close_kernel();
54557 }
54558
54559 pr_debug("released /dev/fb%d user=%d count=%d",
54560diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
54561index 025f14e..20eb4db 100644
54562--- a/drivers/video/udlfb.c
54563+++ b/drivers/video/udlfb.c
54564@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
54565 dlfb_urb_completion(urb);
54566
54567 error:
54568- atomic_add(bytes_sent, &dev->bytes_sent);
54569- atomic_add(bytes_identical, &dev->bytes_identical);
54570- atomic_add(width*height*2, &dev->bytes_rendered);
54571+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54572+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54573+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
54574 end_cycles = get_cycles();
54575- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54576+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54577 >> 10)), /* Kcycles */
54578 &dev->cpu_kcycles_used);
54579
54580@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
54581 dlfb_urb_completion(urb);
54582
54583 error:
54584- atomic_add(bytes_sent, &dev->bytes_sent);
54585- atomic_add(bytes_identical, &dev->bytes_identical);
54586- atomic_add(bytes_rendered, &dev->bytes_rendered);
54587+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
54588+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
54589+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
54590 end_cycles = get_cycles();
54591- atomic_add(((unsigned int) ((end_cycles - start_cycles)
54592+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
54593 >> 10)), /* Kcycles */
54594 &dev->cpu_kcycles_used);
54595 }
54596@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
54597 fb_deferred_io_cleanup(info);
54598 kfree(info->fbdefio);
54599 info->fbdefio = NULL;
54600- info->fbops->fb_mmap = dlfb_ops_mmap;
54601+ pax_open_kernel();
54602+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
54603+ pax_close_kernel();
54604 }
54605
54606 pr_warn("released /dev/fb%d user=%d count=%d\n",
54607@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
54608 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54609 struct dlfb_data *dev = fb_info->par;
54610 return snprintf(buf, PAGE_SIZE, "%u\n",
54611- atomic_read(&dev->bytes_rendered));
54612+ atomic_read_unchecked(&dev->bytes_rendered));
54613 }
54614
54615 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54616@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
54617 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54618 struct dlfb_data *dev = fb_info->par;
54619 return snprintf(buf, PAGE_SIZE, "%u\n",
54620- atomic_read(&dev->bytes_identical));
54621+ atomic_read_unchecked(&dev->bytes_identical));
54622 }
54623
54624 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54625@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
54626 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54627 struct dlfb_data *dev = fb_info->par;
54628 return snprintf(buf, PAGE_SIZE, "%u\n",
54629- atomic_read(&dev->bytes_sent));
54630+ atomic_read_unchecked(&dev->bytes_sent));
54631 }
54632
54633 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54634@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
54635 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54636 struct dlfb_data *dev = fb_info->par;
54637 return snprintf(buf, PAGE_SIZE, "%u\n",
54638- atomic_read(&dev->cpu_kcycles_used));
54639+ atomic_read_unchecked(&dev->cpu_kcycles_used));
54640 }
54641
54642 static ssize_t edid_show(
54643@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
54644 struct fb_info *fb_info = dev_get_drvdata(fbdev);
54645 struct dlfb_data *dev = fb_info->par;
54646
54647- atomic_set(&dev->bytes_rendered, 0);
54648- atomic_set(&dev->bytes_identical, 0);
54649- atomic_set(&dev->bytes_sent, 0);
54650- atomic_set(&dev->cpu_kcycles_used, 0);
54651+ atomic_set_unchecked(&dev->bytes_rendered, 0);
54652+ atomic_set_unchecked(&dev->bytes_identical, 0);
54653+ atomic_set_unchecked(&dev->bytes_sent, 0);
54654+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
54655
54656 return count;
54657 }
54658diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
54659index 256fba7..6e75516 100644
54660--- a/drivers/video/uvesafb.c
54661+++ b/drivers/video/uvesafb.c
54662@@ -19,6 +19,7 @@
54663 #include <linux/io.h>
54664 #include <linux/mutex.h>
54665 #include <linux/slab.h>
54666+#include <linux/moduleloader.h>
54667 #include <video/edid.h>
54668 #include <video/uvesafb.h>
54669 #ifdef CONFIG_X86
54670@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
54671 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
54672 par->pmi_setpal = par->ypan = 0;
54673 } else {
54674+
54675+#ifdef CONFIG_PAX_KERNEXEC
54676+#ifdef CONFIG_MODULES
54677+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
54678+#endif
54679+ if (!par->pmi_code) {
54680+ par->pmi_setpal = par->ypan = 0;
54681+ return 0;
54682+ }
54683+#endif
54684+
54685 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
54686 + task->t.regs.edi);
54687+
54688+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54689+ pax_open_kernel();
54690+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
54691+ pax_close_kernel();
54692+
54693+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
54694+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
54695+#else
54696 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
54697 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
54698+#endif
54699+
54700 printk(KERN_INFO "uvesafb: protected mode interface info at "
54701 "%04x:%04x\n",
54702 (u16)task->t.regs.es, (u16)task->t.regs.edi);
54703@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
54704 par->ypan = ypan;
54705
54706 if (par->pmi_setpal || par->ypan) {
54707+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
54708 if (__supported_pte_mask & _PAGE_NX) {
54709 par->pmi_setpal = par->ypan = 0;
54710 printk(KERN_WARNING "uvesafb: NX protection is active, "
54711 "better not use the PMI.\n");
54712- } else {
54713+ } else
54714+#endif
54715 uvesafb_vbe_getpmi(task, par);
54716- }
54717 }
54718 #else
54719 /* The protected mode interface is not available on non-x86. */
54720@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
54721 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
54722
54723 /* Disable blanking if the user requested so. */
54724- if (!blank)
54725- info->fbops->fb_blank = NULL;
54726+ if (!blank) {
54727+ pax_open_kernel();
54728+ *(void **)&info->fbops->fb_blank = NULL;
54729+ pax_close_kernel();
54730+ }
54731
54732 /*
54733 * Find out how much IO memory is required for the mode with
54734@@ -1530,8 +1557,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
54735 info->flags = FBINFO_FLAG_DEFAULT |
54736 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
54737
54738- if (!par->ypan)
54739- info->fbops->fb_pan_display = NULL;
54740+ if (!par->ypan) {
54741+ pax_open_kernel();
54742+ *(void **)&info->fbops->fb_pan_display = NULL;
54743+ pax_close_kernel();
54744+ }
54745 }
54746
54747 static void uvesafb_init_mtrr(struct fb_info *info)
54748@@ -1792,6 +1822,11 @@ out_mode:
54749 out:
54750 kfree(par->vbe_modes);
54751
54752+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54753+ if (par->pmi_code)
54754+ module_free_exec(NULL, par->pmi_code);
54755+#endif
54756+
54757 framebuffer_release(info);
54758 return err;
54759 }
54760@@ -1816,6 +1851,12 @@ static int uvesafb_remove(struct platform_device *dev)
54761 kfree(par->vbe_modes);
54762 kfree(par->vbe_state_orig);
54763 kfree(par->vbe_state_saved);
54764+
54765+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54766+ if (par->pmi_code)
54767+ module_free_exec(NULL, par->pmi_code);
54768+#endif
54769+
54770 }
54771
54772 framebuffer_release(info);
54773diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
54774index 1c7da3b..56ea0bd 100644
54775--- a/drivers/video/vesafb.c
54776+++ b/drivers/video/vesafb.c
54777@@ -9,6 +9,7 @@
54778 */
54779
54780 #include <linux/module.h>
54781+#include <linux/moduleloader.h>
54782 #include <linux/kernel.h>
54783 #include <linux/errno.h>
54784 #include <linux/string.h>
54785@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
54786 static int vram_total; /* Set total amount of memory */
54787 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
54788 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
54789-static void (*pmi_start)(void) __read_mostly;
54790-static void (*pmi_pal) (void) __read_mostly;
54791+static void (*pmi_start)(void) __read_only;
54792+static void (*pmi_pal) (void) __read_only;
54793 static int depth __read_mostly;
54794 static int vga_compat __read_mostly;
54795 /* --------------------------------------------------------------------- */
54796@@ -234,6 +235,7 @@ static int vesafb_probe(struct platform_device *dev)
54797 unsigned int size_remap;
54798 unsigned int size_total;
54799 char *option = NULL;
54800+ void *pmi_code = NULL;
54801
54802 /* ignore error return of fb_get_options */
54803 fb_get_options("vesafb", &option);
54804@@ -280,10 +282,6 @@ static int vesafb_probe(struct platform_device *dev)
54805 size_remap = size_total;
54806 vesafb_fix.smem_len = size_remap;
54807
54808-#ifndef __i386__
54809- screen_info.vesapm_seg = 0;
54810-#endif
54811-
54812 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
54813 printk(KERN_WARNING
54814 "vesafb: cannot reserve video memory at 0x%lx\n",
54815@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
54816 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
54817 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
54818
54819+#ifdef __i386__
54820+
54821+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54822+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
54823+ if (!pmi_code)
54824+#elif !defined(CONFIG_PAX_KERNEXEC)
54825+ if (0)
54826+#endif
54827+
54828+#endif
54829+ screen_info.vesapm_seg = 0;
54830+
54831 if (screen_info.vesapm_seg) {
54832- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
54833- screen_info.vesapm_seg,screen_info.vesapm_off);
54834+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
54835+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
54836 }
54837
54838 if (screen_info.vesapm_seg < 0xc000)
54839@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
54840
54841 if (ypan || pmi_setpal) {
54842 unsigned short *pmi_base;
54843+
54844 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
54845- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
54846- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
54847+
54848+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54849+ pax_open_kernel();
54850+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
54851+#else
54852+ pmi_code = pmi_base;
54853+#endif
54854+
54855+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
54856+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
54857+
54858+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54859+ pmi_start = ktva_ktla(pmi_start);
54860+ pmi_pal = ktva_ktla(pmi_pal);
54861+ pax_close_kernel();
54862+#endif
54863+
54864 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
54865 if (pmi_base[3]) {
54866 printk(KERN_INFO "vesafb: pmi: ports = ");
54867@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
54868 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
54869 (ypan ? FBINFO_HWACCEL_YPAN : 0);
54870
54871- if (!ypan)
54872- info->fbops->fb_pan_display = NULL;
54873+ if (!ypan) {
54874+ pax_open_kernel();
54875+ *(void **)&info->fbops->fb_pan_display = NULL;
54876+ pax_close_kernel();
54877+ }
54878
54879 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
54880 err = -ENOMEM;
54881@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
54882 fb_info(info, "%s frame buffer device\n", info->fix.id);
54883 return 0;
54884 err:
54885+
54886+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
54887+ module_free_exec(NULL, pmi_code);
54888+#endif
54889+
54890 if (info->screen_base)
54891 iounmap(info->screen_base);
54892 framebuffer_release(info);
54893diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
54894index 88714ae..16c2e11 100644
54895--- a/drivers/video/via/via_clock.h
54896+++ b/drivers/video/via/via_clock.h
54897@@ -56,7 +56,7 @@ struct via_clock {
54898
54899 void (*set_engine_pll_state)(u8 state);
54900 void (*set_engine_pll)(struct via_pll_config config);
54901-};
54902+} __no_const;
54903
54904
54905 static inline u32 get_pll_internal_frequency(u32 ref_freq,
54906diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
54907index fef20db..d28b1ab 100644
54908--- a/drivers/xen/xenfs/xenstored.c
54909+++ b/drivers/xen/xenfs/xenstored.c
54910@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
54911 static int xsd_kva_open(struct inode *inode, struct file *file)
54912 {
54913 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
54914+#ifdef CONFIG_GRKERNSEC_HIDESYM
54915+ NULL);
54916+#else
54917 xen_store_interface);
54918+#endif
54919+
54920 if (!file->private_data)
54921 return -ENOMEM;
54922 return 0;
54923diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
54924index 9ff073f..05cef23 100644
54925--- a/fs/9p/vfs_addr.c
54926+++ b/fs/9p/vfs_addr.c
54927@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
54928
54929 retval = v9fs_file_write_internal(inode,
54930 v9inode->writeback_fid,
54931- (__force const char __user *)buffer,
54932+ (const char __force_user *)buffer,
54933 len, &offset, 0);
54934 if (retval > 0)
54935 retval = 0;
54936diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
54937index 4e65aa9..043dc9a 100644
54938--- a/fs/9p/vfs_inode.c
54939+++ b/fs/9p/vfs_inode.c
54940@@ -1306,7 +1306,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54941 void
54942 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54943 {
54944- char *s = nd_get_link(nd);
54945+ const char *s = nd_get_link(nd);
54946
54947 p9_debug(P9_DEBUG_VFS, " %s %s\n",
54948 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
54949diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
54950index 370b24c..ff0be7b 100644
54951--- a/fs/Kconfig.binfmt
54952+++ b/fs/Kconfig.binfmt
54953@@ -103,7 +103,7 @@ config HAVE_AOUT
54954
54955 config BINFMT_AOUT
54956 tristate "Kernel support for a.out and ECOFF binaries"
54957- depends on HAVE_AOUT
54958+ depends on HAVE_AOUT && BROKEN
54959 ---help---
54960 A.out (Assembler.OUTput) is a set of formats for libraries and
54961 executables used in the earliest versions of UNIX. Linux used
54962diff --git a/fs/afs/inode.c b/fs/afs/inode.c
54963index ce25d75..dc09eeb 100644
54964--- a/fs/afs/inode.c
54965+++ b/fs/afs/inode.c
54966@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
54967 struct afs_vnode *vnode;
54968 struct super_block *sb;
54969 struct inode *inode;
54970- static atomic_t afs_autocell_ino;
54971+ static atomic_unchecked_t afs_autocell_ino;
54972
54973 _enter("{%x:%u},%*.*s,",
54974 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
54975@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
54976 data.fid.unique = 0;
54977 data.fid.vnode = 0;
54978
54979- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
54980+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
54981 afs_iget5_autocell_test, afs_iget5_set,
54982 &data);
54983 if (!inode) {
54984diff --git a/fs/aio.c b/fs/aio.c
54985index 062a5f6..e5618e0 100644
54986--- a/fs/aio.c
54987+++ b/fs/aio.c
54988@@ -374,7 +374,7 @@ static int aio_setup_ring(struct kioctx *ctx)
54989 size += sizeof(struct io_event) * nr_events;
54990
54991 nr_pages = PFN_UP(size);
54992- if (nr_pages < 0)
54993+ if (nr_pages <= 0)
54994 return -EINVAL;
54995
54996 file = aio_private_file(ctx, nr_pages);
54997diff --git a/fs/attr.c b/fs/attr.c
54998index 267968d..5dd8f96 100644
54999--- a/fs/attr.c
55000+++ b/fs/attr.c
55001@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
55002 unsigned long limit;
55003
55004 limit = rlimit(RLIMIT_FSIZE);
55005+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
55006 if (limit != RLIM_INFINITY && offset > limit)
55007 goto out_sig;
55008 if (offset > inode->i_sb->s_maxbytes)
55009diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
55010index 689e40d..515cac5 100644
55011--- a/fs/autofs4/waitq.c
55012+++ b/fs/autofs4/waitq.c
55013@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
55014 {
55015 unsigned long sigpipe, flags;
55016 mm_segment_t fs;
55017- const char *data = (const char *)addr;
55018+ const char __user *data = (const char __force_user *)addr;
55019 ssize_t wr = 0;
55020
55021 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
55022@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
55023 return 1;
55024 }
55025
55026+#ifdef CONFIG_GRKERNSEC_HIDESYM
55027+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
55028+#endif
55029+
55030 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
55031 enum autofs_notify notify)
55032 {
55033@@ -373,7 +377,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
55034
55035 /* If this is a direct mount request create a dummy name */
55036 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
55037+#ifdef CONFIG_GRKERNSEC_HIDESYM
55038+ /* this name does get written to userland via autofs4_write() */
55039+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
55040+#else
55041 qstr.len = sprintf(name, "%p", dentry);
55042+#endif
55043 else {
55044 qstr.len = autofs4_getpath(sbi, dentry, &name);
55045 if (!qstr.len) {
55046diff --git a/fs/befs/endian.h b/fs/befs/endian.h
55047index 2722387..56059b5 100644
55048--- a/fs/befs/endian.h
55049+++ b/fs/befs/endian.h
55050@@ -11,7 +11,7 @@
55051
55052 #include <asm/byteorder.h>
55053
55054-static inline u64
55055+static inline u64 __intentional_overflow(-1)
55056 fs64_to_cpu(const struct super_block *sb, fs64 n)
55057 {
55058 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55059@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
55060 return (__force fs64)cpu_to_be64(n);
55061 }
55062
55063-static inline u32
55064+static inline u32 __intentional_overflow(-1)
55065 fs32_to_cpu(const struct super_block *sb, fs32 n)
55066 {
55067 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55068@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
55069 return (__force fs32)cpu_to_be32(n);
55070 }
55071
55072-static inline u16
55073+static inline u16 __intentional_overflow(-1)
55074 fs16_to_cpu(const struct super_block *sb, fs16 n)
55075 {
55076 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55077diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
55078index ca0ba15..0fa3257 100644
55079--- a/fs/binfmt_aout.c
55080+++ b/fs/binfmt_aout.c
55081@@ -16,6 +16,7 @@
55082 #include <linux/string.h>
55083 #include <linux/fs.h>
55084 #include <linux/file.h>
55085+#include <linux/security.h>
55086 #include <linux/stat.h>
55087 #include <linux/fcntl.h>
55088 #include <linux/ptrace.h>
55089@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
55090 #endif
55091 # define START_STACK(u) ((void __user *)u.start_stack)
55092
55093+ memset(&dump, 0, sizeof(dump));
55094+
55095 fs = get_fs();
55096 set_fs(KERNEL_DS);
55097 has_dumped = 1;
55098@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
55099
55100 /* If the size of the dump file exceeds the rlimit, then see what would happen
55101 if we wrote the stack, but not the data area. */
55102+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
55103 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
55104 dump.u_dsize = 0;
55105
55106 /* Make sure we have enough room to write the stack and data areas. */
55107+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
55108 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
55109 dump.u_ssize = 0;
55110
55111@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
55112 rlim = rlimit(RLIMIT_DATA);
55113 if (rlim >= RLIM_INFINITY)
55114 rlim = ~0;
55115+
55116+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
55117 if (ex.a_data + ex.a_bss > rlim)
55118 return -ENOMEM;
55119
55120@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
55121
55122 install_exec_creds(bprm);
55123
55124+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55125+ current->mm->pax_flags = 0UL;
55126+#endif
55127+
55128+#ifdef CONFIG_PAX_PAGEEXEC
55129+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
55130+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
55131+
55132+#ifdef CONFIG_PAX_EMUTRAMP
55133+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
55134+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
55135+#endif
55136+
55137+#ifdef CONFIG_PAX_MPROTECT
55138+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
55139+ current->mm->pax_flags |= MF_PAX_MPROTECT;
55140+#endif
55141+
55142+ }
55143+#endif
55144+
55145 if (N_MAGIC(ex) == OMAGIC) {
55146 unsigned long text_addr, map_size;
55147 loff_t pos;
55148@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
55149 }
55150
55151 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
55152- PROT_READ | PROT_WRITE | PROT_EXEC,
55153+ PROT_READ | PROT_WRITE,
55154 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
55155 fd_offset + ex.a_text);
55156 if (error != N_DATADDR(ex)) {
55157diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
55158index 571a423..dbb9c6c 100644
55159--- a/fs/binfmt_elf.c
55160+++ b/fs/binfmt_elf.c
55161@@ -34,6 +34,7 @@
55162 #include <linux/utsname.h>
55163 #include <linux/coredump.h>
55164 #include <linux/sched.h>
55165+#include <linux/xattr.h>
55166 #include <asm/uaccess.h>
55167 #include <asm/param.h>
55168 #include <asm/page.h>
55169@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
55170 #define elf_core_dump NULL
55171 #endif
55172
55173+#ifdef CONFIG_PAX_MPROTECT
55174+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
55175+#endif
55176+
55177+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55178+static void elf_handle_mmap(struct file *file);
55179+#endif
55180+
55181 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
55182 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
55183 #else
55184@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
55185 .load_binary = load_elf_binary,
55186 .load_shlib = load_elf_library,
55187 .core_dump = elf_core_dump,
55188+
55189+#ifdef CONFIG_PAX_MPROTECT
55190+ .handle_mprotect= elf_handle_mprotect,
55191+#endif
55192+
55193+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55194+ .handle_mmap = elf_handle_mmap,
55195+#endif
55196+
55197 .min_coredump = ELF_EXEC_PAGESIZE,
55198 };
55199
55200@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
55201
55202 static int set_brk(unsigned long start, unsigned long end)
55203 {
55204+ unsigned long e = end;
55205+
55206 start = ELF_PAGEALIGN(start);
55207 end = ELF_PAGEALIGN(end);
55208 if (end > start) {
55209@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
55210 if (BAD_ADDR(addr))
55211 return addr;
55212 }
55213- current->mm->start_brk = current->mm->brk = end;
55214+ current->mm->start_brk = current->mm->brk = e;
55215 return 0;
55216 }
55217
55218@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
55219 elf_addr_t __user *u_rand_bytes;
55220 const char *k_platform = ELF_PLATFORM;
55221 const char *k_base_platform = ELF_BASE_PLATFORM;
55222- unsigned char k_rand_bytes[16];
55223+ u32 k_rand_bytes[4];
55224 int items;
55225 elf_addr_t *elf_info;
55226 int ei_index = 0;
55227 const struct cred *cred = current_cred();
55228 struct vm_area_struct *vma;
55229+ unsigned long saved_auxv[AT_VECTOR_SIZE];
55230
55231 /*
55232 * In some cases (e.g. Hyper-Threading), we want to avoid L1
55233@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
55234 * Generate 16 random bytes for userspace PRNG seeding.
55235 */
55236 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
55237- u_rand_bytes = (elf_addr_t __user *)
55238- STACK_ALLOC(p, sizeof(k_rand_bytes));
55239+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
55240+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
55241+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
55242+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
55243+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
55244+ u_rand_bytes = (elf_addr_t __user *) p;
55245 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
55246 return -EFAULT;
55247
55248@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
55249 return -EFAULT;
55250 current->mm->env_end = p;
55251
55252+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
55253+
55254 /* Put the elf_info on the stack in the right place. */
55255 sp = (elf_addr_t __user *)envp + 1;
55256- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
55257+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
55258 return -EFAULT;
55259 return 0;
55260 }
55261@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
55262 an ELF header */
55263
55264 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55265- struct file *interpreter, unsigned long *interp_map_addr,
55266- unsigned long no_base)
55267+ struct file *interpreter, unsigned long no_base)
55268 {
55269 struct elf_phdr *elf_phdata;
55270 struct elf_phdr *eppnt;
55271- unsigned long load_addr = 0;
55272+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
55273 int load_addr_set = 0;
55274 unsigned long last_bss = 0, elf_bss = 0;
55275- unsigned long error = ~0UL;
55276+ unsigned long error = -EINVAL;
55277 unsigned long total_size;
55278 int retval, i, size;
55279
55280@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55281 goto out_close;
55282 }
55283
55284+#ifdef CONFIG_PAX_SEGMEXEC
55285+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
55286+ pax_task_size = SEGMEXEC_TASK_SIZE;
55287+#endif
55288+
55289 eppnt = elf_phdata;
55290 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
55291 if (eppnt->p_type == PT_LOAD) {
55292@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55293 map_addr = elf_map(interpreter, load_addr + vaddr,
55294 eppnt, elf_prot, elf_type, total_size);
55295 total_size = 0;
55296- if (!*interp_map_addr)
55297- *interp_map_addr = map_addr;
55298 error = map_addr;
55299 if (BAD_ADDR(map_addr))
55300 goto out_close;
55301@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55302 k = load_addr + eppnt->p_vaddr;
55303 if (BAD_ADDR(k) ||
55304 eppnt->p_filesz > eppnt->p_memsz ||
55305- eppnt->p_memsz > TASK_SIZE ||
55306- TASK_SIZE - eppnt->p_memsz < k) {
55307+ eppnt->p_memsz > pax_task_size ||
55308+ pax_task_size - eppnt->p_memsz < k) {
55309 error = -ENOMEM;
55310 goto out_close;
55311 }
55312@@ -525,9 +554,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55313 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
55314
55315 /* Map the last of the bss segment */
55316- error = vm_brk(elf_bss, last_bss - elf_bss);
55317- if (BAD_ADDR(error))
55318- goto out_close;
55319+ if (last_bss > elf_bss) {
55320+ error = vm_brk(elf_bss, last_bss - elf_bss);
55321+ if (BAD_ADDR(error))
55322+ goto out_close;
55323+ }
55324 }
55325
55326 error = load_addr;
55327@@ -538,6 +569,336 @@ out:
55328 return error;
55329 }
55330
55331+#ifdef CONFIG_PAX_PT_PAX_FLAGS
55332+#ifdef CONFIG_PAX_SOFTMODE
55333+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
55334+{
55335+ unsigned long pax_flags = 0UL;
55336+
55337+#ifdef CONFIG_PAX_PAGEEXEC
55338+ if (elf_phdata->p_flags & PF_PAGEEXEC)
55339+ pax_flags |= MF_PAX_PAGEEXEC;
55340+#endif
55341+
55342+#ifdef CONFIG_PAX_SEGMEXEC
55343+ if (elf_phdata->p_flags & PF_SEGMEXEC)
55344+ pax_flags |= MF_PAX_SEGMEXEC;
55345+#endif
55346+
55347+#ifdef CONFIG_PAX_EMUTRAMP
55348+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
55349+ pax_flags |= MF_PAX_EMUTRAMP;
55350+#endif
55351+
55352+#ifdef CONFIG_PAX_MPROTECT
55353+ if (elf_phdata->p_flags & PF_MPROTECT)
55354+ pax_flags |= MF_PAX_MPROTECT;
55355+#endif
55356+
55357+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
55358+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
55359+ pax_flags |= MF_PAX_RANDMMAP;
55360+#endif
55361+
55362+ return pax_flags;
55363+}
55364+#endif
55365+
55366+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
55367+{
55368+ unsigned long pax_flags = 0UL;
55369+
55370+#ifdef CONFIG_PAX_PAGEEXEC
55371+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
55372+ pax_flags |= MF_PAX_PAGEEXEC;
55373+#endif
55374+
55375+#ifdef CONFIG_PAX_SEGMEXEC
55376+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
55377+ pax_flags |= MF_PAX_SEGMEXEC;
55378+#endif
55379+
55380+#ifdef CONFIG_PAX_EMUTRAMP
55381+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
55382+ pax_flags |= MF_PAX_EMUTRAMP;
55383+#endif
55384+
55385+#ifdef CONFIG_PAX_MPROTECT
55386+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
55387+ pax_flags |= MF_PAX_MPROTECT;
55388+#endif
55389+
55390+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
55391+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
55392+ pax_flags |= MF_PAX_RANDMMAP;
55393+#endif
55394+
55395+ return pax_flags;
55396+}
55397+#endif
55398+
55399+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
55400+#ifdef CONFIG_PAX_SOFTMODE
55401+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
55402+{
55403+ unsigned long pax_flags = 0UL;
55404+
55405+#ifdef CONFIG_PAX_PAGEEXEC
55406+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
55407+ pax_flags |= MF_PAX_PAGEEXEC;
55408+#endif
55409+
55410+#ifdef CONFIG_PAX_SEGMEXEC
55411+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
55412+ pax_flags |= MF_PAX_SEGMEXEC;
55413+#endif
55414+
55415+#ifdef CONFIG_PAX_EMUTRAMP
55416+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
55417+ pax_flags |= MF_PAX_EMUTRAMP;
55418+#endif
55419+
55420+#ifdef CONFIG_PAX_MPROTECT
55421+ if (pax_flags_softmode & MF_PAX_MPROTECT)
55422+ pax_flags |= MF_PAX_MPROTECT;
55423+#endif
55424+
55425+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
55426+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
55427+ pax_flags |= MF_PAX_RANDMMAP;
55428+#endif
55429+
55430+ return pax_flags;
55431+}
55432+#endif
55433+
55434+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
55435+{
55436+ unsigned long pax_flags = 0UL;
55437+
55438+#ifdef CONFIG_PAX_PAGEEXEC
55439+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
55440+ pax_flags |= MF_PAX_PAGEEXEC;
55441+#endif
55442+
55443+#ifdef CONFIG_PAX_SEGMEXEC
55444+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
55445+ pax_flags |= MF_PAX_SEGMEXEC;
55446+#endif
55447+
55448+#ifdef CONFIG_PAX_EMUTRAMP
55449+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
55450+ pax_flags |= MF_PAX_EMUTRAMP;
55451+#endif
55452+
55453+#ifdef CONFIG_PAX_MPROTECT
55454+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
55455+ pax_flags |= MF_PAX_MPROTECT;
55456+#endif
55457+
55458+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
55459+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
55460+ pax_flags |= MF_PAX_RANDMMAP;
55461+#endif
55462+
55463+ return pax_flags;
55464+}
55465+#endif
55466+
55467+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55468+static unsigned long pax_parse_defaults(void)
55469+{
55470+ unsigned long pax_flags = 0UL;
55471+
55472+#ifdef CONFIG_PAX_SOFTMODE
55473+ if (pax_softmode)
55474+ return pax_flags;
55475+#endif
55476+
55477+#ifdef CONFIG_PAX_PAGEEXEC
55478+ pax_flags |= MF_PAX_PAGEEXEC;
55479+#endif
55480+
55481+#ifdef CONFIG_PAX_SEGMEXEC
55482+ pax_flags |= MF_PAX_SEGMEXEC;
55483+#endif
55484+
55485+#ifdef CONFIG_PAX_MPROTECT
55486+ pax_flags |= MF_PAX_MPROTECT;
55487+#endif
55488+
55489+#ifdef CONFIG_PAX_RANDMMAP
55490+ if (randomize_va_space)
55491+ pax_flags |= MF_PAX_RANDMMAP;
55492+#endif
55493+
55494+ return pax_flags;
55495+}
55496+
55497+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
55498+{
55499+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
55500+
55501+#ifdef CONFIG_PAX_EI_PAX
55502+
55503+#ifdef CONFIG_PAX_SOFTMODE
55504+ if (pax_softmode)
55505+ return pax_flags;
55506+#endif
55507+
55508+ pax_flags = 0UL;
55509+
55510+#ifdef CONFIG_PAX_PAGEEXEC
55511+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
55512+ pax_flags |= MF_PAX_PAGEEXEC;
55513+#endif
55514+
55515+#ifdef CONFIG_PAX_SEGMEXEC
55516+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
55517+ pax_flags |= MF_PAX_SEGMEXEC;
55518+#endif
55519+
55520+#ifdef CONFIG_PAX_EMUTRAMP
55521+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
55522+ pax_flags |= MF_PAX_EMUTRAMP;
55523+#endif
55524+
55525+#ifdef CONFIG_PAX_MPROTECT
55526+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
55527+ pax_flags |= MF_PAX_MPROTECT;
55528+#endif
55529+
55530+#ifdef CONFIG_PAX_ASLR
55531+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
55532+ pax_flags |= MF_PAX_RANDMMAP;
55533+#endif
55534+
55535+#endif
55536+
55537+ return pax_flags;
55538+
55539+}
55540+
55541+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
55542+{
55543+
55544+#ifdef CONFIG_PAX_PT_PAX_FLAGS
55545+ unsigned long i;
55546+
55547+ for (i = 0UL; i < elf_ex->e_phnum; i++)
55548+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
55549+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
55550+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
55551+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
55552+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
55553+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
55554+ return PAX_PARSE_FLAGS_FALLBACK;
55555+
55556+#ifdef CONFIG_PAX_SOFTMODE
55557+ if (pax_softmode)
55558+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
55559+ else
55560+#endif
55561+
55562+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
55563+ break;
55564+ }
55565+#endif
55566+
55567+ return PAX_PARSE_FLAGS_FALLBACK;
55568+}
55569+
55570+static unsigned long pax_parse_xattr_pax(struct file * const file)
55571+{
55572+
55573+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
55574+ ssize_t xattr_size, i;
55575+ unsigned char xattr_value[sizeof("pemrs") - 1];
55576+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
55577+
55578+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
55579+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
55580+ return PAX_PARSE_FLAGS_FALLBACK;
55581+
55582+ for (i = 0; i < xattr_size; i++)
55583+ switch (xattr_value[i]) {
55584+ default:
55585+ return PAX_PARSE_FLAGS_FALLBACK;
55586+
55587+#define parse_flag(option1, option2, flag) \
55588+ case option1: \
55589+ if (pax_flags_hardmode & MF_PAX_##flag) \
55590+ return PAX_PARSE_FLAGS_FALLBACK;\
55591+ pax_flags_hardmode |= MF_PAX_##flag; \
55592+ break; \
55593+ case option2: \
55594+ if (pax_flags_softmode & MF_PAX_##flag) \
55595+ return PAX_PARSE_FLAGS_FALLBACK;\
55596+ pax_flags_softmode |= MF_PAX_##flag; \
55597+ break;
55598+
55599+ parse_flag('p', 'P', PAGEEXEC);
55600+ parse_flag('e', 'E', EMUTRAMP);
55601+ parse_flag('m', 'M', MPROTECT);
55602+ parse_flag('r', 'R', RANDMMAP);
55603+ parse_flag('s', 'S', SEGMEXEC);
55604+
55605+#undef parse_flag
55606+ }
55607+
55608+ if (pax_flags_hardmode & pax_flags_softmode)
55609+ return PAX_PARSE_FLAGS_FALLBACK;
55610+
55611+#ifdef CONFIG_PAX_SOFTMODE
55612+ if (pax_softmode)
55613+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
55614+ else
55615+#endif
55616+
55617+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
55618+#else
55619+ return PAX_PARSE_FLAGS_FALLBACK;
55620+#endif
55621+
55622+}
55623+
55624+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
55625+{
55626+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
55627+
55628+ pax_flags = pax_parse_defaults();
55629+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
55630+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
55631+ xattr_pax_flags = pax_parse_xattr_pax(file);
55632+
55633+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
55634+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
55635+ pt_pax_flags != xattr_pax_flags)
55636+ return -EINVAL;
55637+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
55638+ pax_flags = xattr_pax_flags;
55639+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
55640+ pax_flags = pt_pax_flags;
55641+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
55642+ pax_flags = ei_pax_flags;
55643+
55644+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
55645+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
55646+ if ((__supported_pte_mask & _PAGE_NX))
55647+ pax_flags &= ~MF_PAX_SEGMEXEC;
55648+ else
55649+ pax_flags &= ~MF_PAX_PAGEEXEC;
55650+ }
55651+#endif
55652+
55653+ if (0 > pax_check_flags(&pax_flags))
55654+ return -EINVAL;
55655+
55656+ current->mm->pax_flags = pax_flags;
55657+ return 0;
55658+}
55659+#endif
55660+
55661 /*
55662 * These are the functions used to load ELF style executables and shared
55663 * libraries. There is no binary dependent code anywhere else.
55664@@ -554,6 +915,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
55665 {
55666 unsigned int random_variable = 0;
55667
55668+#ifdef CONFIG_PAX_RANDUSTACK
55669+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
55670+ return stack_top - current->mm->delta_stack;
55671+#endif
55672+
55673 if ((current->flags & PF_RANDOMIZE) &&
55674 !(current->personality & ADDR_NO_RANDOMIZE)) {
55675 random_variable = get_random_int() & STACK_RND_MASK;
55676@@ -572,7 +938,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
55677 unsigned long load_addr = 0, load_bias = 0;
55678 int load_addr_set = 0;
55679 char * elf_interpreter = NULL;
55680- unsigned long error;
55681+ unsigned long error = 0;
55682 struct elf_phdr *elf_ppnt, *elf_phdata;
55683 unsigned long elf_bss, elf_brk;
55684 int retval, i;
55685@@ -582,12 +948,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
55686 unsigned long start_code, end_code, start_data, end_data;
55687 unsigned long reloc_func_desc __maybe_unused = 0;
55688 int executable_stack = EXSTACK_DEFAULT;
55689- unsigned long def_flags = 0;
55690 struct pt_regs *regs = current_pt_regs();
55691 struct {
55692 struct elfhdr elf_ex;
55693 struct elfhdr interp_elf_ex;
55694 } *loc;
55695+ unsigned long pax_task_size;
55696
55697 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
55698 if (!loc) {
55699@@ -723,11 +1089,82 @@ static int load_elf_binary(struct linux_binprm *bprm)
55700 goto out_free_dentry;
55701
55702 /* OK, This is the point of no return */
55703- current->mm->def_flags = def_flags;
55704+ current->mm->def_flags = 0;
55705
55706 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
55707 may depend on the personality. */
55708 SET_PERSONALITY(loc->elf_ex);
55709+
55710+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55711+ current->mm->pax_flags = 0UL;
55712+#endif
55713+
55714+#ifdef CONFIG_PAX_DLRESOLVE
55715+ current->mm->call_dl_resolve = 0UL;
55716+#endif
55717+
55718+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
55719+ current->mm->call_syscall = 0UL;
55720+#endif
55721+
55722+#ifdef CONFIG_PAX_ASLR
55723+ current->mm->delta_mmap = 0UL;
55724+ current->mm->delta_stack = 0UL;
55725+#endif
55726+
55727+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55728+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
55729+ send_sig(SIGKILL, current, 0);
55730+ goto out_free_dentry;
55731+ }
55732+#endif
55733+
55734+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
55735+ pax_set_initial_flags(bprm);
55736+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
55737+ if (pax_set_initial_flags_func)
55738+ (pax_set_initial_flags_func)(bprm);
55739+#endif
55740+
55741+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
55742+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
55743+ current->mm->context.user_cs_limit = PAGE_SIZE;
55744+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
55745+ }
55746+#endif
55747+
55748+#ifdef CONFIG_PAX_SEGMEXEC
55749+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
55750+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
55751+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
55752+ pax_task_size = SEGMEXEC_TASK_SIZE;
55753+ current->mm->def_flags |= VM_NOHUGEPAGE;
55754+ } else
55755+#endif
55756+
55757+ pax_task_size = TASK_SIZE;
55758+
55759+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
55760+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
55761+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
55762+ put_cpu();
55763+ }
55764+#endif
55765+
55766+#ifdef CONFIG_PAX_ASLR
55767+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
55768+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
55769+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
55770+ }
55771+#endif
55772+
55773+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
55774+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
55775+ executable_stack = EXSTACK_DISABLE_X;
55776+ current->personality &= ~READ_IMPLIES_EXEC;
55777+ } else
55778+#endif
55779+
55780 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
55781 current->personality |= READ_IMPLIES_EXEC;
55782
55783@@ -817,6 +1254,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
55784 #else
55785 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
55786 #endif
55787+
55788+#ifdef CONFIG_PAX_RANDMMAP
55789+ /* PaX: randomize base address at the default exe base if requested */
55790+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
55791+#ifdef CONFIG_SPARC64
55792+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
55793+#else
55794+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
55795+#endif
55796+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
55797+ elf_flags |= MAP_FIXED;
55798+ }
55799+#endif
55800+
55801 }
55802
55803 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
55804@@ -849,9 +1300,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
55805 * allowed task size. Note that p_filesz must always be
55806 * <= p_memsz so it is only necessary to check p_memsz.
55807 */
55808- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
55809- elf_ppnt->p_memsz > TASK_SIZE ||
55810- TASK_SIZE - elf_ppnt->p_memsz < k) {
55811+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
55812+ elf_ppnt->p_memsz > pax_task_size ||
55813+ pax_task_size - elf_ppnt->p_memsz < k) {
55814 /* set_brk can never work. Avoid overflows. */
55815 send_sig(SIGKILL, current, 0);
55816 retval = -EINVAL;
55817@@ -890,17 +1341,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
55818 goto out_free_dentry;
55819 }
55820 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
55821- send_sig(SIGSEGV, current, 0);
55822- retval = -EFAULT; /* Nobody gets to see this, but.. */
55823- goto out_free_dentry;
55824+ /*
55825+ * This bss-zeroing can fail if the ELF
55826+ * file specifies odd protections. So
55827+ * we don't check the return value
55828+ */
55829 }
55830
55831+#ifdef CONFIG_PAX_RANDMMAP
55832+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
55833+ unsigned long start, size, flags;
55834+ vm_flags_t vm_flags;
55835+
55836+ start = ELF_PAGEALIGN(elf_brk);
55837+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
55838+ flags = MAP_FIXED | MAP_PRIVATE;
55839+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
55840+
55841+ down_write(&current->mm->mmap_sem);
55842+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
55843+ retval = -ENOMEM;
55844+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
55845+// if (current->personality & ADDR_NO_RANDOMIZE)
55846+// vm_flags |= VM_READ | VM_MAYREAD;
55847+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
55848+ retval = IS_ERR_VALUE(start) ? start : 0;
55849+ }
55850+ up_write(&current->mm->mmap_sem);
55851+ if (retval == 0)
55852+ retval = set_brk(start + size, start + size + PAGE_SIZE);
55853+ if (retval < 0) {
55854+ send_sig(SIGKILL, current, 0);
55855+ goto out_free_dentry;
55856+ }
55857+ }
55858+#endif
55859+
55860 if (elf_interpreter) {
55861- unsigned long interp_map_addr = 0;
55862-
55863 elf_entry = load_elf_interp(&loc->interp_elf_ex,
55864 interpreter,
55865- &interp_map_addr,
55866 load_bias);
55867 if (!IS_ERR((void *)elf_entry)) {
55868 /*
55869@@ -1122,7 +1601,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
55870 * Decide what to dump of a segment, part, all or none.
55871 */
55872 static unsigned long vma_dump_size(struct vm_area_struct *vma,
55873- unsigned long mm_flags)
55874+ unsigned long mm_flags, long signr)
55875 {
55876 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
55877
55878@@ -1160,7 +1639,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
55879 if (vma->vm_file == NULL)
55880 return 0;
55881
55882- if (FILTER(MAPPED_PRIVATE))
55883+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
55884 goto whole;
55885
55886 /*
55887@@ -1367,9 +1846,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
55888 {
55889 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
55890 int i = 0;
55891- do
55892+ do {
55893 i += 2;
55894- while (auxv[i - 2] != AT_NULL);
55895+ } while (auxv[i - 2] != AT_NULL);
55896 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
55897 }
55898
55899@@ -1378,7 +1857,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
55900 {
55901 mm_segment_t old_fs = get_fs();
55902 set_fs(KERNEL_DS);
55903- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
55904+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
55905 set_fs(old_fs);
55906 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
55907 }
55908@@ -2002,14 +2481,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
55909 }
55910
55911 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
55912- unsigned long mm_flags)
55913+ struct coredump_params *cprm)
55914 {
55915 struct vm_area_struct *vma;
55916 size_t size = 0;
55917
55918 for (vma = first_vma(current, gate_vma); vma != NULL;
55919 vma = next_vma(vma, gate_vma))
55920- size += vma_dump_size(vma, mm_flags);
55921+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
55922 return size;
55923 }
55924
55925@@ -2100,7 +2579,7 @@ static int elf_core_dump(struct coredump_params *cprm)
55926
55927 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
55928
55929- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
55930+ offset += elf_core_vma_data_size(gate_vma, cprm);
55931 offset += elf_core_extra_data_size();
55932 e_shoff = offset;
55933
55934@@ -2128,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
55935 phdr.p_offset = offset;
55936 phdr.p_vaddr = vma->vm_start;
55937 phdr.p_paddr = 0;
55938- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
55939+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
55940 phdr.p_memsz = vma->vm_end - vma->vm_start;
55941 offset += phdr.p_filesz;
55942 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
55943@@ -2161,7 +2640,7 @@ static int elf_core_dump(struct coredump_params *cprm)
55944 unsigned long addr;
55945 unsigned long end;
55946
55947- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
55948+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
55949
55950 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
55951 struct page *page;
55952@@ -2202,6 +2681,167 @@ out:
55953
55954 #endif /* CONFIG_ELF_CORE */
55955
55956+#ifdef CONFIG_PAX_MPROTECT
55957+/* PaX: non-PIC ELF libraries need relocations on their executable segments
55958+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
55959+ * we'll remove VM_MAYWRITE for good on RELRO segments.
55960+ *
55961+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
55962+ * basis because we want to allow the common case and not the special ones.
55963+ */
55964+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
55965+{
55966+ struct elfhdr elf_h;
55967+ struct elf_phdr elf_p;
55968+ unsigned long i;
55969+ unsigned long oldflags;
55970+ bool is_textrel_rw, is_textrel_rx, is_relro;
55971+
55972+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
55973+ return;
55974+
55975+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
55976+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
55977+
55978+#ifdef CONFIG_PAX_ELFRELOCS
55979+ /* possible TEXTREL */
55980+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
55981+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
55982+#else
55983+ is_textrel_rw = false;
55984+ is_textrel_rx = false;
55985+#endif
55986+
55987+ /* possible RELRO */
55988+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
55989+
55990+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
55991+ return;
55992+
55993+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
55994+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
55995+
55996+#ifdef CONFIG_PAX_ETEXECRELOCS
55997+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
55998+#else
55999+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
56000+#endif
56001+
56002+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
56003+ !elf_check_arch(&elf_h) ||
56004+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
56005+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
56006+ return;
56007+
56008+ for (i = 0UL; i < elf_h.e_phnum; i++) {
56009+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
56010+ return;
56011+ switch (elf_p.p_type) {
56012+ case PT_DYNAMIC:
56013+ if (!is_textrel_rw && !is_textrel_rx)
56014+ continue;
56015+ i = 0UL;
56016+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
56017+ elf_dyn dyn;
56018+
56019+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
56020+ break;
56021+ if (dyn.d_tag == DT_NULL)
56022+ break;
56023+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
56024+ gr_log_textrel(vma);
56025+ if (is_textrel_rw)
56026+ vma->vm_flags |= VM_MAYWRITE;
56027+ else
56028+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
56029+ vma->vm_flags &= ~VM_MAYWRITE;
56030+ break;
56031+ }
56032+ i++;
56033+ }
56034+ is_textrel_rw = false;
56035+ is_textrel_rx = false;
56036+ continue;
56037+
56038+ case PT_GNU_RELRO:
56039+ if (!is_relro)
56040+ continue;
56041+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
56042+ vma->vm_flags &= ~VM_MAYWRITE;
56043+ is_relro = false;
56044+ continue;
56045+
56046+#ifdef CONFIG_PAX_PT_PAX_FLAGS
56047+ case PT_PAX_FLAGS: {
56048+ const char *msg_mprotect = "", *msg_emutramp = "";
56049+ char *buffer_lib, *buffer_exe;
56050+
56051+ if (elf_p.p_flags & PF_NOMPROTECT)
56052+ msg_mprotect = "MPROTECT disabled";
56053+
56054+#ifdef CONFIG_PAX_EMUTRAMP
56055+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
56056+ msg_emutramp = "EMUTRAMP enabled";
56057+#endif
56058+
56059+ if (!msg_mprotect[0] && !msg_emutramp[0])
56060+ continue;
56061+
56062+ if (!printk_ratelimit())
56063+ continue;
56064+
56065+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
56066+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
56067+ if (buffer_lib && buffer_exe) {
56068+ char *path_lib, *path_exe;
56069+
56070+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
56071+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
56072+
56073+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
56074+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
56075+
56076+ }
56077+ free_page((unsigned long)buffer_exe);
56078+ free_page((unsigned long)buffer_lib);
56079+ continue;
56080+ }
56081+#endif
56082+
56083+ }
56084+ }
56085+}
56086+#endif
56087+
56088+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56089+
56090+extern int grsec_enable_log_rwxmaps;
56091+
56092+static void elf_handle_mmap(struct file *file)
56093+{
56094+ struct elfhdr elf_h;
56095+ struct elf_phdr elf_p;
56096+ unsigned long i;
56097+
56098+ if (!grsec_enable_log_rwxmaps)
56099+ return;
56100+
56101+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
56102+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
56103+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
56104+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
56105+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
56106+ return;
56107+
56108+ for (i = 0UL; i < elf_h.e_phnum; i++) {
56109+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
56110+ return;
56111+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
56112+ gr_log_ptgnustack(file);
56113+ }
56114+}
56115+#endif
56116+
56117 static int __init init_elf_binfmt(void)
56118 {
56119 register_binfmt(&elf_format);
56120diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
56121index d50bbe5..af3b649 100644
56122--- a/fs/binfmt_flat.c
56123+++ b/fs/binfmt_flat.c
56124@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
56125 realdatastart = (unsigned long) -ENOMEM;
56126 printk("Unable to allocate RAM for process data, errno %d\n",
56127 (int)-realdatastart);
56128+ down_write(&current->mm->mmap_sem);
56129 vm_munmap(textpos, text_len);
56130+ up_write(&current->mm->mmap_sem);
56131 ret = realdatastart;
56132 goto err;
56133 }
56134@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
56135 }
56136 if (IS_ERR_VALUE(result)) {
56137 printk("Unable to read data+bss, errno %d\n", (int)-result);
56138+ down_write(&current->mm->mmap_sem);
56139 vm_munmap(textpos, text_len);
56140 vm_munmap(realdatastart, len);
56141+ up_write(&current->mm->mmap_sem);
56142 ret = result;
56143 goto err;
56144 }
56145@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
56146 }
56147 if (IS_ERR_VALUE(result)) {
56148 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
56149+ down_write(&current->mm->mmap_sem);
56150 vm_munmap(textpos, text_len + data_len + extra +
56151 MAX_SHARED_LIBS * sizeof(unsigned long));
56152+ up_write(&current->mm->mmap_sem);
56153 ret = result;
56154 goto err;
56155 }
56156diff --git a/fs/bio.c b/fs/bio.c
56157index 33d79a4..c3c9893 100644
56158--- a/fs/bio.c
56159+++ b/fs/bio.c
56160@@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
56161 /*
56162 * Overflow, abort
56163 */
56164- if (end < start)
56165+ if (end < start || end - start > INT_MAX - nr_pages)
56166 return ERR_PTR(-EINVAL);
56167
56168 nr_pages += end - start;
56169@@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
56170 /*
56171 * Overflow, abort
56172 */
56173- if (end < start)
56174+ if (end < start || end - start > INT_MAX - nr_pages)
56175 return ERR_PTR(-EINVAL);
56176
56177 nr_pages += end - start;
56178@@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
56179 const int read = bio_data_dir(bio) == READ;
56180 struct bio_map_data *bmd = bio->bi_private;
56181 int i;
56182- char *p = bmd->sgvecs[0].iov_base;
56183+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
56184
56185 bio_for_each_segment_all(bvec, bio, i) {
56186 char *addr = page_address(bvec->bv_page);
56187diff --git a/fs/block_dev.c b/fs/block_dev.c
56188index 1e86823..8e34695 100644
56189--- a/fs/block_dev.c
56190+++ b/fs/block_dev.c
56191@@ -637,7 +637,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
56192 else if (bdev->bd_contains == bdev)
56193 return true; /* is a whole device which isn't held */
56194
56195- else if (whole->bd_holder == bd_may_claim)
56196+ else if (whole->bd_holder == (void *)bd_may_claim)
56197 return true; /* is a partition of a device that is being partitioned */
56198 else if (whole->bd_holder != NULL)
56199 return false; /* is a partition of a held device */
56200diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
56201index 316136b..e7a3a50 100644
56202--- a/fs/btrfs/ctree.c
56203+++ b/fs/btrfs/ctree.c
56204@@ -1028,9 +1028,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
56205 free_extent_buffer(buf);
56206 add_root_to_dirty_list(root);
56207 } else {
56208- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
56209- parent_start = parent->start;
56210- else
56211+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
56212+ if (parent)
56213+ parent_start = parent->start;
56214+ else
56215+ parent_start = 0;
56216+ } else
56217 parent_start = 0;
56218
56219 WARN_ON(trans->transid != btrfs_header_generation(parent));
56220diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
56221index 8d292fb..bc205c2 100644
56222--- a/fs/btrfs/delayed-inode.c
56223+++ b/fs/btrfs/delayed-inode.c
56224@@ -459,7 +459,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
56225
56226 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
56227 {
56228- int seq = atomic_inc_return(&delayed_root->items_seq);
56229+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
56230 if ((atomic_dec_return(&delayed_root->items) <
56231 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
56232 waitqueue_active(&delayed_root->wait))
56233@@ -1379,7 +1379,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
56234 static int refs_newer(struct btrfs_delayed_root *delayed_root,
56235 int seq, int count)
56236 {
56237- int val = atomic_read(&delayed_root->items_seq);
56238+ int val = atomic_read_unchecked(&delayed_root->items_seq);
56239
56240 if (val < seq || val >= seq + count)
56241 return 1;
56242@@ -1396,7 +1396,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
56243 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
56244 return;
56245
56246- seq = atomic_read(&delayed_root->items_seq);
56247+ seq = atomic_read_unchecked(&delayed_root->items_seq);
56248
56249 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
56250 int ret;
56251diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
56252index a4b38f9..f86a509 100644
56253--- a/fs/btrfs/delayed-inode.h
56254+++ b/fs/btrfs/delayed-inode.h
56255@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
56256 */
56257 struct list_head prepare_list;
56258 atomic_t items; /* for delayed items */
56259- atomic_t items_seq; /* for delayed items */
56260+ atomic_unchecked_t items_seq; /* for delayed items */
56261 int nodes; /* for delayed nodes */
56262 wait_queue_head_t wait;
56263 };
56264@@ -87,7 +87,7 @@ static inline void btrfs_init_delayed_root(
56265 struct btrfs_delayed_root *delayed_root)
56266 {
56267 atomic_set(&delayed_root->items, 0);
56268- atomic_set(&delayed_root->items_seq, 0);
56269+ atomic_set_unchecked(&delayed_root->items_seq, 0);
56270 delayed_root->nodes = 0;
56271 spin_lock_init(&delayed_root->lock);
56272 init_waitqueue_head(&delayed_root->wait);
56273diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
56274index 9f831bb..14afde5 100644
56275--- a/fs/btrfs/ioctl.c
56276+++ b/fs/btrfs/ioctl.c
56277@@ -3457,9 +3457,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
56278 for (i = 0; i < num_types; i++) {
56279 struct btrfs_space_info *tmp;
56280
56281+ /* Don't copy in more than we allocated */
56282 if (!slot_count)
56283 break;
56284
56285+ slot_count--;
56286+
56287 info = NULL;
56288 rcu_read_lock();
56289 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
56290@@ -3481,10 +3484,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
56291 memcpy(dest, &space, sizeof(space));
56292 dest++;
56293 space_args.total_spaces++;
56294- slot_count--;
56295 }
56296- if (!slot_count)
56297- break;
56298 }
56299 up_read(&info->groups_sem);
56300 }
56301diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
56302index d71a11d..384e2c4 100644
56303--- a/fs/btrfs/super.c
56304+++ b/fs/btrfs/super.c
56305@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
56306 function, line, errstr);
56307 return;
56308 }
56309- ACCESS_ONCE(trans->transaction->aborted) = errno;
56310+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
56311 /* Wake up anybody who may be waiting on this transaction */
56312 wake_up(&root->fs_info->transaction_wait);
56313 wake_up(&root->fs_info->transaction_blocked_wait);
56314diff --git a/fs/buffer.c b/fs/buffer.c
56315index 6024877..7bd000a 100644
56316--- a/fs/buffer.c
56317+++ b/fs/buffer.c
56318@@ -3426,7 +3426,7 @@ void __init buffer_init(void)
56319 bh_cachep = kmem_cache_create("buffer_head",
56320 sizeof(struct buffer_head), 0,
56321 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
56322- SLAB_MEM_SPREAD),
56323+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
56324 NULL);
56325
56326 /*
56327diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
56328index 622f469..e8d2d55 100644
56329--- a/fs/cachefiles/bind.c
56330+++ b/fs/cachefiles/bind.c
56331@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
56332 args);
56333
56334 /* start by checking things over */
56335- ASSERT(cache->fstop_percent >= 0 &&
56336- cache->fstop_percent < cache->fcull_percent &&
56337+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
56338 cache->fcull_percent < cache->frun_percent &&
56339 cache->frun_percent < 100);
56340
56341- ASSERT(cache->bstop_percent >= 0 &&
56342- cache->bstop_percent < cache->bcull_percent &&
56343+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
56344 cache->bcull_percent < cache->brun_percent &&
56345 cache->brun_percent < 100);
56346
56347diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
56348index 0a1467b..6a53245 100644
56349--- a/fs/cachefiles/daemon.c
56350+++ b/fs/cachefiles/daemon.c
56351@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
56352 if (n > buflen)
56353 return -EMSGSIZE;
56354
56355- if (copy_to_user(_buffer, buffer, n) != 0)
56356+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
56357 return -EFAULT;
56358
56359 return n;
56360@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
56361 if (test_bit(CACHEFILES_DEAD, &cache->flags))
56362 return -EIO;
56363
56364- if (datalen < 0 || datalen > PAGE_SIZE - 1)
56365+ if (datalen > PAGE_SIZE - 1)
56366 return -EOPNOTSUPP;
56367
56368 /* drag the command string into the kernel so we can parse it */
56369@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
56370 if (args[0] != '%' || args[1] != '\0')
56371 return -EINVAL;
56372
56373- if (fstop < 0 || fstop >= cache->fcull_percent)
56374+ if (fstop >= cache->fcull_percent)
56375 return cachefiles_daemon_range_error(cache, args);
56376
56377 cache->fstop_percent = fstop;
56378@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
56379 if (args[0] != '%' || args[1] != '\0')
56380 return -EINVAL;
56381
56382- if (bstop < 0 || bstop >= cache->bcull_percent)
56383+ if (bstop >= cache->bcull_percent)
56384 return cachefiles_daemon_range_error(cache, args);
56385
56386 cache->bstop_percent = bstop;
56387diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
56388index 5349473..d6c0b93 100644
56389--- a/fs/cachefiles/internal.h
56390+++ b/fs/cachefiles/internal.h
56391@@ -59,7 +59,7 @@ struct cachefiles_cache {
56392 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
56393 struct rb_root active_nodes; /* active nodes (can't be culled) */
56394 rwlock_t active_lock; /* lock for active_nodes */
56395- atomic_t gravecounter; /* graveyard uniquifier */
56396+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
56397 unsigned frun_percent; /* when to stop culling (% files) */
56398 unsigned fcull_percent; /* when to start culling (% files) */
56399 unsigned fstop_percent; /* when to stop allocating (% files) */
56400@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
56401 * proc.c
56402 */
56403 #ifdef CONFIG_CACHEFILES_HISTOGRAM
56404-extern atomic_t cachefiles_lookup_histogram[HZ];
56405-extern atomic_t cachefiles_mkdir_histogram[HZ];
56406-extern atomic_t cachefiles_create_histogram[HZ];
56407+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
56408+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
56409+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
56410
56411 extern int __init cachefiles_proc_init(void);
56412 extern void cachefiles_proc_cleanup(void);
56413 static inline
56414-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
56415+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
56416 {
56417 unsigned long jif = jiffies - start_jif;
56418 if (jif >= HZ)
56419 jif = HZ - 1;
56420- atomic_inc(&histogram[jif]);
56421+ atomic_inc_unchecked(&histogram[jif]);
56422 }
56423
56424 #else
56425diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
56426index ca65f39..48921e3 100644
56427--- a/fs/cachefiles/namei.c
56428+++ b/fs/cachefiles/namei.c
56429@@ -317,7 +317,7 @@ try_again:
56430 /* first step is to make up a grave dentry in the graveyard */
56431 sprintf(nbuffer, "%08x%08x",
56432 (uint32_t) get_seconds(),
56433- (uint32_t) atomic_inc_return(&cache->gravecounter));
56434+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
56435
56436 /* do the multiway lock magic */
56437 trap = lock_rename(cache->graveyard, dir);
56438diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
56439index eccd339..4c1d995 100644
56440--- a/fs/cachefiles/proc.c
56441+++ b/fs/cachefiles/proc.c
56442@@ -14,9 +14,9 @@
56443 #include <linux/seq_file.h>
56444 #include "internal.h"
56445
56446-atomic_t cachefiles_lookup_histogram[HZ];
56447-atomic_t cachefiles_mkdir_histogram[HZ];
56448-atomic_t cachefiles_create_histogram[HZ];
56449+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
56450+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
56451+atomic_unchecked_t cachefiles_create_histogram[HZ];
56452
56453 /*
56454 * display the latency histogram
56455@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
56456 return 0;
56457 default:
56458 index = (unsigned long) v - 3;
56459- x = atomic_read(&cachefiles_lookup_histogram[index]);
56460- y = atomic_read(&cachefiles_mkdir_histogram[index]);
56461- z = atomic_read(&cachefiles_create_histogram[index]);
56462+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
56463+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
56464+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
56465 if (x == 0 && y == 0 && z == 0)
56466 return 0;
56467
56468diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
56469index ebaff36..7e3ea26 100644
56470--- a/fs/cachefiles/rdwr.c
56471+++ b/fs/cachefiles/rdwr.c
56472@@ -950,7 +950,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
56473 old_fs = get_fs();
56474 set_fs(KERNEL_DS);
56475 ret = file->f_op->write(
56476- file, (const void __user *) data, len, &pos);
56477+ file, (const void __force_user *) data, len, &pos);
56478 set_fs(old_fs);
56479 kunmap(page);
56480 file_end_write(file);
56481diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
56482index 2a0bcae..34ec24e 100644
56483--- a/fs/ceph/dir.c
56484+++ b/fs/ceph/dir.c
56485@@ -240,7 +240,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
56486 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
56487 struct ceph_mds_client *mdsc = fsc->mdsc;
56488 unsigned frag = fpos_frag(ctx->pos);
56489- int off = fpos_off(ctx->pos);
56490+ unsigned int off = fpos_off(ctx->pos);
56491 int err;
56492 u32 ftype;
56493 struct ceph_mds_reply_info_parsed *rinfo;
56494diff --git a/fs/ceph/super.c b/fs/ceph/super.c
56495index 6a0951e..03fac6d 100644
56496--- a/fs/ceph/super.c
56497+++ b/fs/ceph/super.c
56498@@ -870,7 +870,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
56499 /*
56500 * construct our own bdi so we can control readahead, etc.
56501 */
56502-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
56503+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
56504
56505 static int ceph_register_bdi(struct super_block *sb,
56506 struct ceph_fs_client *fsc)
56507@@ -887,7 +887,7 @@ static int ceph_register_bdi(struct super_block *sb,
56508 default_backing_dev_info.ra_pages;
56509
56510 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
56511- atomic_long_inc_return(&bdi_seq));
56512+ atomic_long_inc_return_unchecked(&bdi_seq));
56513 if (!err)
56514 sb->s_bdi = &fsc->backing_dev_info;
56515 return err;
56516diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
56517index f3ac415..3d2420c 100644
56518--- a/fs/cifs/cifs_debug.c
56519+++ b/fs/cifs/cifs_debug.c
56520@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
56521
56522 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
56523 #ifdef CONFIG_CIFS_STATS2
56524- atomic_set(&totBufAllocCount, 0);
56525- atomic_set(&totSmBufAllocCount, 0);
56526+ atomic_set_unchecked(&totBufAllocCount, 0);
56527+ atomic_set_unchecked(&totSmBufAllocCount, 0);
56528 #endif /* CONFIG_CIFS_STATS2 */
56529 spin_lock(&cifs_tcp_ses_lock);
56530 list_for_each(tmp1, &cifs_tcp_ses_list) {
56531@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
56532 tcon = list_entry(tmp3,
56533 struct cifs_tcon,
56534 tcon_list);
56535- atomic_set(&tcon->num_smbs_sent, 0);
56536+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
56537 if (server->ops->clear_stats)
56538 server->ops->clear_stats(tcon);
56539 }
56540@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
56541 smBufAllocCount.counter, cifs_min_small);
56542 #ifdef CONFIG_CIFS_STATS2
56543 seq_printf(m, "Total Large %d Small %d Allocations\n",
56544- atomic_read(&totBufAllocCount),
56545- atomic_read(&totSmBufAllocCount));
56546+ atomic_read_unchecked(&totBufAllocCount),
56547+ atomic_read_unchecked(&totSmBufAllocCount));
56548 #endif /* CONFIG_CIFS_STATS2 */
56549
56550 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
56551@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
56552 if (tcon->need_reconnect)
56553 seq_puts(m, "\tDISCONNECTED ");
56554 seq_printf(m, "\nSMBs: %d",
56555- atomic_read(&tcon->num_smbs_sent));
56556+ atomic_read_unchecked(&tcon->num_smbs_sent));
56557 if (server->ops->print_stats)
56558 server->ops->print_stats(m, tcon);
56559 }
56560diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
56561index 849f613..eae6dec 100644
56562--- a/fs/cifs/cifsfs.c
56563+++ b/fs/cifs/cifsfs.c
56564@@ -1056,7 +1056,7 @@ cifs_init_request_bufs(void)
56565 */
56566 cifs_req_cachep = kmem_cache_create("cifs_request",
56567 CIFSMaxBufSize + max_hdr_size, 0,
56568- SLAB_HWCACHE_ALIGN, NULL);
56569+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
56570 if (cifs_req_cachep == NULL)
56571 return -ENOMEM;
56572
56573@@ -1083,7 +1083,7 @@ cifs_init_request_bufs(void)
56574 efficient to alloc 1 per page off the slab compared to 17K (5page)
56575 alloc of large cifs buffers even when page debugging is on */
56576 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
56577- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
56578+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
56579 NULL);
56580 if (cifs_sm_req_cachep == NULL) {
56581 mempool_destroy(cifs_req_poolp);
56582@@ -1168,8 +1168,8 @@ init_cifs(void)
56583 atomic_set(&bufAllocCount, 0);
56584 atomic_set(&smBufAllocCount, 0);
56585 #ifdef CONFIG_CIFS_STATS2
56586- atomic_set(&totBufAllocCount, 0);
56587- atomic_set(&totSmBufAllocCount, 0);
56588+ atomic_set_unchecked(&totBufAllocCount, 0);
56589+ atomic_set_unchecked(&totSmBufAllocCount, 0);
56590 #endif /* CONFIG_CIFS_STATS2 */
56591
56592 atomic_set(&midCount, 0);
56593diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
56594index f918a99..bb300d5 100644
56595--- a/fs/cifs/cifsglob.h
56596+++ b/fs/cifs/cifsglob.h
56597@@ -787,35 +787,35 @@ struct cifs_tcon {
56598 __u16 Flags; /* optional support bits */
56599 enum statusEnum tidStatus;
56600 #ifdef CONFIG_CIFS_STATS
56601- atomic_t num_smbs_sent;
56602+ atomic_unchecked_t num_smbs_sent;
56603 union {
56604 struct {
56605- atomic_t num_writes;
56606- atomic_t num_reads;
56607- atomic_t num_flushes;
56608- atomic_t num_oplock_brks;
56609- atomic_t num_opens;
56610- atomic_t num_closes;
56611- atomic_t num_deletes;
56612- atomic_t num_mkdirs;
56613- atomic_t num_posixopens;
56614- atomic_t num_posixmkdirs;
56615- atomic_t num_rmdirs;
56616- atomic_t num_renames;
56617- atomic_t num_t2renames;
56618- atomic_t num_ffirst;
56619- atomic_t num_fnext;
56620- atomic_t num_fclose;
56621- atomic_t num_hardlinks;
56622- atomic_t num_symlinks;
56623- atomic_t num_locks;
56624- atomic_t num_acl_get;
56625- atomic_t num_acl_set;
56626+ atomic_unchecked_t num_writes;
56627+ atomic_unchecked_t num_reads;
56628+ atomic_unchecked_t num_flushes;
56629+ atomic_unchecked_t num_oplock_brks;
56630+ atomic_unchecked_t num_opens;
56631+ atomic_unchecked_t num_closes;
56632+ atomic_unchecked_t num_deletes;
56633+ atomic_unchecked_t num_mkdirs;
56634+ atomic_unchecked_t num_posixopens;
56635+ atomic_unchecked_t num_posixmkdirs;
56636+ atomic_unchecked_t num_rmdirs;
56637+ atomic_unchecked_t num_renames;
56638+ atomic_unchecked_t num_t2renames;
56639+ atomic_unchecked_t num_ffirst;
56640+ atomic_unchecked_t num_fnext;
56641+ atomic_unchecked_t num_fclose;
56642+ atomic_unchecked_t num_hardlinks;
56643+ atomic_unchecked_t num_symlinks;
56644+ atomic_unchecked_t num_locks;
56645+ atomic_unchecked_t num_acl_get;
56646+ atomic_unchecked_t num_acl_set;
56647 } cifs_stats;
56648 #ifdef CONFIG_CIFS_SMB2
56649 struct {
56650- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
56651- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
56652+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
56653+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
56654 } smb2_stats;
56655 #endif /* CONFIG_CIFS_SMB2 */
56656 } stats;
56657@@ -1145,7 +1145,7 @@ convert_delimiter(char *path, char delim)
56658 }
56659
56660 #ifdef CONFIG_CIFS_STATS
56661-#define cifs_stats_inc atomic_inc
56662+#define cifs_stats_inc atomic_inc_unchecked
56663
56664 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
56665 unsigned int bytes)
56666@@ -1511,8 +1511,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
56667 /* Various Debug counters */
56668 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
56669 #ifdef CONFIG_CIFS_STATS2
56670-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
56671-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
56672+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
56673+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
56674 #endif
56675 GLOBAL_EXTERN atomic_t smBufAllocCount;
56676 GLOBAL_EXTERN atomic_t midCount;
56677diff --git a/fs/cifs/file.c b/fs/cifs/file.c
56678index 5a5a872..92c3210 100644
56679--- a/fs/cifs/file.c
56680+++ b/fs/cifs/file.c
56681@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
56682 index = mapping->writeback_index; /* Start from prev offset */
56683 end = -1;
56684 } else {
56685- index = wbc->range_start >> PAGE_CACHE_SHIFT;
56686- end = wbc->range_end >> PAGE_CACHE_SHIFT;
56687- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
56688+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
56689 range_whole = true;
56690+ index = 0;
56691+ end = ULONG_MAX;
56692+ } else {
56693+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
56694+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
56695+ }
56696 scanned = true;
56697 }
56698 retry:
56699diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
56700index 2f9f379..43f8025 100644
56701--- a/fs/cifs/misc.c
56702+++ b/fs/cifs/misc.c
56703@@ -170,7 +170,7 @@ cifs_buf_get(void)
56704 memset(ret_buf, 0, buf_size + 3);
56705 atomic_inc(&bufAllocCount);
56706 #ifdef CONFIG_CIFS_STATS2
56707- atomic_inc(&totBufAllocCount);
56708+ atomic_inc_unchecked(&totBufAllocCount);
56709 #endif /* CONFIG_CIFS_STATS2 */
56710 }
56711
56712@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
56713 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
56714 atomic_inc(&smBufAllocCount);
56715 #ifdef CONFIG_CIFS_STATS2
56716- atomic_inc(&totSmBufAllocCount);
56717+ atomic_inc_unchecked(&totSmBufAllocCount);
56718 #endif /* CONFIG_CIFS_STATS2 */
56719
56720 }
56721diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
56722index 5f5ba0d..8d6ef7d 100644
56723--- a/fs/cifs/smb1ops.c
56724+++ b/fs/cifs/smb1ops.c
56725@@ -609,27 +609,27 @@ static void
56726 cifs_clear_stats(struct cifs_tcon *tcon)
56727 {
56728 #ifdef CONFIG_CIFS_STATS
56729- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
56730- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
56731- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
56732- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
56733- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
56734- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
56735- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
56736- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
56737- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
56738- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
56739- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
56740- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
56741- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
56742- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
56743- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
56744- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
56745- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
56746- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
56747- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
56748- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
56749- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
56750+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
56751+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
56752+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
56753+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
56754+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
56755+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
56756+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
56757+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
56758+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
56759+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
56760+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
56761+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
56762+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
56763+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
56764+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
56765+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
56766+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
56767+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
56768+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
56769+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
56770+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
56771 #endif
56772 }
56773
56774@@ -638,36 +638,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
56775 {
56776 #ifdef CONFIG_CIFS_STATS
56777 seq_printf(m, " Oplocks breaks: %d",
56778- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
56779+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
56780 seq_printf(m, "\nReads: %d Bytes: %llu",
56781- atomic_read(&tcon->stats.cifs_stats.num_reads),
56782+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
56783 (long long)(tcon->bytes_read));
56784 seq_printf(m, "\nWrites: %d Bytes: %llu",
56785- atomic_read(&tcon->stats.cifs_stats.num_writes),
56786+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
56787 (long long)(tcon->bytes_written));
56788 seq_printf(m, "\nFlushes: %d",
56789- atomic_read(&tcon->stats.cifs_stats.num_flushes));
56790+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
56791 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
56792- atomic_read(&tcon->stats.cifs_stats.num_locks),
56793- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
56794- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
56795+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
56796+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
56797+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
56798 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
56799- atomic_read(&tcon->stats.cifs_stats.num_opens),
56800- atomic_read(&tcon->stats.cifs_stats.num_closes),
56801- atomic_read(&tcon->stats.cifs_stats.num_deletes));
56802+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
56803+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
56804+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
56805 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
56806- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
56807- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
56808+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
56809+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
56810 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
56811- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
56812- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
56813+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
56814+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
56815 seq_printf(m, "\nRenames: %d T2 Renames %d",
56816- atomic_read(&tcon->stats.cifs_stats.num_renames),
56817- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
56818+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
56819+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
56820 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
56821- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
56822- atomic_read(&tcon->stats.cifs_stats.num_fnext),
56823- atomic_read(&tcon->stats.cifs_stats.num_fclose));
56824+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
56825+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
56826+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
56827 #endif
56828 }
56829
56830diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
56831index 757da3e..07bf1ed 100644
56832--- a/fs/cifs/smb2ops.c
56833+++ b/fs/cifs/smb2ops.c
56834@@ -370,8 +370,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
56835 #ifdef CONFIG_CIFS_STATS
56836 int i;
56837 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
56838- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
56839- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
56840+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
56841+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
56842 }
56843 #endif
56844 }
56845@@ -411,65 +411,65 @@ static void
56846 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
56847 {
56848 #ifdef CONFIG_CIFS_STATS
56849- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
56850- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
56851+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
56852+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
56853 seq_printf(m, "\nNegotiates: %d sent %d failed",
56854- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
56855- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
56856+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
56857+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
56858 seq_printf(m, "\nSessionSetups: %d sent %d failed",
56859- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
56860- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
56861+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
56862+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
56863 seq_printf(m, "\nLogoffs: %d sent %d failed",
56864- atomic_read(&sent[SMB2_LOGOFF_HE]),
56865- atomic_read(&failed[SMB2_LOGOFF_HE]));
56866+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
56867+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
56868 seq_printf(m, "\nTreeConnects: %d sent %d failed",
56869- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
56870- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
56871+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
56872+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
56873 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
56874- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
56875- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
56876+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
56877+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
56878 seq_printf(m, "\nCreates: %d sent %d failed",
56879- atomic_read(&sent[SMB2_CREATE_HE]),
56880- atomic_read(&failed[SMB2_CREATE_HE]));
56881+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
56882+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
56883 seq_printf(m, "\nCloses: %d sent %d failed",
56884- atomic_read(&sent[SMB2_CLOSE_HE]),
56885- atomic_read(&failed[SMB2_CLOSE_HE]));
56886+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
56887+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
56888 seq_printf(m, "\nFlushes: %d sent %d failed",
56889- atomic_read(&sent[SMB2_FLUSH_HE]),
56890- atomic_read(&failed[SMB2_FLUSH_HE]));
56891+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
56892+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
56893 seq_printf(m, "\nReads: %d sent %d failed",
56894- atomic_read(&sent[SMB2_READ_HE]),
56895- atomic_read(&failed[SMB2_READ_HE]));
56896+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
56897+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
56898 seq_printf(m, "\nWrites: %d sent %d failed",
56899- atomic_read(&sent[SMB2_WRITE_HE]),
56900- atomic_read(&failed[SMB2_WRITE_HE]));
56901+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
56902+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
56903 seq_printf(m, "\nLocks: %d sent %d failed",
56904- atomic_read(&sent[SMB2_LOCK_HE]),
56905- atomic_read(&failed[SMB2_LOCK_HE]));
56906+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
56907+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
56908 seq_printf(m, "\nIOCTLs: %d sent %d failed",
56909- atomic_read(&sent[SMB2_IOCTL_HE]),
56910- atomic_read(&failed[SMB2_IOCTL_HE]));
56911+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
56912+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
56913 seq_printf(m, "\nCancels: %d sent %d failed",
56914- atomic_read(&sent[SMB2_CANCEL_HE]),
56915- atomic_read(&failed[SMB2_CANCEL_HE]));
56916+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
56917+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
56918 seq_printf(m, "\nEchos: %d sent %d failed",
56919- atomic_read(&sent[SMB2_ECHO_HE]),
56920- atomic_read(&failed[SMB2_ECHO_HE]));
56921+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
56922+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
56923 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
56924- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
56925- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
56926+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
56927+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
56928 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
56929- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
56930- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
56931+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
56932+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
56933 seq_printf(m, "\nQueryInfos: %d sent %d failed",
56934- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
56935- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
56936+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
56937+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
56938 seq_printf(m, "\nSetInfos: %d sent %d failed",
56939- atomic_read(&sent[SMB2_SET_INFO_HE]),
56940- atomic_read(&failed[SMB2_SET_INFO_HE]));
56941+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
56942+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
56943 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
56944- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
56945- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
56946+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
56947+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
56948 #endif
56949 }
56950
56951diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
56952index 2013234..a720734 100644
56953--- a/fs/cifs/smb2pdu.c
56954+++ b/fs/cifs/smb2pdu.c
56955@@ -2091,8 +2091,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
56956 default:
56957 cifs_dbg(VFS, "info level %u isn't supported\n",
56958 srch_inf->info_level);
56959- rc = -EINVAL;
56960- goto qdir_exit;
56961+ return -EINVAL;
56962 }
56963
56964 req->FileIndex = cpu_to_le32(index);
56965diff --git a/fs/coda/cache.c b/fs/coda/cache.c
56966index 1da168c..8bc7ff6 100644
56967--- a/fs/coda/cache.c
56968+++ b/fs/coda/cache.c
56969@@ -24,7 +24,7 @@
56970 #include "coda_linux.h"
56971 #include "coda_cache.h"
56972
56973-static atomic_t permission_epoch = ATOMIC_INIT(0);
56974+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
56975
56976 /* replace or extend an acl cache hit */
56977 void coda_cache_enter(struct inode *inode, int mask)
56978@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
56979 struct coda_inode_info *cii = ITOC(inode);
56980
56981 spin_lock(&cii->c_lock);
56982- cii->c_cached_epoch = atomic_read(&permission_epoch);
56983+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
56984 if (!uid_eq(cii->c_uid, current_fsuid())) {
56985 cii->c_uid = current_fsuid();
56986 cii->c_cached_perm = mask;
56987@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
56988 {
56989 struct coda_inode_info *cii = ITOC(inode);
56990 spin_lock(&cii->c_lock);
56991- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
56992+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
56993 spin_unlock(&cii->c_lock);
56994 }
56995
56996 /* remove all acl caches */
56997 void coda_cache_clear_all(struct super_block *sb)
56998 {
56999- atomic_inc(&permission_epoch);
57000+ atomic_inc_unchecked(&permission_epoch);
57001 }
57002
57003
57004@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
57005 spin_lock(&cii->c_lock);
57006 hit = (mask & cii->c_cached_perm) == mask &&
57007 uid_eq(cii->c_uid, current_fsuid()) &&
57008- cii->c_cached_epoch == atomic_read(&permission_epoch);
57009+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
57010 spin_unlock(&cii->c_lock);
57011
57012 return hit;
57013diff --git a/fs/compat.c b/fs/compat.c
57014index 6af20de..fec3fbb 100644
57015--- a/fs/compat.c
57016+++ b/fs/compat.c
57017@@ -54,7 +54,7 @@
57018 #include <asm/ioctls.h>
57019 #include "internal.h"
57020
57021-int compat_log = 1;
57022+int compat_log = 0;
57023
57024 int compat_printk(const char *fmt, ...)
57025 {
57026@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
57027
57028 set_fs(KERNEL_DS);
57029 /* The __user pointer cast is valid because of the set_fs() */
57030- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
57031+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
57032 set_fs(oldfs);
57033 /* truncating is ok because it's a user address */
57034 if (!ret)
57035@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
57036 goto out;
57037
57038 ret = -EINVAL;
57039- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
57040+ if (nr_segs > UIO_MAXIOV)
57041 goto out;
57042 if (nr_segs > fast_segs) {
57043 ret = -ENOMEM;
57044@@ -834,6 +834,7 @@ struct compat_old_linux_dirent {
57045 struct compat_readdir_callback {
57046 struct dir_context ctx;
57047 struct compat_old_linux_dirent __user *dirent;
57048+ struct file * file;
57049 int result;
57050 };
57051
57052@@ -851,6 +852,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
57053 buf->result = -EOVERFLOW;
57054 return -EOVERFLOW;
57055 }
57056+
57057+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57058+ return 0;
57059+
57060 buf->result++;
57061 dirent = buf->dirent;
57062 if (!access_ok(VERIFY_WRITE, dirent,
57063@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
57064 if (!f.file)
57065 return -EBADF;
57066
57067+ buf.file = f.file;
57068 error = iterate_dir(f.file, &buf.ctx);
57069 if (buf.result)
57070 error = buf.result;
57071@@ -901,6 +907,7 @@ struct compat_getdents_callback {
57072 struct dir_context ctx;
57073 struct compat_linux_dirent __user *current_dir;
57074 struct compat_linux_dirent __user *previous;
57075+ struct file * file;
57076 int count;
57077 int error;
57078 };
57079@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
57080 buf->error = -EOVERFLOW;
57081 return -EOVERFLOW;
57082 }
57083+
57084+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57085+ return 0;
57086+
57087 dirent = buf->previous;
57088 if (dirent) {
57089 if (__put_user(offset, &dirent->d_off))
57090@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
57091 if (!f.file)
57092 return -EBADF;
57093
57094+ buf.file = f.file;
57095 error = iterate_dir(f.file, &buf.ctx);
57096 if (error >= 0)
57097 error = buf.error;
57098@@ -987,6 +999,7 @@ struct compat_getdents_callback64 {
57099 struct dir_context ctx;
57100 struct linux_dirent64 __user *current_dir;
57101 struct linux_dirent64 __user *previous;
57102+ struct file * file;
57103 int count;
57104 int error;
57105 };
57106@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
57107 buf->error = -EINVAL; /* only used if we fail.. */
57108 if (reclen > buf->count)
57109 return -EINVAL;
57110+
57111+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57112+ return 0;
57113+
57114 dirent = buf->previous;
57115
57116 if (dirent) {
57117@@ -1052,6 +1069,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
57118 if (!f.file)
57119 return -EBADF;
57120
57121+ buf.file = f.file;
57122 error = iterate_dir(f.file, &buf.ctx);
57123 if (error >= 0)
57124 error = buf.error;
57125diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
57126index a81147e..20bf2b5 100644
57127--- a/fs/compat_binfmt_elf.c
57128+++ b/fs/compat_binfmt_elf.c
57129@@ -30,11 +30,13 @@
57130 #undef elf_phdr
57131 #undef elf_shdr
57132 #undef elf_note
57133+#undef elf_dyn
57134 #undef elf_addr_t
57135 #define elfhdr elf32_hdr
57136 #define elf_phdr elf32_phdr
57137 #define elf_shdr elf32_shdr
57138 #define elf_note elf32_note
57139+#define elf_dyn Elf32_Dyn
57140 #define elf_addr_t Elf32_Addr
57141
57142 /*
57143diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
57144index dc52e13..ec61057 100644
57145--- a/fs/compat_ioctl.c
57146+++ b/fs/compat_ioctl.c
57147@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
57148 return -EFAULT;
57149 if (__get_user(udata, &ss32->iomem_base))
57150 return -EFAULT;
57151- ss.iomem_base = compat_ptr(udata);
57152+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
57153 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
57154 __get_user(ss.port_high, &ss32->port_high))
57155 return -EFAULT;
57156@@ -702,8 +702,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
57157 for (i = 0; i < nmsgs; i++) {
57158 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
57159 return -EFAULT;
57160- if (get_user(datap, &umsgs[i].buf) ||
57161- put_user(compat_ptr(datap), &tmsgs[i].buf))
57162+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
57163+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
57164 return -EFAULT;
57165 }
57166 return sys_ioctl(fd, cmd, (unsigned long)tdata);
57167@@ -796,7 +796,7 @@ static int compat_ioctl_preallocate(struct file *file,
57168 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
57169 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
57170 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
57171- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
57172+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
57173 return -EFAULT;
57174
57175 return ioctl_preallocate(file, p);
57176@@ -1616,8 +1616,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
57177 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
57178 {
57179 unsigned int a, b;
57180- a = *(unsigned int *)p;
57181- b = *(unsigned int *)q;
57182+ a = *(const unsigned int *)p;
57183+ b = *(const unsigned int *)q;
57184 if (a > b)
57185 return 1;
57186 if (a < b)
57187diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
57188index e081acb..911df21 100644
57189--- a/fs/configfs/dir.c
57190+++ b/fs/configfs/dir.c
57191@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
57192 }
57193 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
57194 struct configfs_dirent *next;
57195- const char *name;
57196+ const unsigned char * name;
57197+ char d_name[sizeof(next->s_dentry->d_iname)];
57198 int len;
57199 struct inode *inode = NULL;
57200
57201@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
57202 continue;
57203
57204 name = configfs_get_name(next);
57205- len = strlen(name);
57206+ if (next->s_dentry && name == next->s_dentry->d_iname) {
57207+ len = next->s_dentry->d_name.len;
57208+ memcpy(d_name, name, len);
57209+ name = d_name;
57210+ } else
57211+ len = strlen(name);
57212
57213 /*
57214 * We'll have a dentry and an inode for
57215diff --git a/fs/coredump.c b/fs/coredump.c
57216index bc3fbcd..6031650 100644
57217--- a/fs/coredump.c
57218+++ b/fs/coredump.c
57219@@ -438,8 +438,8 @@ static void wait_for_dump_helpers(struct file *file)
57220 struct pipe_inode_info *pipe = file->private_data;
57221
57222 pipe_lock(pipe);
57223- pipe->readers++;
57224- pipe->writers--;
57225+ atomic_inc(&pipe->readers);
57226+ atomic_dec(&pipe->writers);
57227 wake_up_interruptible_sync(&pipe->wait);
57228 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
57229 pipe_unlock(pipe);
57230@@ -448,11 +448,11 @@ static void wait_for_dump_helpers(struct file *file)
57231 * We actually want wait_event_freezable() but then we need
57232 * to clear TIF_SIGPENDING and improve dump_interrupted().
57233 */
57234- wait_event_interruptible(pipe->wait, pipe->readers == 1);
57235+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
57236
57237 pipe_lock(pipe);
57238- pipe->readers--;
57239- pipe->writers++;
57240+ atomic_dec(&pipe->readers);
57241+ atomic_inc(&pipe->writers);
57242 pipe_unlock(pipe);
57243 }
57244
57245@@ -499,7 +499,9 @@ void do_coredump(const siginfo_t *siginfo)
57246 struct files_struct *displaced;
57247 bool need_nonrelative = false;
57248 bool core_dumped = false;
57249- static atomic_t core_dump_count = ATOMIC_INIT(0);
57250+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
57251+ long signr = siginfo->si_signo;
57252+ int dumpable;
57253 struct coredump_params cprm = {
57254 .siginfo = siginfo,
57255 .regs = signal_pt_regs(),
57256@@ -512,12 +514,17 @@ void do_coredump(const siginfo_t *siginfo)
57257 .mm_flags = mm->flags,
57258 };
57259
57260- audit_core_dumps(siginfo->si_signo);
57261+ audit_core_dumps(signr);
57262+
57263+ dumpable = __get_dumpable(cprm.mm_flags);
57264+
57265+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
57266+ gr_handle_brute_attach(dumpable);
57267
57268 binfmt = mm->binfmt;
57269 if (!binfmt || !binfmt->core_dump)
57270 goto fail;
57271- if (!__get_dumpable(cprm.mm_flags))
57272+ if (!dumpable)
57273 goto fail;
57274
57275 cred = prepare_creds();
57276@@ -536,7 +543,7 @@ void do_coredump(const siginfo_t *siginfo)
57277 need_nonrelative = true;
57278 }
57279
57280- retval = coredump_wait(siginfo->si_signo, &core_state);
57281+ retval = coredump_wait(signr, &core_state);
57282 if (retval < 0)
57283 goto fail_creds;
57284
57285@@ -579,7 +586,7 @@ void do_coredump(const siginfo_t *siginfo)
57286 }
57287 cprm.limit = RLIM_INFINITY;
57288
57289- dump_count = atomic_inc_return(&core_dump_count);
57290+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
57291 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
57292 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
57293 task_tgid_vnr(current), current->comm);
57294@@ -611,6 +618,8 @@ void do_coredump(const siginfo_t *siginfo)
57295 } else {
57296 struct inode *inode;
57297
57298+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
57299+
57300 if (cprm.limit < binfmt->min_coredump)
57301 goto fail_unlock;
57302
57303@@ -669,7 +678,7 @@ close_fail:
57304 filp_close(cprm.file, NULL);
57305 fail_dropcount:
57306 if (ispipe)
57307- atomic_dec(&core_dump_count);
57308+ atomic_dec_unchecked(&core_dump_count);
57309 fail_unlock:
57310 kfree(cn.corename);
57311 coredump_finish(mm, core_dumped);
57312@@ -690,6 +699,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
57313 struct file *file = cprm->file;
57314 loff_t pos = file->f_pos;
57315 ssize_t n;
57316+
57317+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
57318 if (cprm->written + nr > cprm->limit)
57319 return 0;
57320 while (nr) {
57321diff --git a/fs/dcache.c b/fs/dcache.c
57322index fdbe230..ba17c1f 100644
57323--- a/fs/dcache.c
57324+++ b/fs/dcache.c
57325@@ -1495,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
57326 */
57327 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
57328 if (name->len > DNAME_INLINE_LEN-1) {
57329- dname = kmalloc(name->len + 1, GFP_KERNEL);
57330+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
57331 if (!dname) {
57332 kmem_cache_free(dentry_cache, dentry);
57333 return NULL;
57334@@ -3428,7 +3428,8 @@ void __init vfs_caches_init(unsigned long mempages)
57335 mempages -= reserve;
57336
57337 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
57338- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
57339+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
57340+ SLAB_NO_SANITIZE, NULL);
57341
57342 dcache_init();
57343 inode_init();
57344diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
57345index 9c0444c..628490c 100644
57346--- a/fs/debugfs/inode.c
57347+++ b/fs/debugfs/inode.c
57348@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
57349 */
57350 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
57351 {
57352+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
57353+ return __create_file(name, S_IFDIR | S_IRWXU,
57354+#else
57355 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
57356+#endif
57357 parent, NULL, NULL);
57358 }
57359 EXPORT_SYMBOL_GPL(debugfs_create_dir);
57360diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
57361index c36c448..fc96710 100644
57362--- a/fs/ecryptfs/inode.c
57363+++ b/fs/ecryptfs/inode.c
57364@@ -675,7 +675,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
57365 old_fs = get_fs();
57366 set_fs(get_ds());
57367 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
57368- (char __user *)lower_buf,
57369+ (char __force_user *)lower_buf,
57370 PATH_MAX);
57371 set_fs(old_fs);
57372 if (rc < 0)
57373diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
57374index e4141f2..d8263e8 100644
57375--- a/fs/ecryptfs/miscdev.c
57376+++ b/fs/ecryptfs/miscdev.c
57377@@ -304,7 +304,7 @@ check_list:
57378 goto out_unlock_msg_ctx;
57379 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
57380 if (msg_ctx->msg) {
57381- if (copy_to_user(&buf[i], packet_length, packet_length_size))
57382+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
57383 goto out_unlock_msg_ctx;
57384 i += packet_length_size;
57385 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
57386diff --git a/fs/exec.c b/fs/exec.c
57387index 7ea097f..0158d8a 100644
57388--- a/fs/exec.c
57389+++ b/fs/exec.c
57390@@ -55,8 +55,20 @@
57391 #include <linux/pipe_fs_i.h>
57392 #include <linux/oom.h>
57393 #include <linux/compat.h>
57394+#include <linux/random.h>
57395+#include <linux/seq_file.h>
57396+#include <linux/coredump.h>
57397+#include <linux/mman.h>
57398+
57399+#ifdef CONFIG_PAX_REFCOUNT
57400+#include <linux/kallsyms.h>
57401+#include <linux/kdebug.h>
57402+#endif
57403+
57404+#include <trace/events/fs.h>
57405
57406 #include <asm/uaccess.h>
57407+#include <asm/sections.h>
57408 #include <asm/mmu_context.h>
57409 #include <asm/tlb.h>
57410
57411@@ -66,19 +78,34 @@
57412
57413 #include <trace/events/sched.h>
57414
57415+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57416+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
57417+{
57418+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
57419+}
57420+#endif
57421+
57422+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
57423+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57424+EXPORT_SYMBOL(pax_set_initial_flags_func);
57425+#endif
57426+
57427 int suid_dumpable = 0;
57428
57429 static LIST_HEAD(formats);
57430 static DEFINE_RWLOCK(binfmt_lock);
57431
57432+extern int gr_process_kernel_exec_ban(void);
57433+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
57434+
57435 void __register_binfmt(struct linux_binfmt * fmt, int insert)
57436 {
57437 BUG_ON(!fmt);
57438 if (WARN_ON(!fmt->load_binary))
57439 return;
57440 write_lock(&binfmt_lock);
57441- insert ? list_add(&fmt->lh, &formats) :
57442- list_add_tail(&fmt->lh, &formats);
57443+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
57444+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
57445 write_unlock(&binfmt_lock);
57446 }
57447
57448@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
57449 void unregister_binfmt(struct linux_binfmt * fmt)
57450 {
57451 write_lock(&binfmt_lock);
57452- list_del(&fmt->lh);
57453+ pax_list_del((struct list_head *)&fmt->lh);
57454 write_unlock(&binfmt_lock);
57455 }
57456
57457@@ -181,18 +208,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
57458 int write)
57459 {
57460 struct page *page;
57461- int ret;
57462
57463-#ifdef CONFIG_STACK_GROWSUP
57464- if (write) {
57465- ret = expand_downwards(bprm->vma, pos);
57466- if (ret < 0)
57467- return NULL;
57468- }
57469-#endif
57470- ret = get_user_pages(current, bprm->mm, pos,
57471- 1, write, 1, &page, NULL);
57472- if (ret <= 0)
57473+ if (0 > expand_downwards(bprm->vma, pos))
57474+ return NULL;
57475+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
57476 return NULL;
57477
57478 if (write) {
57479@@ -208,6 +227,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
57480 if (size <= ARG_MAX)
57481 return page;
57482
57483+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57484+ // only allow 512KB for argv+env on suid/sgid binaries
57485+ // to prevent easy ASLR exhaustion
57486+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
57487+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
57488+ (size > (512 * 1024))) {
57489+ put_page(page);
57490+ return NULL;
57491+ }
57492+#endif
57493+
57494 /*
57495 * Limit to 1/4-th the stack size for the argv+env strings.
57496 * This ensures that:
57497@@ -267,6 +297,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
57498 vma->vm_end = STACK_TOP_MAX;
57499 vma->vm_start = vma->vm_end - PAGE_SIZE;
57500 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
57501+
57502+#ifdef CONFIG_PAX_SEGMEXEC
57503+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
57504+#endif
57505+
57506 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
57507 INIT_LIST_HEAD(&vma->anon_vma_chain);
57508
57509@@ -277,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
57510 mm->stack_vm = mm->total_vm = 1;
57511 up_write(&mm->mmap_sem);
57512 bprm->p = vma->vm_end - sizeof(void *);
57513+
57514+#ifdef CONFIG_PAX_RANDUSTACK
57515+ if (randomize_va_space)
57516+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
57517+#endif
57518+
57519 return 0;
57520 err:
57521 up_write(&mm->mmap_sem);
57522@@ -397,7 +438,7 @@ struct user_arg_ptr {
57523 } ptr;
57524 };
57525
57526-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
57527+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
57528 {
57529 const char __user *native;
57530
57531@@ -406,14 +447,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
57532 compat_uptr_t compat;
57533
57534 if (get_user(compat, argv.ptr.compat + nr))
57535- return ERR_PTR(-EFAULT);
57536+ return (const char __force_user *)ERR_PTR(-EFAULT);
57537
57538 return compat_ptr(compat);
57539 }
57540 #endif
57541
57542 if (get_user(native, argv.ptr.native + nr))
57543- return ERR_PTR(-EFAULT);
57544+ return (const char __force_user *)ERR_PTR(-EFAULT);
57545
57546 return native;
57547 }
57548@@ -432,7 +473,7 @@ static int count(struct user_arg_ptr argv, int max)
57549 if (!p)
57550 break;
57551
57552- if (IS_ERR(p))
57553+ if (IS_ERR((const char __force_kernel *)p))
57554 return -EFAULT;
57555
57556 if (i >= max)
57557@@ -467,7 +508,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
57558
57559 ret = -EFAULT;
57560 str = get_user_arg_ptr(argv, argc);
57561- if (IS_ERR(str))
57562+ if (IS_ERR((const char __force_kernel *)str))
57563 goto out;
57564
57565 len = strnlen_user(str, MAX_ARG_STRLEN);
57566@@ -549,7 +590,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
57567 int r;
57568 mm_segment_t oldfs = get_fs();
57569 struct user_arg_ptr argv = {
57570- .ptr.native = (const char __user *const __user *)__argv,
57571+ .ptr.native = (const char __user * const __force_user *)__argv,
57572 };
57573
57574 set_fs(KERNEL_DS);
57575@@ -584,7 +625,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
57576 unsigned long new_end = old_end - shift;
57577 struct mmu_gather tlb;
57578
57579- BUG_ON(new_start > new_end);
57580+ if (new_start >= new_end || new_start < mmap_min_addr)
57581+ return -ENOMEM;
57582
57583 /*
57584 * ensure there are no vmas between where we want to go
57585@@ -593,6 +635,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
57586 if (vma != find_vma(mm, new_start))
57587 return -EFAULT;
57588
57589+#ifdef CONFIG_PAX_SEGMEXEC
57590+ BUG_ON(pax_find_mirror_vma(vma));
57591+#endif
57592+
57593 /*
57594 * cover the whole range: [new_start, old_end)
57595 */
57596@@ -673,10 +719,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
57597 stack_top = arch_align_stack(stack_top);
57598 stack_top = PAGE_ALIGN(stack_top);
57599
57600- if (unlikely(stack_top < mmap_min_addr) ||
57601- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
57602- return -ENOMEM;
57603-
57604 stack_shift = vma->vm_end - stack_top;
57605
57606 bprm->p -= stack_shift;
57607@@ -688,8 +730,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
57608 bprm->exec -= stack_shift;
57609
57610 down_write(&mm->mmap_sem);
57611+
57612+ /* Move stack pages down in memory. */
57613+ if (stack_shift) {
57614+ ret = shift_arg_pages(vma, stack_shift);
57615+ if (ret)
57616+ goto out_unlock;
57617+ }
57618+
57619 vm_flags = VM_STACK_FLAGS;
57620
57621+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
57622+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
57623+ vm_flags &= ~VM_EXEC;
57624+
57625+#ifdef CONFIG_PAX_MPROTECT
57626+ if (mm->pax_flags & MF_PAX_MPROTECT)
57627+ vm_flags &= ~VM_MAYEXEC;
57628+#endif
57629+
57630+ }
57631+#endif
57632+
57633 /*
57634 * Adjust stack execute permissions; explicitly enable for
57635 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
57636@@ -708,13 +770,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
57637 goto out_unlock;
57638 BUG_ON(prev != vma);
57639
57640- /* Move stack pages down in memory. */
57641- if (stack_shift) {
57642- ret = shift_arg_pages(vma, stack_shift);
57643- if (ret)
57644- goto out_unlock;
57645- }
57646-
57647 /* mprotect_fixup is overkill to remove the temporary stack flags */
57648 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
57649
57650@@ -738,6 +793,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
57651 #endif
57652 current->mm->start_stack = bprm->p;
57653 ret = expand_stack(vma, stack_base);
57654+
57655+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
57656+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
57657+ unsigned long size;
57658+ vm_flags_t vm_flags;
57659+
57660+ size = STACK_TOP - vma->vm_end;
57661+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
57662+
57663+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
57664+
57665+#ifdef CONFIG_X86
57666+ if (!ret) {
57667+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
57668+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
57669+ }
57670+#endif
57671+
57672+ }
57673+#endif
57674+
57675 if (ret)
57676 ret = -EFAULT;
57677
57678@@ -774,6 +850,8 @@ struct file *open_exec(const char *name)
57679
57680 fsnotify_open(file);
57681
57682+ trace_open_exec(name);
57683+
57684 err = deny_write_access(file);
57685 if (err)
57686 goto exit;
57687@@ -797,7 +875,7 @@ int kernel_read(struct file *file, loff_t offset,
57688 old_fs = get_fs();
57689 set_fs(get_ds());
57690 /* The cast to a user pointer is valid due to the set_fs() */
57691- result = vfs_read(file, (void __user *)addr, count, &pos);
57692+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
57693 set_fs(old_fs);
57694 return result;
57695 }
57696@@ -1253,7 +1331,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
57697 }
57698 rcu_read_unlock();
57699
57700- if (p->fs->users > n_fs) {
57701+ if (atomic_read(&p->fs->users) > n_fs) {
57702 bprm->unsafe |= LSM_UNSAFE_SHARE;
57703 } else {
57704 res = -EAGAIN;
57705@@ -1443,6 +1521,31 @@ static int exec_binprm(struct linux_binprm *bprm)
57706 return ret;
57707 }
57708
57709+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57710+static DEFINE_PER_CPU(u64, exec_counter);
57711+static int __init init_exec_counters(void)
57712+{
57713+ unsigned int cpu;
57714+
57715+ for_each_possible_cpu(cpu) {
57716+ per_cpu(exec_counter, cpu) = (u64)cpu;
57717+ }
57718+
57719+ return 0;
57720+}
57721+early_initcall(init_exec_counters);
57722+static inline void increment_exec_counter(void)
57723+{
57724+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
57725+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
57726+}
57727+#else
57728+static inline void increment_exec_counter(void) {}
57729+#endif
57730+
57731+extern void gr_handle_exec_args(struct linux_binprm *bprm,
57732+ struct user_arg_ptr argv);
57733+
57734 /*
57735 * sys_execve() executes a new program.
57736 */
57737@@ -1450,12 +1553,19 @@ static int do_execve_common(const char *filename,
57738 struct user_arg_ptr argv,
57739 struct user_arg_ptr envp)
57740 {
57741+#ifdef CONFIG_GRKERNSEC
57742+ struct file *old_exec_file;
57743+ struct acl_subject_label *old_acl;
57744+ struct rlimit old_rlim[RLIM_NLIMITS];
57745+#endif
57746 struct linux_binprm *bprm;
57747 struct file *file;
57748 struct files_struct *displaced;
57749 bool clear_in_exec;
57750 int retval;
57751
57752+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
57753+
57754 /*
57755 * We move the actual failure in case of RLIMIT_NPROC excess from
57756 * set*uid() to execve() because too many poorly written programs
57757@@ -1496,12 +1606,22 @@ static int do_execve_common(const char *filename,
57758 if (IS_ERR(file))
57759 goto out_unmark;
57760
57761+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
57762+ retval = -EPERM;
57763+ goto out_file;
57764+ }
57765+
57766 sched_exec();
57767
57768 bprm->file = file;
57769 bprm->filename = filename;
57770 bprm->interp = filename;
57771
57772+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
57773+ retval = -EACCES;
57774+ goto out_file;
57775+ }
57776+
57777 retval = bprm_mm_init(bprm);
57778 if (retval)
57779 goto out_file;
57780@@ -1518,24 +1638,70 @@ static int do_execve_common(const char *filename,
57781 if (retval < 0)
57782 goto out;
57783
57784+#ifdef CONFIG_GRKERNSEC
57785+ old_acl = current->acl;
57786+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
57787+ old_exec_file = current->exec_file;
57788+ get_file(file);
57789+ current->exec_file = file;
57790+#endif
57791+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57792+ /* limit suid stack to 8MB
57793+ * we saved the old limits above and will restore them if this exec fails
57794+ */
57795+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
57796+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
57797+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
57798+#endif
57799+
57800+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
57801+ retval = -EPERM;
57802+ goto out_fail;
57803+ }
57804+
57805+ if (!gr_tpe_allow(file)) {
57806+ retval = -EACCES;
57807+ goto out_fail;
57808+ }
57809+
57810+ if (gr_check_crash_exec(file)) {
57811+ retval = -EACCES;
57812+ goto out_fail;
57813+ }
57814+
57815+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
57816+ bprm->unsafe);
57817+ if (retval < 0)
57818+ goto out_fail;
57819+
57820 retval = copy_strings_kernel(1, &bprm->filename, bprm);
57821 if (retval < 0)
57822- goto out;
57823+ goto out_fail;
57824
57825 bprm->exec = bprm->p;
57826 retval = copy_strings(bprm->envc, envp, bprm);
57827 if (retval < 0)
57828- goto out;
57829+ goto out_fail;
57830
57831 retval = copy_strings(bprm->argc, argv, bprm);
57832 if (retval < 0)
57833- goto out;
57834+ goto out_fail;
57835+
57836+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
57837+
57838+ gr_handle_exec_args(bprm, argv);
57839
57840 retval = exec_binprm(bprm);
57841 if (retval < 0)
57842- goto out;
57843+ goto out_fail;
57844+#ifdef CONFIG_GRKERNSEC
57845+ if (old_exec_file)
57846+ fput(old_exec_file);
57847+#endif
57848
57849 /* execve succeeded */
57850+
57851+ increment_exec_counter();
57852 current->fs->in_exec = 0;
57853 current->in_execve = 0;
57854 acct_update_integrals(current);
57855@@ -1545,6 +1711,14 @@ static int do_execve_common(const char *filename,
57856 put_files_struct(displaced);
57857 return retval;
57858
57859+out_fail:
57860+#ifdef CONFIG_GRKERNSEC
57861+ current->acl = old_acl;
57862+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
57863+ fput(current->exec_file);
57864+ current->exec_file = old_exec_file;
57865+#endif
57866+
57867 out:
57868 if (bprm->mm) {
57869 acct_arg_size(bprm, 0);
57870@@ -1699,3 +1873,295 @@ asmlinkage long compat_sys_execve(const char __user * filename,
57871 return error;
57872 }
57873 #endif
57874+
57875+int pax_check_flags(unsigned long *flags)
57876+{
57877+ int retval = 0;
57878+
57879+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
57880+ if (*flags & MF_PAX_SEGMEXEC)
57881+ {
57882+ *flags &= ~MF_PAX_SEGMEXEC;
57883+ retval = -EINVAL;
57884+ }
57885+#endif
57886+
57887+ if ((*flags & MF_PAX_PAGEEXEC)
57888+
57889+#ifdef CONFIG_PAX_PAGEEXEC
57890+ && (*flags & MF_PAX_SEGMEXEC)
57891+#endif
57892+
57893+ )
57894+ {
57895+ *flags &= ~MF_PAX_PAGEEXEC;
57896+ retval = -EINVAL;
57897+ }
57898+
57899+ if ((*flags & MF_PAX_MPROTECT)
57900+
57901+#ifdef CONFIG_PAX_MPROTECT
57902+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
57903+#endif
57904+
57905+ )
57906+ {
57907+ *flags &= ~MF_PAX_MPROTECT;
57908+ retval = -EINVAL;
57909+ }
57910+
57911+ if ((*flags & MF_PAX_EMUTRAMP)
57912+
57913+#ifdef CONFIG_PAX_EMUTRAMP
57914+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
57915+#endif
57916+
57917+ )
57918+ {
57919+ *flags &= ~MF_PAX_EMUTRAMP;
57920+ retval = -EINVAL;
57921+ }
57922+
57923+ return retval;
57924+}
57925+
57926+EXPORT_SYMBOL(pax_check_flags);
57927+
57928+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
57929+char *pax_get_path(const struct path *path, char *buf, int buflen)
57930+{
57931+ char *pathname = d_path(path, buf, buflen);
57932+
57933+ if (IS_ERR(pathname))
57934+ goto toolong;
57935+
57936+ pathname = mangle_path(buf, pathname, "\t\n\\");
57937+ if (!pathname)
57938+ goto toolong;
57939+
57940+ *pathname = 0;
57941+ return buf;
57942+
57943+toolong:
57944+ return "<path too long>";
57945+}
57946+EXPORT_SYMBOL(pax_get_path);
57947+
57948+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
57949+{
57950+ struct task_struct *tsk = current;
57951+ struct mm_struct *mm = current->mm;
57952+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
57953+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
57954+ char *path_exec = NULL;
57955+ char *path_fault = NULL;
57956+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
57957+ siginfo_t info = { };
57958+
57959+ if (buffer_exec && buffer_fault) {
57960+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
57961+
57962+ down_read(&mm->mmap_sem);
57963+ vma = mm->mmap;
57964+ while (vma && (!vma_exec || !vma_fault)) {
57965+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
57966+ vma_exec = vma;
57967+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
57968+ vma_fault = vma;
57969+ vma = vma->vm_next;
57970+ }
57971+ if (vma_exec)
57972+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
57973+ if (vma_fault) {
57974+ start = vma_fault->vm_start;
57975+ end = vma_fault->vm_end;
57976+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
57977+ if (vma_fault->vm_file)
57978+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
57979+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
57980+ path_fault = "<heap>";
57981+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
57982+ path_fault = "<stack>";
57983+ else
57984+ path_fault = "<anonymous mapping>";
57985+ }
57986+ up_read(&mm->mmap_sem);
57987+ }
57988+ if (tsk->signal->curr_ip)
57989+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
57990+ else
57991+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
57992+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
57993+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
57994+ free_page((unsigned long)buffer_exec);
57995+ free_page((unsigned long)buffer_fault);
57996+ pax_report_insns(regs, pc, sp);
57997+ info.si_signo = SIGKILL;
57998+ info.si_errno = 0;
57999+ info.si_code = SI_KERNEL;
58000+ info.si_pid = 0;
58001+ info.si_uid = 0;
58002+ do_coredump(&info);
58003+}
58004+#endif
58005+
58006+#ifdef CONFIG_PAX_REFCOUNT
58007+void pax_report_refcount_overflow(struct pt_regs *regs)
58008+{
58009+ if (current->signal->curr_ip)
58010+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
58011+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
58012+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
58013+ else
58014+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
58015+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
58016+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
58017+ preempt_disable();
58018+ show_regs(regs);
58019+ preempt_enable();
58020+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
58021+}
58022+#endif
58023+
58024+#ifdef CONFIG_PAX_USERCOPY
58025+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
58026+static noinline int check_stack_object(const void *obj, unsigned long len)
58027+{
58028+ const void * const stack = task_stack_page(current);
58029+ const void * const stackend = stack + THREAD_SIZE;
58030+
58031+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
58032+ const void *frame = NULL;
58033+ const void *oldframe;
58034+#endif
58035+
58036+ if (obj + len < obj)
58037+ return -1;
58038+
58039+ if (obj + len <= stack || stackend <= obj)
58040+ return 0;
58041+
58042+ if (obj < stack || stackend < obj + len)
58043+ return -1;
58044+
58045+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
58046+ oldframe = __builtin_frame_address(1);
58047+ if (oldframe)
58048+ frame = __builtin_frame_address(2);
58049+ /*
58050+ low ----------------------------------------------> high
58051+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
58052+ ^----------------^
58053+ allow copies only within here
58054+ */
58055+ while (stack <= frame && frame < stackend) {
58056+ /* if obj + len extends past the last frame, this
58057+ check won't pass and the next frame will be 0,
58058+ causing us to bail out and correctly report
58059+ the copy as invalid
58060+ */
58061+ if (obj + len <= frame)
58062+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
58063+ oldframe = frame;
58064+ frame = *(const void * const *)frame;
58065+ }
58066+ return -1;
58067+#else
58068+ return 1;
58069+#endif
58070+}
58071+
58072+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
58073+{
58074+ if (current->signal->curr_ip)
58075+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
58076+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
58077+ else
58078+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
58079+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
58080+ dump_stack();
58081+ gr_handle_kernel_exploit();
58082+ do_group_exit(SIGKILL);
58083+}
58084+#endif
58085+
58086+#ifdef CONFIG_PAX_USERCOPY
58087+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
58088+{
58089+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
58090+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
58091+#ifdef CONFIG_MODULES
58092+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
58093+#else
58094+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
58095+#endif
58096+
58097+#else
58098+ unsigned long textlow = (unsigned long)_stext;
58099+ unsigned long texthigh = (unsigned long)_etext;
58100+
58101+#ifdef CONFIG_X86_64
58102+ /* check against linear mapping as well */
58103+ if (high > (unsigned long)__va(__pa(textlow)) &&
58104+ low <= (unsigned long)__va(__pa(texthigh)))
58105+ return true;
58106+#endif
58107+
58108+#endif
58109+
58110+ if (high <= textlow || low > texthigh)
58111+ return false;
58112+ else
58113+ return true;
58114+}
58115+#endif
58116+
58117+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
58118+{
58119+
58120+#ifdef CONFIG_PAX_USERCOPY
58121+ const char *type;
58122+
58123+ if (!n)
58124+ return;
58125+
58126+ type = check_heap_object(ptr, n);
58127+ if (!type) {
58128+ int ret = check_stack_object(ptr, n);
58129+ if (ret == 1 || ret == 2)
58130+ return;
58131+ if (ret == 0) {
58132+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
58133+ type = "<kernel text>";
58134+ else
58135+ return;
58136+ } else
58137+ type = "<process stack>";
58138+ }
58139+
58140+ pax_report_usercopy(ptr, n, to_user, type);
58141+#endif
58142+
58143+}
58144+EXPORT_SYMBOL(__check_object_size);
58145+
58146+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
58147+void pax_track_stack(void)
58148+{
58149+ unsigned long sp = (unsigned long)&sp;
58150+ if (sp < current_thread_info()->lowest_stack &&
58151+ sp > (unsigned long)task_stack_page(current))
58152+ current_thread_info()->lowest_stack = sp;
58153+}
58154+EXPORT_SYMBOL(pax_track_stack);
58155+#endif
58156+
58157+#ifdef CONFIG_PAX_SIZE_OVERFLOW
58158+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
58159+{
58160+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
58161+ dump_stack();
58162+ do_group_exit(SIGKILL);
58163+}
58164+EXPORT_SYMBOL(report_size_overflow);
58165+#endif
58166diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
58167index 9f9992b..8b59411 100644
58168--- a/fs/ext2/balloc.c
58169+++ b/fs/ext2/balloc.c
58170@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
58171
58172 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
58173 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
58174- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
58175+ if (free_blocks < root_blocks + 1 &&
58176 !uid_eq(sbi->s_resuid, current_fsuid()) &&
58177 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
58178- !in_group_p (sbi->s_resgid))) {
58179+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
58180 return 0;
58181 }
58182 return 1;
58183diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
58184index 2d7557d..14e38f94 100644
58185--- a/fs/ext2/xattr.c
58186+++ b/fs/ext2/xattr.c
58187@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
58188 struct buffer_head *bh = NULL;
58189 struct ext2_xattr_entry *entry;
58190 char *end;
58191- size_t rest = buffer_size;
58192+ size_t rest = buffer_size, total_size = 0;
58193 int error;
58194
58195 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
58196@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
58197 buffer += size;
58198 }
58199 rest -= size;
58200+ total_size += size;
58201 }
58202 }
58203- error = buffer_size - rest; /* total size */
58204+ error = total_size;
58205
58206 cleanup:
58207 brelse(bh);
58208diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
58209index 22548f5..41521d8 100644
58210--- a/fs/ext3/balloc.c
58211+++ b/fs/ext3/balloc.c
58212@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
58213
58214 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
58215 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
58216- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
58217+ if (free_blocks < root_blocks + 1 &&
58218 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
58219 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
58220- !in_group_p (sbi->s_resgid))) {
58221+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
58222 return 0;
58223 }
58224 return 1;
58225diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
58226index b1fc963..881228c 100644
58227--- a/fs/ext3/xattr.c
58228+++ b/fs/ext3/xattr.c
58229@@ -330,7 +330,7 @@ static int
58230 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
58231 char *buffer, size_t buffer_size)
58232 {
58233- size_t rest = buffer_size;
58234+ size_t rest = buffer_size, total_size = 0;
58235
58236 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
58237 const struct xattr_handler *handler =
58238@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
58239 buffer += size;
58240 }
58241 rest -= size;
58242+ total_size += size;
58243 }
58244 }
58245- return buffer_size - rest;
58246+ return total_size;
58247 }
58248
58249 static int
58250diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
58251index 6ea7b14..8fa16d9 100644
58252--- a/fs/ext4/balloc.c
58253+++ b/fs/ext4/balloc.c
58254@@ -534,8 +534,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
58255 /* Hm, nope. Are (enough) root reserved clusters available? */
58256 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
58257 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
58258- capable(CAP_SYS_RESOURCE) ||
58259- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
58260+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
58261+ capable_nolog(CAP_SYS_RESOURCE)) {
58262
58263 if (free_clusters >= (nclusters + dirty_clusters +
58264 resv_clusters))
58265diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
58266index ece5556..e39d3a8 100644
58267--- a/fs/ext4/ext4.h
58268+++ b/fs/ext4/ext4.h
58269@@ -1267,19 +1267,19 @@ struct ext4_sb_info {
58270 unsigned long s_mb_last_start;
58271
58272 /* stats for buddy allocator */
58273- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
58274- atomic_t s_bal_success; /* we found long enough chunks */
58275- atomic_t s_bal_allocated; /* in blocks */
58276- atomic_t s_bal_ex_scanned; /* total extents scanned */
58277- atomic_t s_bal_goals; /* goal hits */
58278- atomic_t s_bal_breaks; /* too long searches */
58279- atomic_t s_bal_2orders; /* 2^order hits */
58280+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
58281+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
58282+ atomic_unchecked_t s_bal_allocated; /* in blocks */
58283+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
58284+ atomic_unchecked_t s_bal_goals; /* goal hits */
58285+ atomic_unchecked_t s_bal_breaks; /* too long searches */
58286+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
58287 spinlock_t s_bal_lock;
58288 unsigned long s_mb_buddies_generated;
58289 unsigned long long s_mb_generation_time;
58290- atomic_t s_mb_lost_chunks;
58291- atomic_t s_mb_preallocated;
58292- atomic_t s_mb_discarded;
58293+ atomic_unchecked_t s_mb_lost_chunks;
58294+ atomic_unchecked_t s_mb_preallocated;
58295+ atomic_unchecked_t s_mb_discarded;
58296 atomic_t s_lock_busy;
58297
58298 /* locality groups */
58299diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
58300index 04a5c75..09894fa 100644
58301--- a/fs/ext4/mballoc.c
58302+++ b/fs/ext4/mballoc.c
58303@@ -1880,7 +1880,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
58304 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
58305
58306 if (EXT4_SB(sb)->s_mb_stats)
58307- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
58308+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
58309
58310 break;
58311 }
58312@@ -2189,7 +2189,7 @@ repeat:
58313 ac->ac_status = AC_STATUS_CONTINUE;
58314 ac->ac_flags |= EXT4_MB_HINT_FIRST;
58315 cr = 3;
58316- atomic_inc(&sbi->s_mb_lost_chunks);
58317+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
58318 goto repeat;
58319 }
58320 }
58321@@ -2697,25 +2697,25 @@ int ext4_mb_release(struct super_block *sb)
58322 if (sbi->s_mb_stats) {
58323 ext4_msg(sb, KERN_INFO,
58324 "mballoc: %u blocks %u reqs (%u success)",
58325- atomic_read(&sbi->s_bal_allocated),
58326- atomic_read(&sbi->s_bal_reqs),
58327- atomic_read(&sbi->s_bal_success));
58328+ atomic_read_unchecked(&sbi->s_bal_allocated),
58329+ atomic_read_unchecked(&sbi->s_bal_reqs),
58330+ atomic_read_unchecked(&sbi->s_bal_success));
58331 ext4_msg(sb, KERN_INFO,
58332 "mballoc: %u extents scanned, %u goal hits, "
58333 "%u 2^N hits, %u breaks, %u lost",
58334- atomic_read(&sbi->s_bal_ex_scanned),
58335- atomic_read(&sbi->s_bal_goals),
58336- atomic_read(&sbi->s_bal_2orders),
58337- atomic_read(&sbi->s_bal_breaks),
58338- atomic_read(&sbi->s_mb_lost_chunks));
58339+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
58340+ atomic_read_unchecked(&sbi->s_bal_goals),
58341+ atomic_read_unchecked(&sbi->s_bal_2orders),
58342+ atomic_read_unchecked(&sbi->s_bal_breaks),
58343+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
58344 ext4_msg(sb, KERN_INFO,
58345 "mballoc: %lu generated and it took %Lu",
58346 sbi->s_mb_buddies_generated,
58347 sbi->s_mb_generation_time);
58348 ext4_msg(sb, KERN_INFO,
58349 "mballoc: %u preallocated, %u discarded",
58350- atomic_read(&sbi->s_mb_preallocated),
58351- atomic_read(&sbi->s_mb_discarded));
58352+ atomic_read_unchecked(&sbi->s_mb_preallocated),
58353+ atomic_read_unchecked(&sbi->s_mb_discarded));
58354 }
58355
58356 free_percpu(sbi->s_locality_groups);
58357@@ -3169,16 +3169,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
58358 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
58359
58360 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
58361- atomic_inc(&sbi->s_bal_reqs);
58362- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
58363+ atomic_inc_unchecked(&sbi->s_bal_reqs);
58364+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
58365 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
58366- atomic_inc(&sbi->s_bal_success);
58367- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
58368+ atomic_inc_unchecked(&sbi->s_bal_success);
58369+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
58370 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
58371 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
58372- atomic_inc(&sbi->s_bal_goals);
58373+ atomic_inc_unchecked(&sbi->s_bal_goals);
58374 if (ac->ac_found > sbi->s_mb_max_to_scan)
58375- atomic_inc(&sbi->s_bal_breaks);
58376+ atomic_inc_unchecked(&sbi->s_bal_breaks);
58377 }
58378
58379 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
58380@@ -3583,7 +3583,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
58381 trace_ext4_mb_new_inode_pa(ac, pa);
58382
58383 ext4_mb_use_inode_pa(ac, pa);
58384- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
58385+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
58386
58387 ei = EXT4_I(ac->ac_inode);
58388 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
58389@@ -3643,7 +3643,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
58390 trace_ext4_mb_new_group_pa(ac, pa);
58391
58392 ext4_mb_use_group_pa(ac, pa);
58393- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
58394+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
58395
58396 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
58397 lg = ac->ac_lg;
58398@@ -3732,7 +3732,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
58399 * from the bitmap and continue.
58400 */
58401 }
58402- atomic_add(free, &sbi->s_mb_discarded);
58403+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
58404
58405 return err;
58406 }
58407@@ -3750,7 +3750,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
58408 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
58409 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
58410 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
58411- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
58412+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
58413 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
58414
58415 return 0;
58416diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
58417index 04434ad..6404663 100644
58418--- a/fs/ext4/mmp.c
58419+++ b/fs/ext4/mmp.c
58420@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
58421 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
58422 const char *function, unsigned int line, const char *msg)
58423 {
58424- __ext4_warning(sb, function, line, msg);
58425+ __ext4_warning(sb, function, line, "%s", msg);
58426 __ext4_warning(sb, function, line,
58427 "MMP failure info: last update time: %llu, last update "
58428 "node: %s, last update device: %s\n",
58429diff --git a/fs/ext4/super.c b/fs/ext4/super.c
58430index 1f7784d..5d8bbad 100644
58431--- a/fs/ext4/super.c
58432+++ b/fs/ext4/super.c
58433@@ -1270,7 +1270,7 @@ static ext4_fsblk_t get_sb_block(void **data)
58434 }
58435
58436 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
58437-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
58438+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
58439 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
58440
58441 #ifdef CONFIG_QUOTA
58442@@ -2450,7 +2450,7 @@ struct ext4_attr {
58443 int offset;
58444 int deprecated_val;
58445 } u;
58446-};
58447+} __do_const;
58448
58449 static int parse_strtoull(const char *buf,
58450 unsigned long long max, unsigned long long *value)
58451diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
58452index 1423c48..9c0c6dc 100644
58453--- a/fs/ext4/xattr.c
58454+++ b/fs/ext4/xattr.c
58455@@ -381,7 +381,7 @@ static int
58456 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
58457 char *buffer, size_t buffer_size)
58458 {
58459- size_t rest = buffer_size;
58460+ size_t rest = buffer_size, total_size = 0;
58461
58462 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
58463 const struct xattr_handler *handler =
58464@@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
58465 buffer += size;
58466 }
58467 rest -= size;
58468+ total_size += size;
58469 }
58470 }
58471- return buffer_size - rest;
58472+ return total_size;
58473 }
58474
58475 static int
58476diff --git a/fs/fcntl.c b/fs/fcntl.c
58477index ef68665..5deacdc 100644
58478--- a/fs/fcntl.c
58479+++ b/fs/fcntl.c
58480@@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
58481 if (err)
58482 return err;
58483
58484+ if (gr_handle_chroot_fowner(pid, type))
58485+ return -ENOENT;
58486+ if (gr_check_protected_task_fowner(pid, type))
58487+ return -EACCES;
58488+
58489 f_modown(filp, pid, type, force);
58490 return 0;
58491 }
58492diff --git a/fs/fhandle.c b/fs/fhandle.c
58493index 999ff5c..41f4109 100644
58494--- a/fs/fhandle.c
58495+++ b/fs/fhandle.c
58496@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
58497 } else
58498 retval = 0;
58499 /* copy the mount id */
58500- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
58501- sizeof(*mnt_id)) ||
58502+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
58503 copy_to_user(ufh, handle,
58504 sizeof(struct file_handle) + handle_bytes))
58505 retval = -EFAULT;
58506diff --git a/fs/file.c b/fs/file.c
58507index 4a78f98..f9a6d25 100644
58508--- a/fs/file.c
58509+++ b/fs/file.c
58510@@ -16,6 +16,7 @@
58511 #include <linux/slab.h>
58512 #include <linux/vmalloc.h>
58513 #include <linux/file.h>
58514+#include <linux/security.h>
58515 #include <linux/fdtable.h>
58516 #include <linux/bitops.h>
58517 #include <linux/interrupt.h>
58518@@ -141,7 +142,7 @@ out:
58519 * Return <0 error code on error; 1 on successful completion.
58520 * The files->file_lock should be held on entry, and will be held on exit.
58521 */
58522-static int expand_fdtable(struct files_struct *files, int nr)
58523+static int expand_fdtable(struct files_struct *files, unsigned int nr)
58524 __releases(files->file_lock)
58525 __acquires(files->file_lock)
58526 {
58527@@ -186,7 +187,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
58528 * expanded and execution may have blocked.
58529 * The files->file_lock should be held on entry, and will be held on exit.
58530 */
58531-static int expand_files(struct files_struct *files, int nr)
58532+static int expand_files(struct files_struct *files, unsigned int nr)
58533 {
58534 struct fdtable *fdt;
58535
58536@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
58537 if (!file)
58538 return __close_fd(files, fd);
58539
58540+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
58541 if (fd >= rlimit(RLIMIT_NOFILE))
58542 return -EBADF;
58543
58544@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
58545 if (unlikely(oldfd == newfd))
58546 return -EINVAL;
58547
58548+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
58549 if (newfd >= rlimit(RLIMIT_NOFILE))
58550 return -EBADF;
58551
58552@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
58553 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
58554 {
58555 int err;
58556+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
58557 if (from >= rlimit(RLIMIT_NOFILE))
58558 return -EINVAL;
58559 err = alloc_fd(from, flags);
58560diff --git a/fs/filesystems.c b/fs/filesystems.c
58561index 92567d9..fcd8cbf 100644
58562--- a/fs/filesystems.c
58563+++ b/fs/filesystems.c
58564@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
58565 int len = dot ? dot - name : strlen(name);
58566
58567 fs = __get_fs_type(name, len);
58568+#ifdef CONFIG_GRKERNSEC_MODHARDEN
58569+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
58570+#else
58571 if (!fs && (request_module("fs-%.*s", len, name) == 0))
58572+#endif
58573 fs = __get_fs_type(name, len);
58574
58575 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
58576diff --git a/fs/fs_struct.c b/fs/fs_struct.c
58577index 7dca743..543d620 100644
58578--- a/fs/fs_struct.c
58579+++ b/fs/fs_struct.c
58580@@ -4,6 +4,7 @@
58581 #include <linux/path.h>
58582 #include <linux/slab.h>
58583 #include <linux/fs_struct.h>
58584+#include <linux/grsecurity.h>
58585 #include "internal.h"
58586
58587 /*
58588@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
58589 write_seqcount_begin(&fs->seq);
58590 old_root = fs->root;
58591 fs->root = *path;
58592+ gr_set_chroot_entries(current, path);
58593 write_seqcount_end(&fs->seq);
58594 spin_unlock(&fs->lock);
58595 if (old_root.dentry)
58596@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
58597 int hits = 0;
58598 spin_lock(&fs->lock);
58599 write_seqcount_begin(&fs->seq);
58600+ /* this root replacement is only done by pivot_root,
58601+ leave grsec's chroot tagging alone for this task
58602+ so that a pivoted root isn't treated as a chroot
58603+ */
58604 hits += replace_path(&fs->root, old_root, new_root);
58605 hits += replace_path(&fs->pwd, old_root, new_root);
58606 write_seqcount_end(&fs->seq);
58607@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
58608 task_lock(tsk);
58609 spin_lock(&fs->lock);
58610 tsk->fs = NULL;
58611- kill = !--fs->users;
58612+ gr_clear_chroot_entries(tsk);
58613+ kill = !atomic_dec_return(&fs->users);
58614 spin_unlock(&fs->lock);
58615 task_unlock(tsk);
58616 if (kill)
58617@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
58618 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
58619 /* We don't need to lock fs - think why ;-) */
58620 if (fs) {
58621- fs->users = 1;
58622+ atomic_set(&fs->users, 1);
58623 fs->in_exec = 0;
58624 spin_lock_init(&fs->lock);
58625 seqcount_init(&fs->seq);
58626@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
58627 spin_lock(&old->lock);
58628 fs->root = old->root;
58629 path_get(&fs->root);
58630+ /* instead of calling gr_set_chroot_entries here,
58631+ we call it from every caller of this function
58632+ */
58633 fs->pwd = old->pwd;
58634 path_get(&fs->pwd);
58635 spin_unlock(&old->lock);
58636@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
58637
58638 task_lock(current);
58639 spin_lock(&fs->lock);
58640- kill = !--fs->users;
58641+ kill = !atomic_dec_return(&fs->users);
58642 current->fs = new_fs;
58643+ gr_set_chroot_entries(current, &new_fs->root);
58644 spin_unlock(&fs->lock);
58645 task_unlock(current);
58646
58647@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
58648
58649 int current_umask(void)
58650 {
58651- return current->fs->umask;
58652+ return current->fs->umask | gr_acl_umask();
58653 }
58654 EXPORT_SYMBOL(current_umask);
58655
58656 /* to be mentioned only in INIT_TASK */
58657 struct fs_struct init_fs = {
58658- .users = 1,
58659+ .users = ATOMIC_INIT(1),
58660 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
58661 .seq = SEQCNT_ZERO(init_fs.seq),
58662 .umask = 0022,
58663diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
58664index 29d7feb..303644d 100644
58665--- a/fs/fscache/cookie.c
58666+++ b/fs/fscache/cookie.c
58667@@ -19,7 +19,7 @@
58668
58669 struct kmem_cache *fscache_cookie_jar;
58670
58671-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
58672+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
58673
58674 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
58675 static int fscache_alloc_object(struct fscache_cache *cache,
58676@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
58677 parent ? (char *) parent->def->name : "<no-parent>",
58678 def->name, netfs_data, enable);
58679
58680- fscache_stat(&fscache_n_acquires);
58681+ fscache_stat_unchecked(&fscache_n_acquires);
58682
58683 /* if there's no parent cookie, then we don't create one here either */
58684 if (!parent) {
58685- fscache_stat(&fscache_n_acquires_null);
58686+ fscache_stat_unchecked(&fscache_n_acquires_null);
58687 _leave(" [no parent]");
58688 return NULL;
58689 }
58690@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
58691 /* allocate and initialise a cookie */
58692 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
58693 if (!cookie) {
58694- fscache_stat(&fscache_n_acquires_oom);
58695+ fscache_stat_unchecked(&fscache_n_acquires_oom);
58696 _leave(" [ENOMEM]");
58697 return NULL;
58698 }
58699@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
58700
58701 switch (cookie->def->type) {
58702 case FSCACHE_COOKIE_TYPE_INDEX:
58703- fscache_stat(&fscache_n_cookie_index);
58704+ fscache_stat_unchecked(&fscache_n_cookie_index);
58705 break;
58706 case FSCACHE_COOKIE_TYPE_DATAFILE:
58707- fscache_stat(&fscache_n_cookie_data);
58708+ fscache_stat_unchecked(&fscache_n_cookie_data);
58709 break;
58710 default:
58711- fscache_stat(&fscache_n_cookie_special);
58712+ fscache_stat_unchecked(&fscache_n_cookie_special);
58713 break;
58714 }
58715
58716@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
58717 } else {
58718 atomic_dec(&parent->n_children);
58719 __fscache_cookie_put(cookie);
58720- fscache_stat(&fscache_n_acquires_nobufs);
58721+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
58722 _leave(" = NULL");
58723 return NULL;
58724 }
58725@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
58726 }
58727 }
58728
58729- fscache_stat(&fscache_n_acquires_ok);
58730+ fscache_stat_unchecked(&fscache_n_acquires_ok);
58731 _leave(" = %p", cookie);
58732 return cookie;
58733 }
58734@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
58735 cache = fscache_select_cache_for_object(cookie->parent);
58736 if (!cache) {
58737 up_read(&fscache_addremove_sem);
58738- fscache_stat(&fscache_n_acquires_no_cache);
58739+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
58740 _leave(" = -ENOMEDIUM [no cache]");
58741 return -ENOMEDIUM;
58742 }
58743@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
58744 object = cache->ops->alloc_object(cache, cookie);
58745 fscache_stat_d(&fscache_n_cop_alloc_object);
58746 if (IS_ERR(object)) {
58747- fscache_stat(&fscache_n_object_no_alloc);
58748+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
58749 ret = PTR_ERR(object);
58750 goto error;
58751 }
58752
58753- fscache_stat(&fscache_n_object_alloc);
58754+ fscache_stat_unchecked(&fscache_n_object_alloc);
58755
58756- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
58757+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
58758
58759 _debug("ALLOC OBJ%x: %s {%lx}",
58760 object->debug_id, cookie->def->name, object->events);
58761@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
58762
58763 _enter("{%s}", cookie->def->name);
58764
58765- fscache_stat(&fscache_n_invalidates);
58766+ fscache_stat_unchecked(&fscache_n_invalidates);
58767
58768 /* Only permit invalidation of data files. Invalidating an index will
58769 * require the caller to release all its attachments to the tree rooted
58770@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
58771 {
58772 struct fscache_object *object;
58773
58774- fscache_stat(&fscache_n_updates);
58775+ fscache_stat_unchecked(&fscache_n_updates);
58776
58777 if (!cookie) {
58778- fscache_stat(&fscache_n_updates_null);
58779+ fscache_stat_unchecked(&fscache_n_updates_null);
58780 _leave(" [no cookie]");
58781 return;
58782 }
58783@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
58784 */
58785 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
58786 {
58787- fscache_stat(&fscache_n_relinquishes);
58788+ fscache_stat_unchecked(&fscache_n_relinquishes);
58789 if (retire)
58790- fscache_stat(&fscache_n_relinquishes_retire);
58791+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
58792
58793 if (!cookie) {
58794- fscache_stat(&fscache_n_relinquishes_null);
58795+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
58796 _leave(" [no cookie]");
58797 return;
58798 }
58799@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
58800 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
58801 goto inconsistent;
58802
58803- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58804+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58805
58806 __fscache_use_cookie(cookie);
58807 if (fscache_submit_op(object, op) < 0)
58808diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
58809index 4226f66..0fb3f45 100644
58810--- a/fs/fscache/internal.h
58811+++ b/fs/fscache/internal.h
58812@@ -133,8 +133,8 @@ extern void fscache_operation_gc(struct work_struct *);
58813 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
58814 extern int fscache_wait_for_operation_activation(struct fscache_object *,
58815 struct fscache_operation *,
58816- atomic_t *,
58817- atomic_t *,
58818+ atomic_unchecked_t *,
58819+ atomic_unchecked_t *,
58820 void (*)(struct fscache_operation *));
58821 extern void fscache_invalidate_writes(struct fscache_cookie *);
58822
58823@@ -153,101 +153,101 @@ extern void fscache_proc_cleanup(void);
58824 * stats.c
58825 */
58826 #ifdef CONFIG_FSCACHE_STATS
58827-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
58828-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
58829+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
58830+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
58831
58832-extern atomic_t fscache_n_op_pend;
58833-extern atomic_t fscache_n_op_run;
58834-extern atomic_t fscache_n_op_enqueue;
58835-extern atomic_t fscache_n_op_deferred_release;
58836-extern atomic_t fscache_n_op_release;
58837-extern atomic_t fscache_n_op_gc;
58838-extern atomic_t fscache_n_op_cancelled;
58839-extern atomic_t fscache_n_op_rejected;
58840+extern atomic_unchecked_t fscache_n_op_pend;
58841+extern atomic_unchecked_t fscache_n_op_run;
58842+extern atomic_unchecked_t fscache_n_op_enqueue;
58843+extern atomic_unchecked_t fscache_n_op_deferred_release;
58844+extern atomic_unchecked_t fscache_n_op_release;
58845+extern atomic_unchecked_t fscache_n_op_gc;
58846+extern atomic_unchecked_t fscache_n_op_cancelled;
58847+extern atomic_unchecked_t fscache_n_op_rejected;
58848
58849-extern atomic_t fscache_n_attr_changed;
58850-extern atomic_t fscache_n_attr_changed_ok;
58851-extern atomic_t fscache_n_attr_changed_nobufs;
58852-extern atomic_t fscache_n_attr_changed_nomem;
58853-extern atomic_t fscache_n_attr_changed_calls;
58854+extern atomic_unchecked_t fscache_n_attr_changed;
58855+extern atomic_unchecked_t fscache_n_attr_changed_ok;
58856+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
58857+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
58858+extern atomic_unchecked_t fscache_n_attr_changed_calls;
58859
58860-extern atomic_t fscache_n_allocs;
58861-extern atomic_t fscache_n_allocs_ok;
58862-extern atomic_t fscache_n_allocs_wait;
58863-extern atomic_t fscache_n_allocs_nobufs;
58864-extern atomic_t fscache_n_allocs_intr;
58865-extern atomic_t fscache_n_allocs_object_dead;
58866-extern atomic_t fscache_n_alloc_ops;
58867-extern atomic_t fscache_n_alloc_op_waits;
58868+extern atomic_unchecked_t fscache_n_allocs;
58869+extern atomic_unchecked_t fscache_n_allocs_ok;
58870+extern atomic_unchecked_t fscache_n_allocs_wait;
58871+extern atomic_unchecked_t fscache_n_allocs_nobufs;
58872+extern atomic_unchecked_t fscache_n_allocs_intr;
58873+extern atomic_unchecked_t fscache_n_allocs_object_dead;
58874+extern atomic_unchecked_t fscache_n_alloc_ops;
58875+extern atomic_unchecked_t fscache_n_alloc_op_waits;
58876
58877-extern atomic_t fscache_n_retrievals;
58878-extern atomic_t fscache_n_retrievals_ok;
58879-extern atomic_t fscache_n_retrievals_wait;
58880-extern atomic_t fscache_n_retrievals_nodata;
58881-extern atomic_t fscache_n_retrievals_nobufs;
58882-extern atomic_t fscache_n_retrievals_intr;
58883-extern atomic_t fscache_n_retrievals_nomem;
58884-extern atomic_t fscache_n_retrievals_object_dead;
58885-extern atomic_t fscache_n_retrieval_ops;
58886-extern atomic_t fscache_n_retrieval_op_waits;
58887+extern atomic_unchecked_t fscache_n_retrievals;
58888+extern atomic_unchecked_t fscache_n_retrievals_ok;
58889+extern atomic_unchecked_t fscache_n_retrievals_wait;
58890+extern atomic_unchecked_t fscache_n_retrievals_nodata;
58891+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
58892+extern atomic_unchecked_t fscache_n_retrievals_intr;
58893+extern atomic_unchecked_t fscache_n_retrievals_nomem;
58894+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
58895+extern atomic_unchecked_t fscache_n_retrieval_ops;
58896+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
58897
58898-extern atomic_t fscache_n_stores;
58899-extern atomic_t fscache_n_stores_ok;
58900-extern atomic_t fscache_n_stores_again;
58901-extern atomic_t fscache_n_stores_nobufs;
58902-extern atomic_t fscache_n_stores_oom;
58903-extern atomic_t fscache_n_store_ops;
58904-extern atomic_t fscache_n_store_calls;
58905-extern atomic_t fscache_n_store_pages;
58906-extern atomic_t fscache_n_store_radix_deletes;
58907-extern atomic_t fscache_n_store_pages_over_limit;
58908+extern atomic_unchecked_t fscache_n_stores;
58909+extern atomic_unchecked_t fscache_n_stores_ok;
58910+extern atomic_unchecked_t fscache_n_stores_again;
58911+extern atomic_unchecked_t fscache_n_stores_nobufs;
58912+extern atomic_unchecked_t fscache_n_stores_oom;
58913+extern atomic_unchecked_t fscache_n_store_ops;
58914+extern atomic_unchecked_t fscache_n_store_calls;
58915+extern atomic_unchecked_t fscache_n_store_pages;
58916+extern atomic_unchecked_t fscache_n_store_radix_deletes;
58917+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
58918
58919-extern atomic_t fscache_n_store_vmscan_not_storing;
58920-extern atomic_t fscache_n_store_vmscan_gone;
58921-extern atomic_t fscache_n_store_vmscan_busy;
58922-extern atomic_t fscache_n_store_vmscan_cancelled;
58923-extern atomic_t fscache_n_store_vmscan_wait;
58924+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
58925+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
58926+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
58927+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
58928+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
58929
58930-extern atomic_t fscache_n_marks;
58931-extern atomic_t fscache_n_uncaches;
58932+extern atomic_unchecked_t fscache_n_marks;
58933+extern atomic_unchecked_t fscache_n_uncaches;
58934
58935-extern atomic_t fscache_n_acquires;
58936-extern atomic_t fscache_n_acquires_null;
58937-extern atomic_t fscache_n_acquires_no_cache;
58938-extern atomic_t fscache_n_acquires_ok;
58939-extern atomic_t fscache_n_acquires_nobufs;
58940-extern atomic_t fscache_n_acquires_oom;
58941+extern atomic_unchecked_t fscache_n_acquires;
58942+extern atomic_unchecked_t fscache_n_acquires_null;
58943+extern atomic_unchecked_t fscache_n_acquires_no_cache;
58944+extern atomic_unchecked_t fscache_n_acquires_ok;
58945+extern atomic_unchecked_t fscache_n_acquires_nobufs;
58946+extern atomic_unchecked_t fscache_n_acquires_oom;
58947
58948-extern atomic_t fscache_n_invalidates;
58949-extern atomic_t fscache_n_invalidates_run;
58950+extern atomic_unchecked_t fscache_n_invalidates;
58951+extern atomic_unchecked_t fscache_n_invalidates_run;
58952
58953-extern atomic_t fscache_n_updates;
58954-extern atomic_t fscache_n_updates_null;
58955-extern atomic_t fscache_n_updates_run;
58956+extern atomic_unchecked_t fscache_n_updates;
58957+extern atomic_unchecked_t fscache_n_updates_null;
58958+extern atomic_unchecked_t fscache_n_updates_run;
58959
58960-extern atomic_t fscache_n_relinquishes;
58961-extern atomic_t fscache_n_relinquishes_null;
58962-extern atomic_t fscache_n_relinquishes_waitcrt;
58963-extern atomic_t fscache_n_relinquishes_retire;
58964+extern atomic_unchecked_t fscache_n_relinquishes;
58965+extern atomic_unchecked_t fscache_n_relinquishes_null;
58966+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
58967+extern atomic_unchecked_t fscache_n_relinquishes_retire;
58968
58969-extern atomic_t fscache_n_cookie_index;
58970-extern atomic_t fscache_n_cookie_data;
58971-extern atomic_t fscache_n_cookie_special;
58972+extern atomic_unchecked_t fscache_n_cookie_index;
58973+extern atomic_unchecked_t fscache_n_cookie_data;
58974+extern atomic_unchecked_t fscache_n_cookie_special;
58975
58976-extern atomic_t fscache_n_object_alloc;
58977-extern atomic_t fscache_n_object_no_alloc;
58978-extern atomic_t fscache_n_object_lookups;
58979-extern atomic_t fscache_n_object_lookups_negative;
58980-extern atomic_t fscache_n_object_lookups_positive;
58981-extern atomic_t fscache_n_object_lookups_timed_out;
58982-extern atomic_t fscache_n_object_created;
58983-extern atomic_t fscache_n_object_avail;
58984-extern atomic_t fscache_n_object_dead;
58985+extern atomic_unchecked_t fscache_n_object_alloc;
58986+extern atomic_unchecked_t fscache_n_object_no_alloc;
58987+extern atomic_unchecked_t fscache_n_object_lookups;
58988+extern atomic_unchecked_t fscache_n_object_lookups_negative;
58989+extern atomic_unchecked_t fscache_n_object_lookups_positive;
58990+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
58991+extern atomic_unchecked_t fscache_n_object_created;
58992+extern atomic_unchecked_t fscache_n_object_avail;
58993+extern atomic_unchecked_t fscache_n_object_dead;
58994
58995-extern atomic_t fscache_n_checkaux_none;
58996-extern atomic_t fscache_n_checkaux_okay;
58997-extern atomic_t fscache_n_checkaux_update;
58998-extern atomic_t fscache_n_checkaux_obsolete;
58999+extern atomic_unchecked_t fscache_n_checkaux_none;
59000+extern atomic_unchecked_t fscache_n_checkaux_okay;
59001+extern atomic_unchecked_t fscache_n_checkaux_update;
59002+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
59003
59004 extern atomic_t fscache_n_cop_alloc_object;
59005 extern atomic_t fscache_n_cop_lookup_object;
59006@@ -272,6 +272,11 @@ static inline void fscache_stat(atomic_t *stat)
59007 atomic_inc(stat);
59008 }
59009
59010+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
59011+{
59012+ atomic_inc_unchecked(stat);
59013+}
59014+
59015 static inline void fscache_stat_d(atomic_t *stat)
59016 {
59017 atomic_dec(stat);
59018@@ -284,6 +289,7 @@ extern const struct file_operations fscache_stats_fops;
59019
59020 #define __fscache_stat(stat) (NULL)
59021 #define fscache_stat(stat) do {} while (0)
59022+#define fscache_stat_unchecked(stat) do {} while (0)
59023 #define fscache_stat_d(stat) do {} while (0)
59024 #endif
59025
59026diff --git a/fs/fscache/object.c b/fs/fscache/object.c
59027index 53d35c5..5d68ed4 100644
59028--- a/fs/fscache/object.c
59029+++ b/fs/fscache/object.c
59030@@ -451,7 +451,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
59031 _debug("LOOKUP \"%s\" in \"%s\"",
59032 cookie->def->name, object->cache->tag->name);
59033
59034- fscache_stat(&fscache_n_object_lookups);
59035+ fscache_stat_unchecked(&fscache_n_object_lookups);
59036 fscache_stat(&fscache_n_cop_lookup_object);
59037 ret = object->cache->ops->lookup_object(object);
59038 fscache_stat_d(&fscache_n_cop_lookup_object);
59039@@ -461,7 +461,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
59040 if (ret == -ETIMEDOUT) {
59041 /* probably stuck behind another object, so move this one to
59042 * the back of the queue */
59043- fscache_stat(&fscache_n_object_lookups_timed_out);
59044+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
59045 _leave(" [timeout]");
59046 return NO_TRANSIT;
59047 }
59048@@ -489,7 +489,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
59049 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
59050
59051 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
59052- fscache_stat(&fscache_n_object_lookups_negative);
59053+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
59054
59055 /* Allow write requests to begin stacking up and read requests to begin
59056 * returning ENODATA.
59057@@ -524,7 +524,7 @@ void fscache_obtained_object(struct fscache_object *object)
59058 /* if we were still looking up, then we must have a positive lookup
59059 * result, in which case there may be data available */
59060 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
59061- fscache_stat(&fscache_n_object_lookups_positive);
59062+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
59063
59064 /* We do (presumably) have data */
59065 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
59066@@ -536,7 +536,7 @@ void fscache_obtained_object(struct fscache_object *object)
59067 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
59068 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
59069 } else {
59070- fscache_stat(&fscache_n_object_created);
59071+ fscache_stat_unchecked(&fscache_n_object_created);
59072 }
59073
59074 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
59075@@ -572,7 +572,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
59076 fscache_stat_d(&fscache_n_cop_lookup_complete);
59077
59078 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
59079- fscache_stat(&fscache_n_object_avail);
59080+ fscache_stat_unchecked(&fscache_n_object_avail);
59081
59082 _leave("");
59083 return transit_to(JUMPSTART_DEPS);
59084@@ -719,7 +719,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
59085
59086 /* this just shifts the object release to the work processor */
59087 fscache_put_object(object);
59088- fscache_stat(&fscache_n_object_dead);
59089+ fscache_stat_unchecked(&fscache_n_object_dead);
59090
59091 _leave("");
59092 return transit_to(OBJECT_DEAD);
59093@@ -884,7 +884,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
59094 enum fscache_checkaux result;
59095
59096 if (!object->cookie->def->check_aux) {
59097- fscache_stat(&fscache_n_checkaux_none);
59098+ fscache_stat_unchecked(&fscache_n_checkaux_none);
59099 return FSCACHE_CHECKAUX_OKAY;
59100 }
59101
59102@@ -893,17 +893,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
59103 switch (result) {
59104 /* entry okay as is */
59105 case FSCACHE_CHECKAUX_OKAY:
59106- fscache_stat(&fscache_n_checkaux_okay);
59107+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
59108 break;
59109
59110 /* entry requires update */
59111 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
59112- fscache_stat(&fscache_n_checkaux_update);
59113+ fscache_stat_unchecked(&fscache_n_checkaux_update);
59114 break;
59115
59116 /* entry requires deletion */
59117 case FSCACHE_CHECKAUX_OBSOLETE:
59118- fscache_stat(&fscache_n_checkaux_obsolete);
59119+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
59120 break;
59121
59122 default:
59123@@ -989,7 +989,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
59124 {
59125 const struct fscache_state *s;
59126
59127- fscache_stat(&fscache_n_invalidates_run);
59128+ fscache_stat_unchecked(&fscache_n_invalidates_run);
59129 fscache_stat(&fscache_n_cop_invalidate_object);
59130 s = _fscache_invalidate_object(object, event);
59131 fscache_stat_d(&fscache_n_cop_invalidate_object);
59132@@ -1004,7 +1004,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
59133 {
59134 _enter("{OBJ%x},%d", object->debug_id, event);
59135
59136- fscache_stat(&fscache_n_updates_run);
59137+ fscache_stat_unchecked(&fscache_n_updates_run);
59138 fscache_stat(&fscache_n_cop_update_object);
59139 object->cache->ops->update_object(object);
59140 fscache_stat_d(&fscache_n_cop_update_object);
59141diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
59142index 318071a..379938b 100644
59143--- a/fs/fscache/operation.c
59144+++ b/fs/fscache/operation.c
59145@@ -17,7 +17,7 @@
59146 #include <linux/slab.h>
59147 #include "internal.h"
59148
59149-atomic_t fscache_op_debug_id;
59150+atomic_unchecked_t fscache_op_debug_id;
59151 EXPORT_SYMBOL(fscache_op_debug_id);
59152
59153 /**
59154@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
59155 ASSERTCMP(atomic_read(&op->usage), >, 0);
59156 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
59157
59158- fscache_stat(&fscache_n_op_enqueue);
59159+ fscache_stat_unchecked(&fscache_n_op_enqueue);
59160 switch (op->flags & FSCACHE_OP_TYPE) {
59161 case FSCACHE_OP_ASYNC:
59162 _debug("queue async");
59163@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
59164 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
59165 if (op->processor)
59166 fscache_enqueue_operation(op);
59167- fscache_stat(&fscache_n_op_run);
59168+ fscache_stat_unchecked(&fscache_n_op_run);
59169 }
59170
59171 /*
59172@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
59173 if (object->n_in_progress > 0) {
59174 atomic_inc(&op->usage);
59175 list_add_tail(&op->pend_link, &object->pending_ops);
59176- fscache_stat(&fscache_n_op_pend);
59177+ fscache_stat_unchecked(&fscache_n_op_pend);
59178 } else if (!list_empty(&object->pending_ops)) {
59179 atomic_inc(&op->usage);
59180 list_add_tail(&op->pend_link, &object->pending_ops);
59181- fscache_stat(&fscache_n_op_pend);
59182+ fscache_stat_unchecked(&fscache_n_op_pend);
59183 fscache_start_operations(object);
59184 } else {
59185 ASSERTCMP(object->n_in_progress, ==, 0);
59186@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
59187 object->n_exclusive++; /* reads and writes must wait */
59188 atomic_inc(&op->usage);
59189 list_add_tail(&op->pend_link, &object->pending_ops);
59190- fscache_stat(&fscache_n_op_pend);
59191+ fscache_stat_unchecked(&fscache_n_op_pend);
59192 ret = 0;
59193 } else {
59194 /* If we're in any other state, there must have been an I/O
59195@@ -212,11 +212,11 @@ int fscache_submit_op(struct fscache_object *object,
59196 if (object->n_exclusive > 0) {
59197 atomic_inc(&op->usage);
59198 list_add_tail(&op->pend_link, &object->pending_ops);
59199- fscache_stat(&fscache_n_op_pend);
59200+ fscache_stat_unchecked(&fscache_n_op_pend);
59201 } else if (!list_empty(&object->pending_ops)) {
59202 atomic_inc(&op->usage);
59203 list_add_tail(&op->pend_link, &object->pending_ops);
59204- fscache_stat(&fscache_n_op_pend);
59205+ fscache_stat_unchecked(&fscache_n_op_pend);
59206 fscache_start_operations(object);
59207 } else {
59208 ASSERTCMP(object->n_exclusive, ==, 0);
59209@@ -228,10 +228,10 @@ int fscache_submit_op(struct fscache_object *object,
59210 object->n_ops++;
59211 atomic_inc(&op->usage);
59212 list_add_tail(&op->pend_link, &object->pending_ops);
59213- fscache_stat(&fscache_n_op_pend);
59214+ fscache_stat_unchecked(&fscache_n_op_pend);
59215 ret = 0;
59216 } else if (fscache_object_is_dying(object)) {
59217- fscache_stat(&fscache_n_op_rejected);
59218+ fscache_stat_unchecked(&fscache_n_op_rejected);
59219 op->state = FSCACHE_OP_ST_CANCELLED;
59220 ret = -ENOBUFS;
59221 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
59222@@ -310,7 +310,7 @@ int fscache_cancel_op(struct fscache_operation *op,
59223 ret = -EBUSY;
59224 if (op->state == FSCACHE_OP_ST_PENDING) {
59225 ASSERT(!list_empty(&op->pend_link));
59226- fscache_stat(&fscache_n_op_cancelled);
59227+ fscache_stat_unchecked(&fscache_n_op_cancelled);
59228 list_del_init(&op->pend_link);
59229 if (do_cancel)
59230 do_cancel(op);
59231@@ -342,7 +342,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
59232 while (!list_empty(&object->pending_ops)) {
59233 op = list_entry(object->pending_ops.next,
59234 struct fscache_operation, pend_link);
59235- fscache_stat(&fscache_n_op_cancelled);
59236+ fscache_stat_unchecked(&fscache_n_op_cancelled);
59237 list_del_init(&op->pend_link);
59238
59239 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
59240@@ -414,7 +414,7 @@ void fscache_put_operation(struct fscache_operation *op)
59241 op->state, ==, FSCACHE_OP_ST_CANCELLED);
59242 op->state = FSCACHE_OP_ST_DEAD;
59243
59244- fscache_stat(&fscache_n_op_release);
59245+ fscache_stat_unchecked(&fscache_n_op_release);
59246
59247 if (op->release) {
59248 op->release(op);
59249@@ -433,7 +433,7 @@ void fscache_put_operation(struct fscache_operation *op)
59250 * lock, and defer it otherwise */
59251 if (!spin_trylock(&object->lock)) {
59252 _debug("defer put");
59253- fscache_stat(&fscache_n_op_deferred_release);
59254+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
59255
59256 cache = object->cache;
59257 spin_lock(&cache->op_gc_list_lock);
59258@@ -486,7 +486,7 @@ void fscache_operation_gc(struct work_struct *work)
59259
59260 _debug("GC DEFERRED REL OBJ%x OP%x",
59261 object->debug_id, op->debug_id);
59262- fscache_stat(&fscache_n_op_gc);
59263+ fscache_stat_unchecked(&fscache_n_op_gc);
59264
59265 ASSERTCMP(atomic_read(&op->usage), ==, 0);
59266 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
59267diff --git a/fs/fscache/page.c b/fs/fscache/page.c
59268index 7f5c658..6c1e164 100644
59269--- a/fs/fscache/page.c
59270+++ b/fs/fscache/page.c
59271@@ -61,7 +61,7 @@ try_again:
59272 val = radix_tree_lookup(&cookie->stores, page->index);
59273 if (!val) {
59274 rcu_read_unlock();
59275- fscache_stat(&fscache_n_store_vmscan_not_storing);
59276+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
59277 __fscache_uncache_page(cookie, page);
59278 return true;
59279 }
59280@@ -91,11 +91,11 @@ try_again:
59281 spin_unlock(&cookie->stores_lock);
59282
59283 if (xpage) {
59284- fscache_stat(&fscache_n_store_vmscan_cancelled);
59285- fscache_stat(&fscache_n_store_radix_deletes);
59286+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
59287+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
59288 ASSERTCMP(xpage, ==, page);
59289 } else {
59290- fscache_stat(&fscache_n_store_vmscan_gone);
59291+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
59292 }
59293
59294 wake_up_bit(&cookie->flags, 0);
59295@@ -110,11 +110,11 @@ page_busy:
59296 * sleeping on memory allocation, so we may need to impose a timeout
59297 * too. */
59298 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
59299- fscache_stat(&fscache_n_store_vmscan_busy);
59300+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
59301 return false;
59302 }
59303
59304- fscache_stat(&fscache_n_store_vmscan_wait);
59305+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
59306 __fscache_wait_on_page_write(cookie, page);
59307 gfp &= ~__GFP_WAIT;
59308 goto try_again;
59309@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
59310 FSCACHE_COOKIE_STORING_TAG);
59311 if (!radix_tree_tag_get(&cookie->stores, page->index,
59312 FSCACHE_COOKIE_PENDING_TAG)) {
59313- fscache_stat(&fscache_n_store_radix_deletes);
59314+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
59315 xpage = radix_tree_delete(&cookie->stores, page->index);
59316 }
59317 spin_unlock(&cookie->stores_lock);
59318@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
59319
59320 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
59321
59322- fscache_stat(&fscache_n_attr_changed_calls);
59323+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
59324
59325 if (fscache_object_is_active(object)) {
59326 fscache_stat(&fscache_n_cop_attr_changed);
59327@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
59328
59329 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
59330
59331- fscache_stat(&fscache_n_attr_changed);
59332+ fscache_stat_unchecked(&fscache_n_attr_changed);
59333
59334 op = kzalloc(sizeof(*op), GFP_KERNEL);
59335 if (!op) {
59336- fscache_stat(&fscache_n_attr_changed_nomem);
59337+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
59338 _leave(" = -ENOMEM");
59339 return -ENOMEM;
59340 }
59341@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
59342 if (fscache_submit_exclusive_op(object, op) < 0)
59343 goto nobufs;
59344 spin_unlock(&cookie->lock);
59345- fscache_stat(&fscache_n_attr_changed_ok);
59346+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
59347 fscache_put_operation(op);
59348 _leave(" = 0");
59349 return 0;
59350@@ -225,7 +225,7 @@ nobufs:
59351 kfree(op);
59352 if (wake_cookie)
59353 __fscache_wake_unused_cookie(cookie);
59354- fscache_stat(&fscache_n_attr_changed_nobufs);
59355+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
59356 _leave(" = %d", -ENOBUFS);
59357 return -ENOBUFS;
59358 }
59359@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
59360 /* allocate a retrieval operation and attempt to submit it */
59361 op = kzalloc(sizeof(*op), GFP_NOIO);
59362 if (!op) {
59363- fscache_stat(&fscache_n_retrievals_nomem);
59364+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
59365 return NULL;
59366 }
59367
59368@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
59369 return 0;
59370 }
59371
59372- fscache_stat(&fscache_n_retrievals_wait);
59373+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
59374
59375 jif = jiffies;
59376 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
59377 fscache_wait_bit_interruptible,
59378 TASK_INTERRUPTIBLE) != 0) {
59379- fscache_stat(&fscache_n_retrievals_intr);
59380+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
59381 _leave(" = -ERESTARTSYS");
59382 return -ERESTARTSYS;
59383 }
59384@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
59385 */
59386 int fscache_wait_for_operation_activation(struct fscache_object *object,
59387 struct fscache_operation *op,
59388- atomic_t *stat_op_waits,
59389- atomic_t *stat_object_dead,
59390+ atomic_unchecked_t *stat_op_waits,
59391+ atomic_unchecked_t *stat_object_dead,
59392 void (*do_cancel)(struct fscache_operation *))
59393 {
59394 int ret;
59395@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
59396
59397 _debug(">>> WT");
59398 if (stat_op_waits)
59399- fscache_stat(stat_op_waits);
59400+ fscache_stat_unchecked(stat_op_waits);
59401 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
59402 fscache_wait_bit_interruptible,
59403 TASK_INTERRUPTIBLE) != 0) {
59404@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
59405 check_if_dead:
59406 if (op->state == FSCACHE_OP_ST_CANCELLED) {
59407 if (stat_object_dead)
59408- fscache_stat(stat_object_dead);
59409+ fscache_stat_unchecked(stat_object_dead);
59410 _leave(" = -ENOBUFS [cancelled]");
59411 return -ENOBUFS;
59412 }
59413@@ -366,7 +366,7 @@ check_if_dead:
59414 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
59415 fscache_cancel_op(op, do_cancel);
59416 if (stat_object_dead)
59417- fscache_stat(stat_object_dead);
59418+ fscache_stat_unchecked(stat_object_dead);
59419 return -ENOBUFS;
59420 }
59421 return 0;
59422@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
59423
59424 _enter("%p,%p,,,", cookie, page);
59425
59426- fscache_stat(&fscache_n_retrievals);
59427+ fscache_stat_unchecked(&fscache_n_retrievals);
59428
59429 if (hlist_empty(&cookie->backing_objects))
59430 goto nobufs;
59431@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
59432 goto nobufs_unlock_dec;
59433 spin_unlock(&cookie->lock);
59434
59435- fscache_stat(&fscache_n_retrieval_ops);
59436+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
59437
59438 /* pin the netfs read context in case we need to do the actual netfs
59439 * read because we've encountered a cache read failure */
59440@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
59441
59442 error:
59443 if (ret == -ENOMEM)
59444- fscache_stat(&fscache_n_retrievals_nomem);
59445+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
59446 else if (ret == -ERESTARTSYS)
59447- fscache_stat(&fscache_n_retrievals_intr);
59448+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
59449 else if (ret == -ENODATA)
59450- fscache_stat(&fscache_n_retrievals_nodata);
59451+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
59452 else if (ret < 0)
59453- fscache_stat(&fscache_n_retrievals_nobufs);
59454+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
59455 else
59456- fscache_stat(&fscache_n_retrievals_ok);
59457+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
59458
59459 fscache_put_retrieval(op);
59460 _leave(" = %d", ret);
59461@@ -490,7 +490,7 @@ nobufs_unlock:
59462 __fscache_wake_unused_cookie(cookie);
59463 kfree(op);
59464 nobufs:
59465- fscache_stat(&fscache_n_retrievals_nobufs);
59466+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
59467 _leave(" = -ENOBUFS");
59468 return -ENOBUFS;
59469 }
59470@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
59471
59472 _enter("%p,,%d,,,", cookie, *nr_pages);
59473
59474- fscache_stat(&fscache_n_retrievals);
59475+ fscache_stat_unchecked(&fscache_n_retrievals);
59476
59477 if (hlist_empty(&cookie->backing_objects))
59478 goto nobufs;
59479@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
59480 goto nobufs_unlock_dec;
59481 spin_unlock(&cookie->lock);
59482
59483- fscache_stat(&fscache_n_retrieval_ops);
59484+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
59485
59486 /* pin the netfs read context in case we need to do the actual netfs
59487 * read because we've encountered a cache read failure */
59488@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
59489
59490 error:
59491 if (ret == -ENOMEM)
59492- fscache_stat(&fscache_n_retrievals_nomem);
59493+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
59494 else if (ret == -ERESTARTSYS)
59495- fscache_stat(&fscache_n_retrievals_intr);
59496+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
59497 else if (ret == -ENODATA)
59498- fscache_stat(&fscache_n_retrievals_nodata);
59499+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
59500 else if (ret < 0)
59501- fscache_stat(&fscache_n_retrievals_nobufs);
59502+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
59503 else
59504- fscache_stat(&fscache_n_retrievals_ok);
59505+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
59506
59507 fscache_put_retrieval(op);
59508 _leave(" = %d", ret);
59509@@ -621,7 +621,7 @@ nobufs_unlock:
59510 if (wake_cookie)
59511 __fscache_wake_unused_cookie(cookie);
59512 nobufs:
59513- fscache_stat(&fscache_n_retrievals_nobufs);
59514+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
59515 _leave(" = -ENOBUFS");
59516 return -ENOBUFS;
59517 }
59518@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
59519
59520 _enter("%p,%p,,,", cookie, page);
59521
59522- fscache_stat(&fscache_n_allocs);
59523+ fscache_stat_unchecked(&fscache_n_allocs);
59524
59525 if (hlist_empty(&cookie->backing_objects))
59526 goto nobufs;
59527@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
59528 goto nobufs_unlock_dec;
59529 spin_unlock(&cookie->lock);
59530
59531- fscache_stat(&fscache_n_alloc_ops);
59532+ fscache_stat_unchecked(&fscache_n_alloc_ops);
59533
59534 ret = fscache_wait_for_operation_activation(
59535 object, &op->op,
59536@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
59537
59538 error:
59539 if (ret == -ERESTARTSYS)
59540- fscache_stat(&fscache_n_allocs_intr);
59541+ fscache_stat_unchecked(&fscache_n_allocs_intr);
59542 else if (ret < 0)
59543- fscache_stat(&fscache_n_allocs_nobufs);
59544+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
59545 else
59546- fscache_stat(&fscache_n_allocs_ok);
59547+ fscache_stat_unchecked(&fscache_n_allocs_ok);
59548
59549 fscache_put_retrieval(op);
59550 _leave(" = %d", ret);
59551@@ -715,7 +715,7 @@ nobufs_unlock:
59552 if (wake_cookie)
59553 __fscache_wake_unused_cookie(cookie);
59554 nobufs:
59555- fscache_stat(&fscache_n_allocs_nobufs);
59556+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
59557 _leave(" = -ENOBUFS");
59558 return -ENOBUFS;
59559 }
59560@@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op)
59561
59562 spin_lock(&cookie->stores_lock);
59563
59564- fscache_stat(&fscache_n_store_calls);
59565+ fscache_stat_unchecked(&fscache_n_store_calls);
59566
59567 /* find a page to store */
59568 page = NULL;
59569@@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op)
59570 page = results[0];
59571 _debug("gang %d [%lx]", n, page->index);
59572 if (page->index > op->store_limit) {
59573- fscache_stat(&fscache_n_store_pages_over_limit);
59574+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
59575 goto superseded;
59576 }
59577
59578@@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op)
59579 spin_unlock(&cookie->stores_lock);
59580 spin_unlock(&object->lock);
59581
59582- fscache_stat(&fscache_n_store_pages);
59583+ fscache_stat_unchecked(&fscache_n_store_pages);
59584 fscache_stat(&fscache_n_cop_write_page);
59585 ret = object->cache->ops->write_page(op, page);
59586 fscache_stat_d(&fscache_n_cop_write_page);
59587@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
59588 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
59589 ASSERT(PageFsCache(page));
59590
59591- fscache_stat(&fscache_n_stores);
59592+ fscache_stat_unchecked(&fscache_n_stores);
59593
59594 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
59595 _leave(" = -ENOBUFS [invalidating]");
59596@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
59597 spin_unlock(&cookie->stores_lock);
59598 spin_unlock(&object->lock);
59599
59600- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
59601+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
59602 op->store_limit = object->store_limit;
59603
59604 __fscache_use_cookie(cookie);
59605@@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
59606
59607 spin_unlock(&cookie->lock);
59608 radix_tree_preload_end();
59609- fscache_stat(&fscache_n_store_ops);
59610- fscache_stat(&fscache_n_stores_ok);
59611+ fscache_stat_unchecked(&fscache_n_store_ops);
59612+ fscache_stat_unchecked(&fscache_n_stores_ok);
59613
59614 /* the work queue now carries its own ref on the object */
59615 fscache_put_operation(&op->op);
59616@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
59617 return 0;
59618
59619 already_queued:
59620- fscache_stat(&fscache_n_stores_again);
59621+ fscache_stat_unchecked(&fscache_n_stores_again);
59622 already_pending:
59623 spin_unlock(&cookie->stores_lock);
59624 spin_unlock(&object->lock);
59625 spin_unlock(&cookie->lock);
59626 radix_tree_preload_end();
59627 kfree(op);
59628- fscache_stat(&fscache_n_stores_ok);
59629+ fscache_stat_unchecked(&fscache_n_stores_ok);
59630 _leave(" = 0");
59631 return 0;
59632
59633@@ -1024,14 +1024,14 @@ nobufs:
59634 kfree(op);
59635 if (wake_cookie)
59636 __fscache_wake_unused_cookie(cookie);
59637- fscache_stat(&fscache_n_stores_nobufs);
59638+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
59639 _leave(" = -ENOBUFS");
59640 return -ENOBUFS;
59641
59642 nomem_free:
59643 kfree(op);
59644 nomem:
59645- fscache_stat(&fscache_n_stores_oom);
59646+ fscache_stat_unchecked(&fscache_n_stores_oom);
59647 _leave(" = -ENOMEM");
59648 return -ENOMEM;
59649 }
59650@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
59651 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
59652 ASSERTCMP(page, !=, NULL);
59653
59654- fscache_stat(&fscache_n_uncaches);
59655+ fscache_stat_unchecked(&fscache_n_uncaches);
59656
59657 /* cache withdrawal may beat us to it */
59658 if (!PageFsCache(page))
59659@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
59660 struct fscache_cookie *cookie = op->op.object->cookie;
59661
59662 #ifdef CONFIG_FSCACHE_STATS
59663- atomic_inc(&fscache_n_marks);
59664+ atomic_inc_unchecked(&fscache_n_marks);
59665 #endif
59666
59667 _debug("- mark %p{%lx}", page, page->index);
59668diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
59669index 40d13c7..ddf52b9 100644
59670--- a/fs/fscache/stats.c
59671+++ b/fs/fscache/stats.c
59672@@ -18,99 +18,99 @@
59673 /*
59674 * operation counters
59675 */
59676-atomic_t fscache_n_op_pend;
59677-atomic_t fscache_n_op_run;
59678-atomic_t fscache_n_op_enqueue;
59679-atomic_t fscache_n_op_requeue;
59680-atomic_t fscache_n_op_deferred_release;
59681-atomic_t fscache_n_op_release;
59682-atomic_t fscache_n_op_gc;
59683-atomic_t fscache_n_op_cancelled;
59684-atomic_t fscache_n_op_rejected;
59685+atomic_unchecked_t fscache_n_op_pend;
59686+atomic_unchecked_t fscache_n_op_run;
59687+atomic_unchecked_t fscache_n_op_enqueue;
59688+atomic_unchecked_t fscache_n_op_requeue;
59689+atomic_unchecked_t fscache_n_op_deferred_release;
59690+atomic_unchecked_t fscache_n_op_release;
59691+atomic_unchecked_t fscache_n_op_gc;
59692+atomic_unchecked_t fscache_n_op_cancelled;
59693+atomic_unchecked_t fscache_n_op_rejected;
59694
59695-atomic_t fscache_n_attr_changed;
59696-atomic_t fscache_n_attr_changed_ok;
59697-atomic_t fscache_n_attr_changed_nobufs;
59698-atomic_t fscache_n_attr_changed_nomem;
59699-atomic_t fscache_n_attr_changed_calls;
59700+atomic_unchecked_t fscache_n_attr_changed;
59701+atomic_unchecked_t fscache_n_attr_changed_ok;
59702+atomic_unchecked_t fscache_n_attr_changed_nobufs;
59703+atomic_unchecked_t fscache_n_attr_changed_nomem;
59704+atomic_unchecked_t fscache_n_attr_changed_calls;
59705
59706-atomic_t fscache_n_allocs;
59707-atomic_t fscache_n_allocs_ok;
59708-atomic_t fscache_n_allocs_wait;
59709-atomic_t fscache_n_allocs_nobufs;
59710-atomic_t fscache_n_allocs_intr;
59711-atomic_t fscache_n_allocs_object_dead;
59712-atomic_t fscache_n_alloc_ops;
59713-atomic_t fscache_n_alloc_op_waits;
59714+atomic_unchecked_t fscache_n_allocs;
59715+atomic_unchecked_t fscache_n_allocs_ok;
59716+atomic_unchecked_t fscache_n_allocs_wait;
59717+atomic_unchecked_t fscache_n_allocs_nobufs;
59718+atomic_unchecked_t fscache_n_allocs_intr;
59719+atomic_unchecked_t fscache_n_allocs_object_dead;
59720+atomic_unchecked_t fscache_n_alloc_ops;
59721+atomic_unchecked_t fscache_n_alloc_op_waits;
59722
59723-atomic_t fscache_n_retrievals;
59724-atomic_t fscache_n_retrievals_ok;
59725-atomic_t fscache_n_retrievals_wait;
59726-atomic_t fscache_n_retrievals_nodata;
59727-atomic_t fscache_n_retrievals_nobufs;
59728-atomic_t fscache_n_retrievals_intr;
59729-atomic_t fscache_n_retrievals_nomem;
59730-atomic_t fscache_n_retrievals_object_dead;
59731-atomic_t fscache_n_retrieval_ops;
59732-atomic_t fscache_n_retrieval_op_waits;
59733+atomic_unchecked_t fscache_n_retrievals;
59734+atomic_unchecked_t fscache_n_retrievals_ok;
59735+atomic_unchecked_t fscache_n_retrievals_wait;
59736+atomic_unchecked_t fscache_n_retrievals_nodata;
59737+atomic_unchecked_t fscache_n_retrievals_nobufs;
59738+atomic_unchecked_t fscache_n_retrievals_intr;
59739+atomic_unchecked_t fscache_n_retrievals_nomem;
59740+atomic_unchecked_t fscache_n_retrievals_object_dead;
59741+atomic_unchecked_t fscache_n_retrieval_ops;
59742+atomic_unchecked_t fscache_n_retrieval_op_waits;
59743
59744-atomic_t fscache_n_stores;
59745-atomic_t fscache_n_stores_ok;
59746-atomic_t fscache_n_stores_again;
59747-atomic_t fscache_n_stores_nobufs;
59748-atomic_t fscache_n_stores_oom;
59749-atomic_t fscache_n_store_ops;
59750-atomic_t fscache_n_store_calls;
59751-atomic_t fscache_n_store_pages;
59752-atomic_t fscache_n_store_radix_deletes;
59753-atomic_t fscache_n_store_pages_over_limit;
59754+atomic_unchecked_t fscache_n_stores;
59755+atomic_unchecked_t fscache_n_stores_ok;
59756+atomic_unchecked_t fscache_n_stores_again;
59757+atomic_unchecked_t fscache_n_stores_nobufs;
59758+atomic_unchecked_t fscache_n_stores_oom;
59759+atomic_unchecked_t fscache_n_store_ops;
59760+atomic_unchecked_t fscache_n_store_calls;
59761+atomic_unchecked_t fscache_n_store_pages;
59762+atomic_unchecked_t fscache_n_store_radix_deletes;
59763+atomic_unchecked_t fscache_n_store_pages_over_limit;
59764
59765-atomic_t fscache_n_store_vmscan_not_storing;
59766-atomic_t fscache_n_store_vmscan_gone;
59767-atomic_t fscache_n_store_vmscan_busy;
59768-atomic_t fscache_n_store_vmscan_cancelled;
59769-atomic_t fscache_n_store_vmscan_wait;
59770+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
59771+atomic_unchecked_t fscache_n_store_vmscan_gone;
59772+atomic_unchecked_t fscache_n_store_vmscan_busy;
59773+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
59774+atomic_unchecked_t fscache_n_store_vmscan_wait;
59775
59776-atomic_t fscache_n_marks;
59777-atomic_t fscache_n_uncaches;
59778+atomic_unchecked_t fscache_n_marks;
59779+atomic_unchecked_t fscache_n_uncaches;
59780
59781-atomic_t fscache_n_acquires;
59782-atomic_t fscache_n_acquires_null;
59783-atomic_t fscache_n_acquires_no_cache;
59784-atomic_t fscache_n_acquires_ok;
59785-atomic_t fscache_n_acquires_nobufs;
59786-atomic_t fscache_n_acquires_oom;
59787+atomic_unchecked_t fscache_n_acquires;
59788+atomic_unchecked_t fscache_n_acquires_null;
59789+atomic_unchecked_t fscache_n_acquires_no_cache;
59790+atomic_unchecked_t fscache_n_acquires_ok;
59791+atomic_unchecked_t fscache_n_acquires_nobufs;
59792+atomic_unchecked_t fscache_n_acquires_oom;
59793
59794-atomic_t fscache_n_invalidates;
59795-atomic_t fscache_n_invalidates_run;
59796+atomic_unchecked_t fscache_n_invalidates;
59797+atomic_unchecked_t fscache_n_invalidates_run;
59798
59799-atomic_t fscache_n_updates;
59800-atomic_t fscache_n_updates_null;
59801-atomic_t fscache_n_updates_run;
59802+atomic_unchecked_t fscache_n_updates;
59803+atomic_unchecked_t fscache_n_updates_null;
59804+atomic_unchecked_t fscache_n_updates_run;
59805
59806-atomic_t fscache_n_relinquishes;
59807-atomic_t fscache_n_relinquishes_null;
59808-atomic_t fscache_n_relinquishes_waitcrt;
59809-atomic_t fscache_n_relinquishes_retire;
59810+atomic_unchecked_t fscache_n_relinquishes;
59811+atomic_unchecked_t fscache_n_relinquishes_null;
59812+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
59813+atomic_unchecked_t fscache_n_relinquishes_retire;
59814
59815-atomic_t fscache_n_cookie_index;
59816-atomic_t fscache_n_cookie_data;
59817-atomic_t fscache_n_cookie_special;
59818+atomic_unchecked_t fscache_n_cookie_index;
59819+atomic_unchecked_t fscache_n_cookie_data;
59820+atomic_unchecked_t fscache_n_cookie_special;
59821
59822-atomic_t fscache_n_object_alloc;
59823-atomic_t fscache_n_object_no_alloc;
59824-atomic_t fscache_n_object_lookups;
59825-atomic_t fscache_n_object_lookups_negative;
59826-atomic_t fscache_n_object_lookups_positive;
59827-atomic_t fscache_n_object_lookups_timed_out;
59828-atomic_t fscache_n_object_created;
59829-atomic_t fscache_n_object_avail;
59830-atomic_t fscache_n_object_dead;
59831+atomic_unchecked_t fscache_n_object_alloc;
59832+atomic_unchecked_t fscache_n_object_no_alloc;
59833+atomic_unchecked_t fscache_n_object_lookups;
59834+atomic_unchecked_t fscache_n_object_lookups_negative;
59835+atomic_unchecked_t fscache_n_object_lookups_positive;
59836+atomic_unchecked_t fscache_n_object_lookups_timed_out;
59837+atomic_unchecked_t fscache_n_object_created;
59838+atomic_unchecked_t fscache_n_object_avail;
59839+atomic_unchecked_t fscache_n_object_dead;
59840
59841-atomic_t fscache_n_checkaux_none;
59842-atomic_t fscache_n_checkaux_okay;
59843-atomic_t fscache_n_checkaux_update;
59844-atomic_t fscache_n_checkaux_obsolete;
59845+atomic_unchecked_t fscache_n_checkaux_none;
59846+atomic_unchecked_t fscache_n_checkaux_okay;
59847+atomic_unchecked_t fscache_n_checkaux_update;
59848+atomic_unchecked_t fscache_n_checkaux_obsolete;
59849
59850 atomic_t fscache_n_cop_alloc_object;
59851 atomic_t fscache_n_cop_lookup_object;
59852@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
59853 seq_puts(m, "FS-Cache statistics\n");
59854
59855 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
59856- atomic_read(&fscache_n_cookie_index),
59857- atomic_read(&fscache_n_cookie_data),
59858- atomic_read(&fscache_n_cookie_special));
59859+ atomic_read_unchecked(&fscache_n_cookie_index),
59860+ atomic_read_unchecked(&fscache_n_cookie_data),
59861+ atomic_read_unchecked(&fscache_n_cookie_special));
59862
59863 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
59864- atomic_read(&fscache_n_object_alloc),
59865- atomic_read(&fscache_n_object_no_alloc),
59866- atomic_read(&fscache_n_object_avail),
59867- atomic_read(&fscache_n_object_dead));
59868+ atomic_read_unchecked(&fscache_n_object_alloc),
59869+ atomic_read_unchecked(&fscache_n_object_no_alloc),
59870+ atomic_read_unchecked(&fscache_n_object_avail),
59871+ atomic_read_unchecked(&fscache_n_object_dead));
59872 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
59873- atomic_read(&fscache_n_checkaux_none),
59874- atomic_read(&fscache_n_checkaux_okay),
59875- atomic_read(&fscache_n_checkaux_update),
59876- atomic_read(&fscache_n_checkaux_obsolete));
59877+ atomic_read_unchecked(&fscache_n_checkaux_none),
59878+ atomic_read_unchecked(&fscache_n_checkaux_okay),
59879+ atomic_read_unchecked(&fscache_n_checkaux_update),
59880+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
59881
59882 seq_printf(m, "Pages : mrk=%u unc=%u\n",
59883- atomic_read(&fscache_n_marks),
59884- atomic_read(&fscache_n_uncaches));
59885+ atomic_read_unchecked(&fscache_n_marks),
59886+ atomic_read_unchecked(&fscache_n_uncaches));
59887
59888 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
59889 " oom=%u\n",
59890- atomic_read(&fscache_n_acquires),
59891- atomic_read(&fscache_n_acquires_null),
59892- atomic_read(&fscache_n_acquires_no_cache),
59893- atomic_read(&fscache_n_acquires_ok),
59894- atomic_read(&fscache_n_acquires_nobufs),
59895- atomic_read(&fscache_n_acquires_oom));
59896+ atomic_read_unchecked(&fscache_n_acquires),
59897+ atomic_read_unchecked(&fscache_n_acquires_null),
59898+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
59899+ atomic_read_unchecked(&fscache_n_acquires_ok),
59900+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
59901+ atomic_read_unchecked(&fscache_n_acquires_oom));
59902
59903 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
59904- atomic_read(&fscache_n_object_lookups),
59905- atomic_read(&fscache_n_object_lookups_negative),
59906- atomic_read(&fscache_n_object_lookups_positive),
59907- atomic_read(&fscache_n_object_created),
59908- atomic_read(&fscache_n_object_lookups_timed_out));
59909+ atomic_read_unchecked(&fscache_n_object_lookups),
59910+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
59911+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
59912+ atomic_read_unchecked(&fscache_n_object_created),
59913+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
59914
59915 seq_printf(m, "Invals : n=%u run=%u\n",
59916- atomic_read(&fscache_n_invalidates),
59917- atomic_read(&fscache_n_invalidates_run));
59918+ atomic_read_unchecked(&fscache_n_invalidates),
59919+ atomic_read_unchecked(&fscache_n_invalidates_run));
59920
59921 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
59922- atomic_read(&fscache_n_updates),
59923- atomic_read(&fscache_n_updates_null),
59924- atomic_read(&fscache_n_updates_run));
59925+ atomic_read_unchecked(&fscache_n_updates),
59926+ atomic_read_unchecked(&fscache_n_updates_null),
59927+ atomic_read_unchecked(&fscache_n_updates_run));
59928
59929 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
59930- atomic_read(&fscache_n_relinquishes),
59931- atomic_read(&fscache_n_relinquishes_null),
59932- atomic_read(&fscache_n_relinquishes_waitcrt),
59933- atomic_read(&fscache_n_relinquishes_retire));
59934+ atomic_read_unchecked(&fscache_n_relinquishes),
59935+ atomic_read_unchecked(&fscache_n_relinquishes_null),
59936+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
59937+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
59938
59939 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
59940- atomic_read(&fscache_n_attr_changed),
59941- atomic_read(&fscache_n_attr_changed_ok),
59942- atomic_read(&fscache_n_attr_changed_nobufs),
59943- atomic_read(&fscache_n_attr_changed_nomem),
59944- atomic_read(&fscache_n_attr_changed_calls));
59945+ atomic_read_unchecked(&fscache_n_attr_changed),
59946+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
59947+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
59948+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
59949+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
59950
59951 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
59952- atomic_read(&fscache_n_allocs),
59953- atomic_read(&fscache_n_allocs_ok),
59954- atomic_read(&fscache_n_allocs_wait),
59955- atomic_read(&fscache_n_allocs_nobufs),
59956- atomic_read(&fscache_n_allocs_intr));
59957+ atomic_read_unchecked(&fscache_n_allocs),
59958+ atomic_read_unchecked(&fscache_n_allocs_ok),
59959+ atomic_read_unchecked(&fscache_n_allocs_wait),
59960+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
59961+ atomic_read_unchecked(&fscache_n_allocs_intr));
59962 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
59963- atomic_read(&fscache_n_alloc_ops),
59964- atomic_read(&fscache_n_alloc_op_waits),
59965- atomic_read(&fscache_n_allocs_object_dead));
59966+ atomic_read_unchecked(&fscache_n_alloc_ops),
59967+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
59968+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
59969
59970 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
59971 " int=%u oom=%u\n",
59972- atomic_read(&fscache_n_retrievals),
59973- atomic_read(&fscache_n_retrievals_ok),
59974- atomic_read(&fscache_n_retrievals_wait),
59975- atomic_read(&fscache_n_retrievals_nodata),
59976- atomic_read(&fscache_n_retrievals_nobufs),
59977- atomic_read(&fscache_n_retrievals_intr),
59978- atomic_read(&fscache_n_retrievals_nomem));
59979+ atomic_read_unchecked(&fscache_n_retrievals),
59980+ atomic_read_unchecked(&fscache_n_retrievals_ok),
59981+ atomic_read_unchecked(&fscache_n_retrievals_wait),
59982+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
59983+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
59984+ atomic_read_unchecked(&fscache_n_retrievals_intr),
59985+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
59986 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
59987- atomic_read(&fscache_n_retrieval_ops),
59988- atomic_read(&fscache_n_retrieval_op_waits),
59989- atomic_read(&fscache_n_retrievals_object_dead));
59990+ atomic_read_unchecked(&fscache_n_retrieval_ops),
59991+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
59992+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
59993
59994 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
59995- atomic_read(&fscache_n_stores),
59996- atomic_read(&fscache_n_stores_ok),
59997- atomic_read(&fscache_n_stores_again),
59998- atomic_read(&fscache_n_stores_nobufs),
59999- atomic_read(&fscache_n_stores_oom));
60000+ atomic_read_unchecked(&fscache_n_stores),
60001+ atomic_read_unchecked(&fscache_n_stores_ok),
60002+ atomic_read_unchecked(&fscache_n_stores_again),
60003+ atomic_read_unchecked(&fscache_n_stores_nobufs),
60004+ atomic_read_unchecked(&fscache_n_stores_oom));
60005 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
60006- atomic_read(&fscache_n_store_ops),
60007- atomic_read(&fscache_n_store_calls),
60008- atomic_read(&fscache_n_store_pages),
60009- atomic_read(&fscache_n_store_radix_deletes),
60010- atomic_read(&fscache_n_store_pages_over_limit));
60011+ atomic_read_unchecked(&fscache_n_store_ops),
60012+ atomic_read_unchecked(&fscache_n_store_calls),
60013+ atomic_read_unchecked(&fscache_n_store_pages),
60014+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
60015+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
60016
60017 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
60018- atomic_read(&fscache_n_store_vmscan_not_storing),
60019- atomic_read(&fscache_n_store_vmscan_gone),
60020- atomic_read(&fscache_n_store_vmscan_busy),
60021- atomic_read(&fscache_n_store_vmscan_cancelled),
60022- atomic_read(&fscache_n_store_vmscan_wait));
60023+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
60024+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
60025+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
60026+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
60027+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
60028
60029 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
60030- atomic_read(&fscache_n_op_pend),
60031- atomic_read(&fscache_n_op_run),
60032- atomic_read(&fscache_n_op_enqueue),
60033- atomic_read(&fscache_n_op_cancelled),
60034- atomic_read(&fscache_n_op_rejected));
60035+ atomic_read_unchecked(&fscache_n_op_pend),
60036+ atomic_read_unchecked(&fscache_n_op_run),
60037+ atomic_read_unchecked(&fscache_n_op_enqueue),
60038+ atomic_read_unchecked(&fscache_n_op_cancelled),
60039+ atomic_read_unchecked(&fscache_n_op_rejected));
60040 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
60041- atomic_read(&fscache_n_op_deferred_release),
60042- atomic_read(&fscache_n_op_release),
60043- atomic_read(&fscache_n_op_gc));
60044+ atomic_read_unchecked(&fscache_n_op_deferred_release),
60045+ atomic_read_unchecked(&fscache_n_op_release),
60046+ atomic_read_unchecked(&fscache_n_op_gc));
60047
60048 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
60049 atomic_read(&fscache_n_cop_alloc_object),
60050diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
60051index b96a49b..9bfdc47 100644
60052--- a/fs/fuse/cuse.c
60053+++ b/fs/fuse/cuse.c
60054@@ -606,10 +606,12 @@ static int __init cuse_init(void)
60055 INIT_LIST_HEAD(&cuse_conntbl[i]);
60056
60057 /* inherit and extend fuse_dev_operations */
60058- cuse_channel_fops = fuse_dev_operations;
60059- cuse_channel_fops.owner = THIS_MODULE;
60060- cuse_channel_fops.open = cuse_channel_open;
60061- cuse_channel_fops.release = cuse_channel_release;
60062+ pax_open_kernel();
60063+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
60064+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
60065+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
60066+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
60067+ pax_close_kernel();
60068
60069 cuse_class = class_create(THIS_MODULE, "cuse");
60070 if (IS_ERR(cuse_class))
60071diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
60072index ef74ad5..c9ac759e 100644
60073--- a/fs/fuse/dev.c
60074+++ b/fs/fuse/dev.c
60075@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
60076 ret = 0;
60077 pipe_lock(pipe);
60078
60079- if (!pipe->readers) {
60080+ if (!atomic_read(&pipe->readers)) {
60081 send_sig(SIGPIPE, current, 0);
60082 if (!ret)
60083 ret = -EPIPE;
60084@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
60085 page_nr++;
60086 ret += buf->len;
60087
60088- if (pipe->files)
60089+ if (atomic_read(&pipe->files))
60090 do_wakeup = 1;
60091 }
60092
60093diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
60094index c3eb2c4..98007d4 100644
60095--- a/fs/fuse/dir.c
60096+++ b/fs/fuse/dir.c
60097@@ -1408,7 +1408,7 @@ static char *read_link(struct dentry *dentry)
60098 return link;
60099 }
60100
60101-static void free_link(char *link)
60102+static void free_link(const char *link)
60103 {
60104 if (!IS_ERR(link))
60105 free_page((unsigned long) link);
60106diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
60107index db23ce1..9e6cd9d 100644
60108--- a/fs/hostfs/hostfs_kern.c
60109+++ b/fs/hostfs/hostfs_kern.c
60110@@ -895,7 +895,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
60111
60112 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
60113 {
60114- char *s = nd_get_link(nd);
60115+ const char *s = nd_get_link(nd);
60116 if (!IS_ERR(s))
60117 __putname(s);
60118 }
60119diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
60120index d19b30a..ef89c36 100644
60121--- a/fs/hugetlbfs/inode.c
60122+++ b/fs/hugetlbfs/inode.c
60123@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
60124 struct mm_struct *mm = current->mm;
60125 struct vm_area_struct *vma;
60126 struct hstate *h = hstate_file(file);
60127+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
60128 struct vm_unmapped_area_info info;
60129
60130 if (len & ~huge_page_mask(h))
60131@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
60132 return addr;
60133 }
60134
60135+#ifdef CONFIG_PAX_RANDMMAP
60136+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
60137+#endif
60138+
60139 if (addr) {
60140 addr = ALIGN(addr, huge_page_size(h));
60141 vma = find_vma(mm, addr);
60142- if (TASK_SIZE - len >= addr &&
60143- (!vma || addr + len <= vma->vm_start))
60144+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
60145 return addr;
60146 }
60147
60148 info.flags = 0;
60149 info.length = len;
60150 info.low_limit = TASK_UNMAPPED_BASE;
60151+
60152+#ifdef CONFIG_PAX_RANDMMAP
60153+ if (mm->pax_flags & MF_PAX_RANDMMAP)
60154+ info.low_limit += mm->delta_mmap;
60155+#endif
60156+
60157 info.high_limit = TASK_SIZE;
60158 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
60159 info.align_offset = 0;
60160@@ -908,7 +918,7 @@ static struct file_system_type hugetlbfs_fs_type = {
60161 };
60162 MODULE_ALIAS_FS("hugetlbfs");
60163
60164-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
60165+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
60166
60167 static int can_do_hugetlb_shm(void)
60168 {
60169diff --git a/fs/inode.c b/fs/inode.c
60170index 4bcdad3..1883822 100644
60171--- a/fs/inode.c
60172+++ b/fs/inode.c
60173@@ -841,8 +841,8 @@ unsigned int get_next_ino(void)
60174
60175 #ifdef CONFIG_SMP
60176 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
60177- static atomic_t shared_last_ino;
60178- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
60179+ static atomic_unchecked_t shared_last_ino;
60180+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
60181
60182 res = next - LAST_INO_BATCH;
60183 }
60184diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
60185index 4a6cf28..d3a29d3 100644
60186--- a/fs/jffs2/erase.c
60187+++ b/fs/jffs2/erase.c
60188@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
60189 struct jffs2_unknown_node marker = {
60190 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
60191 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
60192- .totlen = cpu_to_je32(c->cleanmarker_size)
60193+ .totlen = cpu_to_je32(c->cleanmarker_size),
60194+ .hdr_crc = cpu_to_je32(0)
60195 };
60196
60197 jffs2_prealloc_raw_node_refs(c, jeb, 1);
60198diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
60199index a6597d6..41b30ec 100644
60200--- a/fs/jffs2/wbuf.c
60201+++ b/fs/jffs2/wbuf.c
60202@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
60203 {
60204 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
60205 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
60206- .totlen = constant_cpu_to_je32(8)
60207+ .totlen = constant_cpu_to_je32(8),
60208+ .hdr_crc = constant_cpu_to_je32(0)
60209 };
60210
60211 /*
60212diff --git a/fs/jfs/super.c b/fs/jfs/super.c
60213index 6669aa2..36b033d 100644
60214--- a/fs/jfs/super.c
60215+++ b/fs/jfs/super.c
60216@@ -882,7 +882,7 @@ static int __init init_jfs_fs(void)
60217
60218 jfs_inode_cachep =
60219 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
60220- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
60221+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
60222 init_once);
60223 if (jfs_inode_cachep == NULL)
60224 return -ENOMEM;
60225diff --git a/fs/libfs.c b/fs/libfs.c
60226index a184424..944ddce 100644
60227--- a/fs/libfs.c
60228+++ b/fs/libfs.c
60229@@ -159,6 +159,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
60230
60231 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
60232 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
60233+ char d_name[sizeof(next->d_iname)];
60234+ const unsigned char *name;
60235+
60236 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
60237 if (!simple_positive(next)) {
60238 spin_unlock(&next->d_lock);
60239@@ -167,7 +170,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
60240
60241 spin_unlock(&next->d_lock);
60242 spin_unlock(&dentry->d_lock);
60243- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
60244+ name = next->d_name.name;
60245+ if (name == next->d_iname) {
60246+ memcpy(d_name, name, next->d_name.len);
60247+ name = d_name;
60248+ }
60249+ if (!dir_emit(ctx, name, next->d_name.len,
60250 next->d_inode->i_ino, dt_type(next->d_inode)))
60251 return 0;
60252 spin_lock(&dentry->d_lock);
60253@@ -999,7 +1007,7 @@ EXPORT_SYMBOL(noop_fsync);
60254 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
60255 void *cookie)
60256 {
60257- char *s = nd_get_link(nd);
60258+ const char *s = nd_get_link(nd);
60259 if (!IS_ERR(s))
60260 kfree(s);
60261 }
60262diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
60263index acd3947..1f896e2 100644
60264--- a/fs/lockd/clntproc.c
60265+++ b/fs/lockd/clntproc.c
60266@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
60267 /*
60268 * Cookie counter for NLM requests
60269 */
60270-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
60271+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
60272
60273 void nlmclnt_next_cookie(struct nlm_cookie *c)
60274 {
60275- u32 cookie = atomic_inc_return(&nlm_cookie);
60276+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
60277
60278 memcpy(c->data, &cookie, 4);
60279 c->len=4;
60280diff --git a/fs/locks.c b/fs/locks.c
60281index 92a0f0a..45a48f0 100644
60282--- a/fs/locks.c
60283+++ b/fs/locks.c
60284@@ -2219,16 +2219,16 @@ void locks_remove_flock(struct file *filp)
60285 return;
60286
60287 if (filp->f_op->flock) {
60288- struct file_lock fl = {
60289+ struct file_lock flock = {
60290 .fl_pid = current->tgid,
60291 .fl_file = filp,
60292 .fl_flags = FL_FLOCK,
60293 .fl_type = F_UNLCK,
60294 .fl_end = OFFSET_MAX,
60295 };
60296- filp->f_op->flock(filp, F_SETLKW, &fl);
60297- if (fl.fl_ops && fl.fl_ops->fl_release_private)
60298- fl.fl_ops->fl_release_private(&fl);
60299+ filp->f_op->flock(filp, F_SETLKW, &flock);
60300+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
60301+ flock.fl_ops->fl_release_private(&flock);
60302 }
60303
60304 spin_lock(&inode->i_lock);
60305diff --git a/fs/mount.h b/fs/mount.h
60306index a17458c..e69fb5b 100644
60307--- a/fs/mount.h
60308+++ b/fs/mount.h
60309@@ -11,7 +11,7 @@ struct mnt_namespace {
60310 u64 seq; /* Sequence number to prevent loops */
60311 wait_queue_head_t poll;
60312 int event;
60313-};
60314+} __randomize_layout;
60315
60316 struct mnt_pcp {
60317 int mnt_count;
60318@@ -57,7 +57,7 @@ struct mount {
60319 int mnt_expiry_mark; /* true if marked for expiry */
60320 int mnt_pinned;
60321 struct path mnt_ex_mountpoint;
60322-};
60323+} __randomize_layout;
60324
60325 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
60326
60327diff --git a/fs/namei.c b/fs/namei.c
60328index 3531dee..3177227 100644
60329--- a/fs/namei.c
60330+++ b/fs/namei.c
60331@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
60332 if (ret != -EACCES)
60333 return ret;
60334
60335+#ifdef CONFIG_GRKERNSEC
60336+ /* we'll block if we have to log due to a denied capability use */
60337+ if (mask & MAY_NOT_BLOCK)
60338+ return -ECHILD;
60339+#endif
60340+
60341 if (S_ISDIR(inode->i_mode)) {
60342 /* DACs are overridable for directories */
60343- if (inode_capable(inode, CAP_DAC_OVERRIDE))
60344- return 0;
60345 if (!(mask & MAY_WRITE))
60346- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
60347+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
60348+ inode_capable(inode, CAP_DAC_READ_SEARCH))
60349 return 0;
60350+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
60351+ return 0;
60352 return -EACCES;
60353 }
60354 /*
60355+ * Searching includes executable on directories, else just read.
60356+ */
60357+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
60358+ if (mask == MAY_READ)
60359+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
60360+ inode_capable(inode, CAP_DAC_READ_SEARCH))
60361+ return 0;
60362+
60363+ /*
60364 * Read/write DACs are always overridable.
60365 * Executable DACs are overridable when there is
60366 * at least one exec bit set.
60367@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
60368 if (inode_capable(inode, CAP_DAC_OVERRIDE))
60369 return 0;
60370
60371- /*
60372- * Searching includes executable on directories, else just read.
60373- */
60374- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
60375- if (mask == MAY_READ)
60376- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
60377- return 0;
60378-
60379 return -EACCES;
60380 }
60381
60382@@ -810,7 +818,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
60383 {
60384 struct dentry *dentry = link->dentry;
60385 int error;
60386- char *s;
60387+ const char *s;
60388
60389 BUG_ON(nd->flags & LOOKUP_RCU);
60390
60391@@ -831,6 +839,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
60392 if (error)
60393 goto out_put_nd_path;
60394
60395+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
60396+ dentry->d_inode, dentry, nd->path.mnt)) {
60397+ error = -EACCES;
60398+ goto out_put_nd_path;
60399+ }
60400+
60401 nd->last_type = LAST_BIND;
60402 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
60403 error = PTR_ERR(*p);
60404@@ -1582,6 +1596,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
60405 if (res)
60406 break;
60407 res = walk_component(nd, path, LOOKUP_FOLLOW);
60408+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
60409+ res = -EACCES;
60410 put_link(nd, &link, cookie);
60411 } while (res > 0);
60412
60413@@ -1655,7 +1671,7 @@ EXPORT_SYMBOL(full_name_hash);
60414 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
60415 {
60416 unsigned long a, b, adata, bdata, mask, hash, len;
60417- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
60418+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
60419
60420 hash = a = 0;
60421 len = -sizeof(unsigned long);
60422@@ -1939,6 +1955,8 @@ static int path_lookupat(int dfd, const char *name,
60423 if (err)
60424 break;
60425 err = lookup_last(nd, &path);
60426+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
60427+ err = -EACCES;
60428 put_link(nd, &link, cookie);
60429 }
60430 }
60431@@ -1946,6 +1964,13 @@ static int path_lookupat(int dfd, const char *name,
60432 if (!err)
60433 err = complete_walk(nd);
60434
60435+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
60436+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
60437+ path_put(&nd->path);
60438+ err = -ENOENT;
60439+ }
60440+ }
60441+
60442 if (!err && nd->flags & LOOKUP_DIRECTORY) {
60443 if (!d_is_directory(nd->path.dentry)) {
60444 path_put(&nd->path);
60445@@ -1973,8 +1998,15 @@ static int filename_lookup(int dfd, struct filename *name,
60446 retval = path_lookupat(dfd, name->name,
60447 flags | LOOKUP_REVAL, nd);
60448
60449- if (likely(!retval))
60450+ if (likely(!retval)) {
60451 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
60452+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
60453+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
60454+ path_put(&nd->path);
60455+ return -ENOENT;
60456+ }
60457+ }
60458+ }
60459 return retval;
60460 }
60461
60462@@ -2548,6 +2580,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
60463 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
60464 return -EPERM;
60465
60466+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
60467+ return -EPERM;
60468+ if (gr_handle_rawio(inode))
60469+ return -EPERM;
60470+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
60471+ return -EACCES;
60472+
60473 return 0;
60474 }
60475
60476@@ -2779,7 +2818,7 @@ looked_up:
60477 * cleared otherwise prior to returning.
60478 */
60479 static int lookup_open(struct nameidata *nd, struct path *path,
60480- struct file *file,
60481+ struct path *link, struct file *file,
60482 const struct open_flags *op,
60483 bool got_write, int *opened)
60484 {
60485@@ -2814,6 +2853,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
60486 /* Negative dentry, just create the file */
60487 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
60488 umode_t mode = op->mode;
60489+
60490+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
60491+ error = -EACCES;
60492+ goto out_dput;
60493+ }
60494+
60495+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
60496+ error = -EACCES;
60497+ goto out_dput;
60498+ }
60499+
60500 if (!IS_POSIXACL(dir->d_inode))
60501 mode &= ~current_umask();
60502 /*
60503@@ -2835,6 +2885,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
60504 nd->flags & LOOKUP_EXCL);
60505 if (error)
60506 goto out_dput;
60507+ else
60508+ gr_handle_create(dentry, nd->path.mnt);
60509 }
60510 out_no_open:
60511 path->dentry = dentry;
60512@@ -2849,7 +2901,7 @@ out_dput:
60513 /*
60514 * Handle the last step of open()
60515 */
60516-static int do_last(struct nameidata *nd, struct path *path,
60517+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
60518 struct file *file, const struct open_flags *op,
60519 int *opened, struct filename *name)
60520 {
60521@@ -2899,6 +2951,15 @@ static int do_last(struct nameidata *nd, struct path *path,
60522 if (error)
60523 return error;
60524
60525+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
60526+ error = -ENOENT;
60527+ goto out;
60528+ }
60529+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
60530+ error = -EACCES;
60531+ goto out;
60532+ }
60533+
60534 audit_inode(name, dir, LOOKUP_PARENT);
60535 error = -EISDIR;
60536 /* trailing slashes? */
60537@@ -2918,7 +2979,7 @@ retry_lookup:
60538 */
60539 }
60540 mutex_lock(&dir->d_inode->i_mutex);
60541- error = lookup_open(nd, path, file, op, got_write, opened);
60542+ error = lookup_open(nd, path, link, file, op, got_write, opened);
60543 mutex_unlock(&dir->d_inode->i_mutex);
60544
60545 if (error <= 0) {
60546@@ -2942,11 +3003,28 @@ retry_lookup:
60547 goto finish_open_created;
60548 }
60549
60550+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
60551+ error = -ENOENT;
60552+ goto exit_dput;
60553+ }
60554+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
60555+ error = -EACCES;
60556+ goto exit_dput;
60557+ }
60558+
60559 /*
60560 * create/update audit record if it already exists.
60561 */
60562- if (d_is_positive(path->dentry))
60563+ if (d_is_positive(path->dentry)) {
60564+ /* only check if O_CREAT is specified, all other checks need to go
60565+ into may_open */
60566+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
60567+ error = -EACCES;
60568+ goto exit_dput;
60569+ }
60570+
60571 audit_inode(name, path->dentry, 0);
60572+ }
60573
60574 /*
60575 * If atomic_open() acquired write access it is dropped now due to
60576@@ -2987,6 +3065,11 @@ finish_lookup:
60577 }
60578 }
60579 BUG_ON(inode != path->dentry->d_inode);
60580+ /* if we're resolving a symlink to another symlink */
60581+ if (link && gr_handle_symlink_owner(link, inode)) {
60582+ error = -EACCES;
60583+ goto out;
60584+ }
60585 return 1;
60586 }
60587
60588@@ -2996,7 +3079,6 @@ finish_lookup:
60589 save_parent.dentry = nd->path.dentry;
60590 save_parent.mnt = mntget(path->mnt);
60591 nd->path.dentry = path->dentry;
60592-
60593 }
60594 nd->inode = inode;
60595 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
60596@@ -3006,7 +3088,18 @@ finish_open:
60597 path_put(&save_parent);
60598 return error;
60599 }
60600+
60601+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
60602+ error = -ENOENT;
60603+ goto out;
60604+ }
60605+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
60606+ error = -EACCES;
60607+ goto out;
60608+ }
60609+
60610 audit_inode(name, nd->path.dentry, 0);
60611+
60612 error = -EISDIR;
60613 if ((open_flag & O_CREAT) &&
60614 (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
60615@@ -3170,7 +3263,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
60616 if (unlikely(error))
60617 goto out;
60618
60619- error = do_last(nd, &path, file, op, &opened, pathname);
60620+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
60621 while (unlikely(error > 0)) { /* trailing symlink */
60622 struct path link = path;
60623 void *cookie;
60624@@ -3188,7 +3281,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
60625 error = follow_link(&link, nd, &cookie);
60626 if (unlikely(error))
60627 break;
60628- error = do_last(nd, &path, file, op, &opened, pathname);
60629+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
60630 put_link(nd, &link, cookie);
60631 }
60632 out:
60633@@ -3288,9 +3381,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
60634 goto unlock;
60635
60636 error = -EEXIST;
60637- if (d_is_positive(dentry))
60638+ if (d_is_positive(dentry)) {
60639+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
60640+ error = -ENOENT;
60641 goto fail;
60642-
60643+ }
60644 /*
60645 * Special case - lookup gave negative, but... we had foo/bar/
60646 * From the vfs_mknod() POV we just have a negative dentry -
60647@@ -3342,6 +3437,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
60648 }
60649 EXPORT_SYMBOL(user_path_create);
60650
60651+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
60652+{
60653+ struct filename *tmp = getname(pathname);
60654+ struct dentry *res;
60655+ if (IS_ERR(tmp))
60656+ return ERR_CAST(tmp);
60657+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
60658+ if (IS_ERR(res))
60659+ putname(tmp);
60660+ else
60661+ *to = tmp;
60662+ return res;
60663+}
60664+
60665 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
60666 {
60667 int error = may_create(dir, dentry);
60668@@ -3404,6 +3513,17 @@ retry:
60669
60670 if (!IS_POSIXACL(path.dentry->d_inode))
60671 mode &= ~current_umask();
60672+
60673+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
60674+ error = -EPERM;
60675+ goto out;
60676+ }
60677+
60678+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
60679+ error = -EACCES;
60680+ goto out;
60681+ }
60682+
60683 error = security_path_mknod(&path, dentry, mode, dev);
60684 if (error)
60685 goto out;
60686@@ -3420,6 +3540,8 @@ retry:
60687 break;
60688 }
60689 out:
60690+ if (!error)
60691+ gr_handle_create(dentry, path.mnt);
60692 done_path_create(&path, dentry);
60693 if (retry_estale(error, lookup_flags)) {
60694 lookup_flags |= LOOKUP_REVAL;
60695@@ -3472,9 +3594,16 @@ retry:
60696
60697 if (!IS_POSIXACL(path.dentry->d_inode))
60698 mode &= ~current_umask();
60699+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
60700+ error = -EACCES;
60701+ goto out;
60702+ }
60703 error = security_path_mkdir(&path, dentry, mode);
60704 if (!error)
60705 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
60706+ if (!error)
60707+ gr_handle_create(dentry, path.mnt);
60708+out:
60709 done_path_create(&path, dentry);
60710 if (retry_estale(error, lookup_flags)) {
60711 lookup_flags |= LOOKUP_REVAL;
60712@@ -3555,6 +3684,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
60713 struct filename *name;
60714 struct dentry *dentry;
60715 struct nameidata nd;
60716+ ino_t saved_ino = 0;
60717+ dev_t saved_dev = 0;
60718 unsigned int lookup_flags = 0;
60719 retry:
60720 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
60721@@ -3587,10 +3718,21 @@ retry:
60722 error = -ENOENT;
60723 goto exit3;
60724 }
60725+
60726+ saved_ino = dentry->d_inode->i_ino;
60727+ saved_dev = gr_get_dev_from_dentry(dentry);
60728+
60729+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
60730+ error = -EACCES;
60731+ goto exit3;
60732+ }
60733+
60734 error = security_path_rmdir(&nd.path, dentry);
60735 if (error)
60736 goto exit3;
60737 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
60738+ if (!error && (saved_dev || saved_ino))
60739+ gr_handle_delete(saved_ino, saved_dev);
60740 exit3:
60741 dput(dentry);
60742 exit2:
60743@@ -3680,6 +3822,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
60744 struct nameidata nd;
60745 struct inode *inode = NULL;
60746 struct inode *delegated_inode = NULL;
60747+ ino_t saved_ino = 0;
60748+ dev_t saved_dev = 0;
60749 unsigned int lookup_flags = 0;
60750 retry:
60751 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
60752@@ -3706,10 +3850,22 @@ retry_deleg:
60753 if (d_is_negative(dentry))
60754 goto slashes;
60755 ihold(inode);
60756+
60757+ if (inode->i_nlink <= 1) {
60758+ saved_ino = inode->i_ino;
60759+ saved_dev = gr_get_dev_from_dentry(dentry);
60760+ }
60761+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
60762+ error = -EACCES;
60763+ goto exit2;
60764+ }
60765+
60766 error = security_path_unlink(&nd.path, dentry);
60767 if (error)
60768 goto exit2;
60769 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
60770+ if (!error && (saved_ino || saved_dev))
60771+ gr_handle_delete(saved_ino, saved_dev);
60772 exit2:
60773 dput(dentry);
60774 }
60775@@ -3797,9 +3953,17 @@ retry:
60776 if (IS_ERR(dentry))
60777 goto out_putname;
60778
60779+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
60780+ error = -EACCES;
60781+ goto out;
60782+ }
60783+
60784 error = security_path_symlink(&path, dentry, from->name);
60785 if (!error)
60786 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
60787+ if (!error)
60788+ gr_handle_create(dentry, path.mnt);
60789+out:
60790 done_path_create(&path, dentry);
60791 if (retry_estale(error, lookup_flags)) {
60792 lookup_flags |= LOOKUP_REVAL;
60793@@ -3902,6 +4066,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
60794 struct dentry *new_dentry;
60795 struct path old_path, new_path;
60796 struct inode *delegated_inode = NULL;
60797+ struct filename *to = NULL;
60798 int how = 0;
60799 int error;
60800
60801@@ -3925,7 +4090,7 @@ retry:
60802 if (error)
60803 return error;
60804
60805- new_dentry = user_path_create(newdfd, newname, &new_path,
60806+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
60807 (how & LOOKUP_REVAL));
60808 error = PTR_ERR(new_dentry);
60809 if (IS_ERR(new_dentry))
60810@@ -3937,11 +4102,28 @@ retry:
60811 error = may_linkat(&old_path);
60812 if (unlikely(error))
60813 goto out_dput;
60814+
60815+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
60816+ old_path.dentry->d_inode,
60817+ old_path.dentry->d_inode->i_mode, to)) {
60818+ error = -EACCES;
60819+ goto out_dput;
60820+ }
60821+
60822+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
60823+ old_path.dentry, old_path.mnt, to)) {
60824+ error = -EACCES;
60825+ goto out_dput;
60826+ }
60827+
60828 error = security_path_link(old_path.dentry, &new_path, new_dentry);
60829 if (error)
60830 goto out_dput;
60831 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
60832+ if (!error)
60833+ gr_handle_create(new_dentry, new_path.mnt);
60834 out_dput:
60835+ putname(to);
60836 done_path_create(&new_path, new_dentry);
60837 if (delegated_inode) {
60838 error = break_deleg_wait(&delegated_inode);
60839@@ -4225,6 +4407,12 @@ retry_deleg:
60840 if (new_dentry == trap)
60841 goto exit5;
60842
60843+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
60844+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
60845+ to);
60846+ if (error)
60847+ goto exit5;
60848+
60849 error = security_path_rename(&oldnd.path, old_dentry,
60850 &newnd.path, new_dentry);
60851 if (error)
60852@@ -4232,6 +4420,9 @@ retry_deleg:
60853 error = vfs_rename(old_dir->d_inode, old_dentry,
60854 new_dir->d_inode, new_dentry,
60855 &delegated_inode);
60856+ if (!error)
60857+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
60858+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
60859 exit5:
60860 dput(new_dentry);
60861 exit4:
60862@@ -4268,6 +4459,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
60863
60864 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
60865 {
60866+ char tmpbuf[64];
60867+ const char *newlink;
60868 int len;
60869
60870 len = PTR_ERR(link);
60871@@ -4277,7 +4470,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
60872 len = strlen(link);
60873 if (len > (unsigned) buflen)
60874 len = buflen;
60875- if (copy_to_user(buffer, link, len))
60876+
60877+ if (len < sizeof(tmpbuf)) {
60878+ memcpy(tmpbuf, link, len);
60879+ newlink = tmpbuf;
60880+ } else
60881+ newlink = link;
60882+
60883+ if (copy_to_user(buffer, newlink, len))
60884 len = -EFAULT;
60885 out:
60886 return len;
60887diff --git a/fs/namespace.c b/fs/namespace.c
60888index be32ebc..c595734 100644
60889--- a/fs/namespace.c
60890+++ b/fs/namespace.c
60891@@ -1293,6 +1293,9 @@ static int do_umount(struct mount *mnt, int flags)
60892 if (!(sb->s_flags & MS_RDONLY))
60893 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
60894 up_write(&sb->s_umount);
60895+
60896+ gr_log_remount(mnt->mnt_devname, retval);
60897+
60898 return retval;
60899 }
60900
60901@@ -1315,6 +1318,9 @@ static int do_umount(struct mount *mnt, int flags)
60902 }
60903 unlock_mount_hash();
60904 namespace_unlock();
60905+
60906+ gr_log_unmount(mnt->mnt_devname, retval);
60907+
60908 return retval;
60909 }
60910
60911@@ -1334,7 +1340,7 @@ static inline bool may_mount(void)
60912 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
60913 */
60914
60915-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
60916+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
60917 {
60918 struct path path;
60919 struct mount *mnt;
60920@@ -1376,7 +1382,7 @@ out:
60921 /*
60922 * The 2.0 compatible umount. No flags.
60923 */
60924-SYSCALL_DEFINE1(oldumount, char __user *, name)
60925+SYSCALL_DEFINE1(oldumount, const char __user *, name)
60926 {
60927 return sys_umount(name, 0);
60928 }
60929@@ -2379,6 +2385,16 @@ long do_mount(const char *dev_name, const char *dir_name,
60930 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
60931 MS_STRICTATIME);
60932
60933+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
60934+ retval = -EPERM;
60935+ goto dput_out;
60936+ }
60937+
60938+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
60939+ retval = -EPERM;
60940+ goto dput_out;
60941+ }
60942+
60943 if (flags & MS_REMOUNT)
60944 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
60945 data_page);
60946@@ -2393,6 +2409,9 @@ long do_mount(const char *dev_name, const char *dir_name,
60947 dev_name, data_page);
60948 dput_out:
60949 path_put(&path);
60950+
60951+ gr_log_mount(dev_name, dir_name, retval);
60952+
60953 return retval;
60954 }
60955
60956@@ -2410,7 +2429,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
60957 * number incrementing at 10Ghz will take 12,427 years to wrap which
60958 * is effectively never, so we can ignore the possibility.
60959 */
60960-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
60961+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
60962
60963 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
60964 {
60965@@ -2425,7 +2444,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
60966 kfree(new_ns);
60967 return ERR_PTR(ret);
60968 }
60969- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
60970+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
60971 atomic_set(&new_ns->count, 1);
60972 new_ns->root = NULL;
60973 INIT_LIST_HEAD(&new_ns->list);
60974@@ -2435,7 +2454,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
60975 return new_ns;
60976 }
60977
60978-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
60979+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
60980 struct user_namespace *user_ns, struct fs_struct *new_fs)
60981 {
60982 struct mnt_namespace *new_ns;
60983@@ -2556,8 +2575,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
60984 }
60985 EXPORT_SYMBOL(mount_subtree);
60986
60987-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
60988- char __user *, type, unsigned long, flags, void __user *, data)
60989+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
60990+ const char __user *, type, unsigned long, flags, void __user *, data)
60991 {
60992 int ret;
60993 char *kernel_type;
60994@@ -2670,6 +2689,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
60995 if (error)
60996 goto out2;
60997
60998+ if (gr_handle_chroot_pivot()) {
60999+ error = -EPERM;
61000+ goto out2;
61001+ }
61002+
61003 get_fs_root(current->fs, &root);
61004 old_mp = lock_mount(&old);
61005 error = PTR_ERR(old_mp);
61006@@ -2930,7 +2954,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
61007 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
61008 return -EPERM;
61009
61010- if (fs->users != 1)
61011+ if (atomic_read(&fs->users) != 1)
61012 return -EINVAL;
61013
61014 get_mnt_ns(mnt_ns);
61015diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
61016index f4ccfe6..a5cf064 100644
61017--- a/fs/nfs/callback_xdr.c
61018+++ b/fs/nfs/callback_xdr.c
61019@@ -51,7 +51,7 @@ struct callback_op {
61020 callback_decode_arg_t decode_args;
61021 callback_encode_res_t encode_res;
61022 long res_maxsize;
61023-};
61024+} __do_const;
61025
61026 static struct callback_op callback_ops[];
61027
61028diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
61029index 00ad1c2..2fde15e 100644
61030--- a/fs/nfs/inode.c
61031+++ b/fs/nfs/inode.c
61032@@ -1146,16 +1146,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
61033 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
61034 }
61035
61036-static atomic_long_t nfs_attr_generation_counter;
61037+static atomic_long_unchecked_t nfs_attr_generation_counter;
61038
61039 static unsigned long nfs_read_attr_generation_counter(void)
61040 {
61041- return atomic_long_read(&nfs_attr_generation_counter);
61042+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
61043 }
61044
61045 unsigned long nfs_inc_attr_generation_counter(void)
61046 {
61047- return atomic_long_inc_return(&nfs_attr_generation_counter);
61048+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
61049 }
61050
61051 void nfs_fattr_init(struct nfs_fattr *fattr)
61052diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
61053index b4a160a..2b9bfba 100644
61054--- a/fs/nfs/nfs4client.c
61055+++ b/fs/nfs/nfs4client.c
61056@@ -409,13 +409,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
61057 error = nfs4_discover_server_trunking(clp, &old);
61058 if (error < 0)
61059 goto error;
61060- nfs_put_client(clp);
61061- if (clp != old) {
61062+
61063+ if (clp != old)
61064 clp->cl_preserve_clid = true;
61065- clp = old;
61066- }
61067-
61068- return clp;
61069+ nfs_put_client(clp);
61070+ return old;
61071
61072 error:
61073 nfs_mark_client_ready(clp, error);
61074@@ -493,9 +491,10 @@ int nfs40_walk_client_list(struct nfs_client *new,
61075 prev = pos;
61076
61077 status = nfs_wait_client_init_complete(pos);
61078- spin_lock(&nn->nfs_client_lock);
61079 if (status < 0)
61080- continue;
61081+ goto out;
61082+ status = -NFS4ERR_STALE_CLIENTID;
61083+ spin_lock(&nn->nfs_client_lock);
61084 }
61085 if (pos->cl_cons_state != NFS_CS_READY)
61086 continue;
61087@@ -633,7 +632,8 @@ int nfs41_walk_client_list(struct nfs_client *new,
61088 }
61089 spin_lock(&nn->nfs_client_lock);
61090 if (status < 0)
61091- continue;
61092+ break;
61093+ status = -NFS4ERR_STALE_CLIENTID;
61094 }
61095 if (pos->cl_cons_state != NFS_CS_READY)
61096 continue;
61097diff --git a/fs/nfs/write.c b/fs/nfs/write.c
61098index c1d5482..6a85038 100644
61099--- a/fs/nfs/write.c
61100+++ b/fs/nfs/write.c
61101@@ -922,19 +922,20 @@ out:
61102 * extend the write to cover the entire page in order to avoid fragmentation
61103 * inefficiencies.
61104 *
61105- * If the file is opened for synchronous writes or if we have a write delegation
61106- * from the server then we can just skip the rest of the checks.
61107+ * If the file is opened for synchronous writes then we can just skip the rest
61108+ * of the checks.
61109 */
61110 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
61111 {
61112 if (file->f_flags & O_DSYNC)
61113 return 0;
61114+ if (!nfs_write_pageuptodate(page, inode))
61115+ return 0;
61116 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
61117 return 1;
61118- if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
61119- (inode->i_flock->fl_start == 0 &&
61120+ if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
61121 inode->i_flock->fl_end == OFFSET_MAX &&
61122- inode->i_flock->fl_type != F_RDLCK)))
61123+ inode->i_flock->fl_type != F_RDLCK))
61124 return 1;
61125 return 0;
61126 }
61127diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
61128index 419572f..5414a23 100644
61129--- a/fs/nfsd/nfs4proc.c
61130+++ b/fs/nfsd/nfs4proc.c
61131@@ -1168,7 +1168,7 @@ struct nfsd4_operation {
61132 nfsd4op_rsize op_rsize_bop;
61133 stateid_getter op_get_currentstateid;
61134 stateid_setter op_set_currentstateid;
61135-};
61136+} __do_const;
61137
61138 static struct nfsd4_operation nfsd4_ops[];
61139
61140diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
61141index ee7237f..e3ae60a 100644
61142--- a/fs/nfsd/nfs4xdr.c
61143+++ b/fs/nfsd/nfs4xdr.c
61144@@ -1523,7 +1523,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
61145
61146 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
61147
61148-static nfsd4_dec nfsd4_dec_ops[] = {
61149+static const nfsd4_dec nfsd4_dec_ops[] = {
61150 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
61151 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
61152 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
61153diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
61154index b6af150..f6ec5e3 100644
61155--- a/fs/nfsd/nfscache.c
61156+++ b/fs/nfsd/nfscache.c
61157@@ -547,14 +547,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
61158 {
61159 struct svc_cacherep *rp = rqstp->rq_cacherep;
61160 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
61161- int len;
61162+ long len;
61163 size_t bufsize = 0;
61164
61165 if (!rp)
61166 return;
61167
61168- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
61169- len >>= 2;
61170+ if (statp) {
61171+ len = (char*)statp - (char*)resv->iov_base;
61172+ len = resv->iov_len - len;
61173+ len >>= 2;
61174+ }
61175
61176 /* Don't cache excessive amounts of data and XDR failures */
61177 if (!statp || len > (256 >> 2)) {
61178diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
61179index 7eea63c..a35f4fb 100644
61180--- a/fs/nfsd/vfs.c
61181+++ b/fs/nfsd/vfs.c
61182@@ -993,7 +993,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
61183 } else {
61184 oldfs = get_fs();
61185 set_fs(KERNEL_DS);
61186- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
61187+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
61188 set_fs(oldfs);
61189 }
61190
61191@@ -1084,7 +1084,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
61192
61193 /* Write the data. */
61194 oldfs = get_fs(); set_fs(KERNEL_DS);
61195- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
61196+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
61197 set_fs(oldfs);
61198 if (host_err < 0)
61199 goto out_nfserr;
61200@@ -1629,7 +1629,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
61201 */
61202
61203 oldfs = get_fs(); set_fs(KERNEL_DS);
61204- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
61205+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
61206 set_fs(oldfs);
61207
61208 if (host_err < 0)
61209diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
61210index fea6bd5..8ee9d81 100644
61211--- a/fs/nls/nls_base.c
61212+++ b/fs/nls/nls_base.c
61213@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
61214
61215 int register_nls(struct nls_table * nls)
61216 {
61217- struct nls_table ** tmp = &tables;
61218+ struct nls_table *tmp = tables;
61219
61220 if (nls->next)
61221 return -EBUSY;
61222
61223 spin_lock(&nls_lock);
61224- while (*tmp) {
61225- if (nls == *tmp) {
61226+ while (tmp) {
61227+ if (nls == tmp) {
61228 spin_unlock(&nls_lock);
61229 return -EBUSY;
61230 }
61231- tmp = &(*tmp)->next;
61232+ tmp = tmp->next;
61233 }
61234- nls->next = tables;
61235+ pax_open_kernel();
61236+ *(struct nls_table **)&nls->next = tables;
61237+ pax_close_kernel();
61238 tables = nls;
61239 spin_unlock(&nls_lock);
61240 return 0;
61241@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
61242
61243 int unregister_nls(struct nls_table * nls)
61244 {
61245- struct nls_table ** tmp = &tables;
61246+ struct nls_table * const * tmp = &tables;
61247
61248 spin_lock(&nls_lock);
61249 while (*tmp) {
61250 if (nls == *tmp) {
61251- *tmp = nls->next;
61252+ pax_open_kernel();
61253+ *(struct nls_table **)tmp = nls->next;
61254+ pax_close_kernel();
61255 spin_unlock(&nls_lock);
61256 return 0;
61257 }
61258diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
61259index 7424929..35f6be5 100644
61260--- a/fs/nls/nls_euc-jp.c
61261+++ b/fs/nls/nls_euc-jp.c
61262@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
61263 p_nls = load_nls("cp932");
61264
61265 if (p_nls) {
61266- table.charset2upper = p_nls->charset2upper;
61267- table.charset2lower = p_nls->charset2lower;
61268+ pax_open_kernel();
61269+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
61270+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
61271+ pax_close_kernel();
61272 return register_nls(&table);
61273 }
61274
61275diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
61276index e7bc1d7..06bd4bb 100644
61277--- a/fs/nls/nls_koi8-ru.c
61278+++ b/fs/nls/nls_koi8-ru.c
61279@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
61280 p_nls = load_nls("koi8-u");
61281
61282 if (p_nls) {
61283- table.charset2upper = p_nls->charset2upper;
61284- table.charset2lower = p_nls->charset2lower;
61285+ pax_open_kernel();
61286+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
61287+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
61288+ pax_close_kernel();
61289 return register_nls(&table);
61290 }
61291
61292diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
61293index e44cb64..7668ca4 100644
61294--- a/fs/notify/fanotify/fanotify_user.c
61295+++ b/fs/notify/fanotify/fanotify_user.c
61296@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
61297
61298 fd = fanotify_event_metadata.fd;
61299 ret = -EFAULT;
61300- if (copy_to_user(buf, &fanotify_event_metadata,
61301- fanotify_event_metadata.event_len))
61302+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
61303+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
61304 goto out_close_fd;
61305
61306 ret = prepare_for_access_response(group, event, fd);
61307@@ -888,9 +888,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
61308 {
61309 return sys_fanotify_mark(fanotify_fd, flags,
61310 #ifdef __BIG_ENDIAN
61311- ((__u64)mask1 << 32) | mask0,
61312-#else
61313 ((__u64)mask0 << 32) | mask1,
61314+#else
61315+ ((__u64)mask1 << 32) | mask0,
61316 #endif
61317 dfd, pathname);
61318 }
61319diff --git a/fs/notify/notification.c b/fs/notify/notification.c
61320index 7b51b05..5ea5ef6 100644
61321--- a/fs/notify/notification.c
61322+++ b/fs/notify/notification.c
61323@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
61324 * get set to 0 so it will never get 'freed'
61325 */
61326 static struct fsnotify_event *q_overflow_event;
61327-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
61328+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
61329
61330 /**
61331 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
61332@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
61333 */
61334 u32 fsnotify_get_cookie(void)
61335 {
61336- return atomic_inc_return(&fsnotify_sync_cookie);
61337+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
61338 }
61339 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
61340
61341diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
61342index 9e38daf..5727cae 100644
61343--- a/fs/ntfs/dir.c
61344+++ b/fs/ntfs/dir.c
61345@@ -1310,7 +1310,7 @@ find_next_index_buffer:
61346 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
61347 ~(s64)(ndir->itype.index.block_size - 1)));
61348 /* Bounds checks. */
61349- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
61350+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
61351 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
61352 "inode 0x%lx or driver bug.", vdir->i_ino);
61353 goto err_out;
61354diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
61355index ea4ba9d..1e13d34 100644
61356--- a/fs/ntfs/file.c
61357+++ b/fs/ntfs/file.c
61358@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
61359 char *addr;
61360 size_t total = 0;
61361 unsigned len;
61362- int left;
61363+ unsigned left;
61364
61365 do {
61366 len = PAGE_CACHE_SIZE - ofs;
61367diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
61368index 82650d5..db37dcf 100644
61369--- a/fs/ntfs/super.c
61370+++ b/fs/ntfs/super.c
61371@@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
61372 if (!silent)
61373 ntfs_error(sb, "Primary boot sector is invalid.");
61374 } else if (!silent)
61375- ntfs_error(sb, read_err_str, "primary");
61376+ ntfs_error(sb, read_err_str, "%s", "primary");
61377 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
61378 if (bh_primary)
61379 brelse(bh_primary);
61380@@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
61381 goto hotfix_primary_boot_sector;
61382 brelse(bh_backup);
61383 } else if (!silent)
61384- ntfs_error(sb, read_err_str, "backup");
61385+ ntfs_error(sb, read_err_str, "%s", "backup");
61386 /* Try to read NT3.51- backup boot sector. */
61387 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
61388 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
61389@@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
61390 "sector.");
61391 brelse(bh_backup);
61392 } else if (!silent)
61393- ntfs_error(sb, read_err_str, "backup");
61394+ ntfs_error(sb, read_err_str, "%s", "backup");
61395 /* We failed. Cleanup and return. */
61396 if (bh_primary)
61397 brelse(bh_primary);
61398diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
61399index cd5496b..26a1055 100644
61400--- a/fs/ocfs2/localalloc.c
61401+++ b/fs/ocfs2/localalloc.c
61402@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
61403 goto bail;
61404 }
61405
61406- atomic_inc(&osb->alloc_stats.moves);
61407+ atomic_inc_unchecked(&osb->alloc_stats.moves);
61408
61409 bail:
61410 if (handle)
61411diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
61412index 3a90347..c40bef8 100644
61413--- a/fs/ocfs2/ocfs2.h
61414+++ b/fs/ocfs2/ocfs2.h
61415@@ -235,11 +235,11 @@ enum ocfs2_vol_state
61416
61417 struct ocfs2_alloc_stats
61418 {
61419- atomic_t moves;
61420- atomic_t local_data;
61421- atomic_t bitmap_data;
61422- atomic_t bg_allocs;
61423- atomic_t bg_extends;
61424+ atomic_unchecked_t moves;
61425+ atomic_unchecked_t local_data;
61426+ atomic_unchecked_t bitmap_data;
61427+ atomic_unchecked_t bg_allocs;
61428+ atomic_unchecked_t bg_extends;
61429 };
61430
61431 enum ocfs2_local_alloc_state
61432diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
61433index 2c91452..77a3cd2 100644
61434--- a/fs/ocfs2/suballoc.c
61435+++ b/fs/ocfs2/suballoc.c
61436@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
61437 mlog_errno(status);
61438 goto bail;
61439 }
61440- atomic_inc(&osb->alloc_stats.bg_extends);
61441+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
61442
61443 /* You should never ask for this much metadata */
61444 BUG_ON(bits_wanted >
61445@@ -2000,7 +2000,7 @@ int ocfs2_claim_metadata(handle_t *handle,
61446 mlog_errno(status);
61447 goto bail;
61448 }
61449- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
61450+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
61451
61452 *suballoc_loc = res.sr_bg_blkno;
61453 *suballoc_bit_start = res.sr_bit_offset;
61454@@ -2164,7 +2164,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
61455 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
61456 res->sr_bits);
61457
61458- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
61459+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
61460
61461 BUG_ON(res->sr_bits != 1);
61462
61463@@ -2206,7 +2206,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
61464 mlog_errno(status);
61465 goto bail;
61466 }
61467- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
61468+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
61469
61470 BUG_ON(res.sr_bits != 1);
61471
61472@@ -2310,7 +2310,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
61473 cluster_start,
61474 num_clusters);
61475 if (!status)
61476- atomic_inc(&osb->alloc_stats.local_data);
61477+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
61478 } else {
61479 if (min_clusters > (osb->bitmap_cpg - 1)) {
61480 /* The only paths asking for contiguousness
61481@@ -2336,7 +2336,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
61482 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
61483 res.sr_bg_blkno,
61484 res.sr_bit_offset);
61485- atomic_inc(&osb->alloc_stats.bitmap_data);
61486+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
61487 *num_clusters = res.sr_bits;
61488 }
61489 }
61490diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
61491index c414929..5c9ee542 100644
61492--- a/fs/ocfs2/super.c
61493+++ b/fs/ocfs2/super.c
61494@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
61495 "%10s => GlobalAllocs: %d LocalAllocs: %d "
61496 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
61497 "Stats",
61498- atomic_read(&osb->alloc_stats.bitmap_data),
61499- atomic_read(&osb->alloc_stats.local_data),
61500- atomic_read(&osb->alloc_stats.bg_allocs),
61501- atomic_read(&osb->alloc_stats.moves),
61502- atomic_read(&osb->alloc_stats.bg_extends));
61503+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
61504+ atomic_read_unchecked(&osb->alloc_stats.local_data),
61505+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
61506+ atomic_read_unchecked(&osb->alloc_stats.moves),
61507+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
61508
61509 out += snprintf(buf + out, len - out,
61510 "%10s => State: %u Descriptor: %llu Size: %u bits "
61511@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
61512 spin_lock_init(&osb->osb_xattr_lock);
61513 ocfs2_init_steal_slots(osb);
61514
61515- atomic_set(&osb->alloc_stats.moves, 0);
61516- atomic_set(&osb->alloc_stats.local_data, 0);
61517- atomic_set(&osb->alloc_stats.bitmap_data, 0);
61518- atomic_set(&osb->alloc_stats.bg_allocs, 0);
61519- atomic_set(&osb->alloc_stats.bg_extends, 0);
61520+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
61521+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
61522+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
61523+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
61524+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
61525
61526 /* Copy the blockcheck stats from the superblock probe */
61527 osb->osb_ecc_stats = *stats;
61528diff --git a/fs/open.c b/fs/open.c
61529index 4b3e1ed..1c84599 100644
61530--- a/fs/open.c
61531+++ b/fs/open.c
61532@@ -32,6 +32,8 @@
61533 #include <linux/dnotify.h>
61534 #include <linux/compat.h>
61535
61536+#define CREATE_TRACE_POINTS
61537+#include <trace/events/fs.h>
61538 #include "internal.h"
61539
61540 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
61541@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
61542 error = locks_verify_truncate(inode, NULL, length);
61543 if (!error)
61544 error = security_path_truncate(path);
61545+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
61546+ error = -EACCES;
61547 if (!error)
61548 error = do_truncate(path->dentry, length, 0, NULL);
61549
61550@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
61551 error = locks_verify_truncate(inode, f.file, length);
61552 if (!error)
61553 error = security_path_truncate(&f.file->f_path);
61554+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
61555+ error = -EACCES;
61556 if (!error)
61557 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
61558 sb_end_write(inode->i_sb);
61559@@ -361,6 +367,9 @@ retry:
61560 if (__mnt_is_readonly(path.mnt))
61561 res = -EROFS;
61562
61563+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
61564+ res = -EACCES;
61565+
61566 out_path_release:
61567 path_put(&path);
61568 if (retry_estale(res, lookup_flags)) {
61569@@ -392,6 +401,8 @@ retry:
61570 if (error)
61571 goto dput_and_out;
61572
61573+ gr_log_chdir(path.dentry, path.mnt);
61574+
61575 set_fs_pwd(current->fs, &path);
61576
61577 dput_and_out:
61578@@ -421,6 +432,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
61579 goto out_putf;
61580
61581 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
61582+
61583+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
61584+ error = -EPERM;
61585+
61586+ if (!error)
61587+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
61588+
61589 if (!error)
61590 set_fs_pwd(current->fs, &f.file->f_path);
61591 out_putf:
61592@@ -450,7 +468,13 @@ retry:
61593 if (error)
61594 goto dput_and_out;
61595
61596+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
61597+ goto dput_and_out;
61598+
61599 set_fs_root(current->fs, &path);
61600+
61601+ gr_handle_chroot_chdir(&path);
61602+
61603 error = 0;
61604 dput_and_out:
61605 path_put(&path);
61606@@ -474,6 +498,16 @@ static int chmod_common(struct path *path, umode_t mode)
61607 return error;
61608 retry_deleg:
61609 mutex_lock(&inode->i_mutex);
61610+
61611+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
61612+ error = -EACCES;
61613+ goto out_unlock;
61614+ }
61615+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
61616+ error = -EACCES;
61617+ goto out_unlock;
61618+ }
61619+
61620 error = security_path_chmod(path, mode);
61621 if (error)
61622 goto out_unlock;
61623@@ -539,6 +573,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
61624 uid = make_kuid(current_user_ns(), user);
61625 gid = make_kgid(current_user_ns(), group);
61626
61627+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
61628+ return -EACCES;
61629+
61630 newattrs.ia_valid = ATTR_CTIME;
61631 if (user != (uid_t) -1) {
61632 if (!uid_valid(uid))
61633@@ -990,6 +1027,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
61634 } else {
61635 fsnotify_open(f);
61636 fd_install(fd, f);
61637+ trace_do_sys_open(tmp->name, flags, mode);
61638 }
61639 }
61640 putname(tmp);
61641diff --git a/fs/pipe.c b/fs/pipe.c
61642index 0e0752e..7cfdd50 100644
61643--- a/fs/pipe.c
61644+++ b/fs/pipe.c
61645@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
61646
61647 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
61648 {
61649- if (pipe->files)
61650+ if (atomic_read(&pipe->files))
61651 mutex_lock_nested(&pipe->mutex, subclass);
61652 }
61653
61654@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
61655
61656 void pipe_unlock(struct pipe_inode_info *pipe)
61657 {
61658- if (pipe->files)
61659+ if (atomic_read(&pipe->files))
61660 mutex_unlock(&pipe->mutex);
61661 }
61662 EXPORT_SYMBOL(pipe_unlock);
61663@@ -449,9 +449,9 @@ redo:
61664 }
61665 if (bufs) /* More to do? */
61666 continue;
61667- if (!pipe->writers)
61668+ if (!atomic_read(&pipe->writers))
61669 break;
61670- if (!pipe->waiting_writers) {
61671+ if (!atomic_read(&pipe->waiting_writers)) {
61672 /* syscall merging: Usually we must not sleep
61673 * if O_NONBLOCK is set, or if we got some data.
61674 * But if a writer sleeps in kernel space, then
61675@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
61676 ret = 0;
61677 __pipe_lock(pipe);
61678
61679- if (!pipe->readers) {
61680+ if (!atomic_read(&pipe->readers)) {
61681 send_sig(SIGPIPE, current, 0);
61682 ret = -EPIPE;
61683 goto out;
61684@@ -562,7 +562,7 @@ redo1:
61685 for (;;) {
61686 int bufs;
61687
61688- if (!pipe->readers) {
61689+ if (!atomic_read(&pipe->readers)) {
61690 send_sig(SIGPIPE, current, 0);
61691 if (!ret)
61692 ret = -EPIPE;
61693@@ -653,9 +653,9 @@ redo2:
61694 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61695 do_wakeup = 0;
61696 }
61697- pipe->waiting_writers++;
61698+ atomic_inc(&pipe->waiting_writers);
61699 pipe_wait(pipe);
61700- pipe->waiting_writers--;
61701+ atomic_dec(&pipe->waiting_writers);
61702 }
61703 out:
61704 __pipe_unlock(pipe);
61705@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
61706 mask = 0;
61707 if (filp->f_mode & FMODE_READ) {
61708 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
61709- if (!pipe->writers && filp->f_version != pipe->w_counter)
61710+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
61711 mask |= POLLHUP;
61712 }
61713
61714@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
61715 * Most Unices do not set POLLERR for FIFOs but on Linux they
61716 * behave exactly like pipes for poll().
61717 */
61718- if (!pipe->readers)
61719+ if (!atomic_read(&pipe->readers))
61720 mask |= POLLERR;
61721 }
61722
61723@@ -731,7 +731,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
61724 int kill = 0;
61725
61726 spin_lock(&inode->i_lock);
61727- if (!--pipe->files) {
61728+ if (atomic_dec_and_test(&pipe->files)) {
61729 inode->i_pipe = NULL;
61730 kill = 1;
61731 }
61732@@ -748,11 +748,11 @@ pipe_release(struct inode *inode, struct file *file)
61733
61734 __pipe_lock(pipe);
61735 if (file->f_mode & FMODE_READ)
61736- pipe->readers--;
61737+ atomic_dec(&pipe->readers);
61738 if (file->f_mode & FMODE_WRITE)
61739- pipe->writers--;
61740+ atomic_dec(&pipe->writers);
61741
61742- if (pipe->readers || pipe->writers) {
61743+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
61744 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
61745 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
61746 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
61747@@ -817,7 +817,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
61748 kfree(pipe);
61749 }
61750
61751-static struct vfsmount *pipe_mnt __read_mostly;
61752+struct vfsmount *pipe_mnt __read_mostly;
61753
61754 /*
61755 * pipefs_dname() is called from d_path().
61756@@ -847,8 +847,9 @@ static struct inode * get_pipe_inode(void)
61757 goto fail_iput;
61758
61759 inode->i_pipe = pipe;
61760- pipe->files = 2;
61761- pipe->readers = pipe->writers = 1;
61762+ atomic_set(&pipe->files, 2);
61763+ atomic_set(&pipe->readers, 1);
61764+ atomic_set(&pipe->writers, 1);
61765 inode->i_fop = &pipefifo_fops;
61766
61767 /*
61768@@ -1027,17 +1028,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
61769 spin_lock(&inode->i_lock);
61770 if (inode->i_pipe) {
61771 pipe = inode->i_pipe;
61772- pipe->files++;
61773+ atomic_inc(&pipe->files);
61774 spin_unlock(&inode->i_lock);
61775 } else {
61776 spin_unlock(&inode->i_lock);
61777 pipe = alloc_pipe_info();
61778 if (!pipe)
61779 return -ENOMEM;
61780- pipe->files = 1;
61781+ atomic_set(&pipe->files, 1);
61782 spin_lock(&inode->i_lock);
61783 if (unlikely(inode->i_pipe)) {
61784- inode->i_pipe->files++;
61785+ atomic_inc(&inode->i_pipe->files);
61786 spin_unlock(&inode->i_lock);
61787 free_pipe_info(pipe);
61788 pipe = inode->i_pipe;
61789@@ -1062,10 +1063,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
61790 * opened, even when there is no process writing the FIFO.
61791 */
61792 pipe->r_counter++;
61793- if (pipe->readers++ == 0)
61794+ if (atomic_inc_return(&pipe->readers) == 1)
61795 wake_up_partner(pipe);
61796
61797- if (!is_pipe && !pipe->writers) {
61798+ if (!is_pipe && !atomic_read(&pipe->writers)) {
61799 if ((filp->f_flags & O_NONBLOCK)) {
61800 /* suppress POLLHUP until we have
61801 * seen a writer */
61802@@ -1084,14 +1085,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
61803 * errno=ENXIO when there is no process reading the FIFO.
61804 */
61805 ret = -ENXIO;
61806- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
61807+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
61808 goto err;
61809
61810 pipe->w_counter++;
61811- if (!pipe->writers++)
61812+ if (atomic_inc_return(&pipe->writers) == 1)
61813 wake_up_partner(pipe);
61814
61815- if (!is_pipe && !pipe->readers) {
61816+ if (!is_pipe && !atomic_read(&pipe->readers)) {
61817 if (wait_for_partner(pipe, &pipe->r_counter))
61818 goto err_wr;
61819 }
61820@@ -1105,11 +1106,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
61821 * the process can at least talk to itself.
61822 */
61823
61824- pipe->readers++;
61825- pipe->writers++;
61826+ atomic_inc(&pipe->readers);
61827+ atomic_inc(&pipe->writers);
61828 pipe->r_counter++;
61829 pipe->w_counter++;
61830- if (pipe->readers == 1 || pipe->writers == 1)
61831+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
61832 wake_up_partner(pipe);
61833 break;
61834
61835@@ -1123,13 +1124,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
61836 return 0;
61837
61838 err_rd:
61839- if (!--pipe->readers)
61840+ if (atomic_dec_and_test(&pipe->readers))
61841 wake_up_interruptible(&pipe->wait);
61842 ret = -ERESTARTSYS;
61843 goto err;
61844
61845 err_wr:
61846- if (!--pipe->writers)
61847+ if (atomic_dec_and_test(&pipe->writers))
61848 wake_up_interruptible(&pipe->wait);
61849 ret = -ERESTARTSYS;
61850 goto err;
61851diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
61852index 2183fcf..3c32a98 100644
61853--- a/fs/proc/Kconfig
61854+++ b/fs/proc/Kconfig
61855@@ -30,7 +30,7 @@ config PROC_FS
61856
61857 config PROC_KCORE
61858 bool "/proc/kcore support" if !ARM
61859- depends on PROC_FS && MMU
61860+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
61861 help
61862 Provides a virtual ELF core file of the live kernel. This can
61863 be read with gdb and other ELF tools. No modifications can be
61864@@ -38,8 +38,8 @@ config PROC_KCORE
61865
61866 config PROC_VMCORE
61867 bool "/proc/vmcore support"
61868- depends on PROC_FS && CRASH_DUMP
61869- default y
61870+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
61871+ default n
61872 help
61873 Exports the dump image of crashed kernel in ELF format.
61874
61875@@ -63,8 +63,8 @@ config PROC_SYSCTL
61876 limited in memory.
61877
61878 config PROC_PAGE_MONITOR
61879- default y
61880- depends on PROC_FS && MMU
61881+ default n
61882+ depends on PROC_FS && MMU && !GRKERNSEC
61883 bool "Enable /proc page monitoring" if EXPERT
61884 help
61885 Various /proc files exist to monitor process memory utilization:
61886diff --git a/fs/proc/array.c b/fs/proc/array.c
61887index 1bd2077..2f7cfd5 100644
61888--- a/fs/proc/array.c
61889+++ b/fs/proc/array.c
61890@@ -60,6 +60,7 @@
61891 #include <linux/tty.h>
61892 #include <linux/string.h>
61893 #include <linux/mman.h>
61894+#include <linux/grsecurity.h>
61895 #include <linux/proc_fs.h>
61896 #include <linux/ioport.h>
61897 #include <linux/uaccess.h>
61898@@ -365,6 +366,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
61899 seq_putc(m, '\n');
61900 }
61901
61902+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61903+static inline void task_pax(struct seq_file *m, struct task_struct *p)
61904+{
61905+ if (p->mm)
61906+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
61907+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
61908+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
61909+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
61910+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
61911+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
61912+ else
61913+ seq_printf(m, "PaX:\t-----\n");
61914+}
61915+#endif
61916+
61917 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
61918 struct pid *pid, struct task_struct *task)
61919 {
61920@@ -383,9 +399,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
61921 task_cpus_allowed(m, task);
61922 cpuset_task_status_allowed(m, task);
61923 task_context_switch_counts(m, task);
61924+
61925+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
61926+ task_pax(m, task);
61927+#endif
61928+
61929+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
61930+ task_grsec_rbac(m, task);
61931+#endif
61932+
61933 return 0;
61934 }
61935
61936+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61937+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
61938+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
61939+ _mm->pax_flags & MF_PAX_SEGMEXEC))
61940+#endif
61941+
61942 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61943 struct pid *pid, struct task_struct *task, int whole)
61944 {
61945@@ -407,6 +438,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61946 char tcomm[sizeof(task->comm)];
61947 unsigned long flags;
61948
61949+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61950+ if (current->exec_id != m->exec_id) {
61951+ gr_log_badprocpid("stat");
61952+ return 0;
61953+ }
61954+#endif
61955+
61956 state = *get_task_state(task);
61957 vsize = eip = esp = 0;
61958 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
61959@@ -478,6 +516,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61960 gtime = task_gtime(task);
61961 }
61962
61963+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61964+ if (PAX_RAND_FLAGS(mm)) {
61965+ eip = 0;
61966+ esp = 0;
61967+ wchan = 0;
61968+ }
61969+#endif
61970+#ifdef CONFIG_GRKERNSEC_HIDESYM
61971+ wchan = 0;
61972+ eip =0;
61973+ esp =0;
61974+#endif
61975+
61976 /* scale priority and nice values from timeslices to -20..20 */
61977 /* to make it look like a "normal" Unix priority/nice value */
61978 priority = task_prio(task);
61979@@ -514,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61980 seq_put_decimal_ull(m, ' ', vsize);
61981 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
61982 seq_put_decimal_ull(m, ' ', rsslim);
61983+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61984+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
61985+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
61986+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
61987+#else
61988 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
61989 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
61990 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
61991+#endif
61992 seq_put_decimal_ull(m, ' ', esp);
61993 seq_put_decimal_ull(m, ' ', eip);
61994 /* The signal information here is obsolete.
61995@@ -538,7 +595,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
61996 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
61997 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
61998
61999- if (mm && permitted) {
62000+ if (mm && permitted
62001+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62002+ && !PAX_RAND_FLAGS(mm)
62003+#endif
62004+ ) {
62005 seq_put_decimal_ull(m, ' ', mm->start_data);
62006 seq_put_decimal_ull(m, ' ', mm->end_data);
62007 seq_put_decimal_ull(m, ' ', mm->start_brk);
62008@@ -576,8 +637,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
62009 struct pid *pid, struct task_struct *task)
62010 {
62011 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
62012- struct mm_struct *mm = get_task_mm(task);
62013+ struct mm_struct *mm;
62014
62015+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62016+ if (current->exec_id != m->exec_id) {
62017+ gr_log_badprocpid("statm");
62018+ return 0;
62019+ }
62020+#endif
62021+ mm = get_task_mm(task);
62022 if (mm) {
62023 size = task_statm(mm, &shared, &text, &data, &resident);
62024 mmput(mm);
62025@@ -600,6 +668,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
62026 return 0;
62027 }
62028
62029+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
62030+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
62031+{
62032+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
62033+}
62034+#endif
62035+
62036 #ifdef CONFIG_CHECKPOINT_RESTORE
62037 static struct pid *
62038 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
62039diff --git a/fs/proc/base.c b/fs/proc/base.c
62040index 03c8d74..4efb575 100644
62041--- a/fs/proc/base.c
62042+++ b/fs/proc/base.c
62043@@ -113,6 +113,14 @@ struct pid_entry {
62044 union proc_op op;
62045 };
62046
62047+struct getdents_callback {
62048+ struct linux_dirent __user * current_dir;
62049+ struct linux_dirent __user * previous;
62050+ struct file * file;
62051+ int count;
62052+ int error;
62053+};
62054+
62055 #define NOD(NAME, MODE, IOP, FOP, OP) { \
62056 .name = (NAME), \
62057 .len = sizeof(NAME) - 1, \
62058@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
62059 if (!mm->arg_end)
62060 goto out_mm; /* Shh! No looking before we're done */
62061
62062+ if (gr_acl_handle_procpidmem(task))
62063+ goto out_mm;
62064+
62065 len = mm->arg_end - mm->arg_start;
62066
62067 if (len > PAGE_SIZE)
62068@@ -237,12 +248,28 @@ out:
62069 return res;
62070 }
62071
62072+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62073+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
62074+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
62075+ _mm->pax_flags & MF_PAX_SEGMEXEC))
62076+#endif
62077+
62078 static int proc_pid_auxv(struct task_struct *task, char *buffer)
62079 {
62080 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
62081 int res = PTR_ERR(mm);
62082 if (mm && !IS_ERR(mm)) {
62083 unsigned int nwords = 0;
62084+
62085+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62086+ /* allow if we're currently ptracing this task */
62087+ if (PAX_RAND_FLAGS(mm) &&
62088+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
62089+ mmput(mm);
62090+ return 0;
62091+ }
62092+#endif
62093+
62094 do {
62095 nwords += 2;
62096 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
62097@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
62098 }
62099
62100
62101-#ifdef CONFIG_KALLSYMS
62102+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62103 /*
62104 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
62105 * Returns the resolved symbol. If that fails, simply return the address.
62106@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
62107 mutex_unlock(&task->signal->cred_guard_mutex);
62108 }
62109
62110-#ifdef CONFIG_STACKTRACE
62111+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62112
62113 #define MAX_STACK_TRACE_DEPTH 64
62114
62115@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
62116 return count;
62117 }
62118
62119-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
62120+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
62121 static int proc_pid_syscall(struct task_struct *task, char *buffer)
62122 {
62123 long nr;
62124@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
62125 /************************************************************************/
62126
62127 /* permission checks */
62128-static int proc_fd_access_allowed(struct inode *inode)
62129+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
62130 {
62131 struct task_struct *task;
62132 int allowed = 0;
62133@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
62134 */
62135 task = get_proc_task(inode);
62136 if (task) {
62137- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
62138+ if (log)
62139+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
62140+ else
62141+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
62142 put_task_struct(task);
62143 }
62144 return allowed;
62145@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
62146 struct task_struct *task,
62147 int hide_pid_min)
62148 {
62149+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
62150+ return false;
62151+
62152+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62153+ rcu_read_lock();
62154+ {
62155+ const struct cred *tmpcred = current_cred();
62156+ const struct cred *cred = __task_cred(task);
62157+
62158+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
62159+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62160+ || in_group_p(grsec_proc_gid)
62161+#endif
62162+ ) {
62163+ rcu_read_unlock();
62164+ return true;
62165+ }
62166+ }
62167+ rcu_read_unlock();
62168+
62169+ if (!pid->hide_pid)
62170+ return false;
62171+#endif
62172+
62173 if (pid->hide_pid < hide_pid_min)
62174 return true;
62175 if (in_group_p(pid->pid_gid))
62176 return true;
62177+
62178 return ptrace_may_access(task, PTRACE_MODE_READ);
62179 }
62180
62181@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
62182 put_task_struct(task);
62183
62184 if (!has_perms) {
62185+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62186+ {
62187+#else
62188 if (pid->hide_pid == 2) {
62189+#endif
62190 /*
62191 * Let's make getdents(), stat(), and open()
62192 * consistent with each other. If a process
62193@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
62194 if (!task)
62195 return -ESRCH;
62196
62197+ if (gr_acl_handle_procpidmem(task)) {
62198+ put_task_struct(task);
62199+ return -EPERM;
62200+ }
62201+
62202 mm = mm_access(task, mode);
62203 put_task_struct(task);
62204
62205@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
62206
62207 file->private_data = mm;
62208
62209+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62210+ file->f_version = current->exec_id;
62211+#endif
62212+
62213 return 0;
62214 }
62215
62216@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
62217 ssize_t copied;
62218 char *page;
62219
62220+#ifdef CONFIG_GRKERNSEC
62221+ if (write)
62222+ return -EPERM;
62223+#endif
62224+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62225+ if (file->f_version != current->exec_id) {
62226+ gr_log_badprocpid("mem");
62227+ return 0;
62228+ }
62229+#endif
62230+
62231 if (!mm)
62232 return 0;
62233
62234@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
62235 goto free;
62236
62237 while (count > 0) {
62238- int this_len = min_t(int, count, PAGE_SIZE);
62239+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
62240
62241 if (write && copy_from_user(page, buf, this_len)) {
62242 copied = -EFAULT;
62243@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
62244 if (!mm)
62245 return 0;
62246
62247+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62248+ if (file->f_version != current->exec_id) {
62249+ gr_log_badprocpid("environ");
62250+ return 0;
62251+ }
62252+#endif
62253+
62254 page = (char *)__get_free_page(GFP_TEMPORARY);
62255 if (!page)
62256 return -ENOMEM;
62257@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
62258 goto free;
62259 while (count > 0) {
62260 size_t this_len, max_len;
62261- int retval;
62262+ ssize_t retval;
62263
62264 if (src >= (mm->env_end - mm->env_start))
62265 break;
62266@@ -1467,7 +1553,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
62267 int error = -EACCES;
62268
62269 /* Are we allowed to snoop on the tasks file descriptors? */
62270- if (!proc_fd_access_allowed(inode))
62271+ if (!proc_fd_access_allowed(inode, 0))
62272 goto out;
62273
62274 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
62275@@ -1511,8 +1597,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
62276 struct path path;
62277
62278 /* Are we allowed to snoop on the tasks file descriptors? */
62279- if (!proc_fd_access_allowed(inode))
62280- goto out;
62281+ /* logging this is needed for learning on chromium to work properly,
62282+ but we don't want to flood the logs from 'ps' which does a readlink
62283+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
62284+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
62285+ */
62286+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
62287+ if (!proc_fd_access_allowed(inode,0))
62288+ goto out;
62289+ } else {
62290+ if (!proc_fd_access_allowed(inode,1))
62291+ goto out;
62292+ }
62293
62294 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
62295 if (error)
62296@@ -1562,7 +1658,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
62297 rcu_read_lock();
62298 cred = __task_cred(task);
62299 inode->i_uid = cred->euid;
62300+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62301+ inode->i_gid = grsec_proc_gid;
62302+#else
62303 inode->i_gid = cred->egid;
62304+#endif
62305 rcu_read_unlock();
62306 }
62307 security_task_to_inode(task, inode);
62308@@ -1598,10 +1698,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
62309 return -ENOENT;
62310 }
62311 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
62312+#ifdef CONFIG_GRKERNSEC_PROC_USER
62313+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
62314+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62315+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
62316+#endif
62317 task_dumpable(task)) {
62318 cred = __task_cred(task);
62319 stat->uid = cred->euid;
62320+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62321+ stat->gid = grsec_proc_gid;
62322+#else
62323 stat->gid = cred->egid;
62324+#endif
62325 }
62326 }
62327 rcu_read_unlock();
62328@@ -1639,11 +1748,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
62329
62330 if (task) {
62331 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
62332+#ifdef CONFIG_GRKERNSEC_PROC_USER
62333+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
62334+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62335+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
62336+#endif
62337 task_dumpable(task)) {
62338 rcu_read_lock();
62339 cred = __task_cred(task);
62340 inode->i_uid = cred->euid;
62341+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62342+ inode->i_gid = grsec_proc_gid;
62343+#else
62344 inode->i_gid = cred->egid;
62345+#endif
62346 rcu_read_unlock();
62347 } else {
62348 inode->i_uid = GLOBAL_ROOT_UID;
62349@@ -2172,6 +2290,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
62350 if (!task)
62351 goto out_no_task;
62352
62353+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
62354+ goto out;
62355+
62356 /*
62357 * Yes, it does not scale. And it should not. Don't add
62358 * new entries into /proc/<tgid>/ without very good reasons.
62359@@ -2202,6 +2323,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
62360 if (!task)
62361 return -ENOENT;
62362
62363+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
62364+ goto out;
62365+
62366 if (!dir_emit_dots(file, ctx))
62367 goto out;
62368
62369@@ -2591,7 +2715,7 @@ static const struct pid_entry tgid_base_stuff[] = {
62370 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
62371 #endif
62372 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
62373-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
62374+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
62375 INF("syscall", S_IRUGO, proc_pid_syscall),
62376 #endif
62377 INF("cmdline", S_IRUGO, proc_pid_cmdline),
62378@@ -2616,10 +2740,10 @@ static const struct pid_entry tgid_base_stuff[] = {
62379 #ifdef CONFIG_SECURITY
62380 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
62381 #endif
62382-#ifdef CONFIG_KALLSYMS
62383+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62384 INF("wchan", S_IRUGO, proc_pid_wchan),
62385 #endif
62386-#ifdef CONFIG_STACKTRACE
62387+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62388 ONE("stack", S_IRUGO, proc_pid_stack),
62389 #endif
62390 #ifdef CONFIG_SCHEDSTATS
62391@@ -2653,6 +2777,9 @@ static const struct pid_entry tgid_base_stuff[] = {
62392 #ifdef CONFIG_HARDWALL
62393 INF("hardwall", S_IRUGO, proc_pid_hardwall),
62394 #endif
62395+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
62396+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
62397+#endif
62398 #ifdef CONFIG_USER_NS
62399 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
62400 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
62401@@ -2783,7 +2910,14 @@ static int proc_pid_instantiate(struct inode *dir,
62402 if (!inode)
62403 goto out;
62404
62405+#ifdef CONFIG_GRKERNSEC_PROC_USER
62406+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
62407+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62408+ inode->i_gid = grsec_proc_gid;
62409+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
62410+#else
62411 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
62412+#endif
62413 inode->i_op = &proc_tgid_base_inode_operations;
62414 inode->i_fop = &proc_tgid_base_operations;
62415 inode->i_flags|=S_IMMUTABLE;
62416@@ -2821,7 +2955,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
62417 if (!task)
62418 goto out;
62419
62420+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
62421+ goto out_put_task;
62422+
62423 result = proc_pid_instantiate(dir, dentry, task, NULL);
62424+out_put_task:
62425 put_task_struct(task);
62426 out:
62427 return ERR_PTR(result);
62428@@ -2927,7 +3065,7 @@ static const struct pid_entry tid_base_stuff[] = {
62429 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
62430 #endif
62431 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
62432-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
62433+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
62434 INF("syscall", S_IRUGO, proc_pid_syscall),
62435 #endif
62436 INF("cmdline", S_IRUGO, proc_pid_cmdline),
62437@@ -2954,10 +3092,10 @@ static const struct pid_entry tid_base_stuff[] = {
62438 #ifdef CONFIG_SECURITY
62439 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
62440 #endif
62441-#ifdef CONFIG_KALLSYMS
62442+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62443 INF("wchan", S_IRUGO, proc_pid_wchan),
62444 #endif
62445-#ifdef CONFIG_STACKTRACE
62446+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62447 ONE("stack", S_IRUGO, proc_pid_stack),
62448 #endif
62449 #ifdef CONFIG_SCHEDSTATS
62450diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
62451index 82676e3..5f8518a 100644
62452--- a/fs/proc/cmdline.c
62453+++ b/fs/proc/cmdline.c
62454@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
62455
62456 static int __init proc_cmdline_init(void)
62457 {
62458+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62459+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
62460+#else
62461 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
62462+#endif
62463 return 0;
62464 }
62465 module_init(proc_cmdline_init);
62466diff --git a/fs/proc/devices.c b/fs/proc/devices.c
62467index b143471..bb105e5 100644
62468--- a/fs/proc/devices.c
62469+++ b/fs/proc/devices.c
62470@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
62471
62472 static int __init proc_devices_init(void)
62473 {
62474+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62475+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
62476+#else
62477 proc_create("devices", 0, NULL, &proc_devinfo_operations);
62478+#endif
62479 return 0;
62480 }
62481 module_init(proc_devices_init);
62482diff --git a/fs/proc/fd.c b/fs/proc/fd.c
62483index 985ea88..d118a0a 100644
62484--- a/fs/proc/fd.c
62485+++ b/fs/proc/fd.c
62486@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
62487 if (!task)
62488 return -ENOENT;
62489
62490- files = get_files_struct(task);
62491+ if (!gr_acl_handle_procpidmem(task))
62492+ files = get_files_struct(task);
62493 put_task_struct(task);
62494
62495 if (files) {
62496@@ -283,11 +284,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
62497 */
62498 int proc_fd_permission(struct inode *inode, int mask)
62499 {
62500+ struct task_struct *task;
62501 int rv = generic_permission(inode, mask);
62502- if (rv == 0)
62503- return 0;
62504+
62505 if (task_tgid(current) == proc_pid(inode))
62506 rv = 0;
62507+
62508+ task = get_proc_task(inode);
62509+ if (task == NULL)
62510+ return rv;
62511+
62512+ if (gr_acl_handle_procpidmem(task))
62513+ rv = -EACCES;
62514+
62515+ put_task_struct(task);
62516+
62517 return rv;
62518 }
62519
62520diff --git a/fs/proc/inode.c b/fs/proc/inode.c
62521index 124fc43..8afbb02 100644
62522--- a/fs/proc/inode.c
62523+++ b/fs/proc/inode.c
62524@@ -23,11 +23,17 @@
62525 #include <linux/slab.h>
62526 #include <linux/mount.h>
62527 #include <linux/magic.h>
62528+#include <linux/grsecurity.h>
62529
62530 #include <asm/uaccess.h>
62531
62532 #include "internal.h"
62533
62534+#ifdef CONFIG_PROC_SYSCTL
62535+extern const struct inode_operations proc_sys_inode_operations;
62536+extern const struct inode_operations proc_sys_dir_operations;
62537+#endif
62538+
62539 static void proc_evict_inode(struct inode *inode)
62540 {
62541 struct proc_dir_entry *de;
62542@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
62543 ns = PROC_I(inode)->ns.ns;
62544 if (ns_ops && ns)
62545 ns_ops->put(ns);
62546+
62547+#ifdef CONFIG_PROC_SYSCTL
62548+ if (inode->i_op == &proc_sys_inode_operations ||
62549+ inode->i_op == &proc_sys_dir_operations)
62550+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
62551+#endif
62552+
62553 }
62554
62555 static struct kmem_cache * proc_inode_cachep;
62556@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
62557 if (de->mode) {
62558 inode->i_mode = de->mode;
62559 inode->i_uid = de->uid;
62560+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62561+ inode->i_gid = grsec_proc_gid;
62562+#else
62563 inode->i_gid = de->gid;
62564+#endif
62565 }
62566 if (de->size)
62567 inode->i_size = de->size;
62568diff --git a/fs/proc/internal.h b/fs/proc/internal.h
62569index 651d09a..3d7f0bf 100644
62570--- a/fs/proc/internal.h
62571+++ b/fs/proc/internal.h
62572@@ -48,7 +48,7 @@ struct proc_dir_entry {
62573 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
62574 u8 namelen;
62575 char name[];
62576-};
62577+} __randomize_layout;
62578
62579 union proc_op {
62580 int (*proc_get_link)(struct dentry *, struct path *);
62581@@ -67,7 +67,7 @@ struct proc_inode {
62582 struct ctl_table *sysctl_entry;
62583 struct proc_ns ns;
62584 struct inode vfs_inode;
62585-};
62586+} __randomize_layout;
62587
62588 /*
62589 * General functions
62590@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
62591 struct pid *, struct task_struct *);
62592 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
62593 struct pid *, struct task_struct *);
62594+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
62595+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
62596+#endif
62597
62598 /*
62599 * base.c
62600diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
62601index 05029c0..7ea1987 100644
62602--- a/fs/proc/interrupts.c
62603+++ b/fs/proc/interrupts.c
62604@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
62605
62606 static int __init proc_interrupts_init(void)
62607 {
62608+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62609+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
62610+#else
62611 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
62612+#endif
62613 return 0;
62614 }
62615 module_init(proc_interrupts_init);
62616diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
62617index 5ed0e52..a1c1f2e 100644
62618--- a/fs/proc/kcore.c
62619+++ b/fs/proc/kcore.c
62620@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
62621 * the addresses in the elf_phdr on our list.
62622 */
62623 start = kc_offset_to_vaddr(*fpos - elf_buflen);
62624- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
62625+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
62626+ if (tsz > buflen)
62627 tsz = buflen;
62628-
62629+
62630 while (buflen) {
62631 struct kcore_list *m;
62632
62633@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
62634 kfree(elf_buf);
62635 } else {
62636 if (kern_addr_valid(start)) {
62637- unsigned long n;
62638+ char *elf_buf;
62639+ mm_segment_t oldfs;
62640
62641- n = copy_to_user(buffer, (char *)start, tsz);
62642- /*
62643- * We cannot distinguish between fault on source
62644- * and fault on destination. When this happens
62645- * we clear too and hope it will trigger the
62646- * EFAULT again.
62647- */
62648- if (n) {
62649- if (clear_user(buffer + tsz - n,
62650- n))
62651+ elf_buf = kmalloc(tsz, GFP_KERNEL);
62652+ if (!elf_buf)
62653+ return -ENOMEM;
62654+ oldfs = get_fs();
62655+ set_fs(KERNEL_DS);
62656+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
62657+ set_fs(oldfs);
62658+ if (copy_to_user(buffer, elf_buf, tsz)) {
62659+ kfree(elf_buf);
62660 return -EFAULT;
62661+ }
62662 }
62663+ set_fs(oldfs);
62664+ kfree(elf_buf);
62665 } else {
62666 if (clear_user(buffer, tsz))
62667 return -EFAULT;
62668@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
62669
62670 static int open_kcore(struct inode *inode, struct file *filp)
62671 {
62672+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62673+ return -EPERM;
62674+#endif
62675 if (!capable(CAP_SYS_RAWIO))
62676 return -EPERM;
62677 if (kcore_need_update)
62678diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
62679index a77d2b2..a9153f0 100644
62680--- a/fs/proc/meminfo.c
62681+++ b/fs/proc/meminfo.c
62682@@ -150,7 +150,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
62683 vmi.used >> 10,
62684 vmi.largest_chunk >> 10
62685 #ifdef CONFIG_MEMORY_FAILURE
62686- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
62687+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
62688 #endif
62689 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
62690 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
62691diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
62692index 5f9bc8a..5c35f08 100644
62693--- a/fs/proc/nommu.c
62694+++ b/fs/proc/nommu.c
62695@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
62696
62697 if (file) {
62698 seq_pad(m, ' ');
62699- seq_path(m, &file->f_path, "");
62700+ seq_path(m, &file->f_path, "\n\\");
62701 }
62702
62703 seq_putc(m, '\n');
62704diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
62705index 4677bb7..408e936 100644
62706--- a/fs/proc/proc_net.c
62707+++ b/fs/proc/proc_net.c
62708@@ -23,6 +23,7 @@
62709 #include <linux/nsproxy.h>
62710 #include <net/net_namespace.h>
62711 #include <linux/seq_file.h>
62712+#include <linux/grsecurity.h>
62713
62714 #include "internal.h"
62715
62716@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
62717 struct task_struct *task;
62718 struct nsproxy *ns;
62719 struct net *net = NULL;
62720+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62721+ const struct cred *cred = current_cred();
62722+#endif
62723+
62724+#ifdef CONFIG_GRKERNSEC_PROC_USER
62725+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
62726+ return net;
62727+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62728+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
62729+ return net;
62730+#endif
62731
62732 rcu_read_lock();
62733 task = pid_task(proc_pid(dir), PIDTYPE_PID);
62734diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
62735index 7129046..6914844 100644
62736--- a/fs/proc/proc_sysctl.c
62737+++ b/fs/proc/proc_sysctl.c
62738@@ -11,13 +11,21 @@
62739 #include <linux/namei.h>
62740 #include <linux/mm.h>
62741 #include <linux/module.h>
62742+#include <linux/nsproxy.h>
62743+#ifdef CONFIG_GRKERNSEC
62744+#include <net/net_namespace.h>
62745+#endif
62746 #include "internal.h"
62747
62748+extern int gr_handle_chroot_sysctl(const int op);
62749+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62750+ const int op);
62751+
62752 static const struct dentry_operations proc_sys_dentry_operations;
62753 static const struct file_operations proc_sys_file_operations;
62754-static const struct inode_operations proc_sys_inode_operations;
62755+const struct inode_operations proc_sys_inode_operations;
62756 static const struct file_operations proc_sys_dir_file_operations;
62757-static const struct inode_operations proc_sys_dir_operations;
62758+const struct inode_operations proc_sys_dir_operations;
62759
62760 void proc_sys_poll_notify(struct ctl_table_poll *poll)
62761 {
62762@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
62763
62764 err = NULL;
62765 d_set_d_op(dentry, &proc_sys_dentry_operations);
62766+
62767+ gr_handle_proc_create(dentry, inode);
62768+
62769 d_add(dentry, inode);
62770
62771 out:
62772@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
62773 struct inode *inode = file_inode(filp);
62774 struct ctl_table_header *head = grab_header(inode);
62775 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
62776+ int op = write ? MAY_WRITE : MAY_READ;
62777 ssize_t error;
62778 size_t res;
62779
62780@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
62781 * and won't be until we finish.
62782 */
62783 error = -EPERM;
62784- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
62785+ if (sysctl_perm(head, table, op))
62786 goto out;
62787
62788 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
62789@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
62790 if (!table->proc_handler)
62791 goto out;
62792
62793+#ifdef CONFIG_GRKERNSEC
62794+ error = -EPERM;
62795+ if (gr_handle_chroot_sysctl(op))
62796+ goto out;
62797+ dget(filp->f_path.dentry);
62798+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
62799+ dput(filp->f_path.dentry);
62800+ goto out;
62801+ }
62802+ dput(filp->f_path.dentry);
62803+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
62804+ goto out;
62805+ if (write) {
62806+ if (current->nsproxy->net_ns != table->extra2) {
62807+ if (!capable(CAP_SYS_ADMIN))
62808+ goto out;
62809+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
62810+ goto out;
62811+ }
62812+#endif
62813+
62814 /* careful: calling conventions are nasty here */
62815 res = count;
62816 error = table->proc_handler(table, write, buf, &res, ppos);
62817@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
62818 return false;
62819 } else {
62820 d_set_d_op(child, &proc_sys_dentry_operations);
62821+
62822+ gr_handle_proc_create(child, inode);
62823+
62824 d_add(child, inode);
62825 }
62826 } else {
62827@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
62828 if ((*pos)++ < ctx->pos)
62829 return true;
62830
62831+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
62832+ return 0;
62833+
62834 if (unlikely(S_ISLNK(table->mode)))
62835 res = proc_sys_link_fill_cache(file, ctx, head, table);
62836 else
62837@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
62838 if (IS_ERR(head))
62839 return PTR_ERR(head);
62840
62841+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
62842+ return -ENOENT;
62843+
62844 generic_fillattr(inode, stat);
62845 if (table)
62846 stat->mode = (stat->mode & S_IFMT) | table->mode;
62847@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
62848 .llseek = generic_file_llseek,
62849 };
62850
62851-static const struct inode_operations proc_sys_inode_operations = {
62852+const struct inode_operations proc_sys_inode_operations = {
62853 .permission = proc_sys_permission,
62854 .setattr = proc_sys_setattr,
62855 .getattr = proc_sys_getattr,
62856 };
62857
62858-static const struct inode_operations proc_sys_dir_operations = {
62859+const struct inode_operations proc_sys_dir_operations = {
62860 .lookup = proc_sys_lookup,
62861 .permission = proc_sys_permission,
62862 .setattr = proc_sys_setattr,
62863@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
62864 static struct ctl_dir *new_dir(struct ctl_table_set *set,
62865 const char *name, int namelen)
62866 {
62867- struct ctl_table *table;
62868+ ctl_table_no_const *table;
62869 struct ctl_dir *new;
62870 struct ctl_node *node;
62871 char *new_name;
62872@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
62873 return NULL;
62874
62875 node = (struct ctl_node *)(new + 1);
62876- table = (struct ctl_table *)(node + 1);
62877+ table = (ctl_table_no_const *)(node + 1);
62878 new_name = (char *)(table + 2);
62879 memcpy(new_name, name, namelen);
62880 new_name[namelen] = '\0';
62881@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
62882 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
62883 struct ctl_table_root *link_root)
62884 {
62885- struct ctl_table *link_table, *entry, *link;
62886+ ctl_table_no_const *link_table, *link;
62887+ struct ctl_table *entry;
62888 struct ctl_table_header *links;
62889 struct ctl_node *node;
62890 char *link_name;
62891@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
62892 return NULL;
62893
62894 node = (struct ctl_node *)(links + 1);
62895- link_table = (struct ctl_table *)(node + nr_entries);
62896+ link_table = (ctl_table_no_const *)(node + nr_entries);
62897 link_name = (char *)&link_table[nr_entries + 1];
62898
62899 for (link = link_table, entry = table; entry->procname; link++, entry++) {
62900@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
62901 struct ctl_table_header ***subheader, struct ctl_table_set *set,
62902 struct ctl_table *table)
62903 {
62904- struct ctl_table *ctl_table_arg = NULL;
62905- struct ctl_table *entry, *files;
62906+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
62907+ struct ctl_table *entry;
62908 int nr_files = 0;
62909 int nr_dirs = 0;
62910 int err = -ENOMEM;
62911@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
62912 nr_files++;
62913 }
62914
62915- files = table;
62916 /* If there are mixed files and directories we need a new table */
62917 if (nr_dirs && nr_files) {
62918- struct ctl_table *new;
62919+ ctl_table_no_const *new;
62920 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
62921 GFP_KERNEL);
62922 if (!files)
62923@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
62924 /* Register everything except a directory full of subdirectories */
62925 if (nr_files || !nr_dirs) {
62926 struct ctl_table_header *header;
62927- header = __register_sysctl_table(set, path, files);
62928+ header = __register_sysctl_table(set, path, files ? files : table);
62929 if (!header) {
62930 kfree(ctl_table_arg);
62931 goto out;
62932diff --git a/fs/proc/root.c b/fs/proc/root.c
62933index 87dbcbe..55e1b4d 100644
62934--- a/fs/proc/root.c
62935+++ b/fs/proc/root.c
62936@@ -186,7 +186,15 @@ void __init proc_root_init(void)
62937 #ifdef CONFIG_PROC_DEVICETREE
62938 proc_device_tree_init();
62939 #endif
62940+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62941+#ifdef CONFIG_GRKERNSEC_PROC_USER
62942+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
62943+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62944+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
62945+#endif
62946+#else
62947 proc_mkdir("bus", NULL);
62948+#endif
62949 proc_sys_init();
62950 }
62951
62952diff --git a/fs/proc/stat.c b/fs/proc/stat.c
62953index 1cf86c0..0ee1ca5 100644
62954--- a/fs/proc/stat.c
62955+++ b/fs/proc/stat.c
62956@@ -11,6 +11,7 @@
62957 #include <linux/irqnr.h>
62958 #include <asm/cputime.h>
62959 #include <linux/tick.h>
62960+#include <linux/grsecurity.h>
62961
62962 #ifndef arch_irq_stat_cpu
62963 #define arch_irq_stat_cpu(cpu) 0
62964@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
62965 u64 sum_softirq = 0;
62966 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
62967 struct timespec boottime;
62968+ int unrestricted = 1;
62969+
62970+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62971+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62972+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
62973+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62974+ && !in_group_p(grsec_proc_gid)
62975+#endif
62976+ )
62977+ unrestricted = 0;
62978+#endif
62979+#endif
62980
62981 user = nice = system = idle = iowait =
62982 irq = softirq = steal = 0;
62983@@ -94,6 +107,7 @@ static int show_stat(struct seq_file *p, void *v)
62984 getboottime(&boottime);
62985 jif = boottime.tv_sec;
62986
62987+ if (unrestricted) {
62988 for_each_possible_cpu(i) {
62989 user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
62990 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
62991@@ -116,6 +130,7 @@ static int show_stat(struct seq_file *p, void *v)
62992 }
62993 }
62994 sum += arch_irq_stat();
62995+ }
62996
62997 seq_puts(p, "cpu ");
62998 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
62999@@ -131,6 +146,7 @@ static int show_stat(struct seq_file *p, void *v)
63000 seq_putc(p, '\n');
63001
63002 for_each_online_cpu(i) {
63003+ if (unrestricted) {
63004 /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
63005 user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
63006 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
63007@@ -142,6 +158,7 @@ static int show_stat(struct seq_file *p, void *v)
63008 steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
63009 guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
63010 guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
63011+ }
63012 seq_printf(p, "cpu%d", i);
63013 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
63014 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
63015@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
63016
63017 /* sum again ? it could be updated? */
63018 for_each_irq_nr(j)
63019- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
63020+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
63021
63022 seq_printf(p,
63023 "\nctxt %llu\n"
63024@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
63025 "processes %lu\n"
63026 "procs_running %lu\n"
63027 "procs_blocked %lu\n",
63028- nr_context_switches(),
63029+ unrestricted ? nr_context_switches() : 0ULL,
63030 (unsigned long)jif,
63031- total_forks,
63032- nr_running(),
63033- nr_iowait());
63034+ unrestricted ? total_forks : 0UL,
63035+ unrestricted ? nr_running() : 0UL,
63036+ unrestricted ? nr_iowait() : 0UL);
63037
63038 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
63039
63040diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
63041index fb52b54..5fc7c14 100644
63042--- a/fs/proc/task_mmu.c
63043+++ b/fs/proc/task_mmu.c
63044@@ -12,12 +12,19 @@
63045 #include <linux/swap.h>
63046 #include <linux/swapops.h>
63047 #include <linux/mmu_notifier.h>
63048+#include <linux/grsecurity.h>
63049
63050 #include <asm/elf.h>
63051 #include <asm/uaccess.h>
63052 #include <asm/tlbflush.h>
63053 #include "internal.h"
63054
63055+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63056+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
63057+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
63058+ _mm->pax_flags & MF_PAX_SEGMEXEC))
63059+#endif
63060+
63061 void task_mem(struct seq_file *m, struct mm_struct *mm)
63062 {
63063 unsigned long data, text, lib, swap;
63064@@ -53,8 +60,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63065 "VmExe:\t%8lu kB\n"
63066 "VmLib:\t%8lu kB\n"
63067 "VmPTE:\t%8lu kB\n"
63068- "VmSwap:\t%8lu kB\n",
63069- hiwater_vm << (PAGE_SHIFT-10),
63070+ "VmSwap:\t%8lu kB\n"
63071+
63072+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63073+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
63074+#endif
63075+
63076+ ,hiwater_vm << (PAGE_SHIFT-10),
63077 total_vm << (PAGE_SHIFT-10),
63078 mm->locked_vm << (PAGE_SHIFT-10),
63079 mm->pinned_vm << (PAGE_SHIFT-10),
63080@@ -64,7 +76,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63081 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
63082 (PTRS_PER_PTE * sizeof(pte_t) *
63083 atomic_long_read(&mm->nr_ptes)) >> 10,
63084- swap << (PAGE_SHIFT-10));
63085+ swap << (PAGE_SHIFT-10)
63086+
63087+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63088+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63089+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
63090+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
63091+#else
63092+ , mm->context.user_cs_base
63093+ , mm->context.user_cs_limit
63094+#endif
63095+#endif
63096+
63097+ );
63098 }
63099
63100 unsigned long task_vsize(struct mm_struct *mm)
63101@@ -270,13 +294,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63102 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
63103 }
63104
63105- /* We don't show the stack guard page in /proc/maps */
63106+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63107+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
63108+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
63109+#else
63110 start = vma->vm_start;
63111- if (stack_guard_page_start(vma, start))
63112- start += PAGE_SIZE;
63113 end = vma->vm_end;
63114- if (stack_guard_page_end(vma, end))
63115- end -= PAGE_SIZE;
63116+#endif
63117
63118 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
63119 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
63120@@ -286,7 +310,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63121 flags & VM_WRITE ? 'w' : '-',
63122 flags & VM_EXEC ? 'x' : '-',
63123 flags & VM_MAYSHARE ? 's' : 'p',
63124+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63125+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
63126+#else
63127 pgoff,
63128+#endif
63129 MAJOR(dev), MINOR(dev), ino);
63130
63131 /*
63132@@ -295,7 +323,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63133 */
63134 if (file) {
63135 seq_pad(m, ' ');
63136- seq_path(m, &file->f_path, "\n");
63137+ seq_path(m, &file->f_path, "\n\\");
63138 goto done;
63139 }
63140
63141@@ -321,8 +349,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63142 * Thread stack in /proc/PID/task/TID/maps or
63143 * the main process stack.
63144 */
63145- if (!is_pid || (vma->vm_start <= mm->start_stack &&
63146- vma->vm_end >= mm->start_stack)) {
63147+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
63148+ (vma->vm_start <= mm->start_stack &&
63149+ vma->vm_end >= mm->start_stack)) {
63150 name = "[stack]";
63151 } else {
63152 /* Thread stack in /proc/PID/maps */
63153@@ -346,6 +375,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
63154 struct proc_maps_private *priv = m->private;
63155 struct task_struct *task = priv->task;
63156
63157+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63158+ if (current->exec_id != m->exec_id) {
63159+ gr_log_badprocpid("maps");
63160+ return 0;
63161+ }
63162+#endif
63163+
63164 show_map_vma(m, vma, is_pid);
63165
63166 if (m->count < m->size) /* vma is copied successfully */
63167@@ -586,12 +622,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
63168 .private = &mss,
63169 };
63170
63171+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63172+ if (current->exec_id != m->exec_id) {
63173+ gr_log_badprocpid("smaps");
63174+ return 0;
63175+ }
63176+#endif
63177 memset(&mss, 0, sizeof mss);
63178- mss.vma = vma;
63179- /* mmap_sem is held in m_start */
63180- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
63181- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
63182-
63183+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63184+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
63185+#endif
63186+ mss.vma = vma;
63187+ /* mmap_sem is held in m_start */
63188+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
63189+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
63190+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63191+ }
63192+#endif
63193 show_map_vma(m, vma, is_pid);
63194
63195 seq_printf(m,
63196@@ -609,7 +656,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
63197 "KernelPageSize: %8lu kB\n"
63198 "MMUPageSize: %8lu kB\n"
63199 "Locked: %8lu kB\n",
63200+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63201+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
63202+#else
63203 (vma->vm_end - vma->vm_start) >> 10,
63204+#endif
63205 mss.resident >> 10,
63206 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
63207 mss.shared_clean >> 10,
63208@@ -1387,6 +1438,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
63209 char buffer[64];
63210 int nid;
63211
63212+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63213+ if (current->exec_id != m->exec_id) {
63214+ gr_log_badprocpid("numa_maps");
63215+ return 0;
63216+ }
63217+#endif
63218+
63219 if (!mm)
63220 return 0;
63221
63222@@ -1404,11 +1462,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
63223 mpol_to_str(buffer, sizeof(buffer), pol);
63224 mpol_cond_put(pol);
63225
63226+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63227+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
63228+#else
63229 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
63230+#endif
63231
63232 if (file) {
63233 seq_printf(m, " file=");
63234- seq_path(m, &file->f_path, "\n\t= ");
63235+ seq_path(m, &file->f_path, "\n\t\\= ");
63236 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
63237 seq_printf(m, " heap");
63238 } else {
63239diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
63240index 678455d..ebd3245 100644
63241--- a/fs/proc/task_nommu.c
63242+++ b/fs/proc/task_nommu.c
63243@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63244 else
63245 bytes += kobjsize(mm);
63246
63247- if (current->fs && current->fs->users > 1)
63248+ if (current->fs && atomic_read(&current->fs->users) > 1)
63249 sbytes += kobjsize(current->fs);
63250 else
63251 bytes += kobjsize(current->fs);
63252@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
63253
63254 if (file) {
63255 seq_pad(m, ' ');
63256- seq_path(m, &file->f_path, "");
63257+ seq_path(m, &file->f_path, "\n\\");
63258 } else if (mm) {
63259 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
63260
63261diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
63262index 9100d69..51cd925 100644
63263--- a/fs/proc/vmcore.c
63264+++ b/fs/proc/vmcore.c
63265@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
63266 nr_bytes = count;
63267
63268 /* If pfn is not ram, return zeros for sparse dump files */
63269- if (pfn_is_ram(pfn) == 0)
63270- memset(buf, 0, nr_bytes);
63271- else {
63272+ if (pfn_is_ram(pfn) == 0) {
63273+ if (userbuf) {
63274+ if (clear_user((char __force_user *)buf, nr_bytes))
63275+ return -EFAULT;
63276+ } else
63277+ memset(buf, 0, nr_bytes);
63278+ } else {
63279 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
63280 offset, userbuf);
63281 if (tmp < 0)
63282@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
63283 static int copy_to(void *target, void *src, size_t size, int userbuf)
63284 {
63285 if (userbuf) {
63286- if (copy_to_user((char __user *) target, src, size))
63287+ if (copy_to_user((char __force_user *) target, src, size))
63288 return -EFAULT;
63289 } else {
63290 memcpy(target, src, size);
63291@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
63292 if (*fpos < m->offset + m->size) {
63293 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
63294 start = m->paddr + *fpos - m->offset;
63295- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
63296+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
63297 if (tmp < 0)
63298 return tmp;
63299 buflen -= tsz;
63300@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
63301 static ssize_t read_vmcore(struct file *file, char __user *buffer,
63302 size_t buflen, loff_t *fpos)
63303 {
63304- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
63305+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
63306 }
63307
63308 /*
63309diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
63310index b00fcc9..e0c6381 100644
63311--- a/fs/qnx6/qnx6.h
63312+++ b/fs/qnx6/qnx6.h
63313@@ -74,7 +74,7 @@ enum {
63314 BYTESEX_BE,
63315 };
63316
63317-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
63318+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
63319 {
63320 if (sbi->s_bytesex == BYTESEX_LE)
63321 return le64_to_cpu((__force __le64)n);
63322@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
63323 return (__force __fs64)cpu_to_be64(n);
63324 }
63325
63326-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
63327+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
63328 {
63329 if (sbi->s_bytesex == BYTESEX_LE)
63330 return le32_to_cpu((__force __le32)n);
63331diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
63332index 72d2917..c917c12 100644
63333--- a/fs/quota/netlink.c
63334+++ b/fs/quota/netlink.c
63335@@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = {
63336 void quota_send_warning(struct kqid qid, dev_t dev,
63337 const char warntype)
63338 {
63339- static atomic_t seq;
63340+ static atomic_unchecked_t seq;
63341 struct sk_buff *skb;
63342 void *msg_head;
63343 int ret;
63344@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
63345 "VFS: Not enough memory to send quota warning.\n");
63346 return;
63347 }
63348- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
63349+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
63350 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
63351 if (!msg_head) {
63352 printk(KERN_ERR
63353diff --git a/fs/read_write.c b/fs/read_write.c
63354index 58e440d..8ec2838 100644
63355--- a/fs/read_write.c
63356+++ b/fs/read_write.c
63357@@ -438,7 +438,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
63358
63359 old_fs = get_fs();
63360 set_fs(get_ds());
63361- p = (__force const char __user *)buf;
63362+ p = (const char __force_user *)buf;
63363 if (count > MAX_RW_COUNT)
63364 count = MAX_RW_COUNT;
63365 if (file->f_op->write)
63366diff --git a/fs/readdir.c b/fs/readdir.c
63367index 5b53d99..a6c3049 100644
63368--- a/fs/readdir.c
63369+++ b/fs/readdir.c
63370@@ -17,6 +17,7 @@
63371 #include <linux/security.h>
63372 #include <linux/syscalls.h>
63373 #include <linux/unistd.h>
63374+#include <linux/namei.h>
63375
63376 #include <asm/uaccess.h>
63377
63378@@ -69,6 +70,7 @@ struct old_linux_dirent {
63379 struct readdir_callback {
63380 struct dir_context ctx;
63381 struct old_linux_dirent __user * dirent;
63382+ struct file * file;
63383 int result;
63384 };
63385
63386@@ -86,6 +88,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
63387 buf->result = -EOVERFLOW;
63388 return -EOVERFLOW;
63389 }
63390+
63391+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
63392+ return 0;
63393+
63394 buf->result++;
63395 dirent = buf->dirent;
63396 if (!access_ok(VERIFY_WRITE, dirent,
63397@@ -117,6 +123,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
63398 if (!f.file)
63399 return -EBADF;
63400
63401+ buf.file = f.file;
63402 error = iterate_dir(f.file, &buf.ctx);
63403 if (buf.result)
63404 error = buf.result;
63405@@ -142,6 +149,7 @@ struct getdents_callback {
63406 struct dir_context ctx;
63407 struct linux_dirent __user * current_dir;
63408 struct linux_dirent __user * previous;
63409+ struct file * file;
63410 int count;
63411 int error;
63412 };
63413@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
63414 buf->error = -EOVERFLOW;
63415 return -EOVERFLOW;
63416 }
63417+
63418+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
63419+ return 0;
63420+
63421 dirent = buf->previous;
63422 if (dirent) {
63423 if (__put_user(offset, &dirent->d_off))
63424@@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
63425 if (!f.file)
63426 return -EBADF;
63427
63428+ buf.file = f.file;
63429 error = iterate_dir(f.file, &buf.ctx);
63430 if (error >= 0)
63431 error = buf.error;
63432@@ -226,6 +239,7 @@ struct getdents_callback64 {
63433 struct dir_context ctx;
63434 struct linux_dirent64 __user * current_dir;
63435 struct linux_dirent64 __user * previous;
63436+ struct file *file;
63437 int count;
63438 int error;
63439 };
63440@@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
63441 buf->error = -EINVAL; /* only used if we fail.. */
63442 if (reclen > buf->count)
63443 return -EINVAL;
63444+
63445+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
63446+ return 0;
63447+
63448 dirent = buf->previous;
63449 if (dirent) {
63450 if (__put_user(offset, &dirent->d_off))
63451@@ -288,6 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
63452 if (!f.file)
63453 return -EBADF;
63454
63455+ buf.file = f.file;
63456 error = iterate_dir(f.file, &buf.ctx);
63457 if (error >= 0)
63458 error = buf.error;
63459diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
63460index 2b7882b..1c5ef48 100644
63461--- a/fs/reiserfs/do_balan.c
63462+++ b/fs/reiserfs/do_balan.c
63463@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
63464 return;
63465 }
63466
63467- atomic_inc(&(fs_generation(tb->tb_sb)));
63468+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
63469 do_balance_starts(tb);
63470
63471 /* balance leaf returns 0 except if combining L R and S into
63472diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
63473index ee382ef..f4eb6eb5 100644
63474--- a/fs/reiserfs/item_ops.c
63475+++ b/fs/reiserfs/item_ops.c
63476@@ -725,18 +725,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
63477 }
63478
63479 static struct item_operations errcatch_ops = {
63480- errcatch_bytes_number,
63481- errcatch_decrement_key,
63482- errcatch_is_left_mergeable,
63483- errcatch_print_item,
63484- errcatch_check_item,
63485+ .bytes_number = errcatch_bytes_number,
63486+ .decrement_key = errcatch_decrement_key,
63487+ .is_left_mergeable = errcatch_is_left_mergeable,
63488+ .print_item = errcatch_print_item,
63489+ .check_item = errcatch_check_item,
63490
63491- errcatch_create_vi,
63492- errcatch_check_left,
63493- errcatch_check_right,
63494- errcatch_part_size,
63495- errcatch_unit_num,
63496- errcatch_print_vi
63497+ .create_vi = errcatch_create_vi,
63498+ .check_left = errcatch_check_left,
63499+ .check_right = errcatch_check_right,
63500+ .part_size = errcatch_part_size,
63501+ .unit_num = errcatch_unit_num,
63502+ .print_vi = errcatch_print_vi
63503 };
63504
63505 //////////////////////////////////////////////////////////////////////////////
63506diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
63507index a958444..42b2323 100644
63508--- a/fs/reiserfs/procfs.c
63509+++ b/fs/reiserfs/procfs.c
63510@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
63511 "SMALL_TAILS " : "NO_TAILS ",
63512 replay_only(sb) ? "REPLAY_ONLY " : "",
63513 convert_reiserfs(sb) ? "CONV " : "",
63514- atomic_read(&r->s_generation_counter),
63515+ atomic_read_unchecked(&r->s_generation_counter),
63516 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
63517 SF(s_do_balance), SF(s_unneeded_left_neighbor),
63518 SF(s_good_search_by_key_reada), SF(s_bmaps),
63519diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
63520index f8adaee..0eeeeca 100644
63521--- a/fs/reiserfs/reiserfs.h
63522+++ b/fs/reiserfs/reiserfs.h
63523@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
63524 /* Comment? -Hans */
63525 wait_queue_head_t s_wait;
63526 /* To be obsoleted soon by per buffer seals.. -Hans */
63527- atomic_t s_generation_counter; // increased by one every time the
63528+ atomic_unchecked_t s_generation_counter; // increased by one every time the
63529 // tree gets re-balanced
63530 unsigned long s_properties; /* File system properties. Currently holds
63531 on-disk FS format */
63532@@ -1982,7 +1982,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
63533 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
63534
63535 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
63536-#define get_generation(s) atomic_read (&fs_generation(s))
63537+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
63538 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
63539 #define __fs_changed(gen,s) (gen != get_generation (s))
63540 #define fs_changed(gen,s) \
63541diff --git a/fs/select.c b/fs/select.c
63542index 467bb1c..cf9d65a 100644
63543--- a/fs/select.c
63544+++ b/fs/select.c
63545@@ -20,6 +20,7 @@
63546 #include <linux/export.h>
63547 #include <linux/slab.h>
63548 #include <linux/poll.h>
63549+#include <linux/security.h>
63550 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
63551 #include <linux/file.h>
63552 #include <linux/fdtable.h>
63553@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
63554 struct poll_list *walk = head;
63555 unsigned long todo = nfds;
63556
63557+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
63558 if (nfds > rlimit(RLIMIT_NOFILE))
63559 return -EINVAL;
63560
63561diff --git a/fs/seq_file.c b/fs/seq_file.c
63562index 1d641bb..e600623 100644
63563--- a/fs/seq_file.c
63564+++ b/fs/seq_file.c
63565@@ -10,6 +10,7 @@
63566 #include <linux/seq_file.h>
63567 #include <linux/slab.h>
63568 #include <linux/cred.h>
63569+#include <linux/sched.h>
63570
63571 #include <asm/uaccess.h>
63572 #include <asm/page.h>
63573@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
63574 #ifdef CONFIG_USER_NS
63575 p->user_ns = file->f_cred->user_ns;
63576 #endif
63577+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63578+ p->exec_id = current->exec_id;
63579+#endif
63580
63581 /*
63582 * Wrappers around seq_open(e.g. swaps_open) need to be
63583@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
63584 return 0;
63585 }
63586 if (!m->buf) {
63587- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
63588+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
63589 if (!m->buf)
63590 return -ENOMEM;
63591 }
63592@@ -137,7 +141,7 @@ Eoverflow:
63593 m->op->stop(m, p);
63594 kfree(m->buf);
63595 m->count = 0;
63596- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
63597+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
63598 return !m->buf ? -ENOMEM : -EAGAIN;
63599 }
63600
63601@@ -153,7 +157,7 @@ Eoverflow:
63602 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
63603 {
63604 struct seq_file *m = file->private_data;
63605- size_t copied = 0;
63606+ ssize_t copied = 0;
63607 loff_t pos;
63608 size_t n;
63609 void *p;
63610@@ -192,7 +196,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
63611
63612 /* grab buffer if we didn't have one */
63613 if (!m->buf) {
63614- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
63615+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
63616 if (!m->buf)
63617 goto Enomem;
63618 }
63619@@ -234,7 +238,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
63620 m->op->stop(m, p);
63621 kfree(m->buf);
63622 m->count = 0;
63623- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
63624+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
63625 if (!m->buf)
63626 goto Enomem;
63627 m->version = 0;
63628@@ -584,7 +588,7 @@ static void single_stop(struct seq_file *p, void *v)
63629 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
63630 void *data)
63631 {
63632- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
63633+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
63634 int res = -ENOMEM;
63635
63636 if (op) {
63637diff --git a/fs/splice.c b/fs/splice.c
63638index 46a08f7..bb163cc 100644
63639--- a/fs/splice.c
63640+++ b/fs/splice.c
63641@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
63642 pipe_lock(pipe);
63643
63644 for (;;) {
63645- if (!pipe->readers) {
63646+ if (!atomic_read(&pipe->readers)) {
63647 send_sig(SIGPIPE, current, 0);
63648 if (!ret)
63649 ret = -EPIPE;
63650@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
63651 page_nr++;
63652 ret += buf->len;
63653
63654- if (pipe->files)
63655+ if (atomic_read(&pipe->files))
63656 do_wakeup = 1;
63657
63658 if (!--spd->nr_pages)
63659@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
63660 do_wakeup = 0;
63661 }
63662
63663- pipe->waiting_writers++;
63664+ atomic_inc(&pipe->waiting_writers);
63665 pipe_wait(pipe);
63666- pipe->waiting_writers--;
63667+ atomic_dec(&pipe->waiting_writers);
63668 }
63669
63670 pipe_unlock(pipe);
63671@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
63672 old_fs = get_fs();
63673 set_fs(get_ds());
63674 /* The cast to a user pointer is valid due to the set_fs() */
63675- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
63676+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
63677 set_fs(old_fs);
63678
63679 return res;
63680@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
63681 old_fs = get_fs();
63682 set_fs(get_ds());
63683 /* The cast to a user pointer is valid due to the set_fs() */
63684- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
63685+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
63686 set_fs(old_fs);
63687
63688 return res;
63689@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
63690 goto err;
63691
63692 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
63693- vec[i].iov_base = (void __user *) page_address(page);
63694+ vec[i].iov_base = (void __force_user *) page_address(page);
63695 vec[i].iov_len = this_len;
63696 spd.pages[i] = page;
63697 spd.nr_pages++;
63698@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
63699 ops->release(pipe, buf);
63700 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
63701 pipe->nrbufs--;
63702- if (pipe->files)
63703+ if (atomic_read(&pipe->files))
63704 sd->need_wakeup = true;
63705 }
63706
63707@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
63708 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
63709 {
63710 while (!pipe->nrbufs) {
63711- if (!pipe->writers)
63712+ if (!atomic_read(&pipe->writers))
63713 return 0;
63714
63715- if (!pipe->waiting_writers && sd->num_spliced)
63716+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
63717 return 0;
63718
63719 if (sd->flags & SPLICE_F_NONBLOCK)
63720@@ -1179,7 +1179,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
63721 * out of the pipe right after the splice_to_pipe(). So set
63722 * PIPE_READERS appropriately.
63723 */
63724- pipe->readers = 1;
63725+ atomic_set(&pipe->readers, 1);
63726
63727 current->splice_pipe = pipe;
63728 }
63729@@ -1475,6 +1475,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
63730
63731 partial[buffers].offset = off;
63732 partial[buffers].len = plen;
63733+ partial[buffers].private = 0;
63734
63735 off = 0;
63736 len -= plen;
63737@@ -1777,9 +1778,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
63738 ret = -ERESTARTSYS;
63739 break;
63740 }
63741- if (!pipe->writers)
63742+ if (!atomic_read(&pipe->writers))
63743 break;
63744- if (!pipe->waiting_writers) {
63745+ if (!atomic_read(&pipe->waiting_writers)) {
63746 if (flags & SPLICE_F_NONBLOCK) {
63747 ret = -EAGAIN;
63748 break;
63749@@ -1811,7 +1812,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
63750 pipe_lock(pipe);
63751
63752 while (pipe->nrbufs >= pipe->buffers) {
63753- if (!pipe->readers) {
63754+ if (!atomic_read(&pipe->readers)) {
63755 send_sig(SIGPIPE, current, 0);
63756 ret = -EPIPE;
63757 break;
63758@@ -1824,9 +1825,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
63759 ret = -ERESTARTSYS;
63760 break;
63761 }
63762- pipe->waiting_writers++;
63763+ atomic_inc(&pipe->waiting_writers);
63764 pipe_wait(pipe);
63765- pipe->waiting_writers--;
63766+ atomic_dec(&pipe->waiting_writers);
63767 }
63768
63769 pipe_unlock(pipe);
63770@@ -1862,14 +1863,14 @@ retry:
63771 pipe_double_lock(ipipe, opipe);
63772
63773 do {
63774- if (!opipe->readers) {
63775+ if (!atomic_read(&opipe->readers)) {
63776 send_sig(SIGPIPE, current, 0);
63777 if (!ret)
63778 ret = -EPIPE;
63779 break;
63780 }
63781
63782- if (!ipipe->nrbufs && !ipipe->writers)
63783+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
63784 break;
63785
63786 /*
63787@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
63788 pipe_double_lock(ipipe, opipe);
63789
63790 do {
63791- if (!opipe->readers) {
63792+ if (!atomic_read(&opipe->readers)) {
63793 send_sig(SIGPIPE, current, 0);
63794 if (!ret)
63795 ret = -EPIPE;
63796@@ -2011,7 +2012,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
63797 * return EAGAIN if we have the potential of some data in the
63798 * future, otherwise just return 0
63799 */
63800- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
63801+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
63802 ret = -EAGAIN;
63803
63804 pipe_unlock(ipipe);
63805diff --git a/fs/stat.c b/fs/stat.c
63806index ae0c3ce..9ee641c 100644
63807--- a/fs/stat.c
63808+++ b/fs/stat.c
63809@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
63810 stat->gid = inode->i_gid;
63811 stat->rdev = inode->i_rdev;
63812 stat->size = i_size_read(inode);
63813- stat->atime = inode->i_atime;
63814- stat->mtime = inode->i_mtime;
63815+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
63816+ stat->atime = inode->i_ctime;
63817+ stat->mtime = inode->i_ctime;
63818+ } else {
63819+ stat->atime = inode->i_atime;
63820+ stat->mtime = inode->i_mtime;
63821+ }
63822 stat->ctime = inode->i_ctime;
63823 stat->blksize = (1 << inode->i_blkbits);
63824 stat->blocks = inode->i_blocks;
63825@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
63826 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
63827 {
63828 struct inode *inode = path->dentry->d_inode;
63829+ int retval;
63830
63831- if (inode->i_op->getattr)
63832- return inode->i_op->getattr(path->mnt, path->dentry, stat);
63833+ if (inode->i_op->getattr) {
63834+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
63835+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
63836+ stat->atime = stat->ctime;
63837+ stat->mtime = stat->ctime;
63838+ }
63839+ return retval;
63840+ }
63841
63842 generic_fillattr(inode, stat);
63843 return 0;
63844diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
63845index 5e73d66..4f165fd 100644
63846--- a/fs/sysfs/dir.c
63847+++ b/fs/sysfs/dir.c
63848@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
63849 *
63850 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63851 */
63852-static unsigned int sysfs_name_hash(const char *name, const void *ns)
63853+static unsigned int sysfs_name_hash(const unsigned char *name, const void *ns)
63854 {
63855 unsigned long hash = init_name_hash();
63856 unsigned int len = strlen(name);
63857@@ -676,6 +676,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
63858 struct sysfs_dirent *sd;
63859 int rc;
63860
63861+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
63862+ const char *parent_name = parent_sd->s_name;
63863+
63864+ mode = S_IFDIR | S_IRWXU;
63865+
63866+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
63867+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
63868+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
63869+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
63870+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
63871+#endif
63872+
63873 /* allocate */
63874 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
63875 if (!sd)
63876diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
63877index 35e7d08..4d6e676 100644
63878--- a/fs/sysfs/file.c
63879+++ b/fs/sysfs/file.c
63880@@ -42,7 +42,7 @@ static DEFINE_MUTEX(sysfs_open_file_mutex);
63881
63882 struct sysfs_open_dirent {
63883 atomic_t refcnt;
63884- atomic_t event;
63885+ atomic_unchecked_t event;
63886 wait_queue_head_t poll;
63887 struct list_head files; /* goes through sysfs_open_file.list */
63888 };
63889@@ -112,7 +112,7 @@ static int sysfs_seq_show(struct seq_file *sf, void *v)
63890 return -ENODEV;
63891 }
63892
63893- of->event = atomic_read(&of->sd->s_attr.open->event);
63894+ of->event = atomic_read_unchecked(&of->sd->s_attr.open->event);
63895
63896 /*
63897 * Lookup @ops and invoke show(). Control may reach here via seq
63898@@ -365,12 +365,12 @@ static int sysfs_bin_page_mkwrite(struct vm_area_struct *vma,
63899 return ret;
63900 }
63901
63902-static int sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
63903- void *buf, int len, int write)
63904+static ssize_t sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
63905+ void *buf, size_t len, int write)
63906 {
63907 struct file *file = vma->vm_file;
63908 struct sysfs_open_file *of = sysfs_of(file);
63909- int ret;
63910+ ssize_t ret;
63911
63912 if (!of->vm_ops)
63913 return -EINVAL;
63914@@ -564,7 +564,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
63915 return -ENOMEM;
63916
63917 atomic_set(&new_od->refcnt, 0);
63918- atomic_set(&new_od->event, 1);
63919+ atomic_set_unchecked(&new_od->event, 1);
63920 init_waitqueue_head(&new_od->poll);
63921 INIT_LIST_HEAD(&new_od->files);
63922 goto retry;
63923@@ -768,7 +768,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
63924
63925 sysfs_put_active(attr_sd);
63926
63927- if (of->event != atomic_read(&od->event))
63928+ if (of->event != atomic_read_unchecked(&od->event))
63929 goto trigger;
63930
63931 return DEFAULT_POLLMASK;
63932@@ -787,7 +787,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
63933 if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
63934 od = sd->s_attr.open;
63935 if (od) {
63936- atomic_inc(&od->event);
63937+ atomic_inc_unchecked(&od->event);
63938 wake_up_interruptible(&od->poll);
63939 }
63940 }
63941diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
63942index 3ae3f1b..081a26c 100644
63943--- a/fs/sysfs/symlink.c
63944+++ b/fs/sysfs/symlink.c
63945@@ -314,7 +314,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63946 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd,
63947 void *cookie)
63948 {
63949- char *page = nd_get_link(nd);
63950+ const char *page = nd_get_link(nd);
63951 if (!IS_ERR(page))
63952 free_page((unsigned long)page);
63953 }
63954diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
63955index 69d4889..a810bd4 100644
63956--- a/fs/sysv/sysv.h
63957+++ b/fs/sysv/sysv.h
63958@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
63959 #endif
63960 }
63961
63962-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
63963+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
63964 {
63965 if (sbi->s_bytesex == BYTESEX_PDP)
63966 return PDP_swab((__force __u32)n);
63967diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
63968index e18b988..f1d4ad0f 100644
63969--- a/fs/ubifs/io.c
63970+++ b/fs/ubifs/io.c
63971@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
63972 return err;
63973 }
63974
63975-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
63976+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
63977 {
63978 int err;
63979
63980diff --git a/fs/udf/misc.c b/fs/udf/misc.c
63981index c175b4d..8f36a16 100644
63982--- a/fs/udf/misc.c
63983+++ b/fs/udf/misc.c
63984@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
63985
63986 u8 udf_tag_checksum(const struct tag *t)
63987 {
63988- u8 *data = (u8 *)t;
63989+ const u8 *data = (const u8 *)t;
63990 u8 checksum = 0;
63991 int i;
63992 for (i = 0; i < sizeof(struct tag); ++i)
63993diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
63994index 8d974c4..b82f6ec 100644
63995--- a/fs/ufs/swab.h
63996+++ b/fs/ufs/swab.h
63997@@ -22,7 +22,7 @@ enum {
63998 BYTESEX_BE
63999 };
64000
64001-static inline u64
64002+static inline u64 __intentional_overflow(-1)
64003 fs64_to_cpu(struct super_block *sbp, __fs64 n)
64004 {
64005 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
64006@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
64007 return (__force __fs64)cpu_to_be64(n);
64008 }
64009
64010-static inline u32
64011+static inline u32 __intentional_overflow(-1)
64012 fs32_to_cpu(struct super_block *sbp, __fs32 n)
64013 {
64014 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
64015diff --git a/fs/utimes.c b/fs/utimes.c
64016index aa138d6..5f3a811 100644
64017--- a/fs/utimes.c
64018+++ b/fs/utimes.c
64019@@ -1,6 +1,7 @@
64020 #include <linux/compiler.h>
64021 #include <linux/file.h>
64022 #include <linux/fs.h>
64023+#include <linux/security.h>
64024 #include <linux/linkage.h>
64025 #include <linux/mount.h>
64026 #include <linux/namei.h>
64027@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
64028 }
64029 }
64030 retry_deleg:
64031+
64032+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
64033+ error = -EACCES;
64034+ goto mnt_drop_write_and_out;
64035+ }
64036+
64037 mutex_lock(&inode->i_mutex);
64038 error = notify_change(path->dentry, &newattrs, &delegated_inode);
64039 mutex_unlock(&inode->i_mutex);
64040diff --git a/fs/xattr.c b/fs/xattr.c
64041index 3377dff..f394815 100644
64042--- a/fs/xattr.c
64043+++ b/fs/xattr.c
64044@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
64045 return rc;
64046 }
64047
64048+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
64049+ssize_t
64050+pax_getxattr(struct dentry *dentry, void *value, size_t size)
64051+{
64052+ struct inode *inode = dentry->d_inode;
64053+ ssize_t error;
64054+
64055+ error = inode_permission(inode, MAY_EXEC);
64056+ if (error)
64057+ return error;
64058+
64059+ if (inode->i_op->getxattr)
64060+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
64061+ else
64062+ error = -EOPNOTSUPP;
64063+
64064+ return error;
64065+}
64066+EXPORT_SYMBOL(pax_getxattr);
64067+#endif
64068+
64069 ssize_t
64070 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
64071 {
64072@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
64073 * Extended attribute SET operations
64074 */
64075 static long
64076-setxattr(struct dentry *d, const char __user *name, const void __user *value,
64077+setxattr(struct path *path, const char __user *name, const void __user *value,
64078 size_t size, int flags)
64079 {
64080 int error;
64081@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
64082 posix_acl_fix_xattr_from_user(kvalue, size);
64083 }
64084
64085- error = vfs_setxattr(d, kname, kvalue, size, flags);
64086+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
64087+ error = -EACCES;
64088+ goto out;
64089+ }
64090+
64091+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
64092 out:
64093 if (vvalue)
64094 vfree(vvalue);
64095@@ -377,7 +403,7 @@ retry:
64096 return error;
64097 error = mnt_want_write(path.mnt);
64098 if (!error) {
64099- error = setxattr(path.dentry, name, value, size, flags);
64100+ error = setxattr(&path, name, value, size, flags);
64101 mnt_drop_write(path.mnt);
64102 }
64103 path_put(&path);
64104@@ -401,7 +427,7 @@ retry:
64105 return error;
64106 error = mnt_want_write(path.mnt);
64107 if (!error) {
64108- error = setxattr(path.dentry, name, value, size, flags);
64109+ error = setxattr(&path, name, value, size, flags);
64110 mnt_drop_write(path.mnt);
64111 }
64112 path_put(&path);
64113@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
64114 const void __user *,value, size_t, size, int, flags)
64115 {
64116 struct fd f = fdget(fd);
64117- struct dentry *dentry;
64118 int error = -EBADF;
64119
64120 if (!f.file)
64121 return error;
64122- dentry = f.file->f_path.dentry;
64123- audit_inode(NULL, dentry, 0);
64124+ audit_inode(NULL, f.file->f_path.dentry, 0);
64125 error = mnt_want_write_file(f.file);
64126 if (!error) {
64127- error = setxattr(dentry, name, value, size, flags);
64128+ error = setxattr(&f.file->f_path, name, value, size, flags);
64129 mnt_drop_write_file(f.file);
64130 }
64131 fdput(f);
64132@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
64133 * Extended attribute REMOVE operations
64134 */
64135 static long
64136-removexattr(struct dentry *d, const char __user *name)
64137+removexattr(struct path *path, const char __user *name)
64138 {
64139 int error;
64140 char kname[XATTR_NAME_MAX + 1];
64141@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
64142 if (error < 0)
64143 return error;
64144
64145- return vfs_removexattr(d, kname);
64146+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
64147+ return -EACCES;
64148+
64149+ return vfs_removexattr(path->dentry, kname);
64150 }
64151
64152 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
64153@@ -652,7 +679,7 @@ retry:
64154 return error;
64155 error = mnt_want_write(path.mnt);
64156 if (!error) {
64157- error = removexattr(path.dentry, name);
64158+ error = removexattr(&path, name);
64159 mnt_drop_write(path.mnt);
64160 }
64161 path_put(&path);
64162@@ -675,7 +702,7 @@ retry:
64163 return error;
64164 error = mnt_want_write(path.mnt);
64165 if (!error) {
64166- error = removexattr(path.dentry, name);
64167+ error = removexattr(&path, name);
64168 mnt_drop_write(path.mnt);
64169 }
64170 path_put(&path);
64171@@ -689,16 +716,16 @@ retry:
64172 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
64173 {
64174 struct fd f = fdget(fd);
64175- struct dentry *dentry;
64176+ struct path *path;
64177 int error = -EBADF;
64178
64179 if (!f.file)
64180 return error;
64181- dentry = f.file->f_path.dentry;
64182- audit_inode(NULL, dentry, 0);
64183+ path = &f.file->f_path;
64184+ audit_inode(NULL, path->dentry, 0);
64185 error = mnt_want_write_file(f.file);
64186 if (!error) {
64187- error = removexattr(dentry, name);
64188+ error = removexattr(path, name);
64189 mnt_drop_write_file(f.file);
64190 }
64191 fdput(f);
64192diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
64193index 9fbea87..6b19972 100644
64194--- a/fs/xattr_acl.c
64195+++ b/fs/xattr_acl.c
64196@@ -76,8 +76,8 @@ struct posix_acl *
64197 posix_acl_from_xattr(struct user_namespace *user_ns,
64198 const void *value, size_t size)
64199 {
64200- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
64201- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
64202+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
64203+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
64204 int count;
64205 struct posix_acl *acl;
64206 struct posix_acl_entry *acl_e;
64207diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
64208index 3b2c14b..de031fe 100644
64209--- a/fs/xfs/xfs_bmap.c
64210+++ b/fs/xfs/xfs_bmap.c
64211@@ -584,7 +584,7 @@ xfs_bmap_validate_ret(
64212
64213 #else
64214 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
64215-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
64216+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
64217 #endif /* DEBUG */
64218
64219 /*
64220diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
64221index c4e50c6..8ba93e3 100644
64222--- a/fs/xfs/xfs_dir2_readdir.c
64223+++ b/fs/xfs/xfs_dir2_readdir.c
64224@@ -160,7 +160,12 @@ xfs_dir2_sf_getdents(
64225 ino = dp->d_ops->sf_get_ino(sfp, sfep);
64226 filetype = dp->d_ops->sf_get_ftype(sfep);
64227 ctx->pos = off & 0x7fffffff;
64228- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
64229+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
64230+ char name[sfep->namelen];
64231+ memcpy(name, sfep->name, sfep->namelen);
64232+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(mp, filetype)))
64233+ return 0;
64234+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
64235 xfs_dir3_get_dtype(mp, filetype)))
64236 return 0;
64237 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
64238diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
64239index 33ad9a7..82c18ba 100644
64240--- a/fs/xfs/xfs_ioctl.c
64241+++ b/fs/xfs/xfs_ioctl.c
64242@@ -126,7 +126,7 @@ xfs_find_handle(
64243 }
64244
64245 error = -EFAULT;
64246- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
64247+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
64248 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
64249 goto out_put;
64250
64251diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
64252index 104455b..764c512 100644
64253--- a/fs/xfs/xfs_iops.c
64254+++ b/fs/xfs/xfs_iops.c
64255@@ -397,7 +397,7 @@ xfs_vn_put_link(
64256 struct nameidata *nd,
64257 void *p)
64258 {
64259- char *s = nd_get_link(nd);
64260+ const char *s = nd_get_link(nd);
64261
64262 if (!IS_ERR(s))
64263 kfree(s);
64264diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
64265new file mode 100644
64266index 0000000..e98584b
64267--- /dev/null
64268+++ b/grsecurity/Kconfig
64269@@ -0,0 +1,1147 @@
64270+#
64271+# grecurity configuration
64272+#
64273+menu "Memory Protections"
64274+depends on GRKERNSEC
64275+
64276+config GRKERNSEC_KMEM
64277+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
64278+ default y if GRKERNSEC_CONFIG_AUTO
64279+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
64280+ help
64281+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
64282+ be written to or read from to modify or leak the contents of the running
64283+ kernel. /dev/port will also not be allowed to be opened, and support
64284+ for /dev/cpu/*/msr and kexec will be removed. If you have module
64285+ support disabled, enabling this will close up six ways that are
64286+ currently used to insert malicious code into the running kernel.
64287+
64288+ Even with this feature enabled, we still highly recommend that
64289+ you use the RBAC system, as it is still possible for an attacker to
64290+ modify the running kernel through other more obscure methods.
64291+
64292+ Enabling this feature will prevent the "cpupower" and "powertop" tools
64293+ from working.
64294+
64295+ It is highly recommended that you say Y here if you meet all the
64296+ conditions above.
64297+
64298+config GRKERNSEC_VM86
64299+ bool "Restrict VM86 mode"
64300+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
64301+ depends on X86_32
64302+
64303+ help
64304+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
64305+ make use of a special execution mode on 32bit x86 processors called
64306+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
64307+ video cards and will still work with this option enabled. The purpose
64308+ of the option is to prevent exploitation of emulation errors in
64309+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
64310+ Nearly all users should be able to enable this option.
64311+
64312+config GRKERNSEC_IO
64313+ bool "Disable privileged I/O"
64314+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
64315+ depends on X86
64316+ select RTC_CLASS
64317+ select RTC_INTF_DEV
64318+ select RTC_DRV_CMOS
64319+
64320+ help
64321+ If you say Y here, all ioperm and iopl calls will return an error.
64322+ Ioperm and iopl can be used to modify the running kernel.
64323+ Unfortunately, some programs need this access to operate properly,
64324+ the most notable of which are XFree86 and hwclock. hwclock can be
64325+ remedied by having RTC support in the kernel, so real-time
64326+ clock support is enabled if this option is enabled, to ensure
64327+ that hwclock operates correctly.
64328+
64329+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
64330+ you may not be able to boot into a graphical environment with this
64331+ option enabled. In this case, you should use the RBAC system instead.
64332+
64333+config GRKERNSEC_JIT_HARDEN
64334+ bool "Harden BPF JIT against spray attacks"
64335+ default y if GRKERNSEC_CONFIG_AUTO
64336+ depends on BPF_JIT && X86
64337+ help
64338+ If you say Y here, the native code generated by the kernel's Berkeley
64339+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
64340+ attacks that attempt to fit attacker-beneficial instructions in
64341+ 32bit immediate fields of JIT-generated native instructions. The
64342+ attacker will generally aim to cause an unintended instruction sequence
64343+ of JIT-generated native code to execute by jumping into the middle of
64344+ a generated instruction. This feature effectively randomizes the 32bit
64345+ immediate constants present in the generated code to thwart such attacks.
64346+
64347+ If you're using KERNEXEC, it's recommended that you enable this option
64348+ to supplement the hardening of the kernel.
64349+
64350+config GRKERNSEC_PERF_HARDEN
64351+ bool "Disable unprivileged PERF_EVENTS usage by default"
64352+ default y if GRKERNSEC_CONFIG_AUTO
64353+ depends on PERF_EVENTS
64354+ help
64355+ If you say Y here, the range of acceptable values for the
64356+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
64357+ default to a new value: 3. When the sysctl is set to this value, no
64358+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
64359+
64360+ Though PERF_EVENTS can be used legitimately for performance monitoring
64361+ and low-level application profiling, it is forced on regardless of
64362+ configuration, has been at fault for several vulnerabilities, and
64363+ creates new opportunities for side channels and other information leaks.
64364+
64365+ This feature puts PERF_EVENTS into a secure default state and permits
64366+ the administrator to change out of it temporarily if unprivileged
64367+ application profiling is needed.
64368+
64369+config GRKERNSEC_RAND_THREADSTACK
64370+ bool "Insert random gaps between thread stacks"
64371+ default y if GRKERNSEC_CONFIG_AUTO
64372+ depends on PAX_RANDMMAP && !PPC
64373+ help
64374+ If you say Y here, a random-sized gap will be enforced between allocated
64375+ thread stacks. Glibc's NPTL and other threading libraries that
64376+ pass MAP_STACK to the kernel for thread stack allocation are supported.
64377+ The implementation currently provides 8 bits of entropy for the gap.
64378+
64379+ Many distributions do not compile threaded remote services with the
64380+ -fstack-check argument to GCC, causing the variable-sized stack-based
64381+ allocator, alloca(), to not probe the stack on allocation. This
64382+ permits an unbounded alloca() to skip over any guard page and potentially
64383+ modify another thread's stack reliably. An enforced random gap
64384+ reduces the reliability of such an attack and increases the chance
64385+ that such a read/write to another thread's stack instead lands in
64386+ an unmapped area, causing a crash and triggering grsecurity's
64387+ anti-bruteforcing logic.
64388+
64389+config GRKERNSEC_PROC_MEMMAP
64390+ bool "Harden ASLR against information leaks and entropy reduction"
64391+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
64392+ depends on PAX_NOEXEC || PAX_ASLR
64393+ help
64394+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
64395+ give no information about the addresses of its mappings if
64396+ PaX features that rely on random addresses are enabled on the task.
64397+ In addition to sanitizing this information and disabling other
64398+ dangerous sources of information, this option causes reads of sensitive
64399+ /proc/<pid> entries where the file descriptor was opened in a different
64400+ task than the one performing the read. Such attempts are logged.
64401+ This option also limits argv/env strings for suid/sgid binaries
64402+ to 512KB to prevent a complete exhaustion of the stack entropy provided
64403+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
64404+ binaries to prevent alternative mmap layouts from being abused.
64405+
64406+ If you use PaX it is essential that you say Y here as it closes up
64407+ several holes that make full ASLR useless locally.
64408+
64409+config GRKERNSEC_BRUTE
64410+ bool "Deter exploit bruteforcing"
64411+ default y if GRKERNSEC_CONFIG_AUTO
64412+ help
64413+ If you say Y here, attempts to bruteforce exploits against forking
64414+ daemons such as apache or sshd, as well as against suid/sgid binaries
64415+ will be deterred. When a child of a forking daemon is killed by PaX
64416+ or crashes due to an illegal instruction or other suspicious signal,
64417+ the parent process will be delayed 30 seconds upon every subsequent
64418+ fork until the administrator is able to assess the situation and
64419+ restart the daemon.
64420+ In the suid/sgid case, the attempt is logged, the user has all their
64421+ existing instances of the suid/sgid binary terminated and will
64422+ be unable to execute any suid/sgid binaries for 15 minutes.
64423+
64424+ It is recommended that you also enable signal logging in the auditing
64425+ section so that logs are generated when a process triggers a suspicious
64426+ signal.
64427+ If the sysctl option is enabled, a sysctl option with name
64428+ "deter_bruteforce" is created.
64429+
64430+config GRKERNSEC_MODHARDEN
64431+ bool "Harden module auto-loading"
64432+ default y if GRKERNSEC_CONFIG_AUTO
64433+ depends on MODULES
64434+ help
64435+ If you say Y here, module auto-loading in response to use of some
64436+ feature implemented by an unloaded module will be restricted to
64437+ root users. Enabling this option helps defend against attacks
64438+ by unprivileged users who abuse the auto-loading behavior to
64439+ cause a vulnerable module to load that is then exploited.
64440+
64441+ If this option prevents a legitimate use of auto-loading for a
64442+ non-root user, the administrator can execute modprobe manually
64443+ with the exact name of the module mentioned in the alert log.
64444+ Alternatively, the administrator can add the module to the list
64445+ of modules loaded at boot by modifying init scripts.
64446+
64447+ Modification of init scripts will most likely be needed on
64448+ Ubuntu servers with encrypted home directory support enabled,
64449+ as the first non-root user logging in will cause the ecb(aes),
64450+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
64451+
64452+config GRKERNSEC_HIDESYM
64453+ bool "Hide kernel symbols"
64454+ default y if GRKERNSEC_CONFIG_AUTO
64455+ select PAX_USERCOPY_SLABS
64456+ help
64457+ If you say Y here, getting information on loaded modules, and
64458+ displaying all kernel symbols through a syscall will be restricted
64459+ to users with CAP_SYS_MODULE. For software compatibility reasons,
64460+ /proc/kallsyms will be restricted to the root user. The RBAC
64461+ system can hide that entry even from root.
64462+
64463+ This option also prevents leaking of kernel addresses through
64464+ several /proc entries.
64465+
64466+ Note that this option is only effective provided the following
64467+ conditions are met:
64468+ 1) The kernel using grsecurity is not precompiled by some distribution
64469+ 2) You have also enabled GRKERNSEC_DMESG
64470+ 3) You are using the RBAC system and hiding other files such as your
64471+ kernel image and System.map. Alternatively, enabling this option
64472+ causes the permissions on /boot, /lib/modules, and the kernel
64473+ source directory to change at compile time to prevent
64474+ reading by non-root users.
64475+ If the above conditions are met, this option will aid in providing a
64476+ useful protection against local kernel exploitation of overflows
64477+ and arbitrary read/write vulnerabilities.
64478+
64479+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
64480+ in addition to this feature.
64481+
64482+config GRKERNSEC_RANDSTRUCT
64483+ bool "Randomize layout of sensitive kernel structures"
64484+ default y if GRKERNSEC_CONFIG_AUTO
64485+ select GRKERNSEC_HIDESYM
64486+ select MODVERSIONS if MODULES
64487+ help
64488+ If you say Y here, the layouts of a number of sensitive kernel
64489+ structures (task, fs, cred, etc) and all structures composed entirely
64490+ of function pointers (aka "ops" structs) will be randomized at compile-time.
64491+ This can introduce the requirement of an additional infoleak
64492+ vulnerability for exploits targeting these structure types.
64493+
64494+ Enabling this feature will introduce some performance impact, slightly
64495+ increase memory usage, and prevent the use of forensic tools like
64496+ Volatility against the system (unless the kernel source tree isn't
64497+ cleaned after kernel installation).
64498+
64499+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
64500+ It remains after a make clean to allow for external modules to be compiled
64501+ with the existing seed and will be removed by a make mrproper or
64502+ make distclean.
64503+
64504+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
64505+ bool "Use cacheline-aware structure randomization"
64506+ depends on GRKERNSEC_RANDSTRUCT
64507+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
64508+ help
64509+ If you say Y here, the RANDSTRUCT randomization will make a best effort
64510+ at restricting randomization to cacheline-sized groups of elements. It
64511+ will further not randomize bitfields in structures. This reduces the
64512+ performance hit of RANDSTRUCT at the cost of weakened randomization.
64513+
64514+config GRKERNSEC_KERN_LOCKOUT
64515+ bool "Active kernel exploit response"
64516+ default y if GRKERNSEC_CONFIG_AUTO
64517+ depends on X86 || ARM || PPC || SPARC
64518+ help
64519+ If you say Y here, when a PaX alert is triggered due to suspicious
64520+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
64521+ or an OOPS occurs due to bad memory accesses, instead of just
64522+ terminating the offending process (and potentially allowing
64523+ a subsequent exploit from the same user), we will take one of two
64524+ actions:
64525+ If the user was root, we will panic the system
64526+ If the user was non-root, we will log the attempt, terminate
64527+ all processes owned by the user, then prevent them from creating
64528+ any new processes until the system is restarted
64529+ This deters repeated kernel exploitation/bruteforcing attempts
64530+ and is useful for later forensics.
64531+
64532+config GRKERNSEC_OLD_ARM_USERLAND
64533+ bool "Old ARM userland compatibility"
64534+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
64535+ help
64536+ If you say Y here, stubs of executable code to perform such operations
64537+ as "compare-exchange" will be placed at fixed locations in the ARM vector
64538+ table. This is unfortunately needed for old ARM userland meant to run
64539+ across a wide range of processors. Without this option enabled,
64540+ the get_tls and data memory barrier stubs will be emulated by the kernel,
64541+ which is enough for Linaro userlands or other userlands designed for v6
64542+ and newer ARM CPUs. It's recommended that you try without this option enabled
64543+ first, and only enable it if your userland does not boot (it will likely fail
64544+ at init time).
64545+
64546+endmenu
64547+menu "Role Based Access Control Options"
64548+depends on GRKERNSEC
64549+
64550+config GRKERNSEC_RBAC_DEBUG
64551+ bool
64552+
64553+config GRKERNSEC_NO_RBAC
64554+ bool "Disable RBAC system"
64555+ help
64556+ If you say Y here, the /dev/grsec device will be removed from the kernel,
64557+ preventing the RBAC system from being enabled. You should only say Y
64558+ here if you have no intention of using the RBAC system, so as to prevent
64559+ an attacker with root access from misusing the RBAC system to hide files
64560+ and processes when loadable module support and /dev/[k]mem have been
64561+ locked down.
64562+
64563+config GRKERNSEC_ACL_HIDEKERN
64564+ bool "Hide kernel processes"
64565+ help
64566+ If you say Y here, all kernel threads will be hidden to all
64567+ processes but those whose subject has the "view hidden processes"
64568+ flag.
64569+
64570+config GRKERNSEC_ACL_MAXTRIES
64571+ int "Maximum tries before password lockout"
64572+ default 3
64573+ help
64574+ This option enforces the maximum number of times a user can attempt
64575+ to authorize themselves with the grsecurity RBAC system before being
64576+ denied the ability to attempt authorization again for a specified time.
64577+ The lower the number, the harder it will be to brute-force a password.
64578+
64579+config GRKERNSEC_ACL_TIMEOUT
64580+ int "Time to wait after max password tries, in seconds"
64581+ default 30
64582+ help
64583+ This option specifies the time the user must wait after attempting to
64584+ authorize to the RBAC system with the maximum number of invalid
64585+ passwords. The higher the number, the harder it will be to brute-force
64586+ a password.
64587+
64588+endmenu
64589+menu "Filesystem Protections"
64590+depends on GRKERNSEC
64591+
64592+config GRKERNSEC_PROC
64593+ bool "Proc restrictions"
64594+ default y if GRKERNSEC_CONFIG_AUTO
64595+ help
64596+ If you say Y here, the permissions of the /proc filesystem
64597+ will be altered to enhance system security and privacy. You MUST
64598+ choose either a user only restriction or a user and group restriction.
64599+ Depending upon the option you choose, you can either restrict users to
64600+ see only the processes they themselves run, or choose a group that can
64601+ view all processes and files normally restricted to root if you choose
64602+ the "restrict to user only" option. NOTE: If you're running identd or
64603+ ntpd as a non-root user, you will have to run it as the group you
64604+ specify here.
64605+
64606+config GRKERNSEC_PROC_USER
64607+ bool "Restrict /proc to user only"
64608+ depends on GRKERNSEC_PROC
64609+ help
64610+ If you say Y here, non-root users will only be able to view their own
64611+ processes, and restricts them from viewing network-related information,
64612+ and viewing kernel symbol and module information.
64613+
64614+config GRKERNSEC_PROC_USERGROUP
64615+ bool "Allow special group"
64616+ default y if GRKERNSEC_CONFIG_AUTO
64617+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
64618+ help
64619+ If you say Y here, you will be able to select a group that will be
64620+ able to view all processes and network-related information. If you've
64621+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
64622+ remain hidden. This option is useful if you want to run identd as
64623+ a non-root user. The group you select may also be chosen at boot time
64624+ via "grsec_proc_gid=" on the kernel commandline.
64625+
64626+config GRKERNSEC_PROC_GID
64627+ int "GID for special group"
64628+ depends on GRKERNSEC_PROC_USERGROUP
64629+ default 1001
64630+
64631+config GRKERNSEC_PROC_ADD
64632+ bool "Additional restrictions"
64633+ default y if GRKERNSEC_CONFIG_AUTO
64634+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
64635+ help
64636+ If you say Y here, additional restrictions will be placed on
64637+ /proc that keep normal users from viewing device information and
64638+ slabinfo information that could be useful for exploits.
64639+
64640+config GRKERNSEC_LINK
64641+ bool "Linking restrictions"
64642+ default y if GRKERNSEC_CONFIG_AUTO
64643+ help
64644+ If you say Y here, /tmp race exploits will be prevented, since users
64645+ will no longer be able to follow symlinks owned by other users in
64646+ world-writable +t directories (e.g. /tmp), unless the owner of the
64647+ symlink is the owner of the directory. users will also not be
64648+ able to hardlink to files they do not own. If the sysctl option is
64649+ enabled, a sysctl option with name "linking_restrictions" is created.
64650+
64651+config GRKERNSEC_SYMLINKOWN
64652+ bool "Kernel-enforced SymlinksIfOwnerMatch"
64653+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
64654+ help
64655+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
64656+ that prevents it from being used as a security feature. As Apache
64657+ verifies the symlink by performing a stat() against the target of
64658+ the symlink before it is followed, an attacker can setup a symlink
64659+ to point to a same-owned file, then replace the symlink with one
64660+ that targets another user's file just after Apache "validates" the
64661+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
64662+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
64663+ will be in place for the group you specify. If the sysctl option
64664+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
64665+ created.
64666+
64667+config GRKERNSEC_SYMLINKOWN_GID
64668+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
64669+ depends on GRKERNSEC_SYMLINKOWN
64670+ default 1006
64671+ help
64672+ Setting this GID determines what group kernel-enforced
64673+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
64674+ is enabled, a sysctl option with name "symlinkown_gid" is created.
64675+
64676+config GRKERNSEC_FIFO
64677+ bool "FIFO restrictions"
64678+ default y if GRKERNSEC_CONFIG_AUTO
64679+ help
64680+ If you say Y here, users will not be able to write to FIFOs they don't
64681+ own in world-writable +t directories (e.g. /tmp), unless the owner of
64682+ the FIFO is the same owner of the directory it's held in. If the sysctl
64683+ option is enabled, a sysctl option with name "fifo_restrictions" is
64684+ created.
64685+
64686+config GRKERNSEC_SYSFS_RESTRICT
64687+ bool "Sysfs/debugfs restriction"
64688+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
64689+ depends on SYSFS
64690+ help
64691+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
64692+ any filesystem normally mounted under it (e.g. debugfs) will be
64693+ mostly accessible only by root. These filesystems generally provide access
64694+ to hardware and debug information that isn't appropriate for unprivileged
64695+ users of the system. Sysfs and debugfs have also become a large source
64696+ of new vulnerabilities, ranging from infoleaks to local compromise.
64697+ There has been very little oversight with an eye toward security involved
64698+ in adding new exporters of information to these filesystems, so their
64699+ use is discouraged.
64700+ For reasons of compatibility, a few directories have been whitelisted
64701+ for access by non-root users:
64702+ /sys/fs/selinux
64703+ /sys/fs/fuse
64704+ /sys/devices/system/cpu
64705+
64706+config GRKERNSEC_ROFS
64707+ bool "Runtime read-only mount protection"
64708+ depends on SYSCTL
64709+ help
64710+ If you say Y here, a sysctl option with name "romount_protect" will
64711+ be created. By setting this option to 1 at runtime, filesystems
64712+ will be protected in the following ways:
64713+ * No new writable mounts will be allowed
64714+ * Existing read-only mounts won't be able to be remounted read/write
64715+ * Write operations will be denied on all block devices
64716+ This option acts independently of grsec_lock: once it is set to 1,
64717+ it cannot be turned off. Therefore, please be mindful of the resulting
64718+ behavior if this option is enabled in an init script on a read-only
64719+ filesystem.
64720+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
64721+ and GRKERNSEC_IO should be enabled and module loading disabled via
64722+ config or at runtime.
64723+ This feature is mainly intended for secure embedded systems.
64724+
64725+
64726+config GRKERNSEC_DEVICE_SIDECHANNEL
64727+ bool "Eliminate stat/notify-based device sidechannels"
64728+ default y if GRKERNSEC_CONFIG_AUTO
64729+ help
64730+ If you say Y here, timing analyses on block or character
64731+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
64732+ will be thwarted for unprivileged users. If a process without
64733+ CAP_MKNOD stats such a device, the last access and last modify times
64734+ will match the device's create time. No access or modify events
64735+ will be triggered through inotify/dnotify/fanotify for such devices.
64736+ This feature will prevent attacks that may at a minimum
64737+ allow an attacker to determine the administrator's password length.
64738+
64739+config GRKERNSEC_CHROOT
64740+ bool "Chroot jail restrictions"
64741+ default y if GRKERNSEC_CONFIG_AUTO
64742+ help
64743+ If you say Y here, you will be able to choose several options that will
64744+ make breaking out of a chrooted jail much more difficult. If you
64745+ encounter no software incompatibilities with the following options, it
64746+ is recommended that you enable each one.
64747+
64748+config GRKERNSEC_CHROOT_MOUNT
64749+ bool "Deny mounts"
64750+ default y if GRKERNSEC_CONFIG_AUTO
64751+ depends on GRKERNSEC_CHROOT
64752+ help
64753+ If you say Y here, processes inside a chroot will not be able to
64754+ mount or remount filesystems. If the sysctl option is enabled, a
64755+ sysctl option with name "chroot_deny_mount" is created.
64756+
64757+config GRKERNSEC_CHROOT_DOUBLE
64758+ bool "Deny double-chroots"
64759+ default y if GRKERNSEC_CONFIG_AUTO
64760+ depends on GRKERNSEC_CHROOT
64761+ help
64762+ If you say Y here, processes inside a chroot will not be able to chroot
64763+ again outside the chroot. This is a widely used method of breaking
64764+ out of a chroot jail and should not be allowed. If the sysctl
64765+ option is enabled, a sysctl option with name
64766+ "chroot_deny_chroot" is created.
64767+
64768+config GRKERNSEC_CHROOT_PIVOT
64769+ bool "Deny pivot_root in chroot"
64770+ default y if GRKERNSEC_CONFIG_AUTO
64771+ depends on GRKERNSEC_CHROOT
64772+ help
64773+ If you say Y here, processes inside a chroot will not be able to use
64774+ a function called pivot_root() that was introduced in Linux 2.3.41. It
64775+ works similarly to chroot in that it changes the root filesystem. This
64776+ function could be misused in a chrooted process to attempt to break out
64777+ of the chroot, and therefore should not be allowed. If the sysctl
64778+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
64779+ created.
64780+
64781+config GRKERNSEC_CHROOT_CHDIR
64782+ bool "Enforce chdir(\"/\") on all chroots"
64783+ default y if GRKERNSEC_CONFIG_AUTO
64784+ depends on GRKERNSEC_CHROOT
64785+ help
64786+ If you say Y here, the current working directory of all newly-chrooted
64787+ applications will be set to the root directory of the chroot.
64788+ The man page on chroot(2) states:
64789+ Note that this call does not change the current working
64790+ directory, so that `.' can be outside the tree rooted at
64791+ `/'. In particular, the super-user can escape from a
64792+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
64793+
64794+ It is recommended that you say Y here, since it's not known to break
64795+ any software. If the sysctl option is enabled, a sysctl option with
64796+ name "chroot_enforce_chdir" is created.
64797+
64798+config GRKERNSEC_CHROOT_CHMOD
64799+ bool "Deny (f)chmod +s"
64800+ default y if GRKERNSEC_CONFIG_AUTO
64801+ depends on GRKERNSEC_CHROOT
64802+ help
64803+ If you say Y here, processes inside a chroot will not be able to chmod
64804+ or fchmod files to make them have suid or sgid bits. This protects
64805+ against another published method of breaking a chroot. If the sysctl
64806+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
64807+ created.
64808+
64809+config GRKERNSEC_CHROOT_FCHDIR
64810+ bool "Deny fchdir out of chroot"
64811+ default y if GRKERNSEC_CONFIG_AUTO
64812+ depends on GRKERNSEC_CHROOT
64813+ help
64814+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
64815+ to a file descriptor of the chrooting process that points to a directory
64816+ outside the filesystem will be stopped. If the sysctl option
64817+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
64818+
64819+config GRKERNSEC_CHROOT_MKNOD
64820+ bool "Deny mknod"
64821+ default y if GRKERNSEC_CONFIG_AUTO
64822+ depends on GRKERNSEC_CHROOT
64823+ help
64824+ If you say Y here, processes inside a chroot will not be allowed to
64825+ mknod. The problem with using mknod inside a chroot is that it
64826+ would allow an attacker to create a device entry that is the same
64827+ as one on the physical root of your system, which could be
64828+ anything from the console device to a device for your hard drive (which
64829+ they could then use to wipe the drive or steal data). It is recommended
64830+ that you say Y here, unless you run into software incompatibilities.
64831+ If the sysctl option is enabled, a sysctl option with name
64832+ "chroot_deny_mknod" is created.
64833+
64834+config GRKERNSEC_CHROOT_SHMAT
64835+ bool "Deny shmat() out of chroot"
64836+ default y if GRKERNSEC_CONFIG_AUTO
64837+ depends on GRKERNSEC_CHROOT
64838+ help
64839+ If you say Y here, processes inside a chroot will not be able to attach
64840+ to shared memory segments that were created outside of the chroot jail.
64841+ It is recommended that you say Y here. If the sysctl option is enabled,
64842+ a sysctl option with name "chroot_deny_shmat" is created.
64843+
64844+config GRKERNSEC_CHROOT_UNIX
64845+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
64846+ default y if GRKERNSEC_CONFIG_AUTO
64847+ depends on GRKERNSEC_CHROOT
64848+ help
64849+ If you say Y here, processes inside a chroot will not be able to
64850+ connect to abstract (meaning not belonging to a filesystem) Unix
64851+ domain sockets that were bound outside of a chroot. It is recommended
64852+ that you say Y here. If the sysctl option is enabled, a sysctl option
64853+ with name "chroot_deny_unix" is created.
64854+
64855+config GRKERNSEC_CHROOT_FINDTASK
64856+ bool "Protect outside processes"
64857+ default y if GRKERNSEC_CONFIG_AUTO
64858+ depends on GRKERNSEC_CHROOT
64859+ help
64860+ If you say Y here, processes inside a chroot will not be able to
64861+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
64862+ getsid, or view any process outside of the chroot. If the sysctl
64863+ option is enabled, a sysctl option with name "chroot_findtask" is
64864+ created.
64865+
64866+config GRKERNSEC_CHROOT_NICE
64867+ bool "Restrict priority changes"
64868+ default y if GRKERNSEC_CONFIG_AUTO
64869+ depends on GRKERNSEC_CHROOT
64870+ help
64871+ If you say Y here, processes inside a chroot will not be able to raise
64872+ the priority of processes in the chroot, or alter the priority of
64873+ processes outside the chroot. This provides more security than simply
64874+ removing CAP_SYS_NICE from the process' capability set. If the
64875+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
64876+ is created.
64877+
64878+config GRKERNSEC_CHROOT_SYSCTL
64879+ bool "Deny sysctl writes"
64880+ default y if GRKERNSEC_CONFIG_AUTO
64881+ depends on GRKERNSEC_CHROOT
64882+ help
64883+ If you say Y here, an attacker in a chroot will not be able to
64884+ write to sysctl entries, either by sysctl(2) or through a /proc
64885+ interface. It is strongly recommended that you say Y here. If the
64886+ sysctl option is enabled, a sysctl option with name
64887+ "chroot_deny_sysctl" is created.
64888+
64889+config GRKERNSEC_CHROOT_CAPS
64890+ bool "Capability restrictions"
64891+ default y if GRKERNSEC_CONFIG_AUTO
64892+ depends on GRKERNSEC_CHROOT
64893+ help
64894+ If you say Y here, the capabilities on all processes within a
64895+ chroot jail will be lowered to stop module insertion, raw i/o,
64896+ system and net admin tasks, rebooting the system, modifying immutable
64897+ files, modifying IPC owned by another, and changing the system time.
64898+ This is left an option because it can break some apps. Disable this
64899+ if your chrooted apps are having problems performing those kinds of
64900+ tasks. If the sysctl option is enabled, a sysctl option with
64901+ name "chroot_caps" is created.
64902+
64903+config GRKERNSEC_CHROOT_INITRD
64904+ bool "Exempt initrd tasks from restrictions"
64905+ default y if GRKERNSEC_CONFIG_AUTO
64906+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
64907+ help
64908+ If you say Y here, tasks started prior to init will be exempted from
64909+ grsecurity's chroot restrictions. This option is mainly meant to
64910+ resolve Plymouth's performing privileged operations unnecessarily
64911+ in a chroot.
64912+
64913+endmenu
64914+menu "Kernel Auditing"
64915+depends on GRKERNSEC
64916+
64917+config GRKERNSEC_AUDIT_GROUP
64918+ bool "Single group for auditing"
64919+ help
64920+ If you say Y here, the exec and chdir logging features will only operate
64921+ on a group you specify. This option is recommended if you only want to
64922+ watch certain users instead of having a large amount of logs from the
64923+ entire system. If the sysctl option is enabled, a sysctl option with
64924+ name "audit_group" is created.
64925+
64926+config GRKERNSEC_AUDIT_GID
64927+ int "GID for auditing"
64928+ depends on GRKERNSEC_AUDIT_GROUP
64929+ default 1007
64930+
64931+config GRKERNSEC_EXECLOG
64932+ bool "Exec logging"
64933+ help
64934+ If you say Y here, all execve() calls will be logged (since the
64935+ other exec*() calls are frontends to execve(), all execution
64936+ will be logged). Useful for shell-servers that like to keep track
64937+ of their users. If the sysctl option is enabled, a sysctl option with
64938+ name "exec_logging" is created.
64939+ WARNING: This option when enabled will produce a LOT of logs, especially
64940+ on an active system.
64941+
64942+config GRKERNSEC_RESLOG
64943+ bool "Resource logging"
64944+ default y if GRKERNSEC_CONFIG_AUTO
64945+ help
64946+ If you say Y here, all attempts to overstep resource limits will
64947+ be logged with the resource name, the requested size, and the current
64948+ limit. It is highly recommended that you say Y here. If the sysctl
64949+ option is enabled, a sysctl option with name "resource_logging" is
64950+ created. If the RBAC system is enabled, the sysctl value is ignored.
64951+
64952+config GRKERNSEC_CHROOT_EXECLOG
64953+ bool "Log execs within chroot"
64954+ help
64955+ If you say Y here, all executions inside a chroot jail will be logged
64956+ to syslog. This can cause a large amount of logs if certain
64957+ applications (eg. djb's daemontools) are installed on the system, and
64958+ is therefore left as an option. If the sysctl option is enabled, a
64959+ sysctl option with name "chroot_execlog" is created.
64960+
64961+config GRKERNSEC_AUDIT_PTRACE
64962+ bool "Ptrace logging"
64963+ help
64964+ If you say Y here, all attempts to attach to a process via ptrace
64965+ will be logged. If the sysctl option is enabled, a sysctl option
64966+ with name "audit_ptrace" is created.
64967+
64968+config GRKERNSEC_AUDIT_CHDIR
64969+ bool "Chdir logging"
64970+ help
64971+ If you say Y here, all chdir() calls will be logged. If the sysctl
64972+ option is enabled, a sysctl option with name "audit_chdir" is created.
64973+
64974+config GRKERNSEC_AUDIT_MOUNT
64975+ bool "(Un)Mount logging"
64976+ help
64977+ If you say Y here, all mounts and unmounts will be logged. If the
64978+ sysctl option is enabled, a sysctl option with name "audit_mount" is
64979+ created.
64980+
64981+config GRKERNSEC_SIGNAL
64982+ bool "Signal logging"
64983+ default y if GRKERNSEC_CONFIG_AUTO
64984+ help
64985+ If you say Y here, certain important signals will be logged, such as
64986+ SIGSEGV, which will as a result inform you of when an error in a program
64987+ occurred, which in some cases could mean a possible exploit attempt.
64988+ If the sysctl option is enabled, a sysctl option with name
64989+ "signal_logging" is created.
64990+
64991+config GRKERNSEC_FORKFAIL
64992+ bool "Fork failure logging"
64993+ help
64994+ If you say Y here, all failed fork() attempts will be logged.
64995+ This could suggest a fork bomb, or someone attempting to overstep
64996+ their process limit. If the sysctl option is enabled, a sysctl option
64997+ with name "forkfail_logging" is created.
64998+
64999+config GRKERNSEC_TIME
65000+ bool "Time change logging"
65001+ default y if GRKERNSEC_CONFIG_AUTO
65002+ help
65003+ If you say Y here, any changes of the system clock will be logged.
65004+ If the sysctl option is enabled, a sysctl option with name
65005+ "timechange_logging" is created.
65006+
65007+config GRKERNSEC_PROC_IPADDR
65008+ bool "/proc/<pid>/ipaddr support"
65009+ default y if GRKERNSEC_CONFIG_AUTO
65010+ help
65011+ If you say Y here, a new entry will be added to each /proc/<pid>
65012+ directory that contains the IP address of the person using the task.
65013+ The IP is carried across local TCP and AF_UNIX stream sockets.
65014+ This information can be useful for IDS/IPSes to perform remote response
65015+ to a local attack. The entry is readable by only the owner of the
65016+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
65017+ the RBAC system), and thus does not create privacy concerns.
65018+
65019+config GRKERNSEC_RWXMAP_LOG
65020+ bool 'Denied RWX mmap/mprotect logging'
65021+ default y if GRKERNSEC_CONFIG_AUTO
65022+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
65023+ help
65024+ If you say Y here, calls to mmap() and mprotect() with explicit
65025+ usage of PROT_WRITE and PROT_EXEC together will be logged when
65026+ denied by the PAX_MPROTECT feature. This feature will also
65027+ log other problematic scenarios that can occur when PAX_MPROTECT
65028+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
65029+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
65030+ is created.
65031+
65032+endmenu
65033+
65034+menu "Executable Protections"
65035+depends on GRKERNSEC
65036+
65037+config GRKERNSEC_DMESG
65038+ bool "Dmesg(8) restriction"
65039+ default y if GRKERNSEC_CONFIG_AUTO
65040+ help
65041+ If you say Y here, non-root users will not be able to use dmesg(8)
65042+ to view the contents of the kernel's circular log buffer.
65043+ The kernel's log buffer often contains kernel addresses and other
65044+ identifying information useful to an attacker in fingerprinting a
65045+ system for a targeted exploit.
65046+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
65047+ created.
65048+
65049+config GRKERNSEC_HARDEN_PTRACE
65050+ bool "Deter ptrace-based process snooping"
65051+ default y if GRKERNSEC_CONFIG_AUTO
65052+ help
65053+ If you say Y here, TTY sniffers and other malicious monitoring
65054+ programs implemented through ptrace will be defeated. If you
65055+ have been using the RBAC system, this option has already been
65056+ enabled for several years for all users, with the ability to make
65057+ fine-grained exceptions.
65058+
65059+ This option only affects the ability of non-root users to ptrace
65060+ processes that are not a descendant of the ptracing process.
65061+ This means that strace ./binary and gdb ./binary will still work,
65062+ but attaching to arbitrary processes will not. If the sysctl
65063+ option is enabled, a sysctl option with name "harden_ptrace" is
65064+ created.
65065+
65066+config GRKERNSEC_PTRACE_READEXEC
65067+ bool "Require read access to ptrace sensitive binaries"
65068+ default y if GRKERNSEC_CONFIG_AUTO
65069+ help
65070+ If you say Y here, unprivileged users will not be able to ptrace unreadable
65071+ binaries. This option is useful in environments that
65072+ remove the read bits (e.g. file mode 4711) from suid binaries to
65073+ prevent infoleaking of their contents. This option adds
65074+ consistency to the use of that file mode, as the binary could normally
65075+ be read out when run without privileges while ptracing.
65076+
65077+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
65078+ is created.
65079+
65080+config GRKERNSEC_SETXID
65081+ bool "Enforce consistent multithreaded privileges"
65082+ default y if GRKERNSEC_CONFIG_AUTO
65083+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
65084+ help
65085+ If you say Y here, a change from a root uid to a non-root uid
65086+ in a multithreaded application will cause the resulting uids,
65087+ gids, supplementary groups, and capabilities in that thread
65088+ to be propagated to the other threads of the process. In most
65089+ cases this is unnecessary, as glibc will emulate this behavior
65090+ on behalf of the application. Other libcs do not act in the
65091+ same way, allowing the other threads of the process to continue
65092+ running with root privileges. If the sysctl option is enabled,
65093+ a sysctl option with name "consistent_setxid" is created.
65094+
65095+config GRKERNSEC_HARDEN_IPC
65096+ bool "Disallow access to overly-permissive IPC objects"
65097+ default y if GRKERNSEC_CONFIG_AUTO
65098+ depends on SYSVIPC
65099+ help
65100+ If you say Y here, access to overly-permissive IPC objects (shared
65101+ memory, message queues, and semaphores) will be denied for processes
65102+ given the following criteria beyond normal permission checks:
65103+ 1) If the IPC object is world-accessible and the euid doesn't match
65104+ that of the creator or current uid for the IPC object
65105+ 2) If the IPC object is group-accessible and the egid doesn't
65106+ match that of the creator or current gid for the IPC object
65107+ It's a common error to grant too much permission to these objects,
65108+ with impact ranging from denial of service and information leaking to
65109+ privilege escalation. This feature was developed in response to
65110+ research by Tim Brown:
65111+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
65112+ who found hundreds of such insecure usages. Processes with
65113+ CAP_IPC_OWNER are still permitted to access these IPC objects.
65114+ If the sysctl option is enabled, a sysctl option with name
65115+ "harden_ipc" is created.
65116+
65117+config GRKERNSEC_TPE
65118+ bool "Trusted Path Execution (TPE)"
65119+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
65120+ help
65121+ If you say Y here, you will be able to choose a gid to add to the
65122+ supplementary groups of users you want to mark as "untrusted."
65123+ These users will not be able to execute any files that are not in
65124+ root-owned directories writable only by root. If the sysctl option
65125+ is enabled, a sysctl option with name "tpe" is created.
65126+
65127+config GRKERNSEC_TPE_ALL
65128+ bool "Partially restrict all non-root users"
65129+ depends on GRKERNSEC_TPE
65130+ help
65131+ If you say Y here, all non-root users will be covered under
65132+ a weaker TPE restriction. This is separate from, and in addition to,
65133+ the main TPE options that you have selected elsewhere. Thus, if a
65134+ "trusted" GID is chosen, this restriction applies to even that GID.
65135+ Under this restriction, all non-root users will only be allowed to
65136+ execute files in directories they own that are not group or
65137+ world-writable, or in directories owned by root and writable only by
65138+ root. If the sysctl option is enabled, a sysctl option with name
65139+ "tpe_restrict_all" is created.
65140+
65141+config GRKERNSEC_TPE_INVERT
65142+ bool "Invert GID option"
65143+ depends on GRKERNSEC_TPE
65144+ help
65145+ If you say Y here, the group you specify in the TPE configuration will
65146+ decide what group TPE restrictions will be *disabled* for. This
65147+ option is useful if you want TPE restrictions to be applied to most
65148+ users on the system. If the sysctl option is enabled, a sysctl option
65149+ with name "tpe_invert" is created. Unlike other sysctl options, this
65150+ entry will default to on for backward-compatibility.
65151+
65152+config GRKERNSEC_TPE_GID
65153+ int
65154+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
65155+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
65156+
65157+config GRKERNSEC_TPE_UNTRUSTED_GID
65158+ int "GID for TPE-untrusted users"
65159+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
65160+ default 1005
65161+ help
65162+ Setting this GID determines what group TPE restrictions will be
65163+ *enabled* for. If the sysctl option is enabled, a sysctl option
65164+ with name "tpe_gid" is created.
65165+
65166+config GRKERNSEC_TPE_TRUSTED_GID
65167+ int "GID for TPE-trusted users"
65168+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
65169+ default 1005
65170+ help
65171+ Setting this GID determines what group TPE restrictions will be
65172+ *disabled* for. If the sysctl option is enabled, a sysctl option
65173+ with name "tpe_gid" is created.
65174+
65175+endmenu
65176+menu "Network Protections"
65177+depends on GRKERNSEC
65178+
65179+config GRKERNSEC_RANDNET
65180+ bool "Larger entropy pools"
65181+ default y if GRKERNSEC_CONFIG_AUTO
65182+ help
65183+ If you say Y here, the entropy pools used for many features of Linux
65184+ and grsecurity will be doubled in size. Since several grsecurity
65185+ features use additional randomness, it is recommended that you say Y
65186+ here. Saying Y here has a similar effect as modifying
65187+ /proc/sys/kernel/random/poolsize.
65188+
65189+config GRKERNSEC_BLACKHOLE
65190+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
65191+ default y if GRKERNSEC_CONFIG_AUTO
65192+ depends on NET
65193+ help
65194+ If you say Y here, neither TCP resets nor ICMP
65195+ destination-unreachable packets will be sent in response to packets
65196+ sent to ports for which no associated listening process exists.
65197+ This feature supports both IPV4 and IPV6 and exempts the
65198+ loopback interface from blackholing. Enabling this feature
65199+ makes a host more resilient to DoS attacks and reduces network
65200+ visibility against scanners.
65201+
65202+ The blackhole feature as-implemented is equivalent to the FreeBSD
65203+ blackhole feature, as it prevents RST responses to all packets, not
65204+ just SYNs. Under most application behavior this causes no
65205+ problems, but applications (like haproxy) may not close certain
65206+ connections in a way that cleanly terminates them on the remote
65207+ end, leaving the remote host in LAST_ACK state. Because of this
65208+ side-effect and to prevent intentional LAST_ACK DoSes, this
65209+ feature also adds automatic mitigation against such attacks.
65210+ The mitigation drastically reduces the amount of time a socket
65211+ can spend in LAST_ACK state. If you're using haproxy and not
65212+ all servers it connects to have this option enabled, consider
65213+ disabling this feature on the haproxy host.
65214+
65215+ If the sysctl option is enabled, two sysctl options with names
65216+ "ip_blackhole" and "lastack_retries" will be created.
65217+ While "ip_blackhole" takes the standard zero/non-zero on/off
65218+ toggle, "lastack_retries" uses the same kinds of values as
65219+ "tcp_retries1" and "tcp_retries2". The default value of 4
65220+ prevents a socket from lasting more than 45 seconds in LAST_ACK
65221+ state.
65222+
65223+config GRKERNSEC_NO_SIMULT_CONNECT
65224+ bool "Disable TCP Simultaneous Connect"
65225+ default y if GRKERNSEC_CONFIG_AUTO
65226+ depends on NET
65227+ help
65228+ If you say Y here, a feature by Willy Tarreau will be enabled that
65229+ removes a weakness in Linux's strict implementation of TCP that
65230+ allows two clients to connect to each other without either entering
65231+ a listening state. The weakness allows an attacker to easily prevent
65232+ a client from connecting to a known server provided the source port
65233+ for the connection is guessed correctly.
65234+
65235+ As the weakness could be used to prevent an antivirus or IPS from
65236+ fetching updates, or prevent an SSL gateway from fetching a CRL,
65237+ it should be eliminated by enabling this option. Though Linux is
65238+ one of few operating systems supporting simultaneous connect, it
65239+ has no legitimate use in practice and is rarely supported by firewalls.
65240+
65241+config GRKERNSEC_SOCKET
65242+ bool "Socket restrictions"
65243+ depends on NET
65244+ help
65245+ If you say Y here, you will be able to choose from several options.
65246+ If you assign a GID on your system and add it to the supplementary
65247+ groups of users you want to restrict socket access to, this patch
65248+ will perform up to three things, based on the option(s) you choose.
65249+
65250+config GRKERNSEC_SOCKET_ALL
65251+ bool "Deny any sockets to group"
65252+ depends on GRKERNSEC_SOCKET
65253+ help
65254+ If you say Y here, you will be able to choose a GID of whose users will
65255+ be unable to connect to other hosts from your machine or run server
65256+ applications from your machine. If the sysctl option is enabled, a
65257+ sysctl option with name "socket_all" is created.
65258+
65259+config GRKERNSEC_SOCKET_ALL_GID
65260+ int "GID to deny all sockets for"
65261+ depends on GRKERNSEC_SOCKET_ALL
65262+ default 1004
65263+ help
65264+ Here you can choose the GID to disable socket access for. Remember to
65265+ add the users you want socket access disabled for to the GID
65266+ specified here. If the sysctl option is enabled, a sysctl option
65267+ with name "socket_all_gid" is created.
65268+
65269+config GRKERNSEC_SOCKET_CLIENT
65270+ bool "Deny client sockets to group"
65271+ depends on GRKERNSEC_SOCKET
65272+ help
65273+ If you say Y here, you will be able to choose a GID of whose users will
65274+ be unable to connect to other hosts from your machine, but will be
65275+ able to run servers. If this option is enabled, all users in the group
65276+ you specify will have to use passive mode when initiating ftp transfers
65277+ from the shell on your machine. If the sysctl option is enabled, a
65278+ sysctl option with name "socket_client" is created.
65279+
65280+config GRKERNSEC_SOCKET_CLIENT_GID
65281+ int "GID to deny client sockets for"
65282+ depends on GRKERNSEC_SOCKET_CLIENT
65283+ default 1003
65284+ help
65285+ Here you can choose the GID to disable client socket access for.
65286+ Remember to add the users you want client socket access disabled for to
65287+ the GID specified here. If the sysctl option is enabled, a sysctl
65288+ option with name "socket_client_gid" is created.
65289+
65290+config GRKERNSEC_SOCKET_SERVER
65291+ bool "Deny server sockets to group"
65292+ depends on GRKERNSEC_SOCKET
65293+ help
65294+ If you say Y here, you will be able to choose a GID of whose users will
65295+ be unable to run server applications from your machine. If the sysctl
65296+ option is enabled, a sysctl option with name "socket_server" is created.
65297+
65298+config GRKERNSEC_SOCKET_SERVER_GID
65299+ int "GID to deny server sockets for"
65300+ depends on GRKERNSEC_SOCKET_SERVER
65301+ default 1002
65302+ help
65303+ Here you can choose the GID to disable server socket access for.
65304+ Remember to add the users you want server socket access disabled for to
65305+ the GID specified here. If the sysctl option is enabled, a sysctl
65306+ option with name "socket_server_gid" is created.
65307+
65308+endmenu
65309+
65310+menu "Physical Protections"
65311+depends on GRKERNSEC
65312+
65313+config GRKERNSEC_DENYUSB
65314+ bool "Deny new USB connections after toggle"
65315+ default y if GRKERNSEC_CONFIG_AUTO
65316+ depends on SYSCTL && USB_SUPPORT
65317+ help
65318+ If you say Y here, a new sysctl option with name "deny_new_usb"
65319+ will be created. Setting its value to 1 will prevent any new
65320+ USB devices from being recognized by the OS. Any attempted USB
65321+ device insertion will be logged. This option is intended to be
65322+ used against custom USB devices designed to exploit vulnerabilities
65323+ in various USB device drivers.
65324+
65325+ For greatest effectiveness, this sysctl should be set after any
65326+ relevant init scripts. This option is safe to enable in distros
65327+ as each user can choose whether or not to toggle the sysctl.
65328+
65329+config GRKERNSEC_DENYUSB_FORCE
65330+ bool "Reject all USB devices not connected at boot"
65331+ select USB
65332+ depends on GRKERNSEC_DENYUSB
65333+ help
65334+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
65335+ that doesn't involve a sysctl entry. This option should only be
65336+ enabled if you're sure you want to deny all new USB connections
65337+ at runtime and don't want to modify init scripts. This should not
65338+ be enabled by distros. It forces the core USB code to be built
65339+ into the kernel image so that all devices connected at boot time
65340+ can be recognized and new USB device connections can be prevented
65341+ prior to init running.
65342+
65343+endmenu
65344+
65345+menu "Sysctl Support"
65346+depends on GRKERNSEC && SYSCTL
65347+
65348+config GRKERNSEC_SYSCTL
65349+ bool "Sysctl support"
65350+ default y if GRKERNSEC_CONFIG_AUTO
65351+ help
65352+ If you say Y here, you will be able to change the options that
65353+ grsecurity runs with at bootup, without having to recompile your
65354+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
65355+ to enable (1) or disable (0) various features. All the sysctl entries
65356+ are mutable until the "grsec_lock" entry is set to a non-zero value.
65357+ All features enabled in the kernel configuration are disabled at boot
65358+ if you do not say Y to the "Turn on features by default" option.
65359+ All options should be set at startup, and the grsec_lock entry should
65360+ be set to a non-zero value after all the options are set.
65361+ *THIS IS EXTREMELY IMPORTANT*
65362+
65363+config GRKERNSEC_SYSCTL_DISTRO
65364+ bool "Extra sysctl support for distro makers (READ HELP)"
65365+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
65366+ help
65367+ If you say Y here, additional sysctl options will be created
65368+ for features that affect processes running as root. Therefore,
65369+ it is critical when using this option that the grsec_lock entry be
65370+ enabled after boot. Only distros with prebuilt kernel packages
65371+ with this option enabled that can ensure grsec_lock is enabled
65372+ after boot should use this option.
65373+ *Failure to set grsec_lock after boot makes all grsec features
65374+ this option covers useless*
65375+
65376+ Currently this option creates the following sysctl entries:
65377+ "Disable Privileged I/O": "disable_priv_io"
65378+
65379+config GRKERNSEC_SYSCTL_ON
65380+ bool "Turn on features by default"
65381+ default y if GRKERNSEC_CONFIG_AUTO
65382+ depends on GRKERNSEC_SYSCTL
65383+ help
65384+ If you say Y here, instead of having all features enabled in the
65385+ kernel configuration disabled at boot time, the features will be
65386+ enabled at boot time. It is recommended you say Y here unless
65387+ there is some reason you would want all sysctl-tunable features to
65388+ be disabled by default. As mentioned elsewhere, it is important
65389+ to enable the grsec_lock entry once you have finished modifying
65390+ the sysctl entries.
65391+
65392+endmenu
65393+menu "Logging Options"
65394+depends on GRKERNSEC
65395+
65396+config GRKERNSEC_FLOODTIME
65397+ int "Seconds in between log messages (minimum)"
65398+ default 10
65399+ help
65400+ This option allows you to enforce the number of seconds between
65401+ grsecurity log messages. The default should be suitable for most
65402+ people, however, if you choose to change it, choose a value small enough
65403+ to allow informative logs to be produced, but large enough to
65404+ prevent flooding.
65405+
65406+config GRKERNSEC_FLOODBURST
65407+ int "Number of messages in a burst (maximum)"
65408+ default 6
65409+ help
65410+ This option allows you to choose the maximum number of messages allowed
65411+ within the flood time interval you chose in a separate option. The
65412+ default should be suitable for most people, however if you find that
65413+ many of your logs are being interpreted as flooding, you may want to
65414+ raise this value.
65415+
65416+endmenu
65417diff --git a/grsecurity/Makefile b/grsecurity/Makefile
65418new file mode 100644
65419index 0000000..5307c8a
65420--- /dev/null
65421+++ b/grsecurity/Makefile
65422@@ -0,0 +1,54 @@
65423+# grsecurity – access control and security hardening for Linux
65424+# All code in this directory and various hooks located throughout the Linux kernel are
65425+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
65426+# http://www.grsecurity.net spender@grsecurity.net
65427+#
65428+# This program is free software; you can redistribute it and/or
65429+# modify it under the terms of the GNU General Public License version 2
65430+# as published by the Free Software Foundation.
65431+#
65432+# This program is distributed in the hope that it will be useful,
65433+# but WITHOUT ANY WARRANTY; without even the implied warranty of
65434+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
65435+# GNU General Public License for more details.
65436+#
65437+# You should have received a copy of the GNU General Public License
65438+# along with this program; if not, write to the Free Software
65439+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
65440+
65441+KBUILD_CFLAGS += -Werror
65442+
65443+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
65444+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
65445+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
65446+ grsec_usb.o grsec_ipc.o
65447+
65448+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
65449+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
65450+ gracl_learn.o grsec_log.o gracl_policy.o
65451+ifdef CONFIG_COMPAT
65452+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
65453+endif
65454+
65455+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
65456+
65457+ifdef CONFIG_NET
65458+obj-y += grsec_sock.o
65459+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
65460+endif
65461+
65462+ifndef CONFIG_GRKERNSEC
65463+obj-y += grsec_disabled.o
65464+endif
65465+
65466+ifdef CONFIG_GRKERNSEC_HIDESYM
65467+extra-y := grsec_hidesym.o
65468+$(obj)/grsec_hidesym.o:
65469+ @-chmod -f 500 /boot
65470+ @-chmod -f 500 /lib/modules
65471+ @-chmod -f 500 /lib64/modules
65472+ @-chmod -f 500 /lib32/modules
65473+ @-chmod -f 700 .
65474+ @-chmod -f 700 $(objtree)
65475+ @echo ' grsec: protected kernel image paths'
65476+endif
65477diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
65478new file mode 100644
65479index 0000000..364a9d7
65480--- /dev/null
65481+++ b/grsecurity/gracl.c
65482@@ -0,0 +1,2678 @@
65483+#include <linux/kernel.h>
65484+#include <linux/module.h>
65485+#include <linux/sched.h>
65486+#include <linux/mm.h>
65487+#include <linux/file.h>
65488+#include <linux/fs.h>
65489+#include <linux/namei.h>
65490+#include <linux/mount.h>
65491+#include <linux/tty.h>
65492+#include <linux/proc_fs.h>
65493+#include <linux/lglock.h>
65494+#include <linux/slab.h>
65495+#include <linux/vmalloc.h>
65496+#include <linux/types.h>
65497+#include <linux/sysctl.h>
65498+#include <linux/netdevice.h>
65499+#include <linux/ptrace.h>
65500+#include <linux/gracl.h>
65501+#include <linux/gralloc.h>
65502+#include <linux/security.h>
65503+#include <linux/grinternal.h>
65504+#include <linux/pid_namespace.h>
65505+#include <linux/stop_machine.h>
65506+#include <linux/fdtable.h>
65507+#include <linux/percpu.h>
65508+#include <linux/lglock.h>
65509+#include <linux/hugetlb.h>
65510+#include <linux/posix-timers.h>
65511+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
65512+#include <linux/magic.h>
65513+#include <linux/pagemap.h>
65514+#include "../fs/btrfs/async-thread.h"
65515+#include "../fs/btrfs/ctree.h"
65516+#include "../fs/btrfs/btrfs_inode.h"
65517+#endif
65518+#include "../fs/mount.h"
65519+
65520+#include <asm/uaccess.h>
65521+#include <asm/errno.h>
65522+#include <asm/mman.h>
65523+
65524+#define FOR_EACH_ROLE_START(role) \
65525+ role = running_polstate.role_list; \
65526+ while (role) {
65527+
65528+#define FOR_EACH_ROLE_END(role) \
65529+ role = role->prev; \
65530+ }
65531+
65532+extern struct path gr_real_root;
65533+
65534+static struct gr_policy_state running_polstate;
65535+struct gr_policy_state *polstate = &running_polstate;
65536+extern struct gr_alloc_state *current_alloc_state;
65537+
65538+extern char *gr_shared_page[4];
65539+DEFINE_RWLOCK(gr_inode_lock);
65540+
65541+static unsigned int gr_status __read_only = GR_STATUS_INIT;
65542+
65543+#ifdef CONFIG_NET
65544+extern struct vfsmount *sock_mnt;
65545+#endif
65546+
65547+extern struct vfsmount *pipe_mnt;
65548+extern struct vfsmount *shm_mnt;
65549+
65550+#ifdef CONFIG_HUGETLBFS
65551+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
65552+#endif
65553+
65554+extern u16 acl_sp_role_value;
65555+extern struct acl_object_label *fakefs_obj_rw;
65556+extern struct acl_object_label *fakefs_obj_rwx;
65557+
65558+int gr_acl_is_enabled(void)
65559+{
65560+ return (gr_status & GR_READY);
65561+}
65562+
65563+void gr_enable_rbac_system(void)
65564+{
65565+ pax_open_kernel();
65566+ gr_status |= GR_READY;
65567+ pax_close_kernel();
65568+}
65569+
65570+int gr_rbac_disable(void *unused)
65571+{
65572+ pax_open_kernel();
65573+ gr_status &= ~GR_READY;
65574+ pax_close_kernel();
65575+
65576+ return 0;
65577+}
65578+
65579+static inline dev_t __get_dev(const struct dentry *dentry)
65580+{
65581+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
65582+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
65583+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
65584+ else
65585+#endif
65586+ return dentry->d_sb->s_dev;
65587+}
65588+
65589+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
65590+{
65591+ return __get_dev(dentry);
65592+}
65593+
65594+static char gr_task_roletype_to_char(struct task_struct *task)
65595+{
65596+ switch (task->role->roletype &
65597+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
65598+ GR_ROLE_SPECIAL)) {
65599+ case GR_ROLE_DEFAULT:
65600+ return 'D';
65601+ case GR_ROLE_USER:
65602+ return 'U';
65603+ case GR_ROLE_GROUP:
65604+ return 'G';
65605+ case GR_ROLE_SPECIAL:
65606+ return 'S';
65607+ }
65608+
65609+ return 'X';
65610+}
65611+
65612+char gr_roletype_to_char(void)
65613+{
65614+ return gr_task_roletype_to_char(current);
65615+}
65616+
65617+__inline__ int
65618+gr_acl_tpe_check(void)
65619+{
65620+ if (unlikely(!(gr_status & GR_READY)))
65621+ return 0;
65622+ if (current->role->roletype & GR_ROLE_TPE)
65623+ return 1;
65624+ else
65625+ return 0;
65626+}
65627+
65628+int
65629+gr_handle_rawio(const struct inode *inode)
65630+{
65631+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65632+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
65633+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
65634+ !capable(CAP_SYS_RAWIO))
65635+ return 1;
65636+#endif
65637+ return 0;
65638+}
65639+
65640+int
65641+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
65642+{
65643+ if (likely(lena != lenb))
65644+ return 0;
65645+
65646+ return !memcmp(a, b, lena);
65647+}
65648+
65649+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
65650+{
65651+ *buflen -= namelen;
65652+ if (*buflen < 0)
65653+ return -ENAMETOOLONG;
65654+ *buffer -= namelen;
65655+ memcpy(*buffer, str, namelen);
65656+ return 0;
65657+}
65658+
65659+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
65660+{
65661+ return prepend(buffer, buflen, name->name, name->len);
65662+}
65663+
65664+static int prepend_path(const struct path *path, struct path *root,
65665+ char **buffer, int *buflen)
65666+{
65667+ struct dentry *dentry = path->dentry;
65668+ struct vfsmount *vfsmnt = path->mnt;
65669+ struct mount *mnt = real_mount(vfsmnt);
65670+ bool slash = false;
65671+ int error = 0;
65672+
65673+ while (dentry != root->dentry || vfsmnt != root->mnt) {
65674+ struct dentry * parent;
65675+
65676+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
65677+ /* Global root? */
65678+ if (!mnt_has_parent(mnt)) {
65679+ goto out;
65680+ }
65681+ dentry = mnt->mnt_mountpoint;
65682+ mnt = mnt->mnt_parent;
65683+ vfsmnt = &mnt->mnt;
65684+ continue;
65685+ }
65686+ parent = dentry->d_parent;
65687+ prefetch(parent);
65688+ spin_lock(&dentry->d_lock);
65689+ error = prepend_name(buffer, buflen, &dentry->d_name);
65690+ spin_unlock(&dentry->d_lock);
65691+ if (!error)
65692+ error = prepend(buffer, buflen, "/", 1);
65693+ if (error)
65694+ break;
65695+
65696+ slash = true;
65697+ dentry = parent;
65698+ }
65699+
65700+out:
65701+ if (!error && !slash)
65702+ error = prepend(buffer, buflen, "/", 1);
65703+
65704+ return error;
65705+}
65706+
65707+/* this must be called with mount_lock and rename_lock held */
65708+
65709+static char *__our_d_path(const struct path *path, struct path *root,
65710+ char *buf, int buflen)
65711+{
65712+ char *res = buf + buflen;
65713+ int error;
65714+
65715+ prepend(&res, &buflen, "\0", 1);
65716+ error = prepend_path(path, root, &res, &buflen);
65717+ if (error)
65718+ return ERR_PTR(error);
65719+
65720+ return res;
65721+}
65722+
65723+static char *
65724+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
65725+{
65726+ char *retval;
65727+
65728+ retval = __our_d_path(path, root, buf, buflen);
65729+ if (unlikely(IS_ERR(retval)))
65730+ retval = strcpy(buf, "<path too long>");
65731+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
65732+ retval[1] = '\0';
65733+
65734+ return retval;
65735+}
65736+
65737+static char *
65738+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
65739+ char *buf, int buflen)
65740+{
65741+ struct path path;
65742+ char *res;
65743+
65744+ path.dentry = (struct dentry *)dentry;
65745+ path.mnt = (struct vfsmount *)vfsmnt;
65746+
65747+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
65748+ by the RBAC system */
65749+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
65750+
65751+ return res;
65752+}
65753+
65754+static char *
65755+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
65756+ char *buf, int buflen)
65757+{
65758+ char *res;
65759+ struct path path;
65760+ struct path root;
65761+ struct task_struct *reaper = init_pid_ns.child_reaper;
65762+
65763+ path.dentry = (struct dentry *)dentry;
65764+ path.mnt = (struct vfsmount *)vfsmnt;
65765+
65766+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
65767+ get_fs_root(reaper->fs, &root);
65768+
65769+ read_seqlock_excl(&mount_lock);
65770+ write_seqlock(&rename_lock);
65771+ res = gen_full_path(&path, &root, buf, buflen);
65772+ write_sequnlock(&rename_lock);
65773+ read_sequnlock_excl(&mount_lock);
65774+
65775+ path_put(&root);
65776+ return res;
65777+}
65778+
65779+char *
65780+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
65781+{
65782+ char *ret;
65783+ read_seqlock_excl(&mount_lock);
65784+ write_seqlock(&rename_lock);
65785+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
65786+ PAGE_SIZE);
65787+ write_sequnlock(&rename_lock);
65788+ read_sequnlock_excl(&mount_lock);
65789+ return ret;
65790+}
65791+
65792+static char *
65793+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
65794+{
65795+ char *ret;
65796+ char *buf;
65797+ int buflen;
65798+
65799+ read_seqlock_excl(&mount_lock);
65800+ write_seqlock(&rename_lock);
65801+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
65802+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
65803+ buflen = (int)(ret - buf);
65804+ if (buflen >= 5)
65805+ prepend(&ret, &buflen, "/proc", 5);
65806+ else
65807+ ret = strcpy(buf, "<path too long>");
65808+ write_sequnlock(&rename_lock);
65809+ read_sequnlock_excl(&mount_lock);
65810+ return ret;
65811+}
65812+
65813+char *
65814+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
65815+{
65816+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
65817+ PAGE_SIZE);
65818+}
65819+
65820+char *
65821+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
65822+{
65823+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
65824+ PAGE_SIZE);
65825+}
65826+
65827+char *
65828+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
65829+{
65830+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
65831+ PAGE_SIZE);
65832+}
65833+
65834+char *
65835+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
65836+{
65837+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
65838+ PAGE_SIZE);
65839+}
65840+
65841+char *
65842+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
65843+{
65844+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
65845+ PAGE_SIZE);
65846+}
65847+
65848+__inline__ __u32
65849+to_gr_audit(const __u32 reqmode)
65850+{
65851+ /* masks off auditable permission flags, then shifts them to create
65852+ auditing flags, and adds the special case of append auditing if
65853+ we're requesting write */
65854+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
65855+}
65856+
65857+struct acl_role_label *
65858+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
65859+ const gid_t gid)
65860+{
65861+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
65862+ struct acl_role_label *match;
65863+ struct role_allowed_ip *ipp;
65864+ unsigned int x;
65865+ u32 curr_ip = task->signal->saved_ip;
65866+
65867+ match = state->acl_role_set.r_hash[index];
65868+
65869+ while (match) {
65870+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
65871+ for (x = 0; x < match->domain_child_num; x++) {
65872+ if (match->domain_children[x] == uid)
65873+ goto found;
65874+ }
65875+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
65876+ break;
65877+ match = match->next;
65878+ }
65879+found:
65880+ if (match == NULL) {
65881+ try_group:
65882+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
65883+ match = state->acl_role_set.r_hash[index];
65884+
65885+ while (match) {
65886+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
65887+ for (x = 0; x < match->domain_child_num; x++) {
65888+ if (match->domain_children[x] == gid)
65889+ goto found2;
65890+ }
65891+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
65892+ break;
65893+ match = match->next;
65894+ }
65895+found2:
65896+ if (match == NULL)
65897+ match = state->default_role;
65898+ if (match->allowed_ips == NULL)
65899+ return match;
65900+ else {
65901+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
65902+ if (likely
65903+ ((ntohl(curr_ip) & ipp->netmask) ==
65904+ (ntohl(ipp->addr) & ipp->netmask)))
65905+ return match;
65906+ }
65907+ match = state->default_role;
65908+ }
65909+ } else if (match->allowed_ips == NULL) {
65910+ return match;
65911+ } else {
65912+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
65913+ if (likely
65914+ ((ntohl(curr_ip) & ipp->netmask) ==
65915+ (ntohl(ipp->addr) & ipp->netmask)))
65916+ return match;
65917+ }
65918+ goto try_group;
65919+ }
65920+
65921+ return match;
65922+}
65923+
65924+static struct acl_role_label *
65925+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
65926+ const gid_t gid)
65927+{
65928+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
65929+}
65930+
65931+struct acl_subject_label *
65932+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
65933+ const struct acl_role_label *role)
65934+{
65935+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
65936+ struct acl_subject_label *match;
65937+
65938+ match = role->subj_hash[index];
65939+
65940+ while (match && (match->inode != ino || match->device != dev ||
65941+ (match->mode & GR_DELETED))) {
65942+ match = match->next;
65943+ }
65944+
65945+ if (match && !(match->mode & GR_DELETED))
65946+ return match;
65947+ else
65948+ return NULL;
65949+}
65950+
65951+struct acl_subject_label *
65952+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
65953+ const struct acl_role_label *role)
65954+{
65955+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
65956+ struct acl_subject_label *match;
65957+
65958+ match = role->subj_hash[index];
65959+
65960+ while (match && (match->inode != ino || match->device != dev ||
65961+ !(match->mode & GR_DELETED))) {
65962+ match = match->next;
65963+ }
65964+
65965+ if (match && (match->mode & GR_DELETED))
65966+ return match;
65967+ else
65968+ return NULL;
65969+}
65970+
65971+static struct acl_object_label *
65972+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
65973+ const struct acl_subject_label *subj)
65974+{
65975+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
65976+ struct acl_object_label *match;
65977+
65978+ match = subj->obj_hash[index];
65979+
65980+ while (match && (match->inode != ino || match->device != dev ||
65981+ (match->mode & GR_DELETED))) {
65982+ match = match->next;
65983+ }
65984+
65985+ if (match && !(match->mode & GR_DELETED))
65986+ return match;
65987+ else
65988+ return NULL;
65989+}
65990+
65991+static struct acl_object_label *
65992+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
65993+ const struct acl_subject_label *subj)
65994+{
65995+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
65996+ struct acl_object_label *match;
65997+
65998+ match = subj->obj_hash[index];
65999+
66000+ while (match && (match->inode != ino || match->device != dev ||
66001+ !(match->mode & GR_DELETED))) {
66002+ match = match->next;
66003+ }
66004+
66005+ if (match && (match->mode & GR_DELETED))
66006+ return match;
66007+
66008+ match = subj->obj_hash[index];
66009+
66010+ while (match && (match->inode != ino || match->device != dev ||
66011+ (match->mode & GR_DELETED))) {
66012+ match = match->next;
66013+ }
66014+
66015+ if (match && !(match->mode & GR_DELETED))
66016+ return match;
66017+ else
66018+ return NULL;
66019+}
66020+
66021+struct name_entry *
66022+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
66023+{
66024+ unsigned int len = strlen(name);
66025+ unsigned int key = full_name_hash(name, len);
66026+ unsigned int index = key % state->name_set.n_size;
66027+ struct name_entry *match;
66028+
66029+ match = state->name_set.n_hash[index];
66030+
66031+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
66032+ match = match->next;
66033+
66034+ return match;
66035+}
66036+
66037+static struct name_entry *
66038+lookup_name_entry(const char *name)
66039+{
66040+ return __lookup_name_entry(&running_polstate, name);
66041+}
66042+
66043+static struct name_entry *
66044+lookup_name_entry_create(const char *name)
66045+{
66046+ unsigned int len = strlen(name);
66047+ unsigned int key = full_name_hash(name, len);
66048+ unsigned int index = key % running_polstate.name_set.n_size;
66049+ struct name_entry *match;
66050+
66051+ match = running_polstate.name_set.n_hash[index];
66052+
66053+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
66054+ !match->deleted))
66055+ match = match->next;
66056+
66057+ if (match && match->deleted)
66058+ return match;
66059+
66060+ match = running_polstate.name_set.n_hash[index];
66061+
66062+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
66063+ match->deleted))
66064+ match = match->next;
66065+
66066+ if (match && !match->deleted)
66067+ return match;
66068+ else
66069+ return NULL;
66070+}
66071+
66072+static struct inodev_entry *
66073+lookup_inodev_entry(const ino_t ino, const dev_t dev)
66074+{
66075+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
66076+ struct inodev_entry *match;
66077+
66078+ match = running_polstate.inodev_set.i_hash[index];
66079+
66080+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
66081+ match = match->next;
66082+
66083+ return match;
66084+}
66085+
66086+void
66087+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
66088+{
66089+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
66090+ state->inodev_set.i_size);
66091+ struct inodev_entry **curr;
66092+
66093+ entry->prev = NULL;
66094+
66095+ curr = &state->inodev_set.i_hash[index];
66096+ if (*curr != NULL)
66097+ (*curr)->prev = entry;
66098+
66099+ entry->next = *curr;
66100+ *curr = entry;
66101+
66102+ return;
66103+}
66104+
66105+static void
66106+insert_inodev_entry(struct inodev_entry *entry)
66107+{
66108+ __insert_inodev_entry(&running_polstate, entry);
66109+}
66110+
66111+void
66112+insert_acl_obj_label(struct acl_object_label *obj,
66113+ struct acl_subject_label *subj)
66114+{
66115+ unsigned int index =
66116+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
66117+ struct acl_object_label **curr;
66118+
66119+ obj->prev = NULL;
66120+
66121+ curr = &subj->obj_hash[index];
66122+ if (*curr != NULL)
66123+ (*curr)->prev = obj;
66124+
66125+ obj->next = *curr;
66126+ *curr = obj;
66127+
66128+ return;
66129+}
66130+
66131+void
66132+insert_acl_subj_label(struct acl_subject_label *obj,
66133+ struct acl_role_label *role)
66134+{
66135+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
66136+ struct acl_subject_label **curr;
66137+
66138+ obj->prev = NULL;
66139+
66140+ curr = &role->subj_hash[index];
66141+ if (*curr != NULL)
66142+ (*curr)->prev = obj;
66143+
66144+ obj->next = *curr;
66145+ *curr = obj;
66146+
66147+ return;
66148+}
66149+
66150+/* derived from glibc fnmatch() 0: match, 1: no match*/
66151+
66152+static int
66153+glob_match(const char *p, const char *n)
66154+{
66155+ char c;
66156+
66157+ while ((c = *p++) != '\0') {
66158+ switch (c) {
66159+ case '?':
66160+ if (*n == '\0')
66161+ return 1;
66162+ else if (*n == '/')
66163+ return 1;
66164+ break;
66165+ case '\\':
66166+ if (*n != c)
66167+ return 1;
66168+ break;
66169+ case '*':
66170+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
66171+ if (*n == '/')
66172+ return 1;
66173+ else if (c == '?') {
66174+ if (*n == '\0')
66175+ return 1;
66176+ else
66177+ ++n;
66178+ }
66179+ }
66180+ if (c == '\0') {
66181+ return 0;
66182+ } else {
66183+ const char *endp;
66184+
66185+ if ((endp = strchr(n, '/')) == NULL)
66186+ endp = n + strlen(n);
66187+
66188+ if (c == '[') {
66189+ for (--p; n < endp; ++n)
66190+ if (!glob_match(p, n))
66191+ return 0;
66192+ } else if (c == '/') {
66193+ while (*n != '\0' && *n != '/')
66194+ ++n;
66195+ if (*n == '/' && !glob_match(p, n + 1))
66196+ return 0;
66197+ } else {
66198+ for (--p; n < endp; ++n)
66199+ if (*n == c && !glob_match(p, n))
66200+ return 0;
66201+ }
66202+
66203+ return 1;
66204+ }
66205+ case '[':
66206+ {
66207+ int not;
66208+ char cold;
66209+
66210+ if (*n == '\0' || *n == '/')
66211+ return 1;
66212+
66213+ not = (*p == '!' || *p == '^');
66214+ if (not)
66215+ ++p;
66216+
66217+ c = *p++;
66218+ for (;;) {
66219+ unsigned char fn = (unsigned char)*n;
66220+
66221+ if (c == '\0')
66222+ return 1;
66223+ else {
66224+ if (c == fn)
66225+ goto matched;
66226+ cold = c;
66227+ c = *p++;
66228+
66229+ if (c == '-' && *p != ']') {
66230+ unsigned char cend = *p++;
66231+
66232+ if (cend == '\0')
66233+ return 1;
66234+
66235+ if (cold <= fn && fn <= cend)
66236+ goto matched;
66237+
66238+ c = *p++;
66239+ }
66240+ }
66241+
66242+ if (c == ']')
66243+ break;
66244+ }
66245+ if (!not)
66246+ return 1;
66247+ break;
66248+ matched:
66249+ while (c != ']') {
66250+ if (c == '\0')
66251+ return 1;
66252+
66253+ c = *p++;
66254+ }
66255+ if (not)
66256+ return 1;
66257+ }
66258+ break;
66259+ default:
66260+ if (c != *n)
66261+ return 1;
66262+ }
66263+
66264+ ++n;
66265+ }
66266+
66267+ if (*n == '\0')
66268+ return 0;
66269+
66270+ if (*n == '/')
66271+ return 0;
66272+
66273+ return 1;
66274+}
66275+
66276+static struct acl_object_label *
66277+chk_glob_label(struct acl_object_label *globbed,
66278+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
66279+{
66280+ struct acl_object_label *tmp;
66281+
66282+ if (*path == NULL)
66283+ *path = gr_to_filename_nolock(dentry, mnt);
66284+
66285+ tmp = globbed;
66286+
66287+ while (tmp) {
66288+ if (!glob_match(tmp->filename, *path))
66289+ return tmp;
66290+ tmp = tmp->next;
66291+ }
66292+
66293+ return NULL;
66294+}
66295+
66296+static struct acl_object_label *
66297+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
66298+ const ino_t curr_ino, const dev_t curr_dev,
66299+ const struct acl_subject_label *subj, char **path, const int checkglob)
66300+{
66301+ struct acl_subject_label *tmpsubj;
66302+ struct acl_object_label *retval;
66303+ struct acl_object_label *retval2;
66304+
66305+ tmpsubj = (struct acl_subject_label *) subj;
66306+ read_lock(&gr_inode_lock);
66307+ do {
66308+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
66309+ if (retval) {
66310+ if (checkglob && retval->globbed) {
66311+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
66312+ if (retval2)
66313+ retval = retval2;
66314+ }
66315+ break;
66316+ }
66317+ } while ((tmpsubj = tmpsubj->parent_subject));
66318+ read_unlock(&gr_inode_lock);
66319+
66320+ return retval;
66321+}
66322+
66323+static __inline__ struct acl_object_label *
66324+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
66325+ struct dentry *curr_dentry,
66326+ const struct acl_subject_label *subj, char **path, const int checkglob)
66327+{
66328+ int newglob = checkglob;
66329+ ino_t inode;
66330+ dev_t device;
66331+
66332+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
66333+ as we don't want a / * rule to match instead of the / object
66334+ don't do this for create lookups that call this function though, since they're looking up
66335+ on the parent and thus need globbing checks on all paths
66336+ */
66337+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
66338+ newglob = GR_NO_GLOB;
66339+
66340+ spin_lock(&curr_dentry->d_lock);
66341+ inode = curr_dentry->d_inode->i_ino;
66342+ device = __get_dev(curr_dentry);
66343+ spin_unlock(&curr_dentry->d_lock);
66344+
66345+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
66346+}
66347+
66348+#ifdef CONFIG_HUGETLBFS
66349+static inline bool
66350+is_hugetlbfs_mnt(const struct vfsmount *mnt)
66351+{
66352+ int i;
66353+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
66354+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
66355+ return true;
66356+ }
66357+
66358+ return false;
66359+}
66360+#endif
66361+
66362+static struct acl_object_label *
66363+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66364+ const struct acl_subject_label *subj, char *path, const int checkglob)
66365+{
66366+ struct dentry *dentry = (struct dentry *) l_dentry;
66367+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
66368+ struct mount *real_mnt = real_mount(mnt);
66369+ struct acl_object_label *retval;
66370+ struct dentry *parent;
66371+
66372+ read_seqlock_excl(&mount_lock);
66373+ write_seqlock(&rename_lock);
66374+
66375+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
66376+#ifdef CONFIG_NET
66377+ mnt == sock_mnt ||
66378+#endif
66379+#ifdef CONFIG_HUGETLBFS
66380+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
66381+#endif
66382+ /* ignore Eric Biederman */
66383+ IS_PRIVATE(l_dentry->d_inode))) {
66384+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
66385+ goto out;
66386+ }
66387+
66388+ for (;;) {
66389+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
66390+ break;
66391+
66392+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
66393+ if (!mnt_has_parent(real_mnt))
66394+ break;
66395+
66396+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
66397+ if (retval != NULL)
66398+ goto out;
66399+
66400+ dentry = real_mnt->mnt_mountpoint;
66401+ real_mnt = real_mnt->mnt_parent;
66402+ mnt = &real_mnt->mnt;
66403+ continue;
66404+ }
66405+
66406+ parent = dentry->d_parent;
66407+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
66408+ if (retval != NULL)
66409+ goto out;
66410+
66411+ dentry = parent;
66412+ }
66413+
66414+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
66415+
66416+ /* gr_real_root is pinned so we don't have to hold a reference */
66417+ if (retval == NULL)
66418+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
66419+out:
66420+ write_sequnlock(&rename_lock);
66421+ read_sequnlock_excl(&mount_lock);
66422+
66423+ BUG_ON(retval == NULL);
66424+
66425+ return retval;
66426+}
66427+
66428+static __inline__ struct acl_object_label *
66429+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66430+ const struct acl_subject_label *subj)
66431+{
66432+ char *path = NULL;
66433+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
66434+}
66435+
66436+static __inline__ struct acl_object_label *
66437+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66438+ const struct acl_subject_label *subj)
66439+{
66440+ char *path = NULL;
66441+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
66442+}
66443+
66444+static __inline__ struct acl_object_label *
66445+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66446+ const struct acl_subject_label *subj, char *path)
66447+{
66448+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
66449+}
66450+
66451+struct acl_subject_label *
66452+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
66453+ const struct acl_role_label *role)
66454+{
66455+ struct dentry *dentry = (struct dentry *) l_dentry;
66456+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
66457+ struct mount *real_mnt = real_mount(mnt);
66458+ struct acl_subject_label *retval;
66459+ struct dentry *parent;
66460+
66461+ read_seqlock_excl(&mount_lock);
66462+ write_seqlock(&rename_lock);
66463+
66464+ for (;;) {
66465+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
66466+ break;
66467+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
66468+ if (!mnt_has_parent(real_mnt))
66469+ break;
66470+
66471+ spin_lock(&dentry->d_lock);
66472+ read_lock(&gr_inode_lock);
66473+ retval =
66474+ lookup_acl_subj_label(dentry->d_inode->i_ino,
66475+ __get_dev(dentry), role);
66476+ read_unlock(&gr_inode_lock);
66477+ spin_unlock(&dentry->d_lock);
66478+ if (retval != NULL)
66479+ goto out;
66480+
66481+ dentry = real_mnt->mnt_mountpoint;
66482+ real_mnt = real_mnt->mnt_parent;
66483+ mnt = &real_mnt->mnt;
66484+ continue;
66485+ }
66486+
66487+ spin_lock(&dentry->d_lock);
66488+ read_lock(&gr_inode_lock);
66489+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
66490+ __get_dev(dentry), role);
66491+ read_unlock(&gr_inode_lock);
66492+ parent = dentry->d_parent;
66493+ spin_unlock(&dentry->d_lock);
66494+
66495+ if (retval != NULL)
66496+ goto out;
66497+
66498+ dentry = parent;
66499+ }
66500+
66501+ spin_lock(&dentry->d_lock);
66502+ read_lock(&gr_inode_lock);
66503+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
66504+ __get_dev(dentry), role);
66505+ read_unlock(&gr_inode_lock);
66506+ spin_unlock(&dentry->d_lock);
66507+
66508+ if (unlikely(retval == NULL)) {
66509+ /* gr_real_root is pinned, we don't need to hold a reference */
66510+ read_lock(&gr_inode_lock);
66511+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
66512+ __get_dev(gr_real_root.dentry), role);
66513+ read_unlock(&gr_inode_lock);
66514+ }
66515+out:
66516+ write_sequnlock(&rename_lock);
66517+ read_sequnlock_excl(&mount_lock);
66518+
66519+ BUG_ON(retval == NULL);
66520+
66521+ return retval;
66522+}
66523+
66524+void
66525+assign_special_role(const char *rolename)
66526+{
66527+ struct acl_object_label *obj;
66528+ struct acl_role_label *r;
66529+ struct acl_role_label *assigned = NULL;
66530+ struct task_struct *tsk;
66531+ struct file *filp;
66532+
66533+ FOR_EACH_ROLE_START(r)
66534+ if (!strcmp(rolename, r->rolename) &&
66535+ (r->roletype & GR_ROLE_SPECIAL)) {
66536+ assigned = r;
66537+ break;
66538+ }
66539+ FOR_EACH_ROLE_END(r)
66540+
66541+ if (!assigned)
66542+ return;
66543+
66544+ read_lock(&tasklist_lock);
66545+ read_lock(&grsec_exec_file_lock);
66546+
66547+ tsk = current->real_parent;
66548+ if (tsk == NULL)
66549+ goto out_unlock;
66550+
66551+ filp = tsk->exec_file;
66552+ if (filp == NULL)
66553+ goto out_unlock;
66554+
66555+ tsk->is_writable = 0;
66556+ tsk->inherited = 0;
66557+
66558+ tsk->acl_sp_role = 1;
66559+ tsk->acl_role_id = ++acl_sp_role_value;
66560+ tsk->role = assigned;
66561+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
66562+
66563+ /* ignore additional mmap checks for processes that are writable
66564+ by the default ACL */
66565+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
66566+ if (unlikely(obj->mode & GR_WRITE))
66567+ tsk->is_writable = 1;
66568+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
66569+ if (unlikely(obj->mode & GR_WRITE))
66570+ tsk->is_writable = 1;
66571+
66572+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
66573+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
66574+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
66575+#endif
66576+
66577+out_unlock:
66578+ read_unlock(&grsec_exec_file_lock);
66579+ read_unlock(&tasklist_lock);
66580+ return;
66581+}
66582+
66583+
66584+static void
66585+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
66586+{
66587+ struct task_struct *task = current;
66588+ const struct cred *cred = current_cred();
66589+
66590+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
66591+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
66592+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
66593+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
66594+
66595+ return;
66596+}
66597+
66598+static void
66599+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
66600+{
66601+ struct task_struct *task = current;
66602+ const struct cred *cred = current_cred();
66603+
66604+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
66605+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
66606+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
66607+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
66608+
66609+ return;
66610+}
66611+
66612+static void
66613+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
66614+{
66615+ struct task_struct *task = current;
66616+ const struct cred *cred = current_cred();
66617+
66618+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
66619+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
66620+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
66621+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
66622+
66623+ return;
66624+}
66625+
66626+static void
66627+gr_set_proc_res(struct task_struct *task)
66628+{
66629+ struct acl_subject_label *proc;
66630+ unsigned short i;
66631+
66632+ proc = task->acl;
66633+
66634+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
66635+ return;
66636+
66637+ for (i = 0; i < RLIM_NLIMITS; i++) {
66638+ if (!(proc->resmask & (1U << i)))
66639+ continue;
66640+
66641+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
66642+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
66643+
66644+ if (i == RLIMIT_CPU)
66645+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
66646+ }
66647+
66648+ return;
66649+}
66650+
66651+/* both of the below must be called with
66652+ rcu_read_lock();
66653+ read_lock(&tasklist_lock);
66654+ read_lock(&grsec_exec_file_lock);
66655+*/
66656+
66657+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
66658+{
66659+ char *tmpname;
66660+ struct acl_subject_label *tmpsubj;
66661+ struct file *filp;
66662+ struct name_entry *nmatch;
66663+
66664+ filp = task->exec_file;
66665+ if (filp == NULL)
66666+ return NULL;
66667+
66668+ /* the following is to apply the correct subject
66669+ on binaries running when the RBAC system
66670+ is enabled, when the binaries have been
66671+ replaced or deleted since their execution
66672+ -----
66673+ when the RBAC system starts, the inode/dev
66674+ from exec_file will be one the RBAC system
66675+ is unaware of. It only knows the inode/dev
66676+ of the present file on disk, or the absence
66677+ of it.
66678+ */
66679+
66680+ if (filename)
66681+ nmatch = __lookup_name_entry(state, filename);
66682+ else {
66683+ preempt_disable();
66684+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
66685+
66686+ nmatch = __lookup_name_entry(state, tmpname);
66687+ preempt_enable();
66688+ }
66689+ tmpsubj = NULL;
66690+ if (nmatch) {
66691+ if (nmatch->deleted)
66692+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
66693+ else
66694+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
66695+ }
66696+ /* this also works for the reload case -- if we don't match a potentially inherited subject
66697+ then we fall back to a normal lookup based on the binary's ino/dev
66698+ */
66699+ if (tmpsubj == NULL)
66700+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
66701+
66702+ return tmpsubj;
66703+}
66704+
66705+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
66706+{
66707+ return __gr_get_subject_for_task(&running_polstate, task, filename);
66708+}
66709+
66710+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
66711+{
66712+ struct acl_object_label *obj;
66713+ struct file *filp;
66714+
66715+ filp = task->exec_file;
66716+
66717+ task->acl = subj;
66718+ task->is_writable = 0;
66719+ /* ignore additional mmap checks for processes that are writable
66720+ by the default ACL */
66721+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
66722+ if (unlikely(obj->mode & GR_WRITE))
66723+ task->is_writable = 1;
66724+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
66725+ if (unlikely(obj->mode & GR_WRITE))
66726+ task->is_writable = 1;
66727+
66728+ gr_set_proc_res(task);
66729+
66730+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
66731+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
66732+#endif
66733+}
66734+
66735+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
66736+{
66737+ __gr_apply_subject_to_task(&running_polstate, task, subj);
66738+}
66739+
66740+__u32
66741+gr_search_file(const struct dentry * dentry, const __u32 mode,
66742+ const struct vfsmount * mnt)
66743+{
66744+ __u32 retval = mode;
66745+ struct acl_subject_label *curracl;
66746+ struct acl_object_label *currobj;
66747+
66748+ if (unlikely(!(gr_status & GR_READY)))
66749+ return (mode & ~GR_AUDITS);
66750+
66751+ curracl = current->acl;
66752+
66753+ currobj = chk_obj_label(dentry, mnt, curracl);
66754+ retval = currobj->mode & mode;
66755+
66756+ /* if we're opening a specified transfer file for writing
66757+ (e.g. /dev/initctl), then transfer our role to init
66758+ */
66759+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
66760+ current->role->roletype & GR_ROLE_PERSIST)) {
66761+ struct task_struct *task = init_pid_ns.child_reaper;
66762+
66763+ if (task->role != current->role) {
66764+ struct acl_subject_label *subj;
66765+
66766+ task->acl_sp_role = 0;
66767+ task->acl_role_id = current->acl_role_id;
66768+ task->role = current->role;
66769+ rcu_read_lock();
66770+ read_lock(&grsec_exec_file_lock);
66771+ subj = gr_get_subject_for_task(task, NULL);
66772+ gr_apply_subject_to_task(task, subj);
66773+ read_unlock(&grsec_exec_file_lock);
66774+ rcu_read_unlock();
66775+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
66776+ }
66777+ }
66778+
66779+ if (unlikely
66780+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
66781+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
66782+ __u32 new_mode = mode;
66783+
66784+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
66785+
66786+ retval = new_mode;
66787+
66788+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
66789+ new_mode |= GR_INHERIT;
66790+
66791+ if (!(mode & GR_NOLEARN))
66792+ gr_log_learn(dentry, mnt, new_mode);
66793+ }
66794+
66795+ return retval;
66796+}
66797+
66798+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
66799+ const struct dentry *parent,
66800+ const struct vfsmount *mnt)
66801+{
66802+ struct name_entry *match;
66803+ struct acl_object_label *matchpo;
66804+ struct acl_subject_label *curracl;
66805+ char *path;
66806+
66807+ if (unlikely(!(gr_status & GR_READY)))
66808+ return NULL;
66809+
66810+ preempt_disable();
66811+ path = gr_to_filename_rbac(new_dentry, mnt);
66812+ match = lookup_name_entry_create(path);
66813+
66814+ curracl = current->acl;
66815+
66816+ if (match) {
66817+ read_lock(&gr_inode_lock);
66818+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
66819+ read_unlock(&gr_inode_lock);
66820+
66821+ if (matchpo) {
66822+ preempt_enable();
66823+ return matchpo;
66824+ }
66825+ }
66826+
66827+ // lookup parent
66828+
66829+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
66830+
66831+ preempt_enable();
66832+ return matchpo;
66833+}
66834+
66835+__u32
66836+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
66837+ const struct vfsmount * mnt, const __u32 mode)
66838+{
66839+ struct acl_object_label *matchpo;
66840+ __u32 retval;
66841+
66842+ if (unlikely(!(gr_status & GR_READY)))
66843+ return (mode & ~GR_AUDITS);
66844+
66845+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
66846+
66847+ retval = matchpo->mode & mode;
66848+
66849+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
66850+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
66851+ __u32 new_mode = mode;
66852+
66853+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
66854+
66855+ gr_log_learn(new_dentry, mnt, new_mode);
66856+ return new_mode;
66857+ }
66858+
66859+ return retval;
66860+}
66861+
66862+__u32
66863+gr_check_link(const struct dentry * new_dentry,
66864+ const struct dentry * parent_dentry,
66865+ const struct vfsmount * parent_mnt,
66866+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
66867+{
66868+ struct acl_object_label *obj;
66869+ __u32 oldmode, newmode;
66870+ __u32 needmode;
66871+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
66872+ GR_DELETE | GR_INHERIT;
66873+
66874+ if (unlikely(!(gr_status & GR_READY)))
66875+ return (GR_CREATE | GR_LINK);
66876+
66877+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
66878+ oldmode = obj->mode;
66879+
66880+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
66881+ newmode = obj->mode;
66882+
66883+ needmode = newmode & checkmodes;
66884+
66885+ // old name for hardlink must have at least the permissions of the new name
66886+ if ((oldmode & needmode) != needmode)
66887+ goto bad;
66888+
66889+ // if old name had restrictions/auditing, make sure the new name does as well
66890+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
66891+
66892+ // don't allow hardlinking of suid/sgid/fcapped files without permission
66893+ if (is_privileged_binary(old_dentry))
66894+ needmode |= GR_SETID;
66895+
66896+ if ((newmode & needmode) != needmode)
66897+ goto bad;
66898+
66899+ // enforce minimum permissions
66900+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
66901+ return newmode;
66902+bad:
66903+ needmode = oldmode;
66904+ if (is_privileged_binary(old_dentry))
66905+ needmode |= GR_SETID;
66906+
66907+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
66908+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
66909+ return (GR_CREATE | GR_LINK);
66910+ } else if (newmode & GR_SUPPRESS)
66911+ return GR_SUPPRESS;
66912+ else
66913+ return 0;
66914+}
66915+
66916+int
66917+gr_check_hidden_task(const struct task_struct *task)
66918+{
66919+ if (unlikely(!(gr_status & GR_READY)))
66920+ return 0;
66921+
66922+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
66923+ return 1;
66924+
66925+ return 0;
66926+}
66927+
66928+int
66929+gr_check_protected_task(const struct task_struct *task)
66930+{
66931+ if (unlikely(!(gr_status & GR_READY) || !task))
66932+ return 0;
66933+
66934+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
66935+ task->acl != current->acl)
66936+ return 1;
66937+
66938+ return 0;
66939+}
66940+
66941+int
66942+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
66943+{
66944+ struct task_struct *p;
66945+ int ret = 0;
66946+
66947+ if (unlikely(!(gr_status & GR_READY) || !pid))
66948+ return ret;
66949+
66950+ read_lock(&tasklist_lock);
66951+ do_each_pid_task(pid, type, p) {
66952+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
66953+ p->acl != current->acl) {
66954+ ret = 1;
66955+ goto out;
66956+ }
66957+ } while_each_pid_task(pid, type, p);
66958+out:
66959+ read_unlock(&tasklist_lock);
66960+
66961+ return ret;
66962+}
66963+
66964+void
66965+gr_copy_label(struct task_struct *tsk)
66966+{
66967+ struct task_struct *p = current;
66968+
66969+ tsk->inherited = p->inherited;
66970+ tsk->acl_sp_role = 0;
66971+ tsk->acl_role_id = p->acl_role_id;
66972+ tsk->acl = p->acl;
66973+ tsk->role = p->role;
66974+ tsk->signal->used_accept = 0;
66975+ tsk->signal->curr_ip = p->signal->curr_ip;
66976+ tsk->signal->saved_ip = p->signal->saved_ip;
66977+ if (p->exec_file)
66978+ get_file(p->exec_file);
66979+ tsk->exec_file = p->exec_file;
66980+ tsk->is_writable = p->is_writable;
66981+ if (unlikely(p->signal->used_accept)) {
66982+ p->signal->curr_ip = 0;
66983+ p->signal->saved_ip = 0;
66984+ }
66985+
66986+ return;
66987+}
66988+
66989+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
66990+
66991+int
66992+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
66993+{
66994+ unsigned int i;
66995+ __u16 num;
66996+ uid_t *uidlist;
66997+ uid_t curuid;
66998+ int realok = 0;
66999+ int effectiveok = 0;
67000+ int fsok = 0;
67001+ uid_t globalreal, globaleffective, globalfs;
67002+
67003+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
67004+ struct user_struct *user;
67005+
67006+ if (!uid_valid(real))
67007+ goto skipit;
67008+
67009+ /* find user based on global namespace */
67010+
67011+ globalreal = GR_GLOBAL_UID(real);
67012+
67013+ user = find_user(make_kuid(&init_user_ns, globalreal));
67014+ if (user == NULL)
67015+ goto skipit;
67016+
67017+ if (gr_process_kernel_setuid_ban(user)) {
67018+ /* for find_user */
67019+ free_uid(user);
67020+ return 1;
67021+ }
67022+
67023+ /* for find_user */
67024+ free_uid(user);
67025+
67026+skipit:
67027+#endif
67028+
67029+ if (unlikely(!(gr_status & GR_READY)))
67030+ return 0;
67031+
67032+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
67033+ gr_log_learn_uid_change(real, effective, fs);
67034+
67035+ num = current->acl->user_trans_num;
67036+ uidlist = current->acl->user_transitions;
67037+
67038+ if (uidlist == NULL)
67039+ return 0;
67040+
67041+ if (!uid_valid(real)) {
67042+ realok = 1;
67043+ globalreal = (uid_t)-1;
67044+ } else {
67045+ globalreal = GR_GLOBAL_UID(real);
67046+ }
67047+ if (!uid_valid(effective)) {
67048+ effectiveok = 1;
67049+ globaleffective = (uid_t)-1;
67050+ } else {
67051+ globaleffective = GR_GLOBAL_UID(effective);
67052+ }
67053+ if (!uid_valid(fs)) {
67054+ fsok = 1;
67055+ globalfs = (uid_t)-1;
67056+ } else {
67057+ globalfs = GR_GLOBAL_UID(fs);
67058+ }
67059+
67060+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
67061+ for (i = 0; i < num; i++) {
67062+ curuid = uidlist[i];
67063+ if (globalreal == curuid)
67064+ realok = 1;
67065+ if (globaleffective == curuid)
67066+ effectiveok = 1;
67067+ if (globalfs == curuid)
67068+ fsok = 1;
67069+ }
67070+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
67071+ for (i = 0; i < num; i++) {
67072+ curuid = uidlist[i];
67073+ if (globalreal == curuid)
67074+ break;
67075+ if (globaleffective == curuid)
67076+ break;
67077+ if (globalfs == curuid)
67078+ break;
67079+ }
67080+ /* not in deny list */
67081+ if (i == num) {
67082+ realok = 1;
67083+ effectiveok = 1;
67084+ fsok = 1;
67085+ }
67086+ }
67087+
67088+ if (realok && effectiveok && fsok)
67089+ return 0;
67090+ else {
67091+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
67092+ return 1;
67093+ }
67094+}
67095+
67096+int
67097+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
67098+{
67099+ unsigned int i;
67100+ __u16 num;
67101+ gid_t *gidlist;
67102+ gid_t curgid;
67103+ int realok = 0;
67104+ int effectiveok = 0;
67105+ int fsok = 0;
67106+ gid_t globalreal, globaleffective, globalfs;
67107+
67108+ if (unlikely(!(gr_status & GR_READY)))
67109+ return 0;
67110+
67111+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
67112+ gr_log_learn_gid_change(real, effective, fs);
67113+
67114+ num = current->acl->group_trans_num;
67115+ gidlist = current->acl->group_transitions;
67116+
67117+ if (gidlist == NULL)
67118+ return 0;
67119+
67120+ if (!gid_valid(real)) {
67121+ realok = 1;
67122+ globalreal = (gid_t)-1;
67123+ } else {
67124+ globalreal = GR_GLOBAL_GID(real);
67125+ }
67126+ if (!gid_valid(effective)) {
67127+ effectiveok = 1;
67128+ globaleffective = (gid_t)-1;
67129+ } else {
67130+ globaleffective = GR_GLOBAL_GID(effective);
67131+ }
67132+ if (!gid_valid(fs)) {
67133+ fsok = 1;
67134+ globalfs = (gid_t)-1;
67135+ } else {
67136+ globalfs = GR_GLOBAL_GID(fs);
67137+ }
67138+
67139+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
67140+ for (i = 0; i < num; i++) {
67141+ curgid = gidlist[i];
67142+ if (globalreal == curgid)
67143+ realok = 1;
67144+ if (globaleffective == curgid)
67145+ effectiveok = 1;
67146+ if (globalfs == curgid)
67147+ fsok = 1;
67148+ }
67149+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
67150+ for (i = 0; i < num; i++) {
67151+ curgid = gidlist[i];
67152+ if (globalreal == curgid)
67153+ break;
67154+ if (globaleffective == curgid)
67155+ break;
67156+ if (globalfs == curgid)
67157+ break;
67158+ }
67159+ /* not in deny list */
67160+ if (i == num) {
67161+ realok = 1;
67162+ effectiveok = 1;
67163+ fsok = 1;
67164+ }
67165+ }
67166+
67167+ if (realok && effectiveok && fsok)
67168+ return 0;
67169+ else {
67170+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
67171+ return 1;
67172+ }
67173+}
67174+
67175+extern int gr_acl_is_capable(const int cap);
67176+
67177+void
67178+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
67179+{
67180+ struct acl_role_label *role = task->role;
67181+ struct acl_subject_label *subj = NULL;
67182+ struct acl_object_label *obj;
67183+ struct file *filp;
67184+ uid_t uid;
67185+ gid_t gid;
67186+
67187+ if (unlikely(!(gr_status & GR_READY)))
67188+ return;
67189+
67190+ uid = GR_GLOBAL_UID(kuid);
67191+ gid = GR_GLOBAL_GID(kgid);
67192+
67193+ filp = task->exec_file;
67194+
67195+ /* kernel process, we'll give them the kernel role */
67196+ if (unlikely(!filp)) {
67197+ task->role = running_polstate.kernel_role;
67198+ task->acl = running_polstate.kernel_role->root_label;
67199+ return;
67200+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
67201+ /* save the current ip at time of role lookup so that the proper
67202+ IP will be learned for role_allowed_ip */
67203+ task->signal->saved_ip = task->signal->curr_ip;
67204+ role = lookup_acl_role_label(task, uid, gid);
67205+ }
67206+
67207+ /* don't change the role if we're not a privileged process */
67208+ if (role && task->role != role &&
67209+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
67210+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
67211+ return;
67212+
67213+ /* perform subject lookup in possibly new role
67214+ we can use this result below in the case where role == task->role
67215+ */
67216+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
67217+
67218+ /* if we changed uid/gid, but result in the same role
67219+ and are using inheritance, don't lose the inherited subject
67220+ if current subject is other than what normal lookup
67221+ would result in, we arrived via inheritance, don't
67222+ lose subject
67223+ */
67224+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
67225+ (subj == task->acl)))
67226+ task->acl = subj;
67227+
67228+ /* leave task->inherited unaffected */
67229+
67230+ task->role = role;
67231+
67232+ task->is_writable = 0;
67233+
67234+ /* ignore additional mmap checks for processes that are writable
67235+ by the default ACL */
67236+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
67237+ if (unlikely(obj->mode & GR_WRITE))
67238+ task->is_writable = 1;
67239+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
67240+ if (unlikely(obj->mode & GR_WRITE))
67241+ task->is_writable = 1;
67242+
67243+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
67244+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
67245+#endif
67246+
67247+ gr_set_proc_res(task);
67248+
67249+ return;
67250+}
67251+
67252+int
67253+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
67254+ const int unsafe_flags)
67255+{
67256+ struct task_struct *task = current;
67257+ struct acl_subject_label *newacl;
67258+ struct acl_object_label *obj;
67259+ __u32 retmode;
67260+
67261+ if (unlikely(!(gr_status & GR_READY)))
67262+ return 0;
67263+
67264+ newacl = chk_subj_label(dentry, mnt, task->role);
67265+
67266+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
67267+ did an exec
67268+ */
67269+ rcu_read_lock();
67270+ read_lock(&tasklist_lock);
67271+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
67272+ (task->parent->acl->mode & GR_POVERRIDE))) {
67273+ read_unlock(&tasklist_lock);
67274+ rcu_read_unlock();
67275+ goto skip_check;
67276+ }
67277+ read_unlock(&tasklist_lock);
67278+ rcu_read_unlock();
67279+
67280+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
67281+ !(task->role->roletype & GR_ROLE_GOD) &&
67282+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
67283+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
67284+ if (unsafe_flags & LSM_UNSAFE_SHARE)
67285+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
67286+ else
67287+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
67288+ return -EACCES;
67289+ }
67290+
67291+skip_check:
67292+
67293+ obj = chk_obj_label(dentry, mnt, task->acl);
67294+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
67295+
67296+ if (!(task->acl->mode & GR_INHERITLEARN) &&
67297+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
67298+ if (obj->nested)
67299+ task->acl = obj->nested;
67300+ else
67301+ task->acl = newacl;
67302+ task->inherited = 0;
67303+ } else {
67304+ task->inherited = 1;
67305+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
67306+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
67307+ }
67308+
67309+ task->is_writable = 0;
67310+
67311+ /* ignore additional mmap checks for processes that are writable
67312+ by the default ACL */
67313+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
67314+ if (unlikely(obj->mode & GR_WRITE))
67315+ task->is_writable = 1;
67316+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
67317+ if (unlikely(obj->mode & GR_WRITE))
67318+ task->is_writable = 1;
67319+
67320+ gr_set_proc_res(task);
67321+
67322+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
67323+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
67324+#endif
67325+ return 0;
67326+}
67327+
67328+/* always called with valid inodev ptr */
67329+static void
67330+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
67331+{
67332+ struct acl_object_label *matchpo;
67333+ struct acl_subject_label *matchps;
67334+ struct acl_subject_label *subj;
67335+ struct acl_role_label *role;
67336+ unsigned int x;
67337+
67338+ FOR_EACH_ROLE_START(role)
67339+ FOR_EACH_SUBJECT_START(role, subj, x)
67340+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
67341+ matchpo->mode |= GR_DELETED;
67342+ FOR_EACH_SUBJECT_END(subj,x)
67343+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
67344+ /* nested subjects aren't in the role's subj_hash table */
67345+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
67346+ matchpo->mode |= GR_DELETED;
67347+ FOR_EACH_NESTED_SUBJECT_END(subj)
67348+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
67349+ matchps->mode |= GR_DELETED;
67350+ FOR_EACH_ROLE_END(role)
67351+
67352+ inodev->nentry->deleted = 1;
67353+
67354+ return;
67355+}
67356+
67357+void
67358+gr_handle_delete(const ino_t ino, const dev_t dev)
67359+{
67360+ struct inodev_entry *inodev;
67361+
67362+ if (unlikely(!(gr_status & GR_READY)))
67363+ return;
67364+
67365+ write_lock(&gr_inode_lock);
67366+ inodev = lookup_inodev_entry(ino, dev);
67367+ if (inodev != NULL)
67368+ do_handle_delete(inodev, ino, dev);
67369+ write_unlock(&gr_inode_lock);
67370+
67371+ return;
67372+}
67373+
67374+static void
67375+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
67376+ const ino_t newinode, const dev_t newdevice,
67377+ struct acl_subject_label *subj)
67378+{
67379+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
67380+ struct acl_object_label *match;
67381+
67382+ match = subj->obj_hash[index];
67383+
67384+ while (match && (match->inode != oldinode ||
67385+ match->device != olddevice ||
67386+ !(match->mode & GR_DELETED)))
67387+ match = match->next;
67388+
67389+ if (match && (match->inode == oldinode)
67390+ && (match->device == olddevice)
67391+ && (match->mode & GR_DELETED)) {
67392+ if (match->prev == NULL) {
67393+ subj->obj_hash[index] = match->next;
67394+ if (match->next != NULL)
67395+ match->next->prev = NULL;
67396+ } else {
67397+ match->prev->next = match->next;
67398+ if (match->next != NULL)
67399+ match->next->prev = match->prev;
67400+ }
67401+ match->prev = NULL;
67402+ match->next = NULL;
67403+ match->inode = newinode;
67404+ match->device = newdevice;
67405+ match->mode &= ~GR_DELETED;
67406+
67407+ insert_acl_obj_label(match, subj);
67408+ }
67409+
67410+ return;
67411+}
67412+
67413+static void
67414+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
67415+ const ino_t newinode, const dev_t newdevice,
67416+ struct acl_role_label *role)
67417+{
67418+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
67419+ struct acl_subject_label *match;
67420+
67421+ match = role->subj_hash[index];
67422+
67423+ while (match && (match->inode != oldinode ||
67424+ match->device != olddevice ||
67425+ !(match->mode & GR_DELETED)))
67426+ match = match->next;
67427+
67428+ if (match && (match->inode == oldinode)
67429+ && (match->device == olddevice)
67430+ && (match->mode & GR_DELETED)) {
67431+ if (match->prev == NULL) {
67432+ role->subj_hash[index] = match->next;
67433+ if (match->next != NULL)
67434+ match->next->prev = NULL;
67435+ } else {
67436+ match->prev->next = match->next;
67437+ if (match->next != NULL)
67438+ match->next->prev = match->prev;
67439+ }
67440+ match->prev = NULL;
67441+ match->next = NULL;
67442+ match->inode = newinode;
67443+ match->device = newdevice;
67444+ match->mode &= ~GR_DELETED;
67445+
67446+ insert_acl_subj_label(match, role);
67447+ }
67448+
67449+ return;
67450+}
67451+
67452+static void
67453+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
67454+ const ino_t newinode, const dev_t newdevice)
67455+{
67456+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
67457+ struct inodev_entry *match;
67458+
67459+ match = running_polstate.inodev_set.i_hash[index];
67460+
67461+ while (match && (match->nentry->inode != oldinode ||
67462+ match->nentry->device != olddevice || !match->nentry->deleted))
67463+ match = match->next;
67464+
67465+ if (match && (match->nentry->inode == oldinode)
67466+ && (match->nentry->device == olddevice) &&
67467+ match->nentry->deleted) {
67468+ if (match->prev == NULL) {
67469+ running_polstate.inodev_set.i_hash[index] = match->next;
67470+ if (match->next != NULL)
67471+ match->next->prev = NULL;
67472+ } else {
67473+ match->prev->next = match->next;
67474+ if (match->next != NULL)
67475+ match->next->prev = match->prev;
67476+ }
67477+ match->prev = NULL;
67478+ match->next = NULL;
67479+ match->nentry->inode = newinode;
67480+ match->nentry->device = newdevice;
67481+ match->nentry->deleted = 0;
67482+
67483+ insert_inodev_entry(match);
67484+ }
67485+
67486+ return;
67487+}
67488+
67489+static void
67490+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
67491+{
67492+ struct acl_subject_label *subj;
67493+ struct acl_role_label *role;
67494+ unsigned int x;
67495+
67496+ FOR_EACH_ROLE_START(role)
67497+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
67498+
67499+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
67500+ if ((subj->inode == ino) && (subj->device == dev)) {
67501+ subj->inode = ino;
67502+ subj->device = dev;
67503+ }
67504+ /* nested subjects aren't in the role's subj_hash table */
67505+ update_acl_obj_label(matchn->inode, matchn->device,
67506+ ino, dev, subj);
67507+ FOR_EACH_NESTED_SUBJECT_END(subj)
67508+ FOR_EACH_SUBJECT_START(role, subj, x)
67509+ update_acl_obj_label(matchn->inode, matchn->device,
67510+ ino, dev, subj);
67511+ FOR_EACH_SUBJECT_END(subj,x)
67512+ FOR_EACH_ROLE_END(role)
67513+
67514+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
67515+
67516+ return;
67517+}
67518+
67519+static void
67520+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
67521+ const struct vfsmount *mnt)
67522+{
67523+ ino_t ino = dentry->d_inode->i_ino;
67524+ dev_t dev = __get_dev(dentry);
67525+
67526+ __do_handle_create(matchn, ino, dev);
67527+
67528+ return;
67529+}
67530+
67531+void
67532+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
67533+{
67534+ struct name_entry *matchn;
67535+
67536+ if (unlikely(!(gr_status & GR_READY)))
67537+ return;
67538+
67539+ preempt_disable();
67540+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
67541+
67542+ if (unlikely((unsigned long)matchn)) {
67543+ write_lock(&gr_inode_lock);
67544+ do_handle_create(matchn, dentry, mnt);
67545+ write_unlock(&gr_inode_lock);
67546+ }
67547+ preempt_enable();
67548+
67549+ return;
67550+}
67551+
67552+void
67553+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
67554+{
67555+ struct name_entry *matchn;
67556+
67557+ if (unlikely(!(gr_status & GR_READY)))
67558+ return;
67559+
67560+ preempt_disable();
67561+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
67562+
67563+ if (unlikely((unsigned long)matchn)) {
67564+ write_lock(&gr_inode_lock);
67565+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
67566+ write_unlock(&gr_inode_lock);
67567+ }
67568+ preempt_enable();
67569+
67570+ return;
67571+}
67572+
67573+void
67574+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67575+ struct dentry *old_dentry,
67576+ struct dentry *new_dentry,
67577+ struct vfsmount *mnt, const __u8 replace)
67578+{
67579+ struct name_entry *matchn;
67580+ struct inodev_entry *inodev;
67581+ struct inode *inode = new_dentry->d_inode;
67582+ ino_t old_ino = old_dentry->d_inode->i_ino;
67583+ dev_t old_dev = __get_dev(old_dentry);
67584+
67585+ /* vfs_rename swaps the name and parent link for old_dentry and
67586+ new_dentry
67587+ at this point, old_dentry has the new name, parent link, and inode
67588+ for the renamed file
67589+ if a file is being replaced by a rename, new_dentry has the inode
67590+ and name for the replaced file
67591+ */
67592+
67593+ if (unlikely(!(gr_status & GR_READY)))
67594+ return;
67595+
67596+ preempt_disable();
67597+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
67598+
67599+ /* we wouldn't have to check d_inode if it weren't for
67600+ NFS silly-renaming
67601+ */
67602+
67603+ write_lock(&gr_inode_lock);
67604+ if (unlikely(replace && inode)) {
67605+ ino_t new_ino = inode->i_ino;
67606+ dev_t new_dev = __get_dev(new_dentry);
67607+
67608+ inodev = lookup_inodev_entry(new_ino, new_dev);
67609+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
67610+ do_handle_delete(inodev, new_ino, new_dev);
67611+ }
67612+
67613+ inodev = lookup_inodev_entry(old_ino, old_dev);
67614+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
67615+ do_handle_delete(inodev, old_ino, old_dev);
67616+
67617+ if (unlikely((unsigned long)matchn))
67618+ do_handle_create(matchn, old_dentry, mnt);
67619+
67620+ write_unlock(&gr_inode_lock);
67621+ preempt_enable();
67622+
67623+ return;
67624+}
67625+
67626+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
67627+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
67628+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
67629+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
67630+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
67631+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
67632+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
67633+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
67634+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
67635+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
67636+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
67637+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
67638+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
67639+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
67640+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
67641+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
67642+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
67643+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
67644+};
67645+
67646+void
67647+gr_learn_resource(const struct task_struct *task,
67648+ const int res, const unsigned long wanted, const int gt)
67649+{
67650+ struct acl_subject_label *acl;
67651+ const struct cred *cred;
67652+
67653+ if (unlikely((gr_status & GR_READY) &&
67654+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
67655+ goto skip_reslog;
67656+
67657+ gr_log_resource(task, res, wanted, gt);
67658+skip_reslog:
67659+
67660+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
67661+ return;
67662+
67663+ acl = task->acl;
67664+
67665+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
67666+ !(acl->resmask & (1U << (unsigned short) res))))
67667+ return;
67668+
67669+ if (wanted >= acl->res[res].rlim_cur) {
67670+ unsigned long res_add;
67671+
67672+ res_add = wanted + res_learn_bumps[res];
67673+
67674+ acl->res[res].rlim_cur = res_add;
67675+
67676+ if (wanted > acl->res[res].rlim_max)
67677+ acl->res[res].rlim_max = res_add;
67678+
67679+ /* only log the subject filename, since resource logging is supported for
67680+ single-subject learning only */
67681+ rcu_read_lock();
67682+ cred = __task_cred(task);
67683+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
67684+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
67685+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
67686+ "", (unsigned long) res, &task->signal->saved_ip);
67687+ rcu_read_unlock();
67688+ }
67689+
67690+ return;
67691+}
67692+EXPORT_SYMBOL_GPL(gr_learn_resource);
67693+#endif
67694+
67695+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
67696+void
67697+pax_set_initial_flags(struct linux_binprm *bprm)
67698+{
67699+ struct task_struct *task = current;
67700+ struct acl_subject_label *proc;
67701+ unsigned long flags;
67702+
67703+ if (unlikely(!(gr_status & GR_READY)))
67704+ return;
67705+
67706+ flags = pax_get_flags(task);
67707+
67708+ proc = task->acl;
67709+
67710+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
67711+ flags &= ~MF_PAX_PAGEEXEC;
67712+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
67713+ flags &= ~MF_PAX_SEGMEXEC;
67714+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
67715+ flags &= ~MF_PAX_RANDMMAP;
67716+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
67717+ flags &= ~MF_PAX_EMUTRAMP;
67718+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
67719+ flags &= ~MF_PAX_MPROTECT;
67720+
67721+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
67722+ flags |= MF_PAX_PAGEEXEC;
67723+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
67724+ flags |= MF_PAX_SEGMEXEC;
67725+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
67726+ flags |= MF_PAX_RANDMMAP;
67727+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
67728+ flags |= MF_PAX_EMUTRAMP;
67729+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
67730+ flags |= MF_PAX_MPROTECT;
67731+
67732+ pax_set_flags(task, flags);
67733+
67734+ return;
67735+}
67736+#endif
67737+
67738+int
67739+gr_handle_proc_ptrace(struct task_struct *task)
67740+{
67741+ struct file *filp;
67742+ struct task_struct *tmp = task;
67743+ struct task_struct *curtemp = current;
67744+ __u32 retmode;
67745+
67746+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
67747+ if (unlikely(!(gr_status & GR_READY)))
67748+ return 0;
67749+#endif
67750+
67751+ read_lock(&tasklist_lock);
67752+ read_lock(&grsec_exec_file_lock);
67753+ filp = task->exec_file;
67754+
67755+ while (task_pid_nr(tmp) > 0) {
67756+ if (tmp == curtemp)
67757+ break;
67758+ tmp = tmp->real_parent;
67759+ }
67760+
67761+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
67762+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
67763+ read_unlock(&grsec_exec_file_lock);
67764+ read_unlock(&tasklist_lock);
67765+ return 1;
67766+ }
67767+
67768+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
67769+ if (!(gr_status & GR_READY)) {
67770+ read_unlock(&grsec_exec_file_lock);
67771+ read_unlock(&tasklist_lock);
67772+ return 0;
67773+ }
67774+#endif
67775+
67776+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
67777+ read_unlock(&grsec_exec_file_lock);
67778+ read_unlock(&tasklist_lock);
67779+
67780+ if (retmode & GR_NOPTRACE)
67781+ return 1;
67782+
67783+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
67784+ && (current->acl != task->acl || (current->acl != current->role->root_label
67785+ && task_pid_nr(current) != task_pid_nr(task))))
67786+ return 1;
67787+
67788+ return 0;
67789+}
67790+
67791+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
67792+{
67793+ if (unlikely(!(gr_status & GR_READY)))
67794+ return;
67795+
67796+ if (!(current->role->roletype & GR_ROLE_GOD))
67797+ return;
67798+
67799+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
67800+ p->role->rolename, gr_task_roletype_to_char(p),
67801+ p->acl->filename);
67802+}
67803+
67804+int
67805+gr_handle_ptrace(struct task_struct *task, const long request)
67806+{
67807+ struct task_struct *tmp = task;
67808+ struct task_struct *curtemp = current;
67809+ __u32 retmode;
67810+
67811+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
67812+ if (unlikely(!(gr_status & GR_READY)))
67813+ return 0;
67814+#endif
67815+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67816+ read_lock(&tasklist_lock);
67817+ while (task_pid_nr(tmp) > 0) {
67818+ if (tmp == curtemp)
67819+ break;
67820+ tmp = tmp->real_parent;
67821+ }
67822+
67823+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
67824+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
67825+ read_unlock(&tasklist_lock);
67826+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
67827+ return 1;
67828+ }
67829+ read_unlock(&tasklist_lock);
67830+ }
67831+
67832+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
67833+ if (!(gr_status & GR_READY))
67834+ return 0;
67835+#endif
67836+
67837+ read_lock(&grsec_exec_file_lock);
67838+ if (unlikely(!task->exec_file)) {
67839+ read_unlock(&grsec_exec_file_lock);
67840+ return 0;
67841+ }
67842+
67843+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
67844+ read_unlock(&grsec_exec_file_lock);
67845+
67846+ if (retmode & GR_NOPTRACE) {
67847+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
67848+ return 1;
67849+ }
67850+
67851+ if (retmode & GR_PTRACERD) {
67852+ switch (request) {
67853+ case PTRACE_SEIZE:
67854+ case PTRACE_POKETEXT:
67855+ case PTRACE_POKEDATA:
67856+ case PTRACE_POKEUSR:
67857+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
67858+ case PTRACE_SETREGS:
67859+ case PTRACE_SETFPREGS:
67860+#endif
67861+#ifdef CONFIG_X86
67862+ case PTRACE_SETFPXREGS:
67863+#endif
67864+#ifdef CONFIG_ALTIVEC
67865+ case PTRACE_SETVRREGS:
67866+#endif
67867+ return 1;
67868+ default:
67869+ return 0;
67870+ }
67871+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
67872+ !(current->role->roletype & GR_ROLE_GOD) &&
67873+ (current->acl != task->acl)) {
67874+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
67875+ return 1;
67876+ }
67877+
67878+ return 0;
67879+}
67880+
67881+static int is_writable_mmap(const struct file *filp)
67882+{
67883+ struct task_struct *task = current;
67884+ struct acl_object_label *obj, *obj2;
67885+
67886+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
67887+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
67888+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
67889+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
67890+ task->role->root_label);
67891+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
67892+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
67893+ return 1;
67894+ }
67895+ }
67896+ return 0;
67897+}
67898+
67899+int
67900+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
67901+{
67902+ __u32 mode;
67903+
67904+ if (unlikely(!file || !(prot & PROT_EXEC)))
67905+ return 1;
67906+
67907+ if (is_writable_mmap(file))
67908+ return 0;
67909+
67910+ mode =
67911+ gr_search_file(file->f_path.dentry,
67912+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
67913+ file->f_path.mnt);
67914+
67915+ if (!gr_tpe_allow(file))
67916+ return 0;
67917+
67918+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
67919+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
67920+ return 0;
67921+ } else if (unlikely(!(mode & GR_EXEC))) {
67922+ return 0;
67923+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
67924+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
67925+ return 1;
67926+ }
67927+
67928+ return 1;
67929+}
67930+
67931+int
67932+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
67933+{
67934+ __u32 mode;
67935+
67936+ if (unlikely(!file || !(prot & PROT_EXEC)))
67937+ return 1;
67938+
67939+ if (is_writable_mmap(file))
67940+ return 0;
67941+
67942+ mode =
67943+ gr_search_file(file->f_path.dentry,
67944+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
67945+ file->f_path.mnt);
67946+
67947+ if (!gr_tpe_allow(file))
67948+ return 0;
67949+
67950+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
67951+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
67952+ return 0;
67953+ } else if (unlikely(!(mode & GR_EXEC))) {
67954+ return 0;
67955+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
67956+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
67957+ return 1;
67958+ }
67959+
67960+ return 1;
67961+}
67962+
67963+void
67964+gr_acl_handle_psacct(struct task_struct *task, const long code)
67965+{
67966+ unsigned long runtime, cputime;
67967+ cputime_t utime, stime;
67968+ unsigned int wday, cday;
67969+ __u8 whr, chr;
67970+ __u8 wmin, cmin;
67971+ __u8 wsec, csec;
67972+ struct timespec timeval;
67973+
67974+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
67975+ !(task->acl->mode & GR_PROCACCT)))
67976+ return;
67977+
67978+ do_posix_clock_monotonic_gettime(&timeval);
67979+ runtime = timeval.tv_sec - task->start_time.tv_sec;
67980+ wday = runtime / (60 * 60 * 24);
67981+ runtime -= wday * (60 * 60 * 24);
67982+ whr = runtime / (60 * 60);
67983+ runtime -= whr * (60 * 60);
67984+ wmin = runtime / 60;
67985+ runtime -= wmin * 60;
67986+ wsec = runtime;
67987+
67988+ task_cputime(task, &utime, &stime);
67989+ cputime = cputime_to_secs(utime + stime);
67990+ cday = cputime / (60 * 60 * 24);
67991+ cputime -= cday * (60 * 60 * 24);
67992+ chr = cputime / (60 * 60);
67993+ cputime -= chr * (60 * 60);
67994+ cmin = cputime / 60;
67995+ cputime -= cmin * 60;
67996+ csec = cputime;
67997+
67998+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
67999+
68000+ return;
68001+}
68002+
68003+#ifdef CONFIG_TASKSTATS
68004+int gr_is_taskstats_denied(int pid)
68005+{
68006+ struct task_struct *task;
68007+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68008+ const struct cred *cred;
68009+#endif
68010+ int ret = 0;
68011+
68012+ /* restrict taskstats viewing to un-chrooted root users
68013+ who have the 'view' subject flag if the RBAC system is enabled
68014+ */
68015+
68016+ rcu_read_lock();
68017+ read_lock(&tasklist_lock);
68018+ task = find_task_by_vpid(pid);
68019+ if (task) {
68020+#ifdef CONFIG_GRKERNSEC_CHROOT
68021+ if (proc_is_chrooted(task))
68022+ ret = -EACCES;
68023+#endif
68024+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68025+ cred = __task_cred(task);
68026+#ifdef CONFIG_GRKERNSEC_PROC_USER
68027+ if (gr_is_global_nonroot(cred->uid))
68028+ ret = -EACCES;
68029+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68030+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
68031+ ret = -EACCES;
68032+#endif
68033+#endif
68034+ if (gr_status & GR_READY) {
68035+ if (!(task->acl->mode & GR_VIEW))
68036+ ret = -EACCES;
68037+ }
68038+ } else
68039+ ret = -ENOENT;
68040+
68041+ read_unlock(&tasklist_lock);
68042+ rcu_read_unlock();
68043+
68044+ return ret;
68045+}
68046+#endif
68047+
68048+/* AUXV entries are filled via a descendant of search_binary_handler
68049+ after we've already applied the subject for the target
68050+*/
68051+int gr_acl_enable_at_secure(void)
68052+{
68053+ if (unlikely(!(gr_status & GR_READY)))
68054+ return 0;
68055+
68056+ if (current->acl->mode & GR_ATSECURE)
68057+ return 1;
68058+
68059+ return 0;
68060+}
68061+
68062+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
68063+{
68064+ struct task_struct *task = current;
68065+ struct dentry *dentry = file->f_path.dentry;
68066+ struct vfsmount *mnt = file->f_path.mnt;
68067+ struct acl_object_label *obj, *tmp;
68068+ struct acl_subject_label *subj;
68069+ unsigned int bufsize;
68070+ int is_not_root;
68071+ char *path;
68072+ dev_t dev = __get_dev(dentry);
68073+
68074+ if (unlikely(!(gr_status & GR_READY)))
68075+ return 1;
68076+
68077+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
68078+ return 1;
68079+
68080+ /* ignore Eric Biederman */
68081+ if (IS_PRIVATE(dentry->d_inode))
68082+ return 1;
68083+
68084+ subj = task->acl;
68085+ read_lock(&gr_inode_lock);
68086+ do {
68087+ obj = lookup_acl_obj_label(ino, dev, subj);
68088+ if (obj != NULL) {
68089+ read_unlock(&gr_inode_lock);
68090+ return (obj->mode & GR_FIND) ? 1 : 0;
68091+ }
68092+ } while ((subj = subj->parent_subject));
68093+ read_unlock(&gr_inode_lock);
68094+
68095+ /* this is purely an optimization since we're looking for an object
68096+ for the directory we're doing a readdir on
68097+ if it's possible for any globbed object to match the entry we're
68098+ filling into the directory, then the object we find here will be
68099+ an anchor point with attached globbed objects
68100+ */
68101+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
68102+ if (obj->globbed == NULL)
68103+ return (obj->mode & GR_FIND) ? 1 : 0;
68104+
68105+ is_not_root = ((obj->filename[0] == '/') &&
68106+ (obj->filename[1] == '\0')) ? 0 : 1;
68107+ bufsize = PAGE_SIZE - namelen - is_not_root;
68108+
68109+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
68110+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
68111+ return 1;
68112+
68113+ preempt_disable();
68114+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
68115+ bufsize);
68116+
68117+ bufsize = strlen(path);
68118+
68119+ /* if base is "/", don't append an additional slash */
68120+ if (is_not_root)
68121+ *(path + bufsize) = '/';
68122+ memcpy(path + bufsize + is_not_root, name, namelen);
68123+ *(path + bufsize + namelen + is_not_root) = '\0';
68124+
68125+ tmp = obj->globbed;
68126+ while (tmp) {
68127+ if (!glob_match(tmp->filename, path)) {
68128+ preempt_enable();
68129+ return (tmp->mode & GR_FIND) ? 1 : 0;
68130+ }
68131+ tmp = tmp->next;
68132+ }
68133+ preempt_enable();
68134+ return (obj->mode & GR_FIND) ? 1 : 0;
68135+}
68136+
68137+void gr_put_exec_file(struct task_struct *task)
68138+{
68139+ struct file *filp;
68140+
68141+ write_lock(&grsec_exec_file_lock);
68142+ filp = task->exec_file;
68143+ task->exec_file = NULL;
68144+ write_unlock(&grsec_exec_file_lock);
68145+
68146+ if (filp)
68147+ fput(filp);
68148+
68149+ return;
68150+}
68151+
68152+
68153+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
68154+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
68155+#endif
68156+#ifdef CONFIG_SECURITY
68157+EXPORT_SYMBOL_GPL(gr_check_user_change);
68158+EXPORT_SYMBOL_GPL(gr_check_group_change);
68159+#endif
68160+
68161diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
68162new file mode 100644
68163index 0000000..18ffbbd
68164--- /dev/null
68165+++ b/grsecurity/gracl_alloc.c
68166@@ -0,0 +1,105 @@
68167+#include <linux/kernel.h>
68168+#include <linux/mm.h>
68169+#include <linux/slab.h>
68170+#include <linux/vmalloc.h>
68171+#include <linux/gracl.h>
68172+#include <linux/grsecurity.h>
68173+
68174+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
68175+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
68176+
68177+static __inline__ int
68178+alloc_pop(void)
68179+{
68180+ if (current_alloc_state->alloc_stack_next == 1)
68181+ return 0;
68182+
68183+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
68184+
68185+ current_alloc_state->alloc_stack_next--;
68186+
68187+ return 1;
68188+}
68189+
68190+static __inline__ int
68191+alloc_push(void *buf)
68192+{
68193+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
68194+ return 1;
68195+
68196+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
68197+
68198+ current_alloc_state->alloc_stack_next++;
68199+
68200+ return 0;
68201+}
68202+
68203+void *
68204+acl_alloc(unsigned long len)
68205+{
68206+ void *ret = NULL;
68207+
68208+ if (!len || len > PAGE_SIZE)
68209+ goto out;
68210+
68211+ ret = kmalloc(len, GFP_KERNEL);
68212+
68213+ if (ret) {
68214+ if (alloc_push(ret)) {
68215+ kfree(ret);
68216+ ret = NULL;
68217+ }
68218+ }
68219+
68220+out:
68221+ return ret;
68222+}
68223+
68224+void *
68225+acl_alloc_num(unsigned long num, unsigned long len)
68226+{
68227+ if (!len || (num > (PAGE_SIZE / len)))
68228+ return NULL;
68229+
68230+ return acl_alloc(num * len);
68231+}
68232+
68233+void
68234+acl_free_all(void)
68235+{
68236+ if (!current_alloc_state->alloc_stack)
68237+ return;
68238+
68239+ while (alloc_pop()) ;
68240+
68241+ if (current_alloc_state->alloc_stack) {
68242+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
68243+ kfree(current_alloc_state->alloc_stack);
68244+ else
68245+ vfree(current_alloc_state->alloc_stack);
68246+ }
68247+
68248+ current_alloc_state->alloc_stack = NULL;
68249+ current_alloc_state->alloc_stack_size = 1;
68250+ current_alloc_state->alloc_stack_next = 1;
68251+
68252+ return;
68253+}
68254+
68255+int
68256+acl_alloc_stack_init(unsigned long size)
68257+{
68258+ if ((size * sizeof (void *)) <= PAGE_SIZE)
68259+ current_alloc_state->alloc_stack =
68260+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
68261+ else
68262+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
68263+
68264+ current_alloc_state->alloc_stack_size = size;
68265+ current_alloc_state->alloc_stack_next = 1;
68266+
68267+ if (!current_alloc_state->alloc_stack)
68268+ return 0;
68269+ else
68270+ return 1;
68271+}
68272diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
68273new file mode 100644
68274index 0000000..bdd51ea
68275--- /dev/null
68276+++ b/grsecurity/gracl_cap.c
68277@@ -0,0 +1,110 @@
68278+#include <linux/kernel.h>
68279+#include <linux/module.h>
68280+#include <linux/sched.h>
68281+#include <linux/gracl.h>
68282+#include <linux/grsecurity.h>
68283+#include <linux/grinternal.h>
68284+
68285+extern const char *captab_log[];
68286+extern int captab_log_entries;
68287+
68288+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
68289+{
68290+ struct acl_subject_label *curracl;
68291+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
68292+ kernel_cap_t cap_audit = __cap_empty_set;
68293+
68294+ if (!gr_acl_is_enabled())
68295+ return 1;
68296+
68297+ curracl = task->acl;
68298+
68299+ cap_drop = curracl->cap_lower;
68300+ cap_mask = curracl->cap_mask;
68301+ cap_audit = curracl->cap_invert_audit;
68302+
68303+ while ((curracl = curracl->parent_subject)) {
68304+ /* if the cap isn't specified in the current computed mask but is specified in the
68305+ current level subject, and is lowered in the current level subject, then add
68306+ it to the set of dropped capabilities
68307+ otherwise, add the current level subject's mask to the current computed mask
68308+ */
68309+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
68310+ cap_raise(cap_mask, cap);
68311+ if (cap_raised(curracl->cap_lower, cap))
68312+ cap_raise(cap_drop, cap);
68313+ if (cap_raised(curracl->cap_invert_audit, cap))
68314+ cap_raise(cap_audit, cap);
68315+ }
68316+ }
68317+
68318+ if (!cap_raised(cap_drop, cap)) {
68319+ if (cap_raised(cap_audit, cap))
68320+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
68321+ return 1;
68322+ }
68323+
68324+ curracl = task->acl;
68325+
68326+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
68327+ && cap_raised(cred->cap_effective, cap)) {
68328+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
68329+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
68330+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
68331+ gr_to_filename(task->exec_file->f_path.dentry,
68332+ task->exec_file->f_path.mnt) : curracl->filename,
68333+ curracl->filename, 0UL,
68334+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
68335+ return 1;
68336+ }
68337+
68338+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
68339+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
68340+
68341+ return 0;
68342+}
68343+
68344+int
68345+gr_acl_is_capable(const int cap)
68346+{
68347+ return gr_task_acl_is_capable(current, current_cred(), cap);
68348+}
68349+
68350+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
68351+{
68352+ struct acl_subject_label *curracl;
68353+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
68354+
68355+ if (!gr_acl_is_enabled())
68356+ return 1;
68357+
68358+ curracl = task->acl;
68359+
68360+ cap_drop = curracl->cap_lower;
68361+ cap_mask = curracl->cap_mask;
68362+
68363+ while ((curracl = curracl->parent_subject)) {
68364+ /* if the cap isn't specified in the current computed mask but is specified in the
68365+ current level subject, and is lowered in the current level subject, then add
68366+ it to the set of dropped capabilities
68367+ otherwise, add the current level subject's mask to the current computed mask
68368+ */
68369+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
68370+ cap_raise(cap_mask, cap);
68371+ if (cap_raised(curracl->cap_lower, cap))
68372+ cap_raise(cap_drop, cap);
68373+ }
68374+ }
68375+
68376+ if (!cap_raised(cap_drop, cap))
68377+ return 1;
68378+
68379+ return 0;
68380+}
68381+
68382+int
68383+gr_acl_is_capable_nolog(const int cap)
68384+{
68385+ return gr_task_acl_is_capable_nolog(current, cap);
68386+}
68387+
68388diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
68389new file mode 100644
68390index 0000000..ca25605
68391--- /dev/null
68392+++ b/grsecurity/gracl_compat.c
68393@@ -0,0 +1,270 @@
68394+#include <linux/kernel.h>
68395+#include <linux/gracl.h>
68396+#include <linux/compat.h>
68397+#include <linux/gracl_compat.h>
68398+
68399+#include <asm/uaccess.h>
68400+
68401+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
68402+{
68403+ struct gr_arg_wrapper_compat uwrapcompat;
68404+
68405+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
68406+ return -EFAULT;
68407+
68408+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
68409+ (uwrapcompat.version != 0x2901)) ||
68410+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
68411+ return -EINVAL;
68412+
68413+ uwrap->arg = compat_ptr(uwrapcompat.arg);
68414+ uwrap->version = uwrapcompat.version;
68415+ uwrap->size = sizeof(struct gr_arg);
68416+
68417+ return 0;
68418+}
68419+
68420+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
68421+{
68422+ struct gr_arg_compat argcompat;
68423+
68424+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
68425+ return -EFAULT;
68426+
68427+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
68428+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
68429+ arg->role_db.num_roles = argcompat.role_db.num_roles;
68430+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
68431+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
68432+ arg->role_db.num_objects = argcompat.role_db.num_objects;
68433+
68434+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
68435+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
68436+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
68437+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
68438+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
68439+ arg->segv_device = argcompat.segv_device;
68440+ arg->segv_inode = argcompat.segv_inode;
68441+ arg->segv_uid = argcompat.segv_uid;
68442+ arg->num_sprole_pws = argcompat.num_sprole_pws;
68443+ arg->mode = argcompat.mode;
68444+
68445+ return 0;
68446+}
68447+
68448+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
68449+{
68450+ struct acl_object_label_compat objcompat;
68451+
68452+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
68453+ return -EFAULT;
68454+
68455+ obj->filename = compat_ptr(objcompat.filename);
68456+ obj->inode = objcompat.inode;
68457+ obj->device = objcompat.device;
68458+ obj->mode = objcompat.mode;
68459+
68460+ obj->nested = compat_ptr(objcompat.nested);
68461+ obj->globbed = compat_ptr(objcompat.globbed);
68462+
68463+ obj->prev = compat_ptr(objcompat.prev);
68464+ obj->next = compat_ptr(objcompat.next);
68465+
68466+ return 0;
68467+}
68468+
68469+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
68470+{
68471+ unsigned int i;
68472+ struct acl_subject_label_compat subjcompat;
68473+
68474+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
68475+ return -EFAULT;
68476+
68477+ subj->filename = compat_ptr(subjcompat.filename);
68478+ subj->inode = subjcompat.inode;
68479+ subj->device = subjcompat.device;
68480+ subj->mode = subjcompat.mode;
68481+ subj->cap_mask = subjcompat.cap_mask;
68482+ subj->cap_lower = subjcompat.cap_lower;
68483+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
68484+
68485+ for (i = 0; i < GR_NLIMITS; i++) {
68486+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
68487+ subj->res[i].rlim_cur = RLIM_INFINITY;
68488+ else
68489+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
68490+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
68491+ subj->res[i].rlim_max = RLIM_INFINITY;
68492+ else
68493+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
68494+ }
68495+ subj->resmask = subjcompat.resmask;
68496+
68497+ subj->user_trans_type = subjcompat.user_trans_type;
68498+ subj->group_trans_type = subjcompat.group_trans_type;
68499+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
68500+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
68501+ subj->user_trans_num = subjcompat.user_trans_num;
68502+ subj->group_trans_num = subjcompat.group_trans_num;
68503+
68504+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
68505+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
68506+ subj->ip_type = subjcompat.ip_type;
68507+ subj->ips = compat_ptr(subjcompat.ips);
68508+ subj->ip_num = subjcompat.ip_num;
68509+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
68510+
68511+ subj->crashes = subjcompat.crashes;
68512+ subj->expires = subjcompat.expires;
68513+
68514+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
68515+ subj->hash = compat_ptr(subjcompat.hash);
68516+ subj->prev = compat_ptr(subjcompat.prev);
68517+ subj->next = compat_ptr(subjcompat.next);
68518+
68519+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
68520+ subj->obj_hash_size = subjcompat.obj_hash_size;
68521+ subj->pax_flags = subjcompat.pax_flags;
68522+
68523+ return 0;
68524+}
68525+
68526+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
68527+{
68528+ struct acl_role_label_compat rolecompat;
68529+
68530+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
68531+ return -EFAULT;
68532+
68533+ role->rolename = compat_ptr(rolecompat.rolename);
68534+ role->uidgid = rolecompat.uidgid;
68535+ role->roletype = rolecompat.roletype;
68536+
68537+ role->auth_attempts = rolecompat.auth_attempts;
68538+ role->expires = rolecompat.expires;
68539+
68540+ role->root_label = compat_ptr(rolecompat.root_label);
68541+ role->hash = compat_ptr(rolecompat.hash);
68542+
68543+ role->prev = compat_ptr(rolecompat.prev);
68544+ role->next = compat_ptr(rolecompat.next);
68545+
68546+ role->transitions = compat_ptr(rolecompat.transitions);
68547+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
68548+ role->domain_children = compat_ptr(rolecompat.domain_children);
68549+ role->domain_child_num = rolecompat.domain_child_num;
68550+
68551+ role->umask = rolecompat.umask;
68552+
68553+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
68554+ role->subj_hash_size = rolecompat.subj_hash_size;
68555+
68556+ return 0;
68557+}
68558+
68559+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
68560+{
68561+ struct role_allowed_ip_compat roleip_compat;
68562+
68563+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
68564+ return -EFAULT;
68565+
68566+ roleip->addr = roleip_compat.addr;
68567+ roleip->netmask = roleip_compat.netmask;
68568+
68569+ roleip->prev = compat_ptr(roleip_compat.prev);
68570+ roleip->next = compat_ptr(roleip_compat.next);
68571+
68572+ return 0;
68573+}
68574+
68575+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
68576+{
68577+ struct role_transition_compat trans_compat;
68578+
68579+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
68580+ return -EFAULT;
68581+
68582+ trans->rolename = compat_ptr(trans_compat.rolename);
68583+
68584+ trans->prev = compat_ptr(trans_compat.prev);
68585+ trans->next = compat_ptr(trans_compat.next);
68586+
68587+ return 0;
68588+
68589+}
68590+
68591+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
68592+{
68593+ struct gr_hash_struct_compat hash_compat;
68594+
68595+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
68596+ return -EFAULT;
68597+
68598+ hash->table = compat_ptr(hash_compat.table);
68599+ hash->nametable = compat_ptr(hash_compat.nametable);
68600+ hash->first = compat_ptr(hash_compat.first);
68601+
68602+ hash->table_size = hash_compat.table_size;
68603+ hash->used_size = hash_compat.used_size;
68604+
68605+ hash->type = hash_compat.type;
68606+
68607+ return 0;
68608+}
68609+
68610+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
68611+{
68612+ compat_uptr_t ptrcompat;
68613+
68614+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
68615+ return -EFAULT;
68616+
68617+ *(void **)ptr = compat_ptr(ptrcompat);
68618+
68619+ return 0;
68620+}
68621+
68622+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
68623+{
68624+ struct acl_ip_label_compat ip_compat;
68625+
68626+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
68627+ return -EFAULT;
68628+
68629+ ip->iface = compat_ptr(ip_compat.iface);
68630+ ip->addr = ip_compat.addr;
68631+ ip->netmask = ip_compat.netmask;
68632+ ip->low = ip_compat.low;
68633+ ip->high = ip_compat.high;
68634+ ip->mode = ip_compat.mode;
68635+ ip->type = ip_compat.type;
68636+
68637+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
68638+
68639+ ip->prev = compat_ptr(ip_compat.prev);
68640+ ip->next = compat_ptr(ip_compat.next);
68641+
68642+ return 0;
68643+}
68644+
68645+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
68646+{
68647+ struct sprole_pw_compat pw_compat;
68648+
68649+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
68650+ return -EFAULT;
68651+
68652+ pw->rolename = compat_ptr(pw_compat.rolename);
68653+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
68654+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
68655+
68656+ return 0;
68657+}
68658+
68659+size_t get_gr_arg_wrapper_size_compat(void)
68660+{
68661+ return sizeof(struct gr_arg_wrapper_compat);
68662+}
68663+
68664diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
68665new file mode 100644
68666index 0000000..a89b1f4
68667--- /dev/null
68668+++ b/grsecurity/gracl_fs.c
68669@@ -0,0 +1,437 @@
68670+#include <linux/kernel.h>
68671+#include <linux/sched.h>
68672+#include <linux/types.h>
68673+#include <linux/fs.h>
68674+#include <linux/file.h>
68675+#include <linux/stat.h>
68676+#include <linux/grsecurity.h>
68677+#include <linux/grinternal.h>
68678+#include <linux/gracl.h>
68679+
68680+umode_t
68681+gr_acl_umask(void)
68682+{
68683+ if (unlikely(!gr_acl_is_enabled()))
68684+ return 0;
68685+
68686+ return current->role->umask;
68687+}
68688+
68689+__u32
68690+gr_acl_handle_hidden_file(const struct dentry * dentry,
68691+ const struct vfsmount * mnt)
68692+{
68693+ __u32 mode;
68694+
68695+ if (unlikely(d_is_negative(dentry)))
68696+ return GR_FIND;
68697+
68698+ mode =
68699+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
68700+
68701+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
68702+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
68703+ return mode;
68704+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
68705+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
68706+ return 0;
68707+ } else if (unlikely(!(mode & GR_FIND)))
68708+ return 0;
68709+
68710+ return GR_FIND;
68711+}
68712+
68713+__u32
68714+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
68715+ int acc_mode)
68716+{
68717+ __u32 reqmode = GR_FIND;
68718+ __u32 mode;
68719+
68720+ if (unlikely(d_is_negative(dentry)))
68721+ return reqmode;
68722+
68723+ if (acc_mode & MAY_APPEND)
68724+ reqmode |= GR_APPEND;
68725+ else if (acc_mode & MAY_WRITE)
68726+ reqmode |= GR_WRITE;
68727+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
68728+ reqmode |= GR_READ;
68729+
68730+ mode =
68731+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
68732+ mnt);
68733+
68734+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
68735+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
68736+ reqmode & GR_READ ? " reading" : "",
68737+ reqmode & GR_WRITE ? " writing" : reqmode &
68738+ GR_APPEND ? " appending" : "");
68739+ return reqmode;
68740+ } else
68741+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
68742+ {
68743+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
68744+ reqmode & GR_READ ? " reading" : "",
68745+ reqmode & GR_WRITE ? " writing" : reqmode &
68746+ GR_APPEND ? " appending" : "");
68747+ return 0;
68748+ } else if (unlikely((mode & reqmode) != reqmode))
68749+ return 0;
68750+
68751+ return reqmode;
68752+}
68753+
68754+__u32
68755+gr_acl_handle_creat(const struct dentry * dentry,
68756+ const struct dentry * p_dentry,
68757+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
68758+ const int imode)
68759+{
68760+ __u32 reqmode = GR_WRITE | GR_CREATE;
68761+ __u32 mode;
68762+
68763+ if (acc_mode & MAY_APPEND)
68764+ reqmode |= GR_APPEND;
68765+ // if a directory was required or the directory already exists, then
68766+ // don't count this open as a read
68767+ if ((acc_mode & MAY_READ) &&
68768+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
68769+ reqmode |= GR_READ;
68770+ if ((open_flags & O_CREAT) &&
68771+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
68772+ reqmode |= GR_SETID;
68773+
68774+ mode =
68775+ gr_check_create(dentry, p_dentry, p_mnt,
68776+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
68777+
68778+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
68779+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
68780+ reqmode & GR_READ ? " reading" : "",
68781+ reqmode & GR_WRITE ? " writing" : reqmode &
68782+ GR_APPEND ? " appending" : "");
68783+ return reqmode;
68784+ } else
68785+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
68786+ {
68787+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
68788+ reqmode & GR_READ ? " reading" : "",
68789+ reqmode & GR_WRITE ? " writing" : reqmode &
68790+ GR_APPEND ? " appending" : "");
68791+ return 0;
68792+ } else if (unlikely((mode & reqmode) != reqmode))
68793+ return 0;
68794+
68795+ return reqmode;
68796+}
68797+
68798+__u32
68799+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
68800+ const int fmode)
68801+{
68802+ __u32 mode, reqmode = GR_FIND;
68803+
68804+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
68805+ reqmode |= GR_EXEC;
68806+ if (fmode & S_IWOTH)
68807+ reqmode |= GR_WRITE;
68808+ if (fmode & S_IROTH)
68809+ reqmode |= GR_READ;
68810+
68811+ mode =
68812+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
68813+ mnt);
68814+
68815+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
68816+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
68817+ reqmode & GR_READ ? " reading" : "",
68818+ reqmode & GR_WRITE ? " writing" : "",
68819+ reqmode & GR_EXEC ? " executing" : "");
68820+ return reqmode;
68821+ } else
68822+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
68823+ {
68824+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
68825+ reqmode & GR_READ ? " reading" : "",
68826+ reqmode & GR_WRITE ? " writing" : "",
68827+ reqmode & GR_EXEC ? " executing" : "");
68828+ return 0;
68829+ } else if (unlikely((mode & reqmode) != reqmode))
68830+ return 0;
68831+
68832+ return reqmode;
68833+}
68834+
68835+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
68836+{
68837+ __u32 mode;
68838+
68839+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
68840+
68841+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
68842+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
68843+ return mode;
68844+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
68845+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
68846+ return 0;
68847+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
68848+ return 0;
68849+
68850+ return (reqmode);
68851+}
68852+
68853+__u32
68854+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
68855+{
68856+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
68857+}
68858+
68859+__u32
68860+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
68861+{
68862+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
68863+}
68864+
68865+__u32
68866+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
68867+{
68868+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
68869+}
68870+
68871+__u32
68872+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
68873+{
68874+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
68875+}
68876+
68877+__u32
68878+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
68879+ umode_t *modeptr)
68880+{
68881+ umode_t mode;
68882+
68883+ *modeptr &= ~gr_acl_umask();
68884+ mode = *modeptr;
68885+
68886+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
68887+ return 1;
68888+
68889+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
68890+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
68891+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
68892+ GR_CHMOD_ACL_MSG);
68893+ } else {
68894+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
68895+ }
68896+}
68897+
68898+__u32
68899+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
68900+{
68901+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
68902+}
68903+
68904+__u32
68905+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
68906+{
68907+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
68908+}
68909+
68910+__u32
68911+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
68912+{
68913+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
68914+}
68915+
68916+__u32
68917+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
68918+{
68919+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
68920+}
68921+
68922+__u32
68923+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
68924+{
68925+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
68926+ GR_UNIXCONNECT_ACL_MSG);
68927+}
68928+
68929+/* hardlinks require at minimum create and link permission,
68930+ any additional privilege required is based on the
68931+ privilege of the file being linked to
68932+*/
68933+__u32
68934+gr_acl_handle_link(const struct dentry * new_dentry,
68935+ const struct dentry * parent_dentry,
68936+ const struct vfsmount * parent_mnt,
68937+ const struct dentry * old_dentry,
68938+ const struct vfsmount * old_mnt, const struct filename *to)
68939+{
68940+ __u32 mode;
68941+ __u32 needmode = GR_CREATE | GR_LINK;
68942+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
68943+
68944+ mode =
68945+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
68946+ old_mnt);
68947+
68948+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
68949+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
68950+ return mode;
68951+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
68952+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
68953+ return 0;
68954+ } else if (unlikely((mode & needmode) != needmode))
68955+ return 0;
68956+
68957+ return 1;
68958+}
68959+
68960+__u32
68961+gr_acl_handle_symlink(const struct dentry * new_dentry,
68962+ const struct dentry * parent_dentry,
68963+ const struct vfsmount * parent_mnt, const struct filename *from)
68964+{
68965+ __u32 needmode = GR_WRITE | GR_CREATE;
68966+ __u32 mode;
68967+
68968+ mode =
68969+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
68970+ GR_CREATE | GR_AUDIT_CREATE |
68971+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
68972+
68973+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
68974+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
68975+ return mode;
68976+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
68977+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
68978+ return 0;
68979+ } else if (unlikely((mode & needmode) != needmode))
68980+ return 0;
68981+
68982+ return (GR_WRITE | GR_CREATE);
68983+}
68984+
68985+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
68986+{
68987+ __u32 mode;
68988+
68989+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
68990+
68991+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
68992+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
68993+ return mode;
68994+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
68995+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
68996+ return 0;
68997+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
68998+ return 0;
68999+
69000+ return (reqmode);
69001+}
69002+
69003+__u32
69004+gr_acl_handle_mknod(const struct dentry * new_dentry,
69005+ const struct dentry * parent_dentry,
69006+ const struct vfsmount * parent_mnt,
69007+ const int mode)
69008+{
69009+ __u32 reqmode = GR_WRITE | GR_CREATE;
69010+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
69011+ reqmode |= GR_SETID;
69012+
69013+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
69014+ reqmode, GR_MKNOD_ACL_MSG);
69015+}
69016+
69017+__u32
69018+gr_acl_handle_mkdir(const struct dentry *new_dentry,
69019+ const struct dentry *parent_dentry,
69020+ const struct vfsmount *parent_mnt)
69021+{
69022+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
69023+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
69024+}
69025+
69026+#define RENAME_CHECK_SUCCESS(old, new) \
69027+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
69028+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
69029+
69030+int
69031+gr_acl_handle_rename(struct dentry *new_dentry,
69032+ struct dentry *parent_dentry,
69033+ const struct vfsmount *parent_mnt,
69034+ struct dentry *old_dentry,
69035+ struct inode *old_parent_inode,
69036+ struct vfsmount *old_mnt, const struct filename *newname)
69037+{
69038+ __u32 comp1, comp2;
69039+ int error = 0;
69040+
69041+ if (unlikely(!gr_acl_is_enabled()))
69042+ return 0;
69043+
69044+ if (d_is_negative(new_dentry)) {
69045+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
69046+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
69047+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
69048+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
69049+ GR_DELETE | GR_AUDIT_DELETE |
69050+ GR_AUDIT_READ | GR_AUDIT_WRITE |
69051+ GR_SUPPRESS, old_mnt);
69052+ } else {
69053+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
69054+ GR_CREATE | GR_DELETE |
69055+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
69056+ GR_AUDIT_READ | GR_AUDIT_WRITE |
69057+ GR_SUPPRESS, parent_mnt);
69058+ comp2 =
69059+ gr_search_file(old_dentry,
69060+ GR_READ | GR_WRITE | GR_AUDIT_READ |
69061+ GR_DELETE | GR_AUDIT_DELETE |
69062+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
69063+ }
69064+
69065+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
69066+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
69067+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
69068+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
69069+ && !(comp2 & GR_SUPPRESS)) {
69070+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
69071+ error = -EACCES;
69072+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
69073+ error = -EACCES;
69074+
69075+ return error;
69076+}
69077+
69078+void
69079+gr_acl_handle_exit(void)
69080+{
69081+ u16 id;
69082+ char *rolename;
69083+
69084+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
69085+ !(current->role->roletype & GR_ROLE_PERSIST))) {
69086+ id = current->acl_role_id;
69087+ rolename = current->role->rolename;
69088+ gr_set_acls(1);
69089+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
69090+ }
69091+
69092+ gr_put_exec_file(current);
69093+ return;
69094+}
69095+
69096+int
69097+gr_acl_handle_procpidmem(const struct task_struct *task)
69098+{
69099+ if (unlikely(!gr_acl_is_enabled()))
69100+ return 0;
69101+
69102+ if (task != current && task->acl->mode & GR_PROTPROCFD)
69103+ return -EACCES;
69104+
69105+ return 0;
69106+}
69107diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
69108new file mode 100644
69109index 0000000..f056b81
69110--- /dev/null
69111+++ b/grsecurity/gracl_ip.c
69112@@ -0,0 +1,386 @@
69113+#include <linux/kernel.h>
69114+#include <asm/uaccess.h>
69115+#include <asm/errno.h>
69116+#include <net/sock.h>
69117+#include <linux/file.h>
69118+#include <linux/fs.h>
69119+#include <linux/net.h>
69120+#include <linux/in.h>
69121+#include <linux/skbuff.h>
69122+#include <linux/ip.h>
69123+#include <linux/udp.h>
69124+#include <linux/types.h>
69125+#include <linux/sched.h>
69126+#include <linux/netdevice.h>
69127+#include <linux/inetdevice.h>
69128+#include <linux/gracl.h>
69129+#include <linux/grsecurity.h>
69130+#include <linux/grinternal.h>
69131+
69132+#define GR_BIND 0x01
69133+#define GR_CONNECT 0x02
69134+#define GR_INVERT 0x04
69135+#define GR_BINDOVERRIDE 0x08
69136+#define GR_CONNECTOVERRIDE 0x10
69137+#define GR_SOCK_FAMILY 0x20
69138+
69139+static const char * gr_protocols[IPPROTO_MAX] = {
69140+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
69141+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
69142+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
69143+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
69144+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
69145+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
69146+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
69147+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
69148+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
69149+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
69150+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
69151+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
69152+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
69153+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
69154+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
69155+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
69156+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
69157+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
69158+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
69159+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
69160+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
69161+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
69162+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
69163+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
69164+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
69165+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
69166+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
69167+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
69168+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
69169+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
69170+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
69171+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
69172+ };
69173+
69174+static const char * gr_socktypes[SOCK_MAX] = {
69175+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
69176+ "unknown:7", "unknown:8", "unknown:9", "packet"
69177+ };
69178+
69179+static const char * gr_sockfamilies[AF_MAX+1] = {
69180+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
69181+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
69182+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
69183+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
69184+ };
69185+
69186+const char *
69187+gr_proto_to_name(unsigned char proto)
69188+{
69189+ return gr_protocols[proto];
69190+}
69191+
69192+const char *
69193+gr_socktype_to_name(unsigned char type)
69194+{
69195+ return gr_socktypes[type];
69196+}
69197+
69198+const char *
69199+gr_sockfamily_to_name(unsigned char family)
69200+{
69201+ return gr_sockfamilies[family];
69202+}
69203+
69204+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
69205+
69206+int
69207+gr_search_socket(const int domain, const int type, const int protocol)
69208+{
69209+ struct acl_subject_label *curr;
69210+ const struct cred *cred = current_cred();
69211+
69212+ if (unlikely(!gr_acl_is_enabled()))
69213+ goto exit;
69214+
69215+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
69216+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
69217+ goto exit; // let the kernel handle it
69218+
69219+ curr = current->acl;
69220+
69221+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
69222+ /* the family is allowed, if this is PF_INET allow it only if
69223+ the extra sock type/protocol checks pass */
69224+ if (domain == PF_INET)
69225+ goto inet_check;
69226+ goto exit;
69227+ } else {
69228+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
69229+ __u32 fakeip = 0;
69230+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69231+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69232+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69233+ gr_to_filename(current->exec_file->f_path.dentry,
69234+ current->exec_file->f_path.mnt) :
69235+ curr->filename, curr->filename,
69236+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
69237+ &current->signal->saved_ip);
69238+ goto exit;
69239+ }
69240+ goto exit_fail;
69241+ }
69242+
69243+inet_check:
69244+ /* the rest of this checking is for IPv4 only */
69245+ if (!curr->ips)
69246+ goto exit;
69247+
69248+ if ((curr->ip_type & (1U << type)) &&
69249+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
69250+ goto exit;
69251+
69252+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
69253+ /* we don't place acls on raw sockets , and sometimes
69254+ dgram/ip sockets are opened for ioctl and not
69255+ bind/connect, so we'll fake a bind learn log */
69256+ if (type == SOCK_RAW || type == SOCK_PACKET) {
69257+ __u32 fakeip = 0;
69258+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69259+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69260+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69261+ gr_to_filename(current->exec_file->f_path.dentry,
69262+ current->exec_file->f_path.mnt) :
69263+ curr->filename, curr->filename,
69264+ &fakeip, 0, type,
69265+ protocol, GR_CONNECT, &current->signal->saved_ip);
69266+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
69267+ __u32 fakeip = 0;
69268+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69269+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69270+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69271+ gr_to_filename(current->exec_file->f_path.dentry,
69272+ current->exec_file->f_path.mnt) :
69273+ curr->filename, curr->filename,
69274+ &fakeip, 0, type,
69275+ protocol, GR_BIND, &current->signal->saved_ip);
69276+ }
69277+ /* we'll log when they use connect or bind */
69278+ goto exit;
69279+ }
69280+
69281+exit_fail:
69282+ if (domain == PF_INET)
69283+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
69284+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
69285+ else if (rcu_access_pointer(net_families[domain]) != NULL)
69286+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
69287+ gr_socktype_to_name(type), protocol);
69288+
69289+ return 0;
69290+exit:
69291+ return 1;
69292+}
69293+
69294+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
69295+{
69296+ if ((ip->mode & mode) &&
69297+ (ip_port >= ip->low) &&
69298+ (ip_port <= ip->high) &&
69299+ ((ntohl(ip_addr) & our_netmask) ==
69300+ (ntohl(our_addr) & our_netmask))
69301+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
69302+ && (ip->type & (1U << type))) {
69303+ if (ip->mode & GR_INVERT)
69304+ return 2; // specifically denied
69305+ else
69306+ return 1; // allowed
69307+ }
69308+
69309+ return 0; // not specifically allowed, may continue parsing
69310+}
69311+
69312+static int
69313+gr_search_connectbind(const int full_mode, struct sock *sk,
69314+ struct sockaddr_in *addr, const int type)
69315+{
69316+ char iface[IFNAMSIZ] = {0};
69317+ struct acl_subject_label *curr;
69318+ struct acl_ip_label *ip;
69319+ struct inet_sock *isk;
69320+ struct net_device *dev;
69321+ struct in_device *idev;
69322+ unsigned long i;
69323+ int ret;
69324+ int mode = full_mode & (GR_BIND | GR_CONNECT);
69325+ __u32 ip_addr = 0;
69326+ __u32 our_addr;
69327+ __u32 our_netmask;
69328+ char *p;
69329+ __u16 ip_port = 0;
69330+ const struct cred *cred = current_cred();
69331+
69332+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
69333+ return 0;
69334+
69335+ curr = current->acl;
69336+ isk = inet_sk(sk);
69337+
69338+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
69339+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
69340+ addr->sin_addr.s_addr = curr->inaddr_any_override;
69341+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
69342+ struct sockaddr_in saddr;
69343+ int err;
69344+
69345+ saddr.sin_family = AF_INET;
69346+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
69347+ saddr.sin_port = isk->inet_sport;
69348+
69349+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
69350+ if (err)
69351+ return err;
69352+
69353+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
69354+ if (err)
69355+ return err;
69356+ }
69357+
69358+ if (!curr->ips)
69359+ return 0;
69360+
69361+ ip_addr = addr->sin_addr.s_addr;
69362+ ip_port = ntohs(addr->sin_port);
69363+
69364+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
69365+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69366+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69367+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69368+ gr_to_filename(current->exec_file->f_path.dentry,
69369+ current->exec_file->f_path.mnt) :
69370+ curr->filename, curr->filename,
69371+ &ip_addr, ip_port, type,
69372+ sk->sk_protocol, mode, &current->signal->saved_ip);
69373+ return 0;
69374+ }
69375+
69376+ for (i = 0; i < curr->ip_num; i++) {
69377+ ip = *(curr->ips + i);
69378+ if (ip->iface != NULL) {
69379+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
69380+ p = strchr(iface, ':');
69381+ if (p != NULL)
69382+ *p = '\0';
69383+ dev = dev_get_by_name(sock_net(sk), iface);
69384+ if (dev == NULL)
69385+ continue;
69386+ idev = in_dev_get(dev);
69387+ if (idev == NULL) {
69388+ dev_put(dev);
69389+ continue;
69390+ }
69391+ rcu_read_lock();
69392+ for_ifa(idev) {
69393+ if (!strcmp(ip->iface, ifa->ifa_label)) {
69394+ our_addr = ifa->ifa_address;
69395+ our_netmask = 0xffffffff;
69396+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
69397+ if (ret == 1) {
69398+ rcu_read_unlock();
69399+ in_dev_put(idev);
69400+ dev_put(dev);
69401+ return 0;
69402+ } else if (ret == 2) {
69403+ rcu_read_unlock();
69404+ in_dev_put(idev);
69405+ dev_put(dev);
69406+ goto denied;
69407+ }
69408+ }
69409+ } endfor_ifa(idev);
69410+ rcu_read_unlock();
69411+ in_dev_put(idev);
69412+ dev_put(dev);
69413+ } else {
69414+ our_addr = ip->addr;
69415+ our_netmask = ip->netmask;
69416+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
69417+ if (ret == 1)
69418+ return 0;
69419+ else if (ret == 2)
69420+ goto denied;
69421+ }
69422+ }
69423+
69424+denied:
69425+ if (mode == GR_BIND)
69426+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
69427+ else if (mode == GR_CONNECT)
69428+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
69429+
69430+ return -EACCES;
69431+}
69432+
69433+int
69434+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
69435+{
69436+ /* always allow disconnection of dgram sockets with connect */
69437+ if (addr->sin_family == AF_UNSPEC)
69438+ return 0;
69439+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
69440+}
69441+
69442+int
69443+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
69444+{
69445+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
69446+}
69447+
69448+int gr_search_listen(struct socket *sock)
69449+{
69450+ struct sock *sk = sock->sk;
69451+ struct sockaddr_in addr;
69452+
69453+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
69454+ addr.sin_port = inet_sk(sk)->inet_sport;
69455+
69456+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
69457+}
69458+
69459+int gr_search_accept(struct socket *sock)
69460+{
69461+ struct sock *sk = sock->sk;
69462+ struct sockaddr_in addr;
69463+
69464+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
69465+ addr.sin_port = inet_sk(sk)->inet_sport;
69466+
69467+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
69468+}
69469+
69470+int
69471+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
69472+{
69473+ if (addr)
69474+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
69475+ else {
69476+ struct sockaddr_in sin;
69477+ const struct inet_sock *inet = inet_sk(sk);
69478+
69479+ sin.sin_addr.s_addr = inet->inet_daddr;
69480+ sin.sin_port = inet->inet_dport;
69481+
69482+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
69483+ }
69484+}
69485+
69486+int
69487+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
69488+{
69489+ struct sockaddr_in sin;
69490+
69491+ if (unlikely(skb->len < sizeof (struct udphdr)))
69492+ return 0; // skip this packet
69493+
69494+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
69495+ sin.sin_port = udp_hdr(skb)->source;
69496+
69497+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
69498+}
69499diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
69500new file mode 100644
69501index 0000000..25f54ef
69502--- /dev/null
69503+++ b/grsecurity/gracl_learn.c
69504@@ -0,0 +1,207 @@
69505+#include <linux/kernel.h>
69506+#include <linux/mm.h>
69507+#include <linux/sched.h>
69508+#include <linux/poll.h>
69509+#include <linux/string.h>
69510+#include <linux/file.h>
69511+#include <linux/types.h>
69512+#include <linux/vmalloc.h>
69513+#include <linux/grinternal.h>
69514+
69515+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
69516+ size_t count, loff_t *ppos);
69517+extern int gr_acl_is_enabled(void);
69518+
69519+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
69520+static int gr_learn_attached;
69521+
69522+/* use a 512k buffer */
69523+#define LEARN_BUFFER_SIZE (512 * 1024)
69524+
69525+static DEFINE_SPINLOCK(gr_learn_lock);
69526+static DEFINE_MUTEX(gr_learn_user_mutex);
69527+
69528+/* we need to maintain two buffers, so that the kernel context of grlearn
69529+ uses a semaphore around the userspace copying, and the other kernel contexts
69530+ use a spinlock when copying into the buffer, since they cannot sleep
69531+*/
69532+static char *learn_buffer;
69533+static char *learn_buffer_user;
69534+static int learn_buffer_len;
69535+static int learn_buffer_user_len;
69536+
69537+static ssize_t
69538+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
69539+{
69540+ DECLARE_WAITQUEUE(wait, current);
69541+ ssize_t retval = 0;
69542+
69543+ add_wait_queue(&learn_wait, &wait);
69544+ set_current_state(TASK_INTERRUPTIBLE);
69545+ do {
69546+ mutex_lock(&gr_learn_user_mutex);
69547+ spin_lock(&gr_learn_lock);
69548+ if (learn_buffer_len)
69549+ break;
69550+ spin_unlock(&gr_learn_lock);
69551+ mutex_unlock(&gr_learn_user_mutex);
69552+ if (file->f_flags & O_NONBLOCK) {
69553+ retval = -EAGAIN;
69554+ goto out;
69555+ }
69556+ if (signal_pending(current)) {
69557+ retval = -ERESTARTSYS;
69558+ goto out;
69559+ }
69560+
69561+ schedule();
69562+ } while (1);
69563+
69564+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
69565+ learn_buffer_user_len = learn_buffer_len;
69566+ retval = learn_buffer_len;
69567+ learn_buffer_len = 0;
69568+
69569+ spin_unlock(&gr_learn_lock);
69570+
69571+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
69572+ retval = -EFAULT;
69573+
69574+ mutex_unlock(&gr_learn_user_mutex);
69575+out:
69576+ set_current_state(TASK_RUNNING);
69577+ remove_wait_queue(&learn_wait, &wait);
69578+ return retval;
69579+}
69580+
69581+static unsigned int
69582+poll_learn(struct file * file, poll_table * wait)
69583+{
69584+ poll_wait(file, &learn_wait, wait);
69585+
69586+ if (learn_buffer_len)
69587+ return (POLLIN | POLLRDNORM);
69588+
69589+ return 0;
69590+}
69591+
69592+void
69593+gr_clear_learn_entries(void)
69594+{
69595+ char *tmp;
69596+
69597+ mutex_lock(&gr_learn_user_mutex);
69598+ spin_lock(&gr_learn_lock);
69599+ tmp = learn_buffer;
69600+ learn_buffer = NULL;
69601+ spin_unlock(&gr_learn_lock);
69602+ if (tmp)
69603+ vfree(tmp);
69604+ if (learn_buffer_user != NULL) {
69605+ vfree(learn_buffer_user);
69606+ learn_buffer_user = NULL;
69607+ }
69608+ learn_buffer_len = 0;
69609+ mutex_unlock(&gr_learn_user_mutex);
69610+
69611+ return;
69612+}
69613+
69614+void
69615+gr_add_learn_entry(const char *fmt, ...)
69616+{
69617+ va_list args;
69618+ unsigned int len;
69619+
69620+ if (!gr_learn_attached)
69621+ return;
69622+
69623+ spin_lock(&gr_learn_lock);
69624+
69625+ /* leave a gap at the end so we know when it's "full" but don't have to
69626+ compute the exact length of the string we're trying to append
69627+ */
69628+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
69629+ spin_unlock(&gr_learn_lock);
69630+ wake_up_interruptible(&learn_wait);
69631+ return;
69632+ }
69633+ if (learn_buffer == NULL) {
69634+ spin_unlock(&gr_learn_lock);
69635+ return;
69636+ }
69637+
69638+ va_start(args, fmt);
69639+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
69640+ va_end(args);
69641+
69642+ learn_buffer_len += len + 1;
69643+
69644+ spin_unlock(&gr_learn_lock);
69645+ wake_up_interruptible(&learn_wait);
69646+
69647+ return;
69648+}
69649+
69650+static int
69651+open_learn(struct inode *inode, struct file *file)
69652+{
69653+ if (file->f_mode & FMODE_READ && gr_learn_attached)
69654+ return -EBUSY;
69655+ if (file->f_mode & FMODE_READ) {
69656+ int retval = 0;
69657+ mutex_lock(&gr_learn_user_mutex);
69658+ if (learn_buffer == NULL)
69659+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
69660+ if (learn_buffer_user == NULL)
69661+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
69662+ if (learn_buffer == NULL) {
69663+ retval = -ENOMEM;
69664+ goto out_error;
69665+ }
69666+ if (learn_buffer_user == NULL) {
69667+ retval = -ENOMEM;
69668+ goto out_error;
69669+ }
69670+ learn_buffer_len = 0;
69671+ learn_buffer_user_len = 0;
69672+ gr_learn_attached = 1;
69673+out_error:
69674+ mutex_unlock(&gr_learn_user_mutex);
69675+ return retval;
69676+ }
69677+ return 0;
69678+}
69679+
69680+static int
69681+close_learn(struct inode *inode, struct file *file)
69682+{
69683+ if (file->f_mode & FMODE_READ) {
69684+ char *tmp = NULL;
69685+ mutex_lock(&gr_learn_user_mutex);
69686+ spin_lock(&gr_learn_lock);
69687+ tmp = learn_buffer;
69688+ learn_buffer = NULL;
69689+ spin_unlock(&gr_learn_lock);
69690+ if (tmp)
69691+ vfree(tmp);
69692+ if (learn_buffer_user != NULL) {
69693+ vfree(learn_buffer_user);
69694+ learn_buffer_user = NULL;
69695+ }
69696+ learn_buffer_len = 0;
69697+ learn_buffer_user_len = 0;
69698+ gr_learn_attached = 0;
69699+ mutex_unlock(&gr_learn_user_mutex);
69700+ }
69701+
69702+ return 0;
69703+}
69704+
69705+const struct file_operations grsec_fops = {
69706+ .read = read_learn,
69707+ .write = write_grsec_handler,
69708+ .open = open_learn,
69709+ .release = close_learn,
69710+ .poll = poll_learn,
69711+};
69712diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
69713new file mode 100644
69714index 0000000..361a099
69715--- /dev/null
69716+++ b/grsecurity/gracl_policy.c
69717@@ -0,0 +1,1782 @@
69718+#include <linux/kernel.h>
69719+#include <linux/module.h>
69720+#include <linux/sched.h>
69721+#include <linux/mm.h>
69722+#include <linux/file.h>
69723+#include <linux/fs.h>
69724+#include <linux/namei.h>
69725+#include <linux/mount.h>
69726+#include <linux/tty.h>
69727+#include <linux/proc_fs.h>
69728+#include <linux/lglock.h>
69729+#include <linux/slab.h>
69730+#include <linux/vmalloc.h>
69731+#include <linux/types.h>
69732+#include <linux/sysctl.h>
69733+#include <linux/netdevice.h>
69734+#include <linux/ptrace.h>
69735+#include <linux/gracl.h>
69736+#include <linux/gralloc.h>
69737+#include <linux/security.h>
69738+#include <linux/grinternal.h>
69739+#include <linux/pid_namespace.h>
69740+#include <linux/stop_machine.h>
69741+#include <linux/fdtable.h>
69742+#include <linux/percpu.h>
69743+#include <linux/lglock.h>
69744+#include <linux/hugetlb.h>
69745+#include <linux/posix-timers.h>
69746+#include "../fs/mount.h"
69747+
69748+#include <asm/uaccess.h>
69749+#include <asm/errno.h>
69750+#include <asm/mman.h>
69751+
69752+extern struct gr_policy_state *polstate;
69753+
69754+#define FOR_EACH_ROLE_START(role) \
69755+ role = polstate->role_list; \
69756+ while (role) {
69757+
69758+#define FOR_EACH_ROLE_END(role) \
69759+ role = role->prev; \
69760+ }
69761+
69762+struct path gr_real_root;
69763+
69764+extern struct gr_alloc_state *current_alloc_state;
69765+
69766+u16 acl_sp_role_value;
69767+
69768+static DEFINE_MUTEX(gr_dev_mutex);
69769+
69770+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
69771+extern void gr_clear_learn_entries(void);
69772+
69773+static struct gr_arg gr_usermode;
69774+static unsigned char gr_system_salt[GR_SALT_LEN];
69775+static unsigned char gr_system_sum[GR_SHA_LEN];
69776+
69777+static unsigned int gr_auth_attempts = 0;
69778+static unsigned long gr_auth_expires = 0UL;
69779+
69780+struct acl_object_label *fakefs_obj_rw;
69781+struct acl_object_label *fakefs_obj_rwx;
69782+
69783+extern int gr_init_uidset(void);
69784+extern void gr_free_uidset(void);
69785+extern void gr_remove_uid(uid_t uid);
69786+extern int gr_find_uid(uid_t uid);
69787+
69788+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
69789+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
69790+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
69791+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
69792+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
69793+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
69794+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
69795+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
69796+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
69797+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
69798+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
69799+extern void assign_special_role(const char *rolename);
69800+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
69801+extern int gr_rbac_disable(void *unused);
69802+extern void gr_enable_rbac_system(void);
69803+
69804+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
69805+{
69806+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
69807+ return -EFAULT;
69808+
69809+ return 0;
69810+}
69811+
69812+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
69813+{
69814+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
69815+ return -EFAULT;
69816+
69817+ return 0;
69818+}
69819+
69820+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
69821+{
69822+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
69823+ return -EFAULT;
69824+
69825+ return 0;
69826+}
69827+
69828+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
69829+{
69830+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
69831+ return -EFAULT;
69832+
69833+ return 0;
69834+}
69835+
69836+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
69837+{
69838+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
69839+ return -EFAULT;
69840+
69841+ return 0;
69842+}
69843+
69844+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
69845+{
69846+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
69847+ return -EFAULT;
69848+
69849+ return 0;
69850+}
69851+
69852+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
69853+{
69854+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
69855+ return -EFAULT;
69856+
69857+ return 0;
69858+}
69859+
69860+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
69861+{
69862+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
69863+ return -EFAULT;
69864+
69865+ return 0;
69866+}
69867+
69868+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
69869+{
69870+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
69871+ return -EFAULT;
69872+
69873+ return 0;
69874+}
69875+
69876+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
69877+{
69878+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
69879+ return -EFAULT;
69880+
69881+ if (((uwrap->version != GRSECURITY_VERSION) &&
69882+ (uwrap->version != 0x2901)) ||
69883+ (uwrap->size != sizeof(struct gr_arg)))
69884+ return -EINVAL;
69885+
69886+ return 0;
69887+}
69888+
69889+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
69890+{
69891+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
69892+ return -EFAULT;
69893+
69894+ return 0;
69895+}
69896+
69897+static size_t get_gr_arg_wrapper_size_normal(void)
69898+{
69899+ return sizeof(struct gr_arg_wrapper);
69900+}
69901+
69902+#ifdef CONFIG_COMPAT
69903+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
69904+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
69905+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
69906+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
69907+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
69908+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
69909+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
69910+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
69911+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
69912+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
69913+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
69914+extern size_t get_gr_arg_wrapper_size_compat(void);
69915+
69916+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
69917+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
69918+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
69919+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
69920+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
69921+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
69922+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
69923+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
69924+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
69925+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
69926+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
69927+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
69928+
69929+#else
69930+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
69931+#define copy_gr_arg copy_gr_arg_normal
69932+#define copy_gr_hash_struct copy_gr_hash_struct_normal
69933+#define copy_acl_object_label copy_acl_object_label_normal
69934+#define copy_acl_subject_label copy_acl_subject_label_normal
69935+#define copy_acl_role_label copy_acl_role_label_normal
69936+#define copy_acl_ip_label copy_acl_ip_label_normal
69937+#define copy_pointer_from_array copy_pointer_from_array_normal
69938+#define copy_sprole_pw copy_sprole_pw_normal
69939+#define copy_role_transition copy_role_transition_normal
69940+#define copy_role_allowed_ip copy_role_allowed_ip_normal
69941+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
69942+#endif
69943+
69944+static struct acl_subject_label *
69945+lookup_subject_map(const struct acl_subject_label *userp)
69946+{
69947+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
69948+ struct subject_map *match;
69949+
69950+ match = polstate->subj_map_set.s_hash[index];
69951+
69952+ while (match && match->user != userp)
69953+ match = match->next;
69954+
69955+ if (match != NULL)
69956+ return match->kernel;
69957+ else
69958+ return NULL;
69959+}
69960+
69961+static void
69962+insert_subj_map_entry(struct subject_map *subjmap)
69963+{
69964+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
69965+ struct subject_map **curr;
69966+
69967+ subjmap->prev = NULL;
69968+
69969+ curr = &polstate->subj_map_set.s_hash[index];
69970+ if (*curr != NULL)
69971+ (*curr)->prev = subjmap;
69972+
69973+ subjmap->next = *curr;
69974+ *curr = subjmap;
69975+
69976+ return;
69977+}
69978+
69979+static void
69980+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
69981+{
69982+ unsigned int index =
69983+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
69984+ struct acl_role_label **curr;
69985+ struct acl_role_label *tmp, *tmp2;
69986+
69987+ curr = &polstate->acl_role_set.r_hash[index];
69988+
69989+ /* simple case, slot is empty, just set it to our role */
69990+ if (*curr == NULL) {
69991+ *curr = role;
69992+ } else {
69993+ /* example:
69994+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
69995+ 2 -> 3
69996+ */
69997+ /* first check to see if we can already be reached via this slot */
69998+ tmp = *curr;
69999+ while (tmp && tmp != role)
70000+ tmp = tmp->next;
70001+ if (tmp == role) {
70002+ /* we don't need to add ourselves to this slot's chain */
70003+ return;
70004+ }
70005+ /* we need to add ourselves to this chain, two cases */
70006+ if (role->next == NULL) {
70007+ /* simple case, append the current chain to our role */
70008+ role->next = *curr;
70009+ *curr = role;
70010+ } else {
70011+ /* 1 -> 2 -> 3 -> 4
70012+ 2 -> 3 -> 4
70013+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
70014+ */
70015+ /* trickier case: walk our role's chain until we find
70016+ the role for the start of the current slot's chain */
70017+ tmp = role;
70018+ tmp2 = *curr;
70019+ while (tmp->next && tmp->next != tmp2)
70020+ tmp = tmp->next;
70021+ if (tmp->next == tmp2) {
70022+ /* from example above, we found 3, so just
70023+ replace this slot's chain with ours */
70024+ *curr = role;
70025+ } else {
70026+ /* we didn't find a subset of our role's chain
70027+ in the current slot's chain, so append their
70028+ chain to ours, and set us as the first role in
70029+ the slot's chain
70030+
70031+ we could fold this case with the case above,
70032+ but making it explicit for clarity
70033+ */
70034+ tmp->next = tmp2;
70035+ *curr = role;
70036+ }
70037+ }
70038+ }
70039+
70040+ return;
70041+}
70042+
70043+static void
70044+insert_acl_role_label(struct acl_role_label *role)
70045+{
70046+ int i;
70047+
70048+ if (polstate->role_list == NULL) {
70049+ polstate->role_list = role;
70050+ role->prev = NULL;
70051+ } else {
70052+ role->prev = polstate->role_list;
70053+ polstate->role_list = role;
70054+ }
70055+
70056+ /* used for hash chains */
70057+ role->next = NULL;
70058+
70059+ if (role->roletype & GR_ROLE_DOMAIN) {
70060+ for (i = 0; i < role->domain_child_num; i++)
70061+ __insert_acl_role_label(role, role->domain_children[i]);
70062+ } else
70063+ __insert_acl_role_label(role, role->uidgid);
70064+}
70065+
70066+static int
70067+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
70068+{
70069+ struct name_entry **curr, *nentry;
70070+ struct inodev_entry *ientry;
70071+ unsigned int len = strlen(name);
70072+ unsigned int key = full_name_hash(name, len);
70073+ unsigned int index = key % polstate->name_set.n_size;
70074+
70075+ curr = &polstate->name_set.n_hash[index];
70076+
70077+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
70078+ curr = &((*curr)->next);
70079+
70080+ if (*curr != NULL)
70081+ return 1;
70082+
70083+ nentry = acl_alloc(sizeof (struct name_entry));
70084+ if (nentry == NULL)
70085+ return 0;
70086+ ientry = acl_alloc(sizeof (struct inodev_entry));
70087+ if (ientry == NULL)
70088+ return 0;
70089+ ientry->nentry = nentry;
70090+
70091+ nentry->key = key;
70092+ nentry->name = name;
70093+ nentry->inode = inode;
70094+ nentry->device = device;
70095+ nentry->len = len;
70096+ nentry->deleted = deleted;
70097+
70098+ nentry->prev = NULL;
70099+ curr = &polstate->name_set.n_hash[index];
70100+ if (*curr != NULL)
70101+ (*curr)->prev = nentry;
70102+ nentry->next = *curr;
70103+ *curr = nentry;
70104+
70105+ /* insert us into the table searchable by inode/dev */
70106+ __insert_inodev_entry(polstate, ientry);
70107+
70108+ return 1;
70109+}
70110+
70111+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
70112+
70113+static void *
70114+create_table(__u32 * len, int elementsize)
70115+{
70116+ unsigned int table_sizes[] = {
70117+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
70118+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
70119+ 4194301, 8388593, 16777213, 33554393, 67108859
70120+ };
70121+ void *newtable = NULL;
70122+ unsigned int pwr = 0;
70123+
70124+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
70125+ table_sizes[pwr] <= *len)
70126+ pwr++;
70127+
70128+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
70129+ return newtable;
70130+
70131+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
70132+ newtable =
70133+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
70134+ else
70135+ newtable = vmalloc(table_sizes[pwr] * elementsize);
70136+
70137+ *len = table_sizes[pwr];
70138+
70139+ return newtable;
70140+}
70141+
70142+static int
70143+init_variables(const struct gr_arg *arg, bool reload)
70144+{
70145+ struct task_struct *reaper = init_pid_ns.child_reaper;
70146+ unsigned int stacksize;
70147+
70148+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
70149+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
70150+ polstate->name_set.n_size = arg->role_db.num_objects;
70151+ polstate->inodev_set.i_size = arg->role_db.num_objects;
70152+
70153+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
70154+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
70155+ return 1;
70156+
70157+ if (!reload) {
70158+ if (!gr_init_uidset())
70159+ return 1;
70160+ }
70161+
70162+ /* set up the stack that holds allocation info */
70163+
70164+ stacksize = arg->role_db.num_pointers + 5;
70165+
70166+ if (!acl_alloc_stack_init(stacksize))
70167+ return 1;
70168+
70169+ if (!reload) {
70170+ /* grab reference for the real root dentry and vfsmount */
70171+ get_fs_root(reaper->fs, &gr_real_root);
70172+
70173+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70174+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
70175+#endif
70176+
70177+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
70178+ if (fakefs_obj_rw == NULL)
70179+ return 1;
70180+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
70181+
70182+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
70183+ if (fakefs_obj_rwx == NULL)
70184+ return 1;
70185+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
70186+ }
70187+
70188+ polstate->subj_map_set.s_hash =
70189+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
70190+ polstate->acl_role_set.r_hash =
70191+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
70192+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
70193+ polstate->inodev_set.i_hash =
70194+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
70195+
70196+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
70197+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
70198+ return 1;
70199+
70200+ memset(polstate->subj_map_set.s_hash, 0,
70201+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
70202+ memset(polstate->acl_role_set.r_hash, 0,
70203+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
70204+ memset(polstate->name_set.n_hash, 0,
70205+ sizeof (struct name_entry *) * polstate->name_set.n_size);
70206+ memset(polstate->inodev_set.i_hash, 0,
70207+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
70208+
70209+ return 0;
70210+}
70211+
70212+/* free information not needed after startup
70213+ currently contains user->kernel pointer mappings for subjects
70214+*/
70215+
70216+static void
70217+free_init_variables(void)
70218+{
70219+ __u32 i;
70220+
70221+ if (polstate->subj_map_set.s_hash) {
70222+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
70223+ if (polstate->subj_map_set.s_hash[i]) {
70224+ kfree(polstate->subj_map_set.s_hash[i]);
70225+ polstate->subj_map_set.s_hash[i] = NULL;
70226+ }
70227+ }
70228+
70229+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
70230+ PAGE_SIZE)
70231+ kfree(polstate->subj_map_set.s_hash);
70232+ else
70233+ vfree(polstate->subj_map_set.s_hash);
70234+ }
70235+
70236+ return;
70237+}
70238+
70239+static void
70240+free_variables(bool reload)
70241+{
70242+ struct acl_subject_label *s;
70243+ struct acl_role_label *r;
70244+ struct task_struct *task, *task2;
70245+ unsigned int x;
70246+
70247+ if (!reload) {
70248+ gr_clear_learn_entries();
70249+
70250+ read_lock(&tasklist_lock);
70251+ do_each_thread(task2, task) {
70252+ task->acl_sp_role = 0;
70253+ task->acl_role_id = 0;
70254+ task->inherited = 0;
70255+ task->acl = NULL;
70256+ task->role = NULL;
70257+ } while_each_thread(task2, task);
70258+ read_unlock(&tasklist_lock);
70259+
70260+ kfree(fakefs_obj_rw);
70261+ fakefs_obj_rw = NULL;
70262+ kfree(fakefs_obj_rwx);
70263+ fakefs_obj_rwx = NULL;
70264+
70265+ /* release the reference to the real root dentry and vfsmount */
70266+ path_put(&gr_real_root);
70267+ memset(&gr_real_root, 0, sizeof(gr_real_root));
70268+ }
70269+
70270+ /* free all object hash tables */
70271+
70272+ FOR_EACH_ROLE_START(r)
70273+ if (r->subj_hash == NULL)
70274+ goto next_role;
70275+ FOR_EACH_SUBJECT_START(r, s, x)
70276+ if (s->obj_hash == NULL)
70277+ break;
70278+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
70279+ kfree(s->obj_hash);
70280+ else
70281+ vfree(s->obj_hash);
70282+ FOR_EACH_SUBJECT_END(s, x)
70283+ FOR_EACH_NESTED_SUBJECT_START(r, s)
70284+ if (s->obj_hash == NULL)
70285+ break;
70286+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
70287+ kfree(s->obj_hash);
70288+ else
70289+ vfree(s->obj_hash);
70290+ FOR_EACH_NESTED_SUBJECT_END(s)
70291+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
70292+ kfree(r->subj_hash);
70293+ else
70294+ vfree(r->subj_hash);
70295+ r->subj_hash = NULL;
70296+next_role:
70297+ FOR_EACH_ROLE_END(r)
70298+
70299+ acl_free_all();
70300+
70301+ if (polstate->acl_role_set.r_hash) {
70302+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
70303+ PAGE_SIZE)
70304+ kfree(polstate->acl_role_set.r_hash);
70305+ else
70306+ vfree(polstate->acl_role_set.r_hash);
70307+ }
70308+ if (polstate->name_set.n_hash) {
70309+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
70310+ PAGE_SIZE)
70311+ kfree(polstate->name_set.n_hash);
70312+ else
70313+ vfree(polstate->name_set.n_hash);
70314+ }
70315+
70316+ if (polstate->inodev_set.i_hash) {
70317+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
70318+ PAGE_SIZE)
70319+ kfree(polstate->inodev_set.i_hash);
70320+ else
70321+ vfree(polstate->inodev_set.i_hash);
70322+ }
70323+
70324+ if (!reload)
70325+ gr_free_uidset();
70326+
70327+ memset(&polstate->name_set, 0, sizeof (struct name_db));
70328+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
70329+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
70330+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
70331+
70332+ polstate->default_role = NULL;
70333+ polstate->kernel_role = NULL;
70334+ polstate->role_list = NULL;
70335+
70336+ return;
70337+}
70338+
70339+static struct acl_subject_label *
70340+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
70341+
70342+static int alloc_and_copy_string(char **name, unsigned int maxlen)
70343+{
70344+ unsigned int len = strnlen_user(*name, maxlen);
70345+ char *tmp;
70346+
70347+ if (!len || len >= maxlen)
70348+ return -EINVAL;
70349+
70350+ if ((tmp = (char *) acl_alloc(len)) == NULL)
70351+ return -ENOMEM;
70352+
70353+ if (copy_from_user(tmp, *name, len))
70354+ return -EFAULT;
70355+
70356+ tmp[len-1] = '\0';
70357+ *name = tmp;
70358+
70359+ return 0;
70360+}
70361+
70362+static int
70363+copy_user_glob(struct acl_object_label *obj)
70364+{
70365+ struct acl_object_label *g_tmp, **guser;
70366+ int error;
70367+
70368+ if (obj->globbed == NULL)
70369+ return 0;
70370+
70371+ guser = &obj->globbed;
70372+ while (*guser) {
70373+ g_tmp = (struct acl_object_label *)
70374+ acl_alloc(sizeof (struct acl_object_label));
70375+ if (g_tmp == NULL)
70376+ return -ENOMEM;
70377+
70378+ if (copy_acl_object_label(g_tmp, *guser))
70379+ return -EFAULT;
70380+
70381+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
70382+ if (error)
70383+ return error;
70384+
70385+ *guser = g_tmp;
70386+ guser = &(g_tmp->next);
70387+ }
70388+
70389+ return 0;
70390+}
70391+
70392+static int
70393+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
70394+ struct acl_role_label *role)
70395+{
70396+ struct acl_object_label *o_tmp;
70397+ int ret;
70398+
70399+ while (userp) {
70400+ if ((o_tmp = (struct acl_object_label *)
70401+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
70402+ return -ENOMEM;
70403+
70404+ if (copy_acl_object_label(o_tmp, userp))
70405+ return -EFAULT;
70406+
70407+ userp = o_tmp->prev;
70408+
70409+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
70410+ if (ret)
70411+ return ret;
70412+
70413+ insert_acl_obj_label(o_tmp, subj);
70414+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
70415+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
70416+ return -ENOMEM;
70417+
70418+ ret = copy_user_glob(o_tmp);
70419+ if (ret)
70420+ return ret;
70421+
70422+ if (o_tmp->nested) {
70423+ int already_copied;
70424+
70425+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
70426+ if (IS_ERR(o_tmp->nested))
70427+ return PTR_ERR(o_tmp->nested);
70428+
70429+ /* insert into nested subject list if we haven't copied this one yet
70430+ to prevent duplicate entries */
70431+ if (!already_copied) {
70432+ o_tmp->nested->next = role->hash->first;
70433+ role->hash->first = o_tmp->nested;
70434+ }
70435+ }
70436+ }
70437+
70438+ return 0;
70439+}
70440+
70441+static __u32
70442+count_user_subjs(struct acl_subject_label *userp)
70443+{
70444+ struct acl_subject_label s_tmp;
70445+ __u32 num = 0;
70446+
70447+ while (userp) {
70448+ if (copy_acl_subject_label(&s_tmp, userp))
70449+ break;
70450+
70451+ userp = s_tmp.prev;
70452+ }
70453+
70454+ return num;
70455+}
70456+
70457+static int
70458+copy_user_allowedips(struct acl_role_label *rolep)
70459+{
70460+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
70461+
70462+ ruserip = rolep->allowed_ips;
70463+
70464+ while (ruserip) {
70465+ rlast = rtmp;
70466+
70467+ if ((rtmp = (struct role_allowed_ip *)
70468+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
70469+ return -ENOMEM;
70470+
70471+ if (copy_role_allowed_ip(rtmp, ruserip))
70472+ return -EFAULT;
70473+
70474+ ruserip = rtmp->prev;
70475+
70476+ if (!rlast) {
70477+ rtmp->prev = NULL;
70478+ rolep->allowed_ips = rtmp;
70479+ } else {
70480+ rlast->next = rtmp;
70481+ rtmp->prev = rlast;
70482+ }
70483+
70484+ if (!ruserip)
70485+ rtmp->next = NULL;
70486+ }
70487+
70488+ return 0;
70489+}
70490+
70491+static int
70492+copy_user_transitions(struct acl_role_label *rolep)
70493+{
70494+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
70495+ int error;
70496+
70497+ rusertp = rolep->transitions;
70498+
70499+ while (rusertp) {
70500+ rlast = rtmp;
70501+
70502+ if ((rtmp = (struct role_transition *)
70503+ acl_alloc(sizeof (struct role_transition))) == NULL)
70504+ return -ENOMEM;
70505+
70506+ if (copy_role_transition(rtmp, rusertp))
70507+ return -EFAULT;
70508+
70509+ rusertp = rtmp->prev;
70510+
70511+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
70512+ if (error)
70513+ return error;
70514+
70515+ if (!rlast) {
70516+ rtmp->prev = NULL;
70517+ rolep->transitions = rtmp;
70518+ } else {
70519+ rlast->next = rtmp;
70520+ rtmp->prev = rlast;
70521+ }
70522+
70523+ if (!rusertp)
70524+ rtmp->next = NULL;
70525+ }
70526+
70527+ return 0;
70528+}
70529+
70530+static __u32 count_user_objs(const struct acl_object_label __user *userp)
70531+{
70532+ struct acl_object_label o_tmp;
70533+ __u32 num = 0;
70534+
70535+ while (userp) {
70536+ if (copy_acl_object_label(&o_tmp, userp))
70537+ break;
70538+
70539+ userp = o_tmp.prev;
70540+ num++;
70541+ }
70542+
70543+ return num;
70544+}
70545+
70546+static struct acl_subject_label *
70547+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
70548+{
70549+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
70550+ __u32 num_objs;
70551+ struct acl_ip_label **i_tmp, *i_utmp2;
70552+ struct gr_hash_struct ghash;
70553+ struct subject_map *subjmap;
70554+ unsigned int i_num;
70555+ int err;
70556+
70557+ if (already_copied != NULL)
70558+ *already_copied = 0;
70559+
70560+ s_tmp = lookup_subject_map(userp);
70561+
70562+ /* we've already copied this subject into the kernel, just return
70563+ the reference to it, and don't copy it over again
70564+ */
70565+ if (s_tmp) {
70566+ if (already_copied != NULL)
70567+ *already_copied = 1;
70568+ return(s_tmp);
70569+ }
70570+
70571+ if ((s_tmp = (struct acl_subject_label *)
70572+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
70573+ return ERR_PTR(-ENOMEM);
70574+
70575+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
70576+ if (subjmap == NULL)
70577+ return ERR_PTR(-ENOMEM);
70578+
70579+ subjmap->user = userp;
70580+ subjmap->kernel = s_tmp;
70581+ insert_subj_map_entry(subjmap);
70582+
70583+ if (copy_acl_subject_label(s_tmp, userp))
70584+ return ERR_PTR(-EFAULT);
70585+
70586+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
70587+ if (err)
70588+ return ERR_PTR(err);
70589+
70590+ if (!strcmp(s_tmp->filename, "/"))
70591+ role->root_label = s_tmp;
70592+
70593+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
70594+ return ERR_PTR(-EFAULT);
70595+
70596+ /* copy user and group transition tables */
70597+
70598+ if (s_tmp->user_trans_num) {
70599+ uid_t *uidlist;
70600+
70601+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
70602+ if (uidlist == NULL)
70603+ return ERR_PTR(-ENOMEM);
70604+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
70605+ return ERR_PTR(-EFAULT);
70606+
70607+ s_tmp->user_transitions = uidlist;
70608+ }
70609+
70610+ if (s_tmp->group_trans_num) {
70611+ gid_t *gidlist;
70612+
70613+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
70614+ if (gidlist == NULL)
70615+ return ERR_PTR(-ENOMEM);
70616+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
70617+ return ERR_PTR(-EFAULT);
70618+
70619+ s_tmp->group_transitions = gidlist;
70620+ }
70621+
70622+ /* set up object hash table */
70623+ num_objs = count_user_objs(ghash.first);
70624+
70625+ s_tmp->obj_hash_size = num_objs;
70626+ s_tmp->obj_hash =
70627+ (struct acl_object_label **)
70628+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
70629+
70630+ if (!s_tmp->obj_hash)
70631+ return ERR_PTR(-ENOMEM);
70632+
70633+ memset(s_tmp->obj_hash, 0,
70634+ s_tmp->obj_hash_size *
70635+ sizeof (struct acl_object_label *));
70636+
70637+ /* add in objects */
70638+ err = copy_user_objs(ghash.first, s_tmp, role);
70639+
70640+ if (err)
70641+ return ERR_PTR(err);
70642+
70643+ /* set pointer for parent subject */
70644+ if (s_tmp->parent_subject) {
70645+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
70646+
70647+ if (IS_ERR(s_tmp2))
70648+ return s_tmp2;
70649+
70650+ s_tmp->parent_subject = s_tmp2;
70651+ }
70652+
70653+ /* add in ip acls */
70654+
70655+ if (!s_tmp->ip_num) {
70656+ s_tmp->ips = NULL;
70657+ goto insert;
70658+ }
70659+
70660+ i_tmp =
70661+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
70662+ sizeof (struct acl_ip_label *));
70663+
70664+ if (!i_tmp)
70665+ return ERR_PTR(-ENOMEM);
70666+
70667+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
70668+ *(i_tmp + i_num) =
70669+ (struct acl_ip_label *)
70670+ acl_alloc(sizeof (struct acl_ip_label));
70671+ if (!*(i_tmp + i_num))
70672+ return ERR_PTR(-ENOMEM);
70673+
70674+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
70675+ return ERR_PTR(-EFAULT);
70676+
70677+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
70678+ return ERR_PTR(-EFAULT);
70679+
70680+ if ((*(i_tmp + i_num))->iface == NULL)
70681+ continue;
70682+
70683+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
70684+ if (err)
70685+ return ERR_PTR(err);
70686+ }
70687+
70688+ s_tmp->ips = i_tmp;
70689+
70690+insert:
70691+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
70692+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
70693+ return ERR_PTR(-ENOMEM);
70694+
70695+ return s_tmp;
70696+}
70697+
70698+static int
70699+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
70700+{
70701+ struct acl_subject_label s_pre;
70702+ struct acl_subject_label * ret;
70703+ int err;
70704+
70705+ while (userp) {
70706+ if (copy_acl_subject_label(&s_pre, userp))
70707+ return -EFAULT;
70708+
70709+ ret = do_copy_user_subj(userp, role, NULL);
70710+
70711+ err = PTR_ERR(ret);
70712+ if (IS_ERR(ret))
70713+ return err;
70714+
70715+ insert_acl_subj_label(ret, role);
70716+
70717+ userp = s_pre.prev;
70718+ }
70719+
70720+ return 0;
70721+}
70722+
70723+static int
70724+copy_user_acl(struct gr_arg *arg)
70725+{
70726+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
70727+ struct acl_subject_label *subj_list;
70728+ struct sprole_pw *sptmp;
70729+ struct gr_hash_struct *ghash;
70730+ uid_t *domainlist;
70731+ unsigned int r_num;
70732+ int err = 0;
70733+ __u16 i;
70734+ __u32 num_subjs;
70735+
70736+ /* we need a default and kernel role */
70737+ if (arg->role_db.num_roles < 2)
70738+ return -EINVAL;
70739+
70740+ /* copy special role authentication info from userspace */
70741+
70742+ polstate->num_sprole_pws = arg->num_sprole_pws;
70743+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
70744+
70745+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
70746+ return -ENOMEM;
70747+
70748+ for (i = 0; i < polstate->num_sprole_pws; i++) {
70749+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
70750+ if (!sptmp)
70751+ return -ENOMEM;
70752+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
70753+ return -EFAULT;
70754+
70755+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
70756+ if (err)
70757+ return err;
70758+
70759+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70760+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
70761+#endif
70762+
70763+ polstate->acl_special_roles[i] = sptmp;
70764+ }
70765+
70766+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
70767+
70768+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
70769+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
70770+
70771+ if (!r_tmp)
70772+ return -ENOMEM;
70773+
70774+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
70775+ return -EFAULT;
70776+
70777+ if (copy_acl_role_label(r_tmp, r_utmp2))
70778+ return -EFAULT;
70779+
70780+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
70781+ if (err)
70782+ return err;
70783+
70784+ if (!strcmp(r_tmp->rolename, "default")
70785+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
70786+ polstate->default_role = r_tmp;
70787+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
70788+ polstate->kernel_role = r_tmp;
70789+ }
70790+
70791+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
70792+ return -ENOMEM;
70793+
70794+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
70795+ return -EFAULT;
70796+
70797+ r_tmp->hash = ghash;
70798+
70799+ num_subjs = count_user_subjs(r_tmp->hash->first);
70800+
70801+ r_tmp->subj_hash_size = num_subjs;
70802+ r_tmp->subj_hash =
70803+ (struct acl_subject_label **)
70804+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
70805+
70806+ if (!r_tmp->subj_hash)
70807+ return -ENOMEM;
70808+
70809+ err = copy_user_allowedips(r_tmp);
70810+ if (err)
70811+ return err;
70812+
70813+ /* copy domain info */
70814+ if (r_tmp->domain_children != NULL) {
70815+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
70816+ if (domainlist == NULL)
70817+ return -ENOMEM;
70818+
70819+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
70820+ return -EFAULT;
70821+
70822+ r_tmp->domain_children = domainlist;
70823+ }
70824+
70825+ err = copy_user_transitions(r_tmp);
70826+ if (err)
70827+ return err;
70828+
70829+ memset(r_tmp->subj_hash, 0,
70830+ r_tmp->subj_hash_size *
70831+ sizeof (struct acl_subject_label *));
70832+
70833+ /* acquire the list of subjects, then NULL out
70834+ the list prior to parsing the subjects for this role,
70835+ as during this parsing the list is replaced with a list
70836+ of *nested* subjects for the role
70837+ */
70838+ subj_list = r_tmp->hash->first;
70839+
70840+ /* set nested subject list to null */
70841+ r_tmp->hash->first = NULL;
70842+
70843+ err = copy_user_subjs(subj_list, r_tmp);
70844+
70845+ if (err)
70846+ return err;
70847+
70848+ insert_acl_role_label(r_tmp);
70849+ }
70850+
70851+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
70852+ return -EINVAL;
70853+
70854+ return err;
70855+}
70856+
70857+static int gracl_reload_apply_policies(void *reload)
70858+{
70859+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
70860+ struct task_struct *task, *task2;
70861+ struct acl_role_label *role, *rtmp;
70862+ struct acl_subject_label *subj;
70863+ const struct cred *cred;
70864+ int role_applied;
70865+ int ret = 0;
70866+
70867+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
70868+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
70869+
70870+ /* first make sure we'll be able to apply the new policy cleanly */
70871+ do_each_thread(task2, task) {
70872+ if (task->exec_file == NULL)
70873+ continue;
70874+ role_applied = 0;
70875+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
70876+ /* preserve special roles */
70877+ FOR_EACH_ROLE_START(role)
70878+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
70879+ rtmp = task->role;
70880+ task->role = role;
70881+ role_applied = 1;
70882+ break;
70883+ }
70884+ FOR_EACH_ROLE_END(role)
70885+ }
70886+ if (!role_applied) {
70887+ cred = __task_cred(task);
70888+ rtmp = task->role;
70889+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
70890+ }
70891+ /* this handles non-nested inherited subjects, nested subjects will still
70892+ be dropped currently */
70893+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
70894+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
70895+ /* change the role back so that we've made no modifications to the policy */
70896+ task->role = rtmp;
70897+
70898+ if (subj == NULL || task->tmpacl == NULL) {
70899+ ret = -EINVAL;
70900+ goto out;
70901+ }
70902+ } while_each_thread(task2, task);
70903+
70904+ /* now actually apply the policy */
70905+
70906+ do_each_thread(task2, task) {
70907+ if (task->exec_file) {
70908+ role_applied = 0;
70909+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
70910+ /* preserve special roles */
70911+ FOR_EACH_ROLE_START(role)
70912+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
70913+ task->role = role;
70914+ role_applied = 1;
70915+ break;
70916+ }
70917+ FOR_EACH_ROLE_END(role)
70918+ }
70919+ if (!role_applied) {
70920+ cred = __task_cred(task);
70921+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
70922+ }
70923+ /* this handles non-nested inherited subjects, nested subjects will still
70924+ be dropped currently */
70925+ if (!reload_state->oldmode && task->inherited)
70926+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
70927+ else {
70928+ /* looked up and tagged to the task previously */
70929+ subj = task->tmpacl;
70930+ }
70931+ /* subj will be non-null */
70932+ __gr_apply_subject_to_task(polstate, task, subj);
70933+ if (reload_state->oldmode) {
70934+ task->acl_role_id = 0;
70935+ task->acl_sp_role = 0;
70936+ task->inherited = 0;
70937+ }
70938+ } else {
70939+ // it's a kernel process
70940+ task->role = polstate->kernel_role;
70941+ task->acl = polstate->kernel_role->root_label;
70942+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
70943+ task->acl->mode &= ~GR_PROCFIND;
70944+#endif
70945+ }
70946+ } while_each_thread(task2, task);
70947+
70948+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
70949+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
70950+
70951+out:
70952+
70953+ return ret;
70954+}
70955+
70956+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
70957+{
70958+ struct gr_reload_state new_reload_state = { };
70959+ int err;
70960+
70961+ new_reload_state.oldpolicy_ptr = polstate;
70962+ new_reload_state.oldalloc_ptr = current_alloc_state;
70963+ new_reload_state.oldmode = oldmode;
70964+
70965+ current_alloc_state = &new_reload_state.newalloc;
70966+ polstate = &new_reload_state.newpolicy;
70967+
70968+ /* everything relevant is now saved off, copy in the new policy */
70969+ if (init_variables(args, true)) {
70970+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
70971+ err = -ENOMEM;
70972+ goto error;
70973+ }
70974+
70975+ err = copy_user_acl(args);
70976+ free_init_variables();
70977+ if (err)
70978+ goto error;
70979+ /* the new policy is copied in, with the old policy available via saved_state
70980+ first go through applying roles, making sure to preserve special roles
70981+ then apply new subjects, making sure to preserve inherited and nested subjects,
70982+ though currently only inherited subjects will be preserved
70983+ */
70984+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
70985+ if (err)
70986+ goto error;
70987+
70988+ /* we've now applied the new policy, so restore the old policy state to free it */
70989+ polstate = &new_reload_state.oldpolicy;
70990+ current_alloc_state = &new_reload_state.oldalloc;
70991+ free_variables(true);
70992+
70993+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
70994+ to running_polstate/current_alloc_state inside stop_machine
70995+ */
70996+ err = 0;
70997+ goto out;
70998+error:
70999+ /* on error of loading the new policy, we'll just keep the previous
71000+ policy set around
71001+ */
71002+ free_variables(true);
71003+
71004+ /* doesn't affect runtime, but maintains consistent state */
71005+out:
71006+ polstate = new_reload_state.oldpolicy_ptr;
71007+ current_alloc_state = new_reload_state.oldalloc_ptr;
71008+
71009+ return err;
71010+}
71011+
71012+static int
71013+gracl_init(struct gr_arg *args)
71014+{
71015+ int error = 0;
71016+
71017+ memcpy(&gr_system_salt, args->salt, sizeof(gr_system_salt));
71018+ memcpy(&gr_system_sum, args->sum, sizeof(gr_system_sum));
71019+
71020+ if (init_variables(args, false)) {
71021+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
71022+ error = -ENOMEM;
71023+ goto out;
71024+ }
71025+
71026+ error = copy_user_acl(args);
71027+ free_init_variables();
71028+ if (error)
71029+ goto out;
71030+
71031+ error = gr_set_acls(0);
71032+ if (error)
71033+ goto out;
71034+
71035+ gr_enable_rbac_system();
71036+
71037+ return 0;
71038+
71039+out:
71040+ free_variables(false);
71041+ return error;
71042+}
71043+
71044+static int
71045+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
71046+ unsigned char **sum)
71047+{
71048+ struct acl_role_label *r;
71049+ struct role_allowed_ip *ipp;
71050+ struct role_transition *trans;
71051+ unsigned int i;
71052+ int found = 0;
71053+ u32 curr_ip = current->signal->curr_ip;
71054+
71055+ current->signal->saved_ip = curr_ip;
71056+
71057+ /* check transition table */
71058+
71059+ for (trans = current->role->transitions; trans; trans = trans->next) {
71060+ if (!strcmp(rolename, trans->rolename)) {
71061+ found = 1;
71062+ break;
71063+ }
71064+ }
71065+
71066+ if (!found)
71067+ return 0;
71068+
71069+ /* handle special roles that do not require authentication
71070+ and check ip */
71071+
71072+ FOR_EACH_ROLE_START(r)
71073+ if (!strcmp(rolename, r->rolename) &&
71074+ (r->roletype & GR_ROLE_SPECIAL)) {
71075+ found = 0;
71076+ if (r->allowed_ips != NULL) {
71077+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
71078+ if ((ntohl(curr_ip) & ipp->netmask) ==
71079+ (ntohl(ipp->addr) & ipp->netmask))
71080+ found = 1;
71081+ }
71082+ } else
71083+ found = 2;
71084+ if (!found)
71085+ return 0;
71086+
71087+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
71088+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
71089+ *salt = NULL;
71090+ *sum = NULL;
71091+ return 1;
71092+ }
71093+ }
71094+ FOR_EACH_ROLE_END(r)
71095+
71096+ for (i = 0; i < polstate->num_sprole_pws; i++) {
71097+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
71098+ *salt = polstate->acl_special_roles[i]->salt;
71099+ *sum = polstate->acl_special_roles[i]->sum;
71100+ return 1;
71101+ }
71102+ }
71103+
71104+ return 0;
71105+}
71106+
71107+int gr_check_secure_terminal(struct task_struct *task)
71108+{
71109+ struct task_struct *p, *p2, *p3;
71110+ struct files_struct *files;
71111+ struct fdtable *fdt;
71112+ struct file *our_file = NULL, *file;
71113+ int i;
71114+
71115+ if (task->signal->tty == NULL)
71116+ return 1;
71117+
71118+ files = get_files_struct(task);
71119+ if (files != NULL) {
71120+ rcu_read_lock();
71121+ fdt = files_fdtable(files);
71122+ for (i=0; i < fdt->max_fds; i++) {
71123+ file = fcheck_files(files, i);
71124+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
71125+ get_file(file);
71126+ our_file = file;
71127+ }
71128+ }
71129+ rcu_read_unlock();
71130+ put_files_struct(files);
71131+ }
71132+
71133+ if (our_file == NULL)
71134+ return 1;
71135+
71136+ read_lock(&tasklist_lock);
71137+ do_each_thread(p2, p) {
71138+ files = get_files_struct(p);
71139+ if (files == NULL ||
71140+ (p->signal && p->signal->tty == task->signal->tty)) {
71141+ if (files != NULL)
71142+ put_files_struct(files);
71143+ continue;
71144+ }
71145+ rcu_read_lock();
71146+ fdt = files_fdtable(files);
71147+ for (i=0; i < fdt->max_fds; i++) {
71148+ file = fcheck_files(files, i);
71149+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
71150+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
71151+ p3 = task;
71152+ while (task_pid_nr(p3) > 0) {
71153+ if (p3 == p)
71154+ break;
71155+ p3 = p3->real_parent;
71156+ }
71157+ if (p3 == p)
71158+ break;
71159+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
71160+ gr_handle_alertkill(p);
71161+ rcu_read_unlock();
71162+ put_files_struct(files);
71163+ read_unlock(&tasklist_lock);
71164+ fput(our_file);
71165+ return 0;
71166+ }
71167+ }
71168+ rcu_read_unlock();
71169+ put_files_struct(files);
71170+ } while_each_thread(p2, p);
71171+ read_unlock(&tasklist_lock);
71172+
71173+ fput(our_file);
71174+ return 1;
71175+}
71176+
71177+ssize_t
71178+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
71179+{
71180+ struct gr_arg_wrapper uwrap;
71181+ unsigned char *sprole_salt = NULL;
71182+ unsigned char *sprole_sum = NULL;
71183+ int error = 0;
71184+ int error2 = 0;
71185+ size_t req_count = 0;
71186+ unsigned char oldmode = 0;
71187+
71188+ mutex_lock(&gr_dev_mutex);
71189+
71190+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
71191+ error = -EPERM;
71192+ goto out;
71193+ }
71194+
71195+#ifdef CONFIG_COMPAT
71196+ pax_open_kernel();
71197+ if (is_compat_task()) {
71198+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
71199+ copy_gr_arg = &copy_gr_arg_compat;
71200+ copy_acl_object_label = &copy_acl_object_label_compat;
71201+ copy_acl_subject_label = &copy_acl_subject_label_compat;
71202+ copy_acl_role_label = &copy_acl_role_label_compat;
71203+ copy_acl_ip_label = &copy_acl_ip_label_compat;
71204+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
71205+ copy_role_transition = &copy_role_transition_compat;
71206+ copy_sprole_pw = &copy_sprole_pw_compat;
71207+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
71208+ copy_pointer_from_array = &copy_pointer_from_array_compat;
71209+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
71210+ } else {
71211+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
71212+ copy_gr_arg = &copy_gr_arg_normal;
71213+ copy_acl_object_label = &copy_acl_object_label_normal;
71214+ copy_acl_subject_label = &copy_acl_subject_label_normal;
71215+ copy_acl_role_label = &copy_acl_role_label_normal;
71216+ copy_acl_ip_label = &copy_acl_ip_label_normal;
71217+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
71218+ copy_role_transition = &copy_role_transition_normal;
71219+ copy_sprole_pw = &copy_sprole_pw_normal;
71220+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
71221+ copy_pointer_from_array = &copy_pointer_from_array_normal;
71222+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
71223+ }
71224+ pax_close_kernel();
71225+#endif
71226+
71227+ req_count = get_gr_arg_wrapper_size();
71228+
71229+ if (count != req_count) {
71230+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
71231+ error = -EINVAL;
71232+ goto out;
71233+ }
71234+
71235+
71236+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
71237+ gr_auth_expires = 0;
71238+ gr_auth_attempts = 0;
71239+ }
71240+
71241+ error = copy_gr_arg_wrapper(buf, &uwrap);
71242+ if (error)
71243+ goto out;
71244+
71245+ error = copy_gr_arg(uwrap.arg, &gr_usermode);
71246+ if (error)
71247+ goto out;
71248+
71249+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_SPROLEPAM &&
71250+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
71251+ time_after(gr_auth_expires, get_seconds())) {
71252+ error = -EBUSY;
71253+ goto out;
71254+ }
71255+
71256+ /* if non-root trying to do anything other than use a special role,
71257+ do not attempt authentication, do not count towards authentication
71258+ locking
71259+ */
71260+
71261+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_STATUS &&
71262+ gr_usermode.mode != GR_UNSPROLE && gr_usermode.mode != GR_SPROLEPAM &&
71263+ gr_is_global_nonroot(current_uid())) {
71264+ error = -EPERM;
71265+ goto out;
71266+ }
71267+
71268+ /* ensure pw and special role name are null terminated */
71269+
71270+ gr_usermode.pw[GR_PW_LEN - 1] = '\0';
71271+ gr_usermode.sp_role[GR_SPROLE_LEN - 1] = '\0';
71272+
71273+ /* Okay.
71274+ * We have our enough of the argument structure..(we have yet
71275+ * to copy_from_user the tables themselves) . Copy the tables
71276+ * only if we need them, i.e. for loading operations. */
71277+
71278+ switch (gr_usermode.mode) {
71279+ case GR_STATUS:
71280+ if (gr_acl_is_enabled()) {
71281+ error = 1;
71282+ if (!gr_check_secure_terminal(current))
71283+ error = 3;
71284+ } else
71285+ error = 2;
71286+ goto out;
71287+ case GR_SHUTDOWN:
71288+ if (gr_acl_is_enabled() && !(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
71289+ stop_machine(gr_rbac_disable, NULL, NULL);
71290+ free_variables(false);
71291+ memset(&gr_usermode, 0, sizeof(gr_usermode));
71292+ memset(&gr_system_salt, 0, sizeof(gr_system_salt));
71293+ memset(&gr_system_sum, 0, sizeof(gr_system_sum));
71294+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
71295+ } else if (gr_acl_is_enabled()) {
71296+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
71297+ error = -EPERM;
71298+ } else {
71299+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
71300+ error = -EAGAIN;
71301+ }
71302+ break;
71303+ case GR_ENABLE:
71304+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(&gr_usermode)))
71305+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
71306+ else {
71307+ if (gr_acl_is_enabled())
71308+ error = -EAGAIN;
71309+ else
71310+ error = error2;
71311+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
71312+ }
71313+ break;
71314+ case GR_OLDRELOAD:
71315+ oldmode = 1;
71316+ case GR_RELOAD:
71317+ if (!gr_acl_is_enabled()) {
71318+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
71319+ error = -EAGAIN;
71320+ } else if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
71321+ error2 = gracl_reload(&gr_usermode, oldmode);
71322+ if (!error2)
71323+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
71324+ else {
71325+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
71326+ error = error2;
71327+ }
71328+ } else {
71329+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
71330+ error = -EPERM;
71331+ }
71332+ break;
71333+ case GR_SEGVMOD:
71334+ if (unlikely(!gr_acl_is_enabled())) {
71335+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
71336+ error = -EAGAIN;
71337+ break;
71338+ }
71339+
71340+ if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
71341+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
71342+ if (gr_usermode.segv_device && gr_usermode.segv_inode) {
71343+ struct acl_subject_label *segvacl;
71344+ segvacl =
71345+ lookup_acl_subj_label(gr_usermode.segv_inode,
71346+ gr_usermode.segv_device,
71347+ current->role);
71348+ if (segvacl) {
71349+ segvacl->crashes = 0;
71350+ segvacl->expires = 0;
71351+ }
71352+ } else if (gr_find_uid(gr_usermode.segv_uid) >= 0) {
71353+ gr_remove_uid(gr_usermode.segv_uid);
71354+ }
71355+ } else {
71356+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
71357+ error = -EPERM;
71358+ }
71359+ break;
71360+ case GR_SPROLE:
71361+ case GR_SPROLEPAM:
71362+ if (unlikely(!gr_acl_is_enabled())) {
71363+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
71364+ error = -EAGAIN;
71365+ break;
71366+ }
71367+
71368+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
71369+ current->role->expires = 0;
71370+ current->role->auth_attempts = 0;
71371+ }
71372+
71373+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
71374+ time_after(current->role->expires, get_seconds())) {
71375+ error = -EBUSY;
71376+ goto out;
71377+ }
71378+
71379+ if (lookup_special_role_auth
71380+ (gr_usermode.mode, gr_usermode.sp_role, &sprole_salt, &sprole_sum)
71381+ && ((!sprole_salt && !sprole_sum)
71382+ || !(chkpw(&gr_usermode, sprole_salt, sprole_sum)))) {
71383+ char *p = "";
71384+ assign_special_role(gr_usermode.sp_role);
71385+ read_lock(&tasklist_lock);
71386+ if (current->real_parent)
71387+ p = current->real_parent->role->rolename;
71388+ read_unlock(&tasklist_lock);
71389+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
71390+ p, acl_sp_role_value);
71391+ } else {
71392+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode.sp_role);
71393+ error = -EPERM;
71394+ if(!(current->role->auth_attempts++))
71395+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
71396+
71397+ goto out;
71398+ }
71399+ break;
71400+ case GR_UNSPROLE:
71401+ if (unlikely(!gr_acl_is_enabled())) {
71402+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
71403+ error = -EAGAIN;
71404+ break;
71405+ }
71406+
71407+ if (current->role->roletype & GR_ROLE_SPECIAL) {
71408+ char *p = "";
71409+ int i = 0;
71410+
71411+ read_lock(&tasklist_lock);
71412+ if (current->real_parent) {
71413+ p = current->real_parent->role->rolename;
71414+ i = current->real_parent->acl_role_id;
71415+ }
71416+ read_unlock(&tasklist_lock);
71417+
71418+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
71419+ gr_set_acls(1);
71420+ } else {
71421+ error = -EPERM;
71422+ goto out;
71423+ }
71424+ break;
71425+ default:
71426+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode.mode);
71427+ error = -EINVAL;
71428+ break;
71429+ }
71430+
71431+ if (error != -EPERM)
71432+ goto out;
71433+
71434+ if(!(gr_auth_attempts++))
71435+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
71436+
71437+ out:
71438+ mutex_unlock(&gr_dev_mutex);
71439+
71440+ if (!error)
71441+ error = req_count;
71442+
71443+ return error;
71444+}
71445+
71446+int
71447+gr_set_acls(const int type)
71448+{
71449+ struct task_struct *task, *task2;
71450+ struct acl_role_label *role = current->role;
71451+ struct acl_subject_label *subj;
71452+ __u16 acl_role_id = current->acl_role_id;
71453+ const struct cred *cred;
71454+ int ret;
71455+
71456+ rcu_read_lock();
71457+ read_lock(&tasklist_lock);
71458+ read_lock(&grsec_exec_file_lock);
71459+ do_each_thread(task2, task) {
71460+ /* check to see if we're called from the exit handler,
71461+ if so, only replace ACLs that have inherited the admin
71462+ ACL */
71463+
71464+ if (type && (task->role != role ||
71465+ task->acl_role_id != acl_role_id))
71466+ continue;
71467+
71468+ task->acl_role_id = 0;
71469+ task->acl_sp_role = 0;
71470+ task->inherited = 0;
71471+
71472+ if (task->exec_file) {
71473+ cred = __task_cred(task);
71474+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
71475+ subj = __gr_get_subject_for_task(polstate, task, NULL);
71476+ if (subj == NULL) {
71477+ ret = -EINVAL;
71478+ read_unlock(&grsec_exec_file_lock);
71479+ read_unlock(&tasklist_lock);
71480+ rcu_read_unlock();
71481+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
71482+ return ret;
71483+ }
71484+ __gr_apply_subject_to_task(polstate, task, subj);
71485+ } else {
71486+ // it's a kernel process
71487+ task->role = polstate->kernel_role;
71488+ task->acl = polstate->kernel_role->root_label;
71489+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
71490+ task->acl->mode &= ~GR_PROCFIND;
71491+#endif
71492+ }
71493+ } while_each_thread(task2, task);
71494+ read_unlock(&grsec_exec_file_lock);
71495+ read_unlock(&tasklist_lock);
71496+ rcu_read_unlock();
71497+
71498+ return 0;
71499+}
71500diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
71501new file mode 100644
71502index 0000000..39645c9
71503--- /dev/null
71504+++ b/grsecurity/gracl_res.c
71505@@ -0,0 +1,68 @@
71506+#include <linux/kernel.h>
71507+#include <linux/sched.h>
71508+#include <linux/gracl.h>
71509+#include <linux/grinternal.h>
71510+
71511+static const char *restab_log[] = {
71512+ [RLIMIT_CPU] = "RLIMIT_CPU",
71513+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
71514+ [RLIMIT_DATA] = "RLIMIT_DATA",
71515+ [RLIMIT_STACK] = "RLIMIT_STACK",
71516+ [RLIMIT_CORE] = "RLIMIT_CORE",
71517+ [RLIMIT_RSS] = "RLIMIT_RSS",
71518+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
71519+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
71520+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
71521+ [RLIMIT_AS] = "RLIMIT_AS",
71522+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
71523+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
71524+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
71525+ [RLIMIT_NICE] = "RLIMIT_NICE",
71526+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
71527+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
71528+ [GR_CRASH_RES] = "RLIMIT_CRASH"
71529+};
71530+
71531+void
71532+gr_log_resource(const struct task_struct *task,
71533+ const int res, const unsigned long wanted, const int gt)
71534+{
71535+ const struct cred *cred;
71536+ unsigned long rlim;
71537+
71538+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
71539+ return;
71540+
71541+ // not yet supported resource
71542+ if (unlikely(!restab_log[res]))
71543+ return;
71544+
71545+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
71546+ rlim = task_rlimit_max(task, res);
71547+ else
71548+ rlim = task_rlimit(task, res);
71549+
71550+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
71551+ return;
71552+
71553+ rcu_read_lock();
71554+ cred = __task_cred(task);
71555+
71556+ if (res == RLIMIT_NPROC &&
71557+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
71558+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
71559+ goto out_rcu_unlock;
71560+ else if (res == RLIMIT_MEMLOCK &&
71561+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
71562+ goto out_rcu_unlock;
71563+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
71564+ goto out_rcu_unlock;
71565+ rcu_read_unlock();
71566+
71567+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
71568+
71569+ return;
71570+out_rcu_unlock:
71571+ rcu_read_unlock();
71572+ return;
71573+}
71574diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
71575new file mode 100644
71576index 0000000..2040e61
71577--- /dev/null
71578+++ b/grsecurity/gracl_segv.c
71579@@ -0,0 +1,313 @@
71580+#include <linux/kernel.h>
71581+#include <linux/mm.h>
71582+#include <asm/uaccess.h>
71583+#include <asm/errno.h>
71584+#include <asm/mman.h>
71585+#include <net/sock.h>
71586+#include <linux/file.h>
71587+#include <linux/fs.h>
71588+#include <linux/net.h>
71589+#include <linux/in.h>
71590+#include <linux/slab.h>
71591+#include <linux/types.h>
71592+#include <linux/sched.h>
71593+#include <linux/timer.h>
71594+#include <linux/gracl.h>
71595+#include <linux/grsecurity.h>
71596+#include <linux/grinternal.h>
71597+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
71598+#include <linux/magic.h>
71599+#include <linux/pagemap.h>
71600+#include "../fs/btrfs/async-thread.h"
71601+#include "../fs/btrfs/ctree.h"
71602+#include "../fs/btrfs/btrfs_inode.h"
71603+#endif
71604+
71605+static struct crash_uid *uid_set;
71606+static unsigned short uid_used;
71607+static DEFINE_SPINLOCK(gr_uid_lock);
71608+extern rwlock_t gr_inode_lock;
71609+extern struct acl_subject_label *
71610+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
71611+ struct acl_role_label *role);
71612+
71613+static inline dev_t __get_dev(const struct dentry *dentry)
71614+{
71615+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
71616+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
71617+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
71618+ else
71619+#endif
71620+ return dentry->d_sb->s_dev;
71621+}
71622+
71623+int
71624+gr_init_uidset(void)
71625+{
71626+ uid_set =
71627+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
71628+ uid_used = 0;
71629+
71630+ return uid_set ? 1 : 0;
71631+}
71632+
71633+void
71634+gr_free_uidset(void)
71635+{
71636+ if (uid_set) {
71637+ struct crash_uid *tmpset;
71638+ spin_lock(&gr_uid_lock);
71639+ tmpset = uid_set;
71640+ uid_set = NULL;
71641+ uid_used = 0;
71642+ spin_unlock(&gr_uid_lock);
71643+ if (tmpset)
71644+ kfree(tmpset);
71645+ }
71646+
71647+ return;
71648+}
71649+
71650+int
71651+gr_find_uid(const uid_t uid)
71652+{
71653+ struct crash_uid *tmp = uid_set;
71654+ uid_t buid;
71655+ int low = 0, high = uid_used - 1, mid;
71656+
71657+ while (high >= low) {
71658+ mid = (low + high) >> 1;
71659+ buid = tmp[mid].uid;
71660+ if (buid == uid)
71661+ return mid;
71662+ if (buid > uid)
71663+ high = mid - 1;
71664+ if (buid < uid)
71665+ low = mid + 1;
71666+ }
71667+
71668+ return -1;
71669+}
71670+
71671+static __inline__ void
71672+gr_insertsort(void)
71673+{
71674+ unsigned short i, j;
71675+ struct crash_uid index;
71676+
71677+ for (i = 1; i < uid_used; i++) {
71678+ index = uid_set[i];
71679+ j = i;
71680+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
71681+ uid_set[j] = uid_set[j - 1];
71682+ j--;
71683+ }
71684+ uid_set[j] = index;
71685+ }
71686+
71687+ return;
71688+}
71689+
71690+static __inline__ void
71691+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
71692+{
71693+ int loc;
71694+ uid_t uid = GR_GLOBAL_UID(kuid);
71695+
71696+ if (uid_used == GR_UIDTABLE_MAX)
71697+ return;
71698+
71699+ loc = gr_find_uid(uid);
71700+
71701+ if (loc >= 0) {
71702+ uid_set[loc].expires = expires;
71703+ return;
71704+ }
71705+
71706+ uid_set[uid_used].uid = uid;
71707+ uid_set[uid_used].expires = expires;
71708+ uid_used++;
71709+
71710+ gr_insertsort();
71711+
71712+ return;
71713+}
71714+
71715+void
71716+gr_remove_uid(const unsigned short loc)
71717+{
71718+ unsigned short i;
71719+
71720+ for (i = loc + 1; i < uid_used; i++)
71721+ uid_set[i - 1] = uid_set[i];
71722+
71723+ uid_used--;
71724+
71725+ return;
71726+}
71727+
71728+int
71729+gr_check_crash_uid(const kuid_t kuid)
71730+{
71731+ int loc;
71732+ int ret = 0;
71733+ uid_t uid;
71734+
71735+ if (unlikely(!gr_acl_is_enabled()))
71736+ return 0;
71737+
71738+ uid = GR_GLOBAL_UID(kuid);
71739+
71740+ spin_lock(&gr_uid_lock);
71741+ loc = gr_find_uid(uid);
71742+
71743+ if (loc < 0)
71744+ goto out_unlock;
71745+
71746+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
71747+ gr_remove_uid(loc);
71748+ else
71749+ ret = 1;
71750+
71751+out_unlock:
71752+ spin_unlock(&gr_uid_lock);
71753+ return ret;
71754+}
71755+
71756+static __inline__ int
71757+proc_is_setxid(const struct cred *cred)
71758+{
71759+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
71760+ !uid_eq(cred->uid, cred->fsuid))
71761+ return 1;
71762+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
71763+ !gid_eq(cred->gid, cred->fsgid))
71764+ return 1;
71765+
71766+ return 0;
71767+}
71768+
71769+extern int gr_fake_force_sig(int sig, struct task_struct *t);
71770+
71771+void
71772+gr_handle_crash(struct task_struct *task, const int sig)
71773+{
71774+ struct acl_subject_label *curr;
71775+ struct task_struct *tsk, *tsk2;
71776+ const struct cred *cred;
71777+ const struct cred *cred2;
71778+
71779+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
71780+ return;
71781+
71782+ if (unlikely(!gr_acl_is_enabled()))
71783+ return;
71784+
71785+ curr = task->acl;
71786+
71787+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
71788+ return;
71789+
71790+ if (time_before_eq(curr->expires, get_seconds())) {
71791+ curr->expires = 0;
71792+ curr->crashes = 0;
71793+ }
71794+
71795+ curr->crashes++;
71796+
71797+ if (!curr->expires)
71798+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
71799+
71800+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
71801+ time_after(curr->expires, get_seconds())) {
71802+ rcu_read_lock();
71803+ cred = __task_cred(task);
71804+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
71805+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
71806+ spin_lock(&gr_uid_lock);
71807+ gr_insert_uid(cred->uid, curr->expires);
71808+ spin_unlock(&gr_uid_lock);
71809+ curr->expires = 0;
71810+ curr->crashes = 0;
71811+ read_lock(&tasklist_lock);
71812+ do_each_thread(tsk2, tsk) {
71813+ cred2 = __task_cred(tsk);
71814+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
71815+ gr_fake_force_sig(SIGKILL, tsk);
71816+ } while_each_thread(tsk2, tsk);
71817+ read_unlock(&tasklist_lock);
71818+ } else {
71819+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
71820+ read_lock(&tasklist_lock);
71821+ read_lock(&grsec_exec_file_lock);
71822+ do_each_thread(tsk2, tsk) {
71823+ if (likely(tsk != task)) {
71824+ // if this thread has the same subject as the one that triggered
71825+ // RES_CRASH and it's the same binary, kill it
71826+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
71827+ gr_fake_force_sig(SIGKILL, tsk);
71828+ }
71829+ } while_each_thread(tsk2, tsk);
71830+ read_unlock(&grsec_exec_file_lock);
71831+ read_unlock(&tasklist_lock);
71832+ }
71833+ rcu_read_unlock();
71834+ }
71835+
71836+ return;
71837+}
71838+
71839+int
71840+gr_check_crash_exec(const struct file *filp)
71841+{
71842+ struct acl_subject_label *curr;
71843+
71844+ if (unlikely(!gr_acl_is_enabled()))
71845+ return 0;
71846+
71847+ read_lock(&gr_inode_lock);
71848+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
71849+ __get_dev(filp->f_path.dentry),
71850+ current->role);
71851+ read_unlock(&gr_inode_lock);
71852+
71853+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
71854+ (!curr->crashes && !curr->expires))
71855+ return 0;
71856+
71857+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
71858+ time_after(curr->expires, get_seconds()))
71859+ return 1;
71860+ else if (time_before_eq(curr->expires, get_seconds())) {
71861+ curr->crashes = 0;
71862+ curr->expires = 0;
71863+ }
71864+
71865+ return 0;
71866+}
71867+
71868+void
71869+gr_handle_alertkill(struct task_struct *task)
71870+{
71871+ struct acl_subject_label *curracl;
71872+ __u32 curr_ip;
71873+ struct task_struct *p, *p2;
71874+
71875+ if (unlikely(!gr_acl_is_enabled()))
71876+ return;
71877+
71878+ curracl = task->acl;
71879+ curr_ip = task->signal->curr_ip;
71880+
71881+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
71882+ read_lock(&tasklist_lock);
71883+ do_each_thread(p2, p) {
71884+ if (p->signal->curr_ip == curr_ip)
71885+ gr_fake_force_sig(SIGKILL, p);
71886+ } while_each_thread(p2, p);
71887+ read_unlock(&tasklist_lock);
71888+ } else if (curracl->mode & GR_KILLPROC)
71889+ gr_fake_force_sig(SIGKILL, task);
71890+
71891+ return;
71892+}
71893diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
71894new file mode 100644
71895index 0000000..98011b0
71896--- /dev/null
71897+++ b/grsecurity/gracl_shm.c
71898@@ -0,0 +1,40 @@
71899+#include <linux/kernel.h>
71900+#include <linux/mm.h>
71901+#include <linux/sched.h>
71902+#include <linux/file.h>
71903+#include <linux/ipc.h>
71904+#include <linux/gracl.h>
71905+#include <linux/grsecurity.h>
71906+#include <linux/grinternal.h>
71907+
71908+int
71909+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71910+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
71911+{
71912+ struct task_struct *task;
71913+
71914+ if (!gr_acl_is_enabled())
71915+ return 1;
71916+
71917+ rcu_read_lock();
71918+ read_lock(&tasklist_lock);
71919+
71920+ task = find_task_by_vpid(shm_cprid);
71921+
71922+ if (unlikely(!task))
71923+ task = find_task_by_vpid(shm_lapid);
71924+
71925+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
71926+ (task_pid_nr(task) == shm_lapid)) &&
71927+ (task->acl->mode & GR_PROTSHM) &&
71928+ (task->acl != current->acl))) {
71929+ read_unlock(&tasklist_lock);
71930+ rcu_read_unlock();
71931+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
71932+ return 0;
71933+ }
71934+ read_unlock(&tasklist_lock);
71935+ rcu_read_unlock();
71936+
71937+ return 1;
71938+}
71939diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
71940new file mode 100644
71941index 0000000..bc0be01
71942--- /dev/null
71943+++ b/grsecurity/grsec_chdir.c
71944@@ -0,0 +1,19 @@
71945+#include <linux/kernel.h>
71946+#include <linux/sched.h>
71947+#include <linux/fs.h>
71948+#include <linux/file.h>
71949+#include <linux/grsecurity.h>
71950+#include <linux/grinternal.h>
71951+
71952+void
71953+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
71954+{
71955+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
71956+ if ((grsec_enable_chdir && grsec_enable_group &&
71957+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
71958+ !grsec_enable_group)) {
71959+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
71960+ }
71961+#endif
71962+ return;
71963+}
71964diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
71965new file mode 100644
71966index 0000000..651d6c2
71967--- /dev/null
71968+++ b/grsecurity/grsec_chroot.c
71969@@ -0,0 +1,370 @@
71970+#include <linux/kernel.h>
71971+#include <linux/module.h>
71972+#include <linux/sched.h>
71973+#include <linux/file.h>
71974+#include <linux/fs.h>
71975+#include <linux/mount.h>
71976+#include <linux/types.h>
71977+#include "../fs/mount.h"
71978+#include <linux/grsecurity.h>
71979+#include <linux/grinternal.h>
71980+
71981+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
71982+int gr_init_ran;
71983+#endif
71984+
71985+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
71986+{
71987+#ifdef CONFIG_GRKERNSEC
71988+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
71989+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
71990+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
71991+ && gr_init_ran
71992+#endif
71993+ )
71994+ task->gr_is_chrooted = 1;
71995+ else {
71996+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
71997+ if (task_pid_nr(task) == 1 && !gr_init_ran)
71998+ gr_init_ran = 1;
71999+#endif
72000+ task->gr_is_chrooted = 0;
72001+ }
72002+
72003+ task->gr_chroot_dentry = path->dentry;
72004+#endif
72005+ return;
72006+}
72007+
72008+void gr_clear_chroot_entries(struct task_struct *task)
72009+{
72010+#ifdef CONFIG_GRKERNSEC
72011+ task->gr_is_chrooted = 0;
72012+ task->gr_chroot_dentry = NULL;
72013+#endif
72014+ return;
72015+}
72016+
72017+int
72018+gr_handle_chroot_unix(const pid_t pid)
72019+{
72020+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
72021+ struct task_struct *p;
72022+
72023+ if (unlikely(!grsec_enable_chroot_unix))
72024+ return 1;
72025+
72026+ if (likely(!proc_is_chrooted(current)))
72027+ return 1;
72028+
72029+ rcu_read_lock();
72030+ read_lock(&tasklist_lock);
72031+ p = find_task_by_vpid_unrestricted(pid);
72032+ if (unlikely(p && !have_same_root(current, p))) {
72033+ read_unlock(&tasklist_lock);
72034+ rcu_read_unlock();
72035+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
72036+ return 0;
72037+ }
72038+ read_unlock(&tasklist_lock);
72039+ rcu_read_unlock();
72040+#endif
72041+ return 1;
72042+}
72043+
72044+int
72045+gr_handle_chroot_nice(void)
72046+{
72047+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72048+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
72049+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
72050+ return -EPERM;
72051+ }
72052+#endif
72053+ return 0;
72054+}
72055+
72056+int
72057+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
72058+{
72059+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72060+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
72061+ && proc_is_chrooted(current)) {
72062+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
72063+ return -EACCES;
72064+ }
72065+#endif
72066+ return 0;
72067+}
72068+
72069+int
72070+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
72071+{
72072+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72073+ struct task_struct *p;
72074+ int ret = 0;
72075+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
72076+ return ret;
72077+
72078+ read_lock(&tasklist_lock);
72079+ do_each_pid_task(pid, type, p) {
72080+ if (!have_same_root(current, p)) {
72081+ ret = 1;
72082+ goto out;
72083+ }
72084+ } while_each_pid_task(pid, type, p);
72085+out:
72086+ read_unlock(&tasklist_lock);
72087+ return ret;
72088+#endif
72089+ return 0;
72090+}
72091+
72092+int
72093+gr_pid_is_chrooted(struct task_struct *p)
72094+{
72095+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72096+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
72097+ return 0;
72098+
72099+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
72100+ !have_same_root(current, p)) {
72101+ return 1;
72102+ }
72103+#endif
72104+ return 0;
72105+}
72106+
72107+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
72108+
72109+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
72110+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
72111+{
72112+ struct path path, currentroot;
72113+ int ret = 0;
72114+
72115+ path.dentry = (struct dentry *)u_dentry;
72116+ path.mnt = (struct vfsmount *)u_mnt;
72117+ get_fs_root(current->fs, &currentroot);
72118+ if (path_is_under(&path, &currentroot))
72119+ ret = 1;
72120+ path_put(&currentroot);
72121+
72122+ return ret;
72123+}
72124+#endif
72125+
72126+int
72127+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
72128+{
72129+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
72130+ if (!grsec_enable_chroot_fchdir)
72131+ return 1;
72132+
72133+ if (!proc_is_chrooted(current))
72134+ return 1;
72135+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
72136+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
72137+ return 0;
72138+ }
72139+#endif
72140+ return 1;
72141+}
72142+
72143+int
72144+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72145+ const time_t shm_createtime)
72146+{
72147+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
72148+ struct task_struct *p;
72149+ time_t starttime;
72150+
72151+ if (unlikely(!grsec_enable_chroot_shmat))
72152+ return 1;
72153+
72154+ if (likely(!proc_is_chrooted(current)))
72155+ return 1;
72156+
72157+ rcu_read_lock();
72158+ read_lock(&tasklist_lock);
72159+
72160+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
72161+ starttime = p->start_time.tv_sec;
72162+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
72163+ if (have_same_root(current, p)) {
72164+ goto allow;
72165+ } else {
72166+ read_unlock(&tasklist_lock);
72167+ rcu_read_unlock();
72168+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
72169+ return 0;
72170+ }
72171+ }
72172+ /* creator exited, pid reuse, fall through to next check */
72173+ }
72174+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
72175+ if (unlikely(!have_same_root(current, p))) {
72176+ read_unlock(&tasklist_lock);
72177+ rcu_read_unlock();
72178+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
72179+ return 0;
72180+ }
72181+ }
72182+
72183+allow:
72184+ read_unlock(&tasklist_lock);
72185+ rcu_read_unlock();
72186+#endif
72187+ return 1;
72188+}
72189+
72190+void
72191+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
72192+{
72193+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
72194+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
72195+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
72196+#endif
72197+ return;
72198+}
72199+
72200+int
72201+gr_handle_chroot_mknod(const struct dentry *dentry,
72202+ const struct vfsmount *mnt, const int mode)
72203+{
72204+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
72205+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
72206+ proc_is_chrooted(current)) {
72207+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
72208+ return -EPERM;
72209+ }
72210+#endif
72211+ return 0;
72212+}
72213+
72214+int
72215+gr_handle_chroot_mount(const struct dentry *dentry,
72216+ const struct vfsmount *mnt, const char *dev_name)
72217+{
72218+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
72219+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
72220+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
72221+ return -EPERM;
72222+ }
72223+#endif
72224+ return 0;
72225+}
72226+
72227+int
72228+gr_handle_chroot_pivot(void)
72229+{
72230+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
72231+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
72232+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
72233+ return -EPERM;
72234+ }
72235+#endif
72236+ return 0;
72237+}
72238+
72239+int
72240+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
72241+{
72242+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
72243+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
72244+ !gr_is_outside_chroot(dentry, mnt)) {
72245+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
72246+ return -EPERM;
72247+ }
72248+#endif
72249+ return 0;
72250+}
72251+
72252+extern const char *captab_log[];
72253+extern int captab_log_entries;
72254+
72255+int
72256+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72257+{
72258+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72259+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
72260+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
72261+ if (cap_raised(chroot_caps, cap)) {
72262+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
72263+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
72264+ }
72265+ return 0;
72266+ }
72267+ }
72268+#endif
72269+ return 1;
72270+}
72271+
72272+int
72273+gr_chroot_is_capable(const int cap)
72274+{
72275+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72276+ return gr_task_chroot_is_capable(current, current_cred(), cap);
72277+#endif
72278+ return 1;
72279+}
72280+
72281+int
72282+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
72283+{
72284+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72285+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
72286+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
72287+ if (cap_raised(chroot_caps, cap)) {
72288+ return 0;
72289+ }
72290+ }
72291+#endif
72292+ return 1;
72293+}
72294+
72295+int
72296+gr_chroot_is_capable_nolog(const int cap)
72297+{
72298+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72299+ return gr_task_chroot_is_capable_nolog(current, cap);
72300+#endif
72301+ return 1;
72302+}
72303+
72304+int
72305+gr_handle_chroot_sysctl(const int op)
72306+{
72307+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
72308+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
72309+ proc_is_chrooted(current))
72310+ return -EACCES;
72311+#endif
72312+ return 0;
72313+}
72314+
72315+void
72316+gr_handle_chroot_chdir(const struct path *path)
72317+{
72318+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
72319+ if (grsec_enable_chroot_chdir)
72320+ set_fs_pwd(current->fs, path);
72321+#endif
72322+ return;
72323+}
72324+
72325+int
72326+gr_handle_chroot_chmod(const struct dentry *dentry,
72327+ const struct vfsmount *mnt, const int mode)
72328+{
72329+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
72330+ /* allow chmod +s on directories, but not files */
72331+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
72332+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
72333+ proc_is_chrooted(current)) {
72334+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
72335+ return -EPERM;
72336+ }
72337+#endif
72338+ return 0;
72339+}
72340diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
72341new file mode 100644
72342index 0000000..4d6fce8
72343--- /dev/null
72344+++ b/grsecurity/grsec_disabled.c
72345@@ -0,0 +1,433 @@
72346+#include <linux/kernel.h>
72347+#include <linux/module.h>
72348+#include <linux/sched.h>
72349+#include <linux/file.h>
72350+#include <linux/fs.h>
72351+#include <linux/kdev_t.h>
72352+#include <linux/net.h>
72353+#include <linux/in.h>
72354+#include <linux/ip.h>
72355+#include <linux/skbuff.h>
72356+#include <linux/sysctl.h>
72357+
72358+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
72359+void
72360+pax_set_initial_flags(struct linux_binprm *bprm)
72361+{
72362+ return;
72363+}
72364+#endif
72365+
72366+#ifdef CONFIG_SYSCTL
72367+__u32
72368+gr_handle_sysctl(const struct ctl_table * table, const int op)
72369+{
72370+ return 0;
72371+}
72372+#endif
72373+
72374+#ifdef CONFIG_TASKSTATS
72375+int gr_is_taskstats_denied(int pid)
72376+{
72377+ return 0;
72378+}
72379+#endif
72380+
72381+int
72382+gr_acl_is_enabled(void)
72383+{
72384+ return 0;
72385+}
72386+
72387+void
72388+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
72389+{
72390+ return;
72391+}
72392+
72393+int
72394+gr_handle_rawio(const struct inode *inode)
72395+{
72396+ return 0;
72397+}
72398+
72399+void
72400+gr_acl_handle_psacct(struct task_struct *task, const long code)
72401+{
72402+ return;
72403+}
72404+
72405+int
72406+gr_handle_ptrace(struct task_struct *task, const long request)
72407+{
72408+ return 0;
72409+}
72410+
72411+int
72412+gr_handle_proc_ptrace(struct task_struct *task)
72413+{
72414+ return 0;
72415+}
72416+
72417+int
72418+gr_set_acls(const int type)
72419+{
72420+ return 0;
72421+}
72422+
72423+int
72424+gr_check_hidden_task(const struct task_struct *tsk)
72425+{
72426+ return 0;
72427+}
72428+
72429+int
72430+gr_check_protected_task(const struct task_struct *task)
72431+{
72432+ return 0;
72433+}
72434+
72435+int
72436+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
72437+{
72438+ return 0;
72439+}
72440+
72441+void
72442+gr_copy_label(struct task_struct *tsk)
72443+{
72444+ return;
72445+}
72446+
72447+void
72448+gr_set_pax_flags(struct task_struct *task)
72449+{
72450+ return;
72451+}
72452+
72453+int
72454+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
72455+ const int unsafe_share)
72456+{
72457+ return 0;
72458+}
72459+
72460+void
72461+gr_handle_delete(const ino_t ino, const dev_t dev)
72462+{
72463+ return;
72464+}
72465+
72466+void
72467+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
72468+{
72469+ return;
72470+}
72471+
72472+void
72473+gr_handle_crash(struct task_struct *task, const int sig)
72474+{
72475+ return;
72476+}
72477+
72478+int
72479+gr_check_crash_exec(const struct file *filp)
72480+{
72481+ return 0;
72482+}
72483+
72484+int
72485+gr_check_crash_uid(const kuid_t uid)
72486+{
72487+ return 0;
72488+}
72489+
72490+void
72491+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
72492+ struct dentry *old_dentry,
72493+ struct dentry *new_dentry,
72494+ struct vfsmount *mnt, const __u8 replace)
72495+{
72496+ return;
72497+}
72498+
72499+int
72500+gr_search_socket(const int family, const int type, const int protocol)
72501+{
72502+ return 1;
72503+}
72504+
72505+int
72506+gr_search_connectbind(const int mode, const struct socket *sock,
72507+ const struct sockaddr_in *addr)
72508+{
72509+ return 0;
72510+}
72511+
72512+void
72513+gr_handle_alertkill(struct task_struct *task)
72514+{
72515+ return;
72516+}
72517+
72518+__u32
72519+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
72520+{
72521+ return 1;
72522+}
72523+
72524+__u32
72525+gr_acl_handle_hidden_file(const struct dentry * dentry,
72526+ const struct vfsmount * mnt)
72527+{
72528+ return 1;
72529+}
72530+
72531+__u32
72532+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72533+ int acc_mode)
72534+{
72535+ return 1;
72536+}
72537+
72538+__u32
72539+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72540+{
72541+ return 1;
72542+}
72543+
72544+__u32
72545+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
72546+{
72547+ return 1;
72548+}
72549+
72550+int
72551+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
72552+ unsigned int *vm_flags)
72553+{
72554+ return 1;
72555+}
72556+
72557+__u32
72558+gr_acl_handle_truncate(const struct dentry * dentry,
72559+ const struct vfsmount * mnt)
72560+{
72561+ return 1;
72562+}
72563+
72564+__u32
72565+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
72566+{
72567+ return 1;
72568+}
72569+
72570+__u32
72571+gr_acl_handle_access(const struct dentry * dentry,
72572+ const struct vfsmount * mnt, const int fmode)
72573+{
72574+ return 1;
72575+}
72576+
72577+__u32
72578+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
72579+ umode_t *mode)
72580+{
72581+ return 1;
72582+}
72583+
72584+__u32
72585+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
72586+{
72587+ return 1;
72588+}
72589+
72590+__u32
72591+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
72592+{
72593+ return 1;
72594+}
72595+
72596+__u32
72597+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
72598+{
72599+ return 1;
72600+}
72601+
72602+void
72603+grsecurity_init(void)
72604+{
72605+ return;
72606+}
72607+
72608+umode_t gr_acl_umask(void)
72609+{
72610+ return 0;
72611+}
72612+
72613+__u32
72614+gr_acl_handle_mknod(const struct dentry * new_dentry,
72615+ const struct dentry * parent_dentry,
72616+ const struct vfsmount * parent_mnt,
72617+ const int mode)
72618+{
72619+ return 1;
72620+}
72621+
72622+__u32
72623+gr_acl_handle_mkdir(const struct dentry * new_dentry,
72624+ const struct dentry * parent_dentry,
72625+ const struct vfsmount * parent_mnt)
72626+{
72627+ return 1;
72628+}
72629+
72630+__u32
72631+gr_acl_handle_symlink(const struct dentry * new_dentry,
72632+ const struct dentry * parent_dentry,
72633+ const struct vfsmount * parent_mnt, const struct filename *from)
72634+{
72635+ return 1;
72636+}
72637+
72638+__u32
72639+gr_acl_handle_link(const struct dentry * new_dentry,
72640+ const struct dentry * parent_dentry,
72641+ const struct vfsmount * parent_mnt,
72642+ const struct dentry * old_dentry,
72643+ const struct vfsmount * old_mnt, const struct filename *to)
72644+{
72645+ return 1;
72646+}
72647+
72648+int
72649+gr_acl_handle_rename(const struct dentry *new_dentry,
72650+ const struct dentry *parent_dentry,
72651+ const struct vfsmount *parent_mnt,
72652+ const struct dentry *old_dentry,
72653+ const struct inode *old_parent_inode,
72654+ const struct vfsmount *old_mnt, const struct filename *newname)
72655+{
72656+ return 0;
72657+}
72658+
72659+int
72660+gr_acl_handle_filldir(const struct file *file, const char *name,
72661+ const int namelen, const ino_t ino)
72662+{
72663+ return 1;
72664+}
72665+
72666+int
72667+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72668+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
72669+{
72670+ return 1;
72671+}
72672+
72673+int
72674+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
72675+{
72676+ return 0;
72677+}
72678+
72679+int
72680+gr_search_accept(const struct socket *sock)
72681+{
72682+ return 0;
72683+}
72684+
72685+int
72686+gr_search_listen(const struct socket *sock)
72687+{
72688+ return 0;
72689+}
72690+
72691+int
72692+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
72693+{
72694+ return 0;
72695+}
72696+
72697+__u32
72698+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
72699+{
72700+ return 1;
72701+}
72702+
72703+__u32
72704+gr_acl_handle_creat(const struct dentry * dentry,
72705+ const struct dentry * p_dentry,
72706+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72707+ const int imode)
72708+{
72709+ return 1;
72710+}
72711+
72712+void
72713+gr_acl_handle_exit(void)
72714+{
72715+ return;
72716+}
72717+
72718+int
72719+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
72720+{
72721+ return 1;
72722+}
72723+
72724+void
72725+gr_set_role_label(const kuid_t uid, const kgid_t gid)
72726+{
72727+ return;
72728+}
72729+
72730+int
72731+gr_acl_handle_procpidmem(const struct task_struct *task)
72732+{
72733+ return 0;
72734+}
72735+
72736+int
72737+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
72738+{
72739+ return 0;
72740+}
72741+
72742+int
72743+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
72744+{
72745+ return 0;
72746+}
72747+
72748+int
72749+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
72750+{
72751+ return 0;
72752+}
72753+
72754+int
72755+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
72756+{
72757+ return 0;
72758+}
72759+
72760+int gr_acl_enable_at_secure(void)
72761+{
72762+ return 0;
72763+}
72764+
72765+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
72766+{
72767+ return dentry->d_sb->s_dev;
72768+}
72769+
72770+void gr_put_exec_file(struct task_struct *task)
72771+{
72772+ return;
72773+}
72774+
72775+#ifdef CONFIG_SECURITY
72776+EXPORT_SYMBOL_GPL(gr_check_user_change);
72777+EXPORT_SYMBOL_GPL(gr_check_group_change);
72778+#endif
72779diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
72780new file mode 100644
72781index 0000000..f35f454
72782--- /dev/null
72783+++ b/grsecurity/grsec_exec.c
72784@@ -0,0 +1,187 @@
72785+#include <linux/kernel.h>
72786+#include <linux/sched.h>
72787+#include <linux/file.h>
72788+#include <linux/binfmts.h>
72789+#include <linux/fs.h>
72790+#include <linux/types.h>
72791+#include <linux/grdefs.h>
72792+#include <linux/grsecurity.h>
72793+#include <linux/grinternal.h>
72794+#include <linux/capability.h>
72795+#include <linux/module.h>
72796+#include <linux/compat.h>
72797+
72798+#include <asm/uaccess.h>
72799+
72800+#ifdef CONFIG_GRKERNSEC_EXECLOG
72801+static char gr_exec_arg_buf[132];
72802+static DEFINE_MUTEX(gr_exec_arg_mutex);
72803+#endif
72804+
72805+struct user_arg_ptr {
72806+#ifdef CONFIG_COMPAT
72807+ bool is_compat;
72808+#endif
72809+ union {
72810+ const char __user *const __user *native;
72811+#ifdef CONFIG_COMPAT
72812+ const compat_uptr_t __user *compat;
72813+#endif
72814+ } ptr;
72815+};
72816+
72817+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
72818+
72819+void
72820+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
72821+{
72822+#ifdef CONFIG_GRKERNSEC_EXECLOG
72823+ char *grarg = gr_exec_arg_buf;
72824+ unsigned int i, x, execlen = 0;
72825+ char c;
72826+
72827+ if (!((grsec_enable_execlog && grsec_enable_group &&
72828+ in_group_p(grsec_audit_gid))
72829+ || (grsec_enable_execlog && !grsec_enable_group)))
72830+ return;
72831+
72832+ mutex_lock(&gr_exec_arg_mutex);
72833+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
72834+
72835+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
72836+ const char __user *p;
72837+ unsigned int len;
72838+
72839+ p = get_user_arg_ptr(argv, i);
72840+ if (IS_ERR(p))
72841+ goto log;
72842+
72843+ len = strnlen_user(p, 128 - execlen);
72844+ if (len > 128 - execlen)
72845+ len = 128 - execlen;
72846+ else if (len > 0)
72847+ len--;
72848+ if (copy_from_user(grarg + execlen, p, len))
72849+ goto log;
72850+
72851+ /* rewrite unprintable characters */
72852+ for (x = 0; x < len; x++) {
72853+ c = *(grarg + execlen + x);
72854+ if (c < 32 || c > 126)
72855+ *(grarg + execlen + x) = ' ';
72856+ }
72857+
72858+ execlen += len;
72859+ *(grarg + execlen) = ' ';
72860+ *(grarg + execlen + 1) = '\0';
72861+ execlen++;
72862+ }
72863+
72864+ log:
72865+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
72866+ bprm->file->f_path.mnt, grarg);
72867+ mutex_unlock(&gr_exec_arg_mutex);
72868+#endif
72869+ return;
72870+}
72871+
72872+#ifdef CONFIG_GRKERNSEC
72873+extern int gr_acl_is_capable(const int cap);
72874+extern int gr_acl_is_capable_nolog(const int cap);
72875+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
72876+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
72877+extern int gr_chroot_is_capable(const int cap);
72878+extern int gr_chroot_is_capable_nolog(const int cap);
72879+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
72880+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
72881+#endif
72882+
72883+const char *captab_log[] = {
72884+ "CAP_CHOWN",
72885+ "CAP_DAC_OVERRIDE",
72886+ "CAP_DAC_READ_SEARCH",
72887+ "CAP_FOWNER",
72888+ "CAP_FSETID",
72889+ "CAP_KILL",
72890+ "CAP_SETGID",
72891+ "CAP_SETUID",
72892+ "CAP_SETPCAP",
72893+ "CAP_LINUX_IMMUTABLE",
72894+ "CAP_NET_BIND_SERVICE",
72895+ "CAP_NET_BROADCAST",
72896+ "CAP_NET_ADMIN",
72897+ "CAP_NET_RAW",
72898+ "CAP_IPC_LOCK",
72899+ "CAP_IPC_OWNER",
72900+ "CAP_SYS_MODULE",
72901+ "CAP_SYS_RAWIO",
72902+ "CAP_SYS_CHROOT",
72903+ "CAP_SYS_PTRACE",
72904+ "CAP_SYS_PACCT",
72905+ "CAP_SYS_ADMIN",
72906+ "CAP_SYS_BOOT",
72907+ "CAP_SYS_NICE",
72908+ "CAP_SYS_RESOURCE",
72909+ "CAP_SYS_TIME",
72910+ "CAP_SYS_TTY_CONFIG",
72911+ "CAP_MKNOD",
72912+ "CAP_LEASE",
72913+ "CAP_AUDIT_WRITE",
72914+ "CAP_AUDIT_CONTROL",
72915+ "CAP_SETFCAP",
72916+ "CAP_MAC_OVERRIDE",
72917+ "CAP_MAC_ADMIN",
72918+ "CAP_SYSLOG",
72919+ "CAP_WAKE_ALARM"
72920+};
72921+
72922+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
72923+
72924+int gr_is_capable(const int cap)
72925+{
72926+#ifdef CONFIG_GRKERNSEC
72927+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
72928+ return 1;
72929+ return 0;
72930+#else
72931+ return 1;
72932+#endif
72933+}
72934+
72935+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72936+{
72937+#ifdef CONFIG_GRKERNSEC
72938+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
72939+ return 1;
72940+ return 0;
72941+#else
72942+ return 1;
72943+#endif
72944+}
72945+
72946+int gr_is_capable_nolog(const int cap)
72947+{
72948+#ifdef CONFIG_GRKERNSEC
72949+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
72950+ return 1;
72951+ return 0;
72952+#else
72953+ return 1;
72954+#endif
72955+}
72956+
72957+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
72958+{
72959+#ifdef CONFIG_GRKERNSEC
72960+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
72961+ return 1;
72962+ return 0;
72963+#else
72964+ return 1;
72965+#endif
72966+}
72967+
72968+EXPORT_SYMBOL_GPL(gr_is_capable);
72969+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
72970+EXPORT_SYMBOL_GPL(gr_task_is_capable);
72971+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
72972diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
72973new file mode 100644
72974index 0000000..06cc6ea
72975--- /dev/null
72976+++ b/grsecurity/grsec_fifo.c
72977@@ -0,0 +1,24 @@
72978+#include <linux/kernel.h>
72979+#include <linux/sched.h>
72980+#include <linux/fs.h>
72981+#include <linux/file.h>
72982+#include <linux/grinternal.h>
72983+
72984+int
72985+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
72986+ const struct dentry *dir, const int flag, const int acc_mode)
72987+{
72988+#ifdef CONFIG_GRKERNSEC_FIFO
72989+ const struct cred *cred = current_cred();
72990+
72991+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
72992+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
72993+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
72994+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
72995+ if (!inode_permission(dentry->d_inode, acc_mode))
72996+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
72997+ return -EACCES;
72998+ }
72999+#endif
73000+ return 0;
73001+}
73002diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
73003new file mode 100644
73004index 0000000..8ca18bf
73005--- /dev/null
73006+++ b/grsecurity/grsec_fork.c
73007@@ -0,0 +1,23 @@
73008+#include <linux/kernel.h>
73009+#include <linux/sched.h>
73010+#include <linux/grsecurity.h>
73011+#include <linux/grinternal.h>
73012+#include <linux/errno.h>
73013+
73014+void
73015+gr_log_forkfail(const int retval)
73016+{
73017+#ifdef CONFIG_GRKERNSEC_FORKFAIL
73018+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
73019+ switch (retval) {
73020+ case -EAGAIN:
73021+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
73022+ break;
73023+ case -ENOMEM:
73024+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
73025+ break;
73026+ }
73027+ }
73028+#endif
73029+ return;
73030+}
73031diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
73032new file mode 100644
73033index 0000000..ae6c028
73034--- /dev/null
73035+++ b/grsecurity/grsec_init.c
73036@@ -0,0 +1,272 @@
73037+#include <linux/kernel.h>
73038+#include <linux/sched.h>
73039+#include <linux/mm.h>
73040+#include <linux/gracl.h>
73041+#include <linux/slab.h>
73042+#include <linux/vmalloc.h>
73043+#include <linux/percpu.h>
73044+#include <linux/module.h>
73045+
73046+int grsec_enable_ptrace_readexec;
73047+int grsec_enable_setxid;
73048+int grsec_enable_symlinkown;
73049+kgid_t grsec_symlinkown_gid;
73050+int grsec_enable_brute;
73051+int grsec_enable_link;
73052+int grsec_enable_dmesg;
73053+int grsec_enable_harden_ptrace;
73054+int grsec_enable_harden_ipc;
73055+int grsec_enable_fifo;
73056+int grsec_enable_execlog;
73057+int grsec_enable_signal;
73058+int grsec_enable_forkfail;
73059+int grsec_enable_audit_ptrace;
73060+int grsec_enable_time;
73061+int grsec_enable_group;
73062+kgid_t grsec_audit_gid;
73063+int grsec_enable_chdir;
73064+int grsec_enable_mount;
73065+int grsec_enable_rofs;
73066+int grsec_deny_new_usb;
73067+int grsec_enable_chroot_findtask;
73068+int grsec_enable_chroot_mount;
73069+int grsec_enable_chroot_shmat;
73070+int grsec_enable_chroot_fchdir;
73071+int grsec_enable_chroot_double;
73072+int grsec_enable_chroot_pivot;
73073+int grsec_enable_chroot_chdir;
73074+int grsec_enable_chroot_chmod;
73075+int grsec_enable_chroot_mknod;
73076+int grsec_enable_chroot_nice;
73077+int grsec_enable_chroot_execlog;
73078+int grsec_enable_chroot_caps;
73079+int grsec_enable_chroot_sysctl;
73080+int grsec_enable_chroot_unix;
73081+int grsec_enable_tpe;
73082+kgid_t grsec_tpe_gid;
73083+int grsec_enable_blackhole;
73084+#ifdef CONFIG_IPV6_MODULE
73085+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
73086+#endif
73087+int grsec_lastack_retries;
73088+int grsec_enable_tpe_all;
73089+int grsec_enable_tpe_invert;
73090+int grsec_enable_socket_all;
73091+kgid_t grsec_socket_all_gid;
73092+int grsec_enable_socket_client;
73093+kgid_t grsec_socket_client_gid;
73094+int grsec_enable_socket_server;
73095+kgid_t grsec_socket_server_gid;
73096+int grsec_resource_logging;
73097+int grsec_disable_privio;
73098+int grsec_enable_log_rwxmaps;
73099+int grsec_lock;
73100+
73101+DEFINE_SPINLOCK(grsec_alert_lock);
73102+unsigned long grsec_alert_wtime = 0;
73103+unsigned long grsec_alert_fyet = 0;
73104+
73105+DEFINE_SPINLOCK(grsec_audit_lock);
73106+
73107+DEFINE_RWLOCK(grsec_exec_file_lock);
73108+
73109+char *gr_shared_page[4];
73110+
73111+char *gr_alert_log_fmt;
73112+char *gr_audit_log_fmt;
73113+char *gr_alert_log_buf;
73114+char *gr_audit_log_buf;
73115+
73116+void __init
73117+grsecurity_init(void)
73118+{
73119+ int j;
73120+ /* create the per-cpu shared pages */
73121+
73122+#ifdef CONFIG_X86
73123+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
73124+#endif
73125+
73126+ for (j = 0; j < 4; j++) {
73127+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
73128+ if (gr_shared_page[j] == NULL) {
73129+ panic("Unable to allocate grsecurity shared page");
73130+ return;
73131+ }
73132+ }
73133+
73134+ /* allocate log buffers */
73135+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
73136+ if (!gr_alert_log_fmt) {
73137+ panic("Unable to allocate grsecurity alert log format buffer");
73138+ return;
73139+ }
73140+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
73141+ if (!gr_audit_log_fmt) {
73142+ panic("Unable to allocate grsecurity audit log format buffer");
73143+ return;
73144+ }
73145+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
73146+ if (!gr_alert_log_buf) {
73147+ panic("Unable to allocate grsecurity alert log buffer");
73148+ return;
73149+ }
73150+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
73151+ if (!gr_audit_log_buf) {
73152+ panic("Unable to allocate grsecurity audit log buffer");
73153+ return;
73154+ }
73155+
73156+#ifdef CONFIG_GRKERNSEC_IO
73157+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
73158+ grsec_disable_privio = 1;
73159+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
73160+ grsec_disable_privio = 1;
73161+#else
73162+ grsec_disable_privio = 0;
73163+#endif
73164+#endif
73165+
73166+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
73167+ /* for backward compatibility, tpe_invert always defaults to on if
73168+ enabled in the kernel
73169+ */
73170+ grsec_enable_tpe_invert = 1;
73171+#endif
73172+
73173+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
73174+#ifndef CONFIG_GRKERNSEC_SYSCTL
73175+ grsec_lock = 1;
73176+#endif
73177+
73178+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73179+ grsec_enable_log_rwxmaps = 1;
73180+#endif
73181+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
73182+ grsec_enable_group = 1;
73183+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
73184+#endif
73185+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
73186+ grsec_enable_ptrace_readexec = 1;
73187+#endif
73188+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
73189+ grsec_enable_chdir = 1;
73190+#endif
73191+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73192+ grsec_enable_harden_ptrace = 1;
73193+#endif
73194+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
73195+ grsec_enable_harden_ipc = 1;
73196+#endif
73197+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73198+ grsec_enable_mount = 1;
73199+#endif
73200+#ifdef CONFIG_GRKERNSEC_LINK
73201+ grsec_enable_link = 1;
73202+#endif
73203+#ifdef CONFIG_GRKERNSEC_BRUTE
73204+ grsec_enable_brute = 1;
73205+#endif
73206+#ifdef CONFIG_GRKERNSEC_DMESG
73207+ grsec_enable_dmesg = 1;
73208+#endif
73209+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73210+ grsec_enable_blackhole = 1;
73211+ grsec_lastack_retries = 4;
73212+#endif
73213+#ifdef CONFIG_GRKERNSEC_FIFO
73214+ grsec_enable_fifo = 1;
73215+#endif
73216+#ifdef CONFIG_GRKERNSEC_EXECLOG
73217+ grsec_enable_execlog = 1;
73218+#endif
73219+#ifdef CONFIG_GRKERNSEC_SETXID
73220+ grsec_enable_setxid = 1;
73221+#endif
73222+#ifdef CONFIG_GRKERNSEC_SIGNAL
73223+ grsec_enable_signal = 1;
73224+#endif
73225+#ifdef CONFIG_GRKERNSEC_FORKFAIL
73226+ grsec_enable_forkfail = 1;
73227+#endif
73228+#ifdef CONFIG_GRKERNSEC_TIME
73229+ grsec_enable_time = 1;
73230+#endif
73231+#ifdef CONFIG_GRKERNSEC_RESLOG
73232+ grsec_resource_logging = 1;
73233+#endif
73234+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73235+ grsec_enable_chroot_findtask = 1;
73236+#endif
73237+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
73238+ grsec_enable_chroot_unix = 1;
73239+#endif
73240+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
73241+ grsec_enable_chroot_mount = 1;
73242+#endif
73243+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
73244+ grsec_enable_chroot_fchdir = 1;
73245+#endif
73246+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
73247+ grsec_enable_chroot_shmat = 1;
73248+#endif
73249+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
73250+ grsec_enable_audit_ptrace = 1;
73251+#endif
73252+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
73253+ grsec_enable_chroot_double = 1;
73254+#endif
73255+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
73256+ grsec_enable_chroot_pivot = 1;
73257+#endif
73258+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
73259+ grsec_enable_chroot_chdir = 1;
73260+#endif
73261+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
73262+ grsec_enable_chroot_chmod = 1;
73263+#endif
73264+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
73265+ grsec_enable_chroot_mknod = 1;
73266+#endif
73267+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
73268+ grsec_enable_chroot_nice = 1;
73269+#endif
73270+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
73271+ grsec_enable_chroot_execlog = 1;
73272+#endif
73273+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
73274+ grsec_enable_chroot_caps = 1;
73275+#endif
73276+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
73277+ grsec_enable_chroot_sysctl = 1;
73278+#endif
73279+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
73280+ grsec_enable_symlinkown = 1;
73281+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
73282+#endif
73283+#ifdef CONFIG_GRKERNSEC_TPE
73284+ grsec_enable_tpe = 1;
73285+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
73286+#ifdef CONFIG_GRKERNSEC_TPE_ALL
73287+ grsec_enable_tpe_all = 1;
73288+#endif
73289+#endif
73290+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
73291+ grsec_enable_socket_all = 1;
73292+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
73293+#endif
73294+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
73295+ grsec_enable_socket_client = 1;
73296+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
73297+#endif
73298+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
73299+ grsec_enable_socket_server = 1;
73300+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
73301+#endif
73302+#endif
73303+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
73304+ grsec_deny_new_usb = 1;
73305+#endif
73306+
73307+ return;
73308+}
73309diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
73310new file mode 100644
73311index 0000000..78d1680
73312--- /dev/null
73313+++ b/grsecurity/grsec_ipc.c
73314@@ -0,0 +1,48 @@
73315+#include <linux/kernel.h>
73316+#include <linux/mm.h>
73317+#include <linux/sched.h>
73318+#include <linux/file.h>
73319+#include <linux/ipc.h>
73320+#include <linux/ipc_namespace.h>
73321+#include <linux/grsecurity.h>
73322+#include <linux/grinternal.h>
73323+
73324+int
73325+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
73326+{
73327+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
73328+ int write;
73329+ int orig_granted_mode;
73330+ kuid_t euid;
73331+ kgid_t egid;
73332+
73333+ if (!grsec_enable_harden_ipc)
73334+ return 0;
73335+
73336+ euid = current_euid();
73337+ egid = current_egid();
73338+
73339+ write = requested_mode & 00002;
73340+ orig_granted_mode = ipcp->mode;
73341+
73342+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
73343+ orig_granted_mode >>= 6;
73344+ else {
73345+ /* if likely wrong permissions, lock to user */
73346+ if (orig_granted_mode & 0007)
73347+ orig_granted_mode = 0;
73348+ /* otherwise do a egid-only check */
73349+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
73350+ orig_granted_mode >>= 3;
73351+ /* otherwise, no access */
73352+ else
73353+ orig_granted_mode = 0;
73354+ }
73355+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
73356+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
73357+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
73358+ return 0;
73359+ }
73360+#endif
73361+ return 1;
73362+}
73363diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
73364new file mode 100644
73365index 0000000..5e05e20
73366--- /dev/null
73367+++ b/grsecurity/grsec_link.c
73368@@ -0,0 +1,58 @@
73369+#include <linux/kernel.h>
73370+#include <linux/sched.h>
73371+#include <linux/fs.h>
73372+#include <linux/file.h>
73373+#include <linux/grinternal.h>
73374+
73375+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
73376+{
73377+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
73378+ const struct inode *link_inode = link->dentry->d_inode;
73379+
73380+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
73381+ /* ignore root-owned links, e.g. /proc/self */
73382+ gr_is_global_nonroot(link_inode->i_uid) && target &&
73383+ !uid_eq(link_inode->i_uid, target->i_uid)) {
73384+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
73385+ return 1;
73386+ }
73387+#endif
73388+ return 0;
73389+}
73390+
73391+int
73392+gr_handle_follow_link(const struct inode *parent,
73393+ const struct inode *inode,
73394+ const struct dentry *dentry, const struct vfsmount *mnt)
73395+{
73396+#ifdef CONFIG_GRKERNSEC_LINK
73397+ const struct cred *cred = current_cred();
73398+
73399+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
73400+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
73401+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
73402+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
73403+ return -EACCES;
73404+ }
73405+#endif
73406+ return 0;
73407+}
73408+
73409+int
73410+gr_handle_hardlink(const struct dentry *dentry,
73411+ const struct vfsmount *mnt,
73412+ struct inode *inode, const int mode, const struct filename *to)
73413+{
73414+#ifdef CONFIG_GRKERNSEC_LINK
73415+ const struct cred *cred = current_cred();
73416+
73417+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
73418+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
73419+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
73420+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
73421+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
73422+ return -EPERM;
73423+ }
73424+#endif
73425+ return 0;
73426+}
73427diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
73428new file mode 100644
73429index 0000000..dbe0a6b
73430--- /dev/null
73431+++ b/grsecurity/grsec_log.c
73432@@ -0,0 +1,341 @@
73433+#include <linux/kernel.h>
73434+#include <linux/sched.h>
73435+#include <linux/file.h>
73436+#include <linux/tty.h>
73437+#include <linux/fs.h>
73438+#include <linux/mm.h>
73439+#include <linux/grinternal.h>
73440+
73441+#ifdef CONFIG_TREE_PREEMPT_RCU
73442+#define DISABLE_PREEMPT() preempt_disable()
73443+#define ENABLE_PREEMPT() preempt_enable()
73444+#else
73445+#define DISABLE_PREEMPT()
73446+#define ENABLE_PREEMPT()
73447+#endif
73448+
73449+#define BEGIN_LOCKS(x) \
73450+ DISABLE_PREEMPT(); \
73451+ rcu_read_lock(); \
73452+ read_lock(&tasklist_lock); \
73453+ read_lock(&grsec_exec_file_lock); \
73454+ if (x != GR_DO_AUDIT) \
73455+ spin_lock(&grsec_alert_lock); \
73456+ else \
73457+ spin_lock(&grsec_audit_lock)
73458+
73459+#define END_LOCKS(x) \
73460+ if (x != GR_DO_AUDIT) \
73461+ spin_unlock(&grsec_alert_lock); \
73462+ else \
73463+ spin_unlock(&grsec_audit_lock); \
73464+ read_unlock(&grsec_exec_file_lock); \
73465+ read_unlock(&tasklist_lock); \
73466+ rcu_read_unlock(); \
73467+ ENABLE_PREEMPT(); \
73468+ if (x == GR_DONT_AUDIT) \
73469+ gr_handle_alertkill(current)
73470+
73471+enum {
73472+ FLOODING,
73473+ NO_FLOODING
73474+};
73475+
73476+extern char *gr_alert_log_fmt;
73477+extern char *gr_audit_log_fmt;
73478+extern char *gr_alert_log_buf;
73479+extern char *gr_audit_log_buf;
73480+
73481+static int gr_log_start(int audit)
73482+{
73483+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
73484+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
73485+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
73486+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
73487+ unsigned long curr_secs = get_seconds();
73488+
73489+ if (audit == GR_DO_AUDIT)
73490+ goto set_fmt;
73491+
73492+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
73493+ grsec_alert_wtime = curr_secs;
73494+ grsec_alert_fyet = 0;
73495+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
73496+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
73497+ grsec_alert_fyet++;
73498+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
73499+ grsec_alert_wtime = curr_secs;
73500+ grsec_alert_fyet++;
73501+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
73502+ return FLOODING;
73503+ }
73504+ else return FLOODING;
73505+
73506+set_fmt:
73507+#endif
73508+ memset(buf, 0, PAGE_SIZE);
73509+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
73510+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
73511+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
73512+ } else if (current->signal->curr_ip) {
73513+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
73514+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
73515+ } else if (gr_acl_is_enabled()) {
73516+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
73517+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
73518+ } else {
73519+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
73520+ strcpy(buf, fmt);
73521+ }
73522+
73523+ return NO_FLOODING;
73524+}
73525+
73526+static void gr_log_middle(int audit, const char *msg, va_list ap)
73527+ __attribute__ ((format (printf, 2, 0)));
73528+
73529+static void gr_log_middle(int audit, const char *msg, va_list ap)
73530+{
73531+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
73532+ unsigned int len = strlen(buf);
73533+
73534+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
73535+
73536+ return;
73537+}
73538+
73539+static void gr_log_middle_varargs(int audit, const char *msg, ...)
73540+ __attribute__ ((format (printf, 2, 3)));
73541+
73542+static void gr_log_middle_varargs(int audit, const char *msg, ...)
73543+{
73544+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
73545+ unsigned int len = strlen(buf);
73546+ va_list ap;
73547+
73548+ va_start(ap, msg);
73549+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
73550+ va_end(ap);
73551+
73552+ return;
73553+}
73554+
73555+static void gr_log_end(int audit, int append_default)
73556+{
73557+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
73558+ if (append_default) {
73559+ struct task_struct *task = current;
73560+ struct task_struct *parent = task->real_parent;
73561+ const struct cred *cred = __task_cred(task);
73562+ const struct cred *pcred = __task_cred(parent);
73563+ unsigned int len = strlen(buf);
73564+
73565+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73566+ }
73567+
73568+ printk("%s\n", buf);
73569+
73570+ return;
73571+}
73572+
73573+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
73574+{
73575+ int logtype;
73576+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
73577+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
73578+ void *voidptr = NULL;
73579+ int num1 = 0, num2 = 0;
73580+ unsigned long ulong1 = 0, ulong2 = 0;
73581+ struct dentry *dentry = NULL;
73582+ struct vfsmount *mnt = NULL;
73583+ struct file *file = NULL;
73584+ struct task_struct *task = NULL;
73585+ struct vm_area_struct *vma = NULL;
73586+ const struct cred *cred, *pcred;
73587+ va_list ap;
73588+
73589+ BEGIN_LOCKS(audit);
73590+ logtype = gr_log_start(audit);
73591+ if (logtype == FLOODING) {
73592+ END_LOCKS(audit);
73593+ return;
73594+ }
73595+ va_start(ap, argtypes);
73596+ switch (argtypes) {
73597+ case GR_TTYSNIFF:
73598+ task = va_arg(ap, struct task_struct *);
73599+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
73600+ break;
73601+ case GR_SYSCTL_HIDDEN:
73602+ str1 = va_arg(ap, char *);
73603+ gr_log_middle_varargs(audit, msg, result, str1);
73604+ break;
73605+ case GR_RBAC:
73606+ dentry = va_arg(ap, struct dentry *);
73607+ mnt = va_arg(ap, struct vfsmount *);
73608+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
73609+ break;
73610+ case GR_RBAC_STR:
73611+ dentry = va_arg(ap, struct dentry *);
73612+ mnt = va_arg(ap, struct vfsmount *);
73613+ str1 = va_arg(ap, char *);
73614+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
73615+ break;
73616+ case GR_STR_RBAC:
73617+ str1 = va_arg(ap, char *);
73618+ dentry = va_arg(ap, struct dentry *);
73619+ mnt = va_arg(ap, struct vfsmount *);
73620+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
73621+ break;
73622+ case GR_RBAC_MODE2:
73623+ dentry = va_arg(ap, struct dentry *);
73624+ mnt = va_arg(ap, struct vfsmount *);
73625+ str1 = va_arg(ap, char *);
73626+ str2 = va_arg(ap, char *);
73627+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
73628+ break;
73629+ case GR_RBAC_MODE3:
73630+ dentry = va_arg(ap, struct dentry *);
73631+ mnt = va_arg(ap, struct vfsmount *);
73632+ str1 = va_arg(ap, char *);
73633+ str2 = va_arg(ap, char *);
73634+ str3 = va_arg(ap, char *);
73635+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
73636+ break;
73637+ case GR_FILENAME:
73638+ dentry = va_arg(ap, struct dentry *);
73639+ mnt = va_arg(ap, struct vfsmount *);
73640+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
73641+ break;
73642+ case GR_STR_FILENAME:
73643+ str1 = va_arg(ap, char *);
73644+ dentry = va_arg(ap, struct dentry *);
73645+ mnt = va_arg(ap, struct vfsmount *);
73646+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
73647+ break;
73648+ case GR_FILENAME_STR:
73649+ dentry = va_arg(ap, struct dentry *);
73650+ mnt = va_arg(ap, struct vfsmount *);
73651+ str1 = va_arg(ap, char *);
73652+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
73653+ break;
73654+ case GR_FILENAME_TWO_INT:
73655+ dentry = va_arg(ap, struct dentry *);
73656+ mnt = va_arg(ap, struct vfsmount *);
73657+ num1 = va_arg(ap, int);
73658+ num2 = va_arg(ap, int);
73659+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
73660+ break;
73661+ case GR_FILENAME_TWO_INT_STR:
73662+ dentry = va_arg(ap, struct dentry *);
73663+ mnt = va_arg(ap, struct vfsmount *);
73664+ num1 = va_arg(ap, int);
73665+ num2 = va_arg(ap, int);
73666+ str1 = va_arg(ap, char *);
73667+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
73668+ break;
73669+ case GR_TEXTREL:
73670+ file = va_arg(ap, struct file *);
73671+ ulong1 = va_arg(ap, unsigned long);
73672+ ulong2 = va_arg(ap, unsigned long);
73673+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
73674+ break;
73675+ case GR_PTRACE:
73676+ task = va_arg(ap, struct task_struct *);
73677+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
73678+ break;
73679+ case GR_RESOURCE:
73680+ task = va_arg(ap, struct task_struct *);
73681+ cred = __task_cred(task);
73682+ pcred = __task_cred(task->real_parent);
73683+ ulong1 = va_arg(ap, unsigned long);
73684+ str1 = va_arg(ap, char *);
73685+ ulong2 = va_arg(ap, unsigned long);
73686+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73687+ break;
73688+ case GR_CAP:
73689+ task = va_arg(ap, struct task_struct *);
73690+ cred = __task_cred(task);
73691+ pcred = __task_cred(task->real_parent);
73692+ str1 = va_arg(ap, char *);
73693+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73694+ break;
73695+ case GR_SIG:
73696+ str1 = va_arg(ap, char *);
73697+ voidptr = va_arg(ap, void *);
73698+ gr_log_middle_varargs(audit, msg, str1, voidptr);
73699+ break;
73700+ case GR_SIG2:
73701+ task = va_arg(ap, struct task_struct *);
73702+ cred = __task_cred(task);
73703+ pcred = __task_cred(task->real_parent);
73704+ num1 = va_arg(ap, int);
73705+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73706+ break;
73707+ case GR_CRASH1:
73708+ task = va_arg(ap, struct task_struct *);
73709+ cred = __task_cred(task);
73710+ pcred = __task_cred(task->real_parent);
73711+ ulong1 = va_arg(ap, unsigned long);
73712+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
73713+ break;
73714+ case GR_CRASH2:
73715+ task = va_arg(ap, struct task_struct *);
73716+ cred = __task_cred(task);
73717+ pcred = __task_cred(task->real_parent);
73718+ ulong1 = va_arg(ap, unsigned long);
73719+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
73720+ break;
73721+ case GR_RWXMAP:
73722+ file = va_arg(ap, struct file *);
73723+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
73724+ break;
73725+ case GR_RWXMAPVMA:
73726+ vma = va_arg(ap, struct vm_area_struct *);
73727+ if (vma->vm_file)
73728+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
73729+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
73730+ str1 = "<stack>";
73731+ else if (vma->vm_start <= current->mm->brk &&
73732+ vma->vm_end >= current->mm->start_brk)
73733+ str1 = "<heap>";
73734+ else
73735+ str1 = "<anonymous mapping>";
73736+ gr_log_middle_varargs(audit, msg, str1);
73737+ break;
73738+ case GR_PSACCT:
73739+ {
73740+ unsigned int wday, cday;
73741+ __u8 whr, chr;
73742+ __u8 wmin, cmin;
73743+ __u8 wsec, csec;
73744+ char cur_tty[64] = { 0 };
73745+ char parent_tty[64] = { 0 };
73746+
73747+ task = va_arg(ap, struct task_struct *);
73748+ wday = va_arg(ap, unsigned int);
73749+ cday = va_arg(ap, unsigned int);
73750+ whr = va_arg(ap, int);
73751+ chr = va_arg(ap, int);
73752+ wmin = va_arg(ap, int);
73753+ cmin = va_arg(ap, int);
73754+ wsec = va_arg(ap, int);
73755+ csec = va_arg(ap, int);
73756+ ulong1 = va_arg(ap, unsigned long);
73757+ cred = __task_cred(task);
73758+ pcred = __task_cred(task->real_parent);
73759+
73760+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
73761+ }
73762+ break;
73763+ default:
73764+ gr_log_middle(audit, msg, ap);
73765+ }
73766+ va_end(ap);
73767+ // these don't need DEFAULTSECARGS printed on the end
73768+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
73769+ gr_log_end(audit, 0);
73770+ else
73771+ gr_log_end(audit, 1);
73772+ END_LOCKS(audit);
73773+}
73774diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
73775new file mode 100644
73776index 0000000..0e39d8c
73777--- /dev/null
73778+++ b/grsecurity/grsec_mem.c
73779@@ -0,0 +1,48 @@
73780+#include <linux/kernel.h>
73781+#include <linux/sched.h>
73782+#include <linux/mm.h>
73783+#include <linux/mman.h>
73784+#include <linux/module.h>
73785+#include <linux/grinternal.h>
73786+
73787+void gr_handle_msr_write(void)
73788+{
73789+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
73790+ return;
73791+}
73792+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
73793+
73794+void
73795+gr_handle_ioperm(void)
73796+{
73797+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
73798+ return;
73799+}
73800+
73801+void
73802+gr_handle_iopl(void)
73803+{
73804+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
73805+ return;
73806+}
73807+
73808+void
73809+gr_handle_mem_readwrite(u64 from, u64 to)
73810+{
73811+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
73812+ return;
73813+}
73814+
73815+void
73816+gr_handle_vm86(void)
73817+{
73818+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
73819+ return;
73820+}
73821+
73822+void
73823+gr_log_badprocpid(const char *entry)
73824+{
73825+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
73826+ return;
73827+}
73828diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
73829new file mode 100644
73830index 0000000..cd9e124
73831--- /dev/null
73832+++ b/grsecurity/grsec_mount.c
73833@@ -0,0 +1,65 @@
73834+#include <linux/kernel.h>
73835+#include <linux/sched.h>
73836+#include <linux/mount.h>
73837+#include <linux/major.h>
73838+#include <linux/grsecurity.h>
73839+#include <linux/grinternal.h>
73840+
73841+void
73842+gr_log_remount(const char *devname, const int retval)
73843+{
73844+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73845+ if (grsec_enable_mount && (retval >= 0))
73846+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
73847+#endif
73848+ return;
73849+}
73850+
73851+void
73852+gr_log_unmount(const char *devname, const int retval)
73853+{
73854+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73855+ if (grsec_enable_mount && (retval >= 0))
73856+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
73857+#endif
73858+ return;
73859+}
73860+
73861+void
73862+gr_log_mount(const char *from, const char *to, const int retval)
73863+{
73864+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73865+ if (grsec_enable_mount && (retval >= 0))
73866+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
73867+#endif
73868+ return;
73869+}
73870+
73871+int
73872+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
73873+{
73874+#ifdef CONFIG_GRKERNSEC_ROFS
73875+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
73876+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
73877+ return -EPERM;
73878+ } else
73879+ return 0;
73880+#endif
73881+ return 0;
73882+}
73883+
73884+int
73885+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
73886+{
73887+#ifdef CONFIG_GRKERNSEC_ROFS
73888+ struct inode *inode = dentry->d_inode;
73889+
73890+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
73891+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
73892+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
73893+ return -EPERM;
73894+ } else
73895+ return 0;
73896+#endif
73897+ return 0;
73898+}
73899diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
73900new file mode 100644
73901index 0000000..6ee9d50
73902--- /dev/null
73903+++ b/grsecurity/grsec_pax.c
73904@@ -0,0 +1,45 @@
73905+#include <linux/kernel.h>
73906+#include <linux/sched.h>
73907+#include <linux/mm.h>
73908+#include <linux/file.h>
73909+#include <linux/grinternal.h>
73910+#include <linux/grsecurity.h>
73911+
73912+void
73913+gr_log_textrel(struct vm_area_struct * vma)
73914+{
73915+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73916+ if (grsec_enable_log_rwxmaps)
73917+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
73918+#endif
73919+ return;
73920+}
73921+
73922+void gr_log_ptgnustack(struct file *file)
73923+{
73924+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73925+ if (grsec_enable_log_rwxmaps)
73926+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
73927+#endif
73928+ return;
73929+}
73930+
73931+void
73932+gr_log_rwxmmap(struct file *file)
73933+{
73934+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73935+ if (grsec_enable_log_rwxmaps)
73936+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
73937+#endif
73938+ return;
73939+}
73940+
73941+void
73942+gr_log_rwxmprotect(struct vm_area_struct *vma)
73943+{
73944+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73945+ if (grsec_enable_log_rwxmaps)
73946+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
73947+#endif
73948+ return;
73949+}
73950diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
73951new file mode 100644
73952index 0000000..f7f29aa
73953--- /dev/null
73954+++ b/grsecurity/grsec_ptrace.c
73955@@ -0,0 +1,30 @@
73956+#include <linux/kernel.h>
73957+#include <linux/sched.h>
73958+#include <linux/grinternal.h>
73959+#include <linux/security.h>
73960+
73961+void
73962+gr_audit_ptrace(struct task_struct *task)
73963+{
73964+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
73965+ if (grsec_enable_audit_ptrace)
73966+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
73967+#endif
73968+ return;
73969+}
73970+
73971+int
73972+gr_ptrace_readexec(struct file *file, int unsafe_flags)
73973+{
73974+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
73975+ const struct dentry *dentry = file->f_path.dentry;
73976+ const struct vfsmount *mnt = file->f_path.mnt;
73977+
73978+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
73979+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
73980+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
73981+ return -EACCES;
73982+ }
73983+#endif
73984+ return 0;
73985+}
73986diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
73987new file mode 100644
73988index 0000000..3860c7e
73989--- /dev/null
73990+++ b/grsecurity/grsec_sig.c
73991@@ -0,0 +1,236 @@
73992+#include <linux/kernel.h>
73993+#include <linux/sched.h>
73994+#include <linux/fs.h>
73995+#include <linux/delay.h>
73996+#include <linux/grsecurity.h>
73997+#include <linux/grinternal.h>
73998+#include <linux/hardirq.h>
73999+
74000+char *signames[] = {
74001+ [SIGSEGV] = "Segmentation fault",
74002+ [SIGILL] = "Illegal instruction",
74003+ [SIGABRT] = "Abort",
74004+ [SIGBUS] = "Invalid alignment/Bus error"
74005+};
74006+
74007+void
74008+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
74009+{
74010+#ifdef CONFIG_GRKERNSEC_SIGNAL
74011+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
74012+ (sig == SIGABRT) || (sig == SIGBUS))) {
74013+ if (task_pid_nr(t) == task_pid_nr(current)) {
74014+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
74015+ } else {
74016+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
74017+ }
74018+ }
74019+#endif
74020+ return;
74021+}
74022+
74023+int
74024+gr_handle_signal(const struct task_struct *p, const int sig)
74025+{
74026+#ifdef CONFIG_GRKERNSEC
74027+ /* ignore the 0 signal for protected task checks */
74028+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
74029+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
74030+ return -EPERM;
74031+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
74032+ return -EPERM;
74033+ }
74034+#endif
74035+ return 0;
74036+}
74037+
74038+#ifdef CONFIG_GRKERNSEC
74039+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
74040+
74041+int gr_fake_force_sig(int sig, struct task_struct *t)
74042+{
74043+ unsigned long int flags;
74044+ int ret, blocked, ignored;
74045+ struct k_sigaction *action;
74046+
74047+ spin_lock_irqsave(&t->sighand->siglock, flags);
74048+ action = &t->sighand->action[sig-1];
74049+ ignored = action->sa.sa_handler == SIG_IGN;
74050+ blocked = sigismember(&t->blocked, sig);
74051+ if (blocked || ignored) {
74052+ action->sa.sa_handler = SIG_DFL;
74053+ if (blocked) {
74054+ sigdelset(&t->blocked, sig);
74055+ recalc_sigpending_and_wake(t);
74056+ }
74057+ }
74058+ if (action->sa.sa_handler == SIG_DFL)
74059+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
74060+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
74061+
74062+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
74063+
74064+ return ret;
74065+}
74066+#endif
74067+
74068+#define GR_USER_BAN_TIME (15 * 60)
74069+#define GR_DAEMON_BRUTE_TIME (30 * 60)
74070+
74071+void gr_handle_brute_attach(int dumpable)
74072+{
74073+#ifdef CONFIG_GRKERNSEC_BRUTE
74074+ struct task_struct *p = current;
74075+ kuid_t uid = GLOBAL_ROOT_UID;
74076+ int daemon = 0;
74077+
74078+ if (!grsec_enable_brute)
74079+ return;
74080+
74081+ rcu_read_lock();
74082+ read_lock(&tasklist_lock);
74083+ read_lock(&grsec_exec_file_lock);
74084+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
74085+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
74086+ p->real_parent->brute = 1;
74087+ daemon = 1;
74088+ } else {
74089+ const struct cred *cred = __task_cred(p), *cred2;
74090+ struct task_struct *tsk, *tsk2;
74091+
74092+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
74093+ struct user_struct *user;
74094+
74095+ uid = cred->uid;
74096+
74097+ /* this is put upon execution past expiration */
74098+ user = find_user(uid);
74099+ if (user == NULL)
74100+ goto unlock;
74101+ user->suid_banned = 1;
74102+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
74103+ if (user->suid_ban_expires == ~0UL)
74104+ user->suid_ban_expires--;
74105+
74106+ /* only kill other threads of the same binary, from the same user */
74107+ do_each_thread(tsk2, tsk) {
74108+ cred2 = __task_cred(tsk);
74109+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
74110+ gr_fake_force_sig(SIGKILL, tsk);
74111+ } while_each_thread(tsk2, tsk);
74112+ }
74113+ }
74114+unlock:
74115+ read_unlock(&grsec_exec_file_lock);
74116+ read_unlock(&tasklist_lock);
74117+ rcu_read_unlock();
74118+
74119+ if (gr_is_global_nonroot(uid))
74120+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
74121+ else if (daemon)
74122+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
74123+
74124+#endif
74125+ return;
74126+}
74127+
74128+void gr_handle_brute_check(void)
74129+{
74130+#ifdef CONFIG_GRKERNSEC_BRUTE
74131+ struct task_struct *p = current;
74132+
74133+ if (unlikely(p->brute)) {
74134+ if (!grsec_enable_brute)
74135+ p->brute = 0;
74136+ else if (time_before(get_seconds(), p->brute_expires))
74137+ msleep(30 * 1000);
74138+ }
74139+#endif
74140+ return;
74141+}
74142+
74143+void gr_handle_kernel_exploit(void)
74144+{
74145+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74146+ const struct cred *cred;
74147+ struct task_struct *tsk, *tsk2;
74148+ struct user_struct *user;
74149+ kuid_t uid;
74150+
74151+ if (in_irq() || in_serving_softirq() || in_nmi())
74152+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
74153+
74154+ uid = current_uid();
74155+
74156+ if (gr_is_global_root(uid))
74157+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
74158+ else {
74159+ /* kill all the processes of this user, hold a reference
74160+ to their creds struct, and prevent them from creating
74161+ another process until system reset
74162+ */
74163+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
74164+ GR_GLOBAL_UID(uid));
74165+ /* we intentionally leak this ref */
74166+ user = get_uid(current->cred->user);
74167+ if (user)
74168+ user->kernel_banned = 1;
74169+
74170+ /* kill all processes of this user */
74171+ read_lock(&tasklist_lock);
74172+ do_each_thread(tsk2, tsk) {
74173+ cred = __task_cred(tsk);
74174+ if (uid_eq(cred->uid, uid))
74175+ gr_fake_force_sig(SIGKILL, tsk);
74176+ } while_each_thread(tsk2, tsk);
74177+ read_unlock(&tasklist_lock);
74178+ }
74179+#endif
74180+}
74181+
74182+#ifdef CONFIG_GRKERNSEC_BRUTE
74183+static bool suid_ban_expired(struct user_struct *user)
74184+{
74185+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
74186+ user->suid_banned = 0;
74187+ user->suid_ban_expires = 0;
74188+ free_uid(user);
74189+ return true;
74190+ }
74191+
74192+ return false;
74193+}
74194+#endif
74195+
74196+int gr_process_kernel_exec_ban(void)
74197+{
74198+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74199+ if (unlikely(current->cred->user->kernel_banned))
74200+ return -EPERM;
74201+#endif
74202+ return 0;
74203+}
74204+
74205+int gr_process_kernel_setuid_ban(struct user_struct *user)
74206+{
74207+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74208+ if (unlikely(user->kernel_banned))
74209+ gr_fake_force_sig(SIGKILL, current);
74210+#endif
74211+ return 0;
74212+}
74213+
74214+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
74215+{
74216+#ifdef CONFIG_GRKERNSEC_BRUTE
74217+ struct user_struct *user = current->cred->user;
74218+ if (unlikely(user->suid_banned)) {
74219+ if (suid_ban_expired(user))
74220+ return 0;
74221+ /* disallow execution of suid binaries only */
74222+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
74223+ return -EPERM;
74224+ }
74225+#endif
74226+ return 0;
74227+}
74228diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
74229new file mode 100644
74230index 0000000..c0aef3a
74231--- /dev/null
74232+++ b/grsecurity/grsec_sock.c
74233@@ -0,0 +1,244 @@
74234+#include <linux/kernel.h>
74235+#include <linux/module.h>
74236+#include <linux/sched.h>
74237+#include <linux/file.h>
74238+#include <linux/net.h>
74239+#include <linux/in.h>
74240+#include <linux/ip.h>
74241+#include <net/sock.h>
74242+#include <net/inet_sock.h>
74243+#include <linux/grsecurity.h>
74244+#include <linux/grinternal.h>
74245+#include <linux/gracl.h>
74246+
74247+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
74248+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
74249+
74250+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
74251+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
74252+
74253+#ifdef CONFIG_UNIX_MODULE
74254+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
74255+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
74256+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
74257+EXPORT_SYMBOL_GPL(gr_handle_create);
74258+#endif
74259+
74260+#ifdef CONFIG_GRKERNSEC
74261+#define gr_conn_table_size 32749
74262+struct conn_table_entry {
74263+ struct conn_table_entry *next;
74264+ struct signal_struct *sig;
74265+};
74266+
74267+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
74268+DEFINE_SPINLOCK(gr_conn_table_lock);
74269+
74270+extern const char * gr_socktype_to_name(unsigned char type);
74271+extern const char * gr_proto_to_name(unsigned char proto);
74272+extern const char * gr_sockfamily_to_name(unsigned char family);
74273+
74274+static __inline__ int
74275+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
74276+{
74277+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
74278+}
74279+
74280+static __inline__ int
74281+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
74282+ __u16 sport, __u16 dport)
74283+{
74284+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
74285+ sig->gr_sport == sport && sig->gr_dport == dport))
74286+ return 1;
74287+ else
74288+ return 0;
74289+}
74290+
74291+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
74292+{
74293+ struct conn_table_entry **match;
74294+ unsigned int index;
74295+
74296+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
74297+ sig->gr_sport, sig->gr_dport,
74298+ gr_conn_table_size);
74299+
74300+ newent->sig = sig;
74301+
74302+ match = &gr_conn_table[index];
74303+ newent->next = *match;
74304+ *match = newent;
74305+
74306+ return;
74307+}
74308+
74309+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
74310+{
74311+ struct conn_table_entry *match, *last = NULL;
74312+ unsigned int index;
74313+
74314+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
74315+ sig->gr_sport, sig->gr_dport,
74316+ gr_conn_table_size);
74317+
74318+ match = gr_conn_table[index];
74319+ while (match && !conn_match(match->sig,
74320+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
74321+ sig->gr_dport)) {
74322+ last = match;
74323+ match = match->next;
74324+ }
74325+
74326+ if (match) {
74327+ if (last)
74328+ last->next = match->next;
74329+ else
74330+ gr_conn_table[index] = NULL;
74331+ kfree(match);
74332+ }
74333+
74334+ return;
74335+}
74336+
74337+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
74338+ __u16 sport, __u16 dport)
74339+{
74340+ struct conn_table_entry *match;
74341+ unsigned int index;
74342+
74343+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
74344+
74345+ match = gr_conn_table[index];
74346+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
74347+ match = match->next;
74348+
74349+ if (match)
74350+ return match->sig;
74351+ else
74352+ return NULL;
74353+}
74354+
74355+#endif
74356+
74357+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
74358+{
74359+#ifdef CONFIG_GRKERNSEC
74360+ struct signal_struct *sig = task->signal;
74361+ struct conn_table_entry *newent;
74362+
74363+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
74364+ if (newent == NULL)
74365+ return;
74366+ /* no bh lock needed since we are called with bh disabled */
74367+ spin_lock(&gr_conn_table_lock);
74368+ gr_del_task_from_ip_table_nolock(sig);
74369+ sig->gr_saddr = inet->inet_rcv_saddr;
74370+ sig->gr_daddr = inet->inet_daddr;
74371+ sig->gr_sport = inet->inet_sport;
74372+ sig->gr_dport = inet->inet_dport;
74373+ gr_add_to_task_ip_table_nolock(sig, newent);
74374+ spin_unlock(&gr_conn_table_lock);
74375+#endif
74376+ return;
74377+}
74378+
74379+void gr_del_task_from_ip_table(struct task_struct *task)
74380+{
74381+#ifdef CONFIG_GRKERNSEC
74382+ spin_lock_bh(&gr_conn_table_lock);
74383+ gr_del_task_from_ip_table_nolock(task->signal);
74384+ spin_unlock_bh(&gr_conn_table_lock);
74385+#endif
74386+ return;
74387+}
74388+
74389+void
74390+gr_attach_curr_ip(const struct sock *sk)
74391+{
74392+#ifdef CONFIG_GRKERNSEC
74393+ struct signal_struct *p, *set;
74394+ const struct inet_sock *inet = inet_sk(sk);
74395+
74396+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
74397+ return;
74398+
74399+ set = current->signal;
74400+
74401+ spin_lock_bh(&gr_conn_table_lock);
74402+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
74403+ inet->inet_dport, inet->inet_sport);
74404+ if (unlikely(p != NULL)) {
74405+ set->curr_ip = p->curr_ip;
74406+ set->used_accept = 1;
74407+ gr_del_task_from_ip_table_nolock(p);
74408+ spin_unlock_bh(&gr_conn_table_lock);
74409+ return;
74410+ }
74411+ spin_unlock_bh(&gr_conn_table_lock);
74412+
74413+ set->curr_ip = inet->inet_daddr;
74414+ set->used_accept = 1;
74415+#endif
74416+ return;
74417+}
74418+
74419+int
74420+gr_handle_sock_all(const int family, const int type, const int protocol)
74421+{
74422+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
74423+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
74424+ (family != AF_UNIX)) {
74425+ if (family == AF_INET)
74426+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
74427+ else
74428+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
74429+ return -EACCES;
74430+ }
74431+#endif
74432+ return 0;
74433+}
74434+
74435+int
74436+gr_handle_sock_server(const struct sockaddr *sck)
74437+{
74438+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
74439+ if (grsec_enable_socket_server &&
74440+ in_group_p(grsec_socket_server_gid) &&
74441+ sck && (sck->sa_family != AF_UNIX) &&
74442+ (sck->sa_family != AF_LOCAL)) {
74443+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
74444+ return -EACCES;
74445+ }
74446+#endif
74447+ return 0;
74448+}
74449+
74450+int
74451+gr_handle_sock_server_other(const struct sock *sck)
74452+{
74453+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
74454+ if (grsec_enable_socket_server &&
74455+ in_group_p(grsec_socket_server_gid) &&
74456+ sck && (sck->sk_family != AF_UNIX) &&
74457+ (sck->sk_family != AF_LOCAL)) {
74458+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
74459+ return -EACCES;
74460+ }
74461+#endif
74462+ return 0;
74463+}
74464+
74465+int
74466+gr_handle_sock_client(const struct sockaddr *sck)
74467+{
74468+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
74469+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
74470+ sck && (sck->sa_family != AF_UNIX) &&
74471+ (sck->sa_family != AF_LOCAL)) {
74472+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
74473+ return -EACCES;
74474+ }
74475+#endif
74476+ return 0;
74477+}
74478diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
74479new file mode 100644
74480index 0000000..8159888
74481--- /dev/null
74482+++ b/grsecurity/grsec_sysctl.c
74483@@ -0,0 +1,479 @@
74484+#include <linux/kernel.h>
74485+#include <linux/sched.h>
74486+#include <linux/sysctl.h>
74487+#include <linux/grsecurity.h>
74488+#include <linux/grinternal.h>
74489+
74490+int
74491+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
74492+{
74493+#ifdef CONFIG_GRKERNSEC_SYSCTL
74494+ if (dirname == NULL || name == NULL)
74495+ return 0;
74496+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
74497+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
74498+ return -EACCES;
74499+ }
74500+#endif
74501+ return 0;
74502+}
74503+
74504+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
74505+static int __maybe_unused __read_only one = 1;
74506+#endif
74507+
74508+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
74509+ defined(CONFIG_GRKERNSEC_DENYUSB)
74510+struct ctl_table grsecurity_table[] = {
74511+#ifdef CONFIG_GRKERNSEC_SYSCTL
74512+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
74513+#ifdef CONFIG_GRKERNSEC_IO
74514+ {
74515+ .procname = "disable_priv_io",
74516+ .data = &grsec_disable_privio,
74517+ .maxlen = sizeof(int),
74518+ .mode = 0600,
74519+ .proc_handler = &proc_dointvec,
74520+ },
74521+#endif
74522+#endif
74523+#ifdef CONFIG_GRKERNSEC_LINK
74524+ {
74525+ .procname = "linking_restrictions",
74526+ .data = &grsec_enable_link,
74527+ .maxlen = sizeof(int),
74528+ .mode = 0600,
74529+ .proc_handler = &proc_dointvec,
74530+ },
74531+#endif
74532+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
74533+ {
74534+ .procname = "enforce_symlinksifowner",
74535+ .data = &grsec_enable_symlinkown,
74536+ .maxlen = sizeof(int),
74537+ .mode = 0600,
74538+ .proc_handler = &proc_dointvec,
74539+ },
74540+ {
74541+ .procname = "symlinkown_gid",
74542+ .data = &grsec_symlinkown_gid,
74543+ .maxlen = sizeof(int),
74544+ .mode = 0600,
74545+ .proc_handler = &proc_dointvec,
74546+ },
74547+#endif
74548+#ifdef CONFIG_GRKERNSEC_BRUTE
74549+ {
74550+ .procname = "deter_bruteforce",
74551+ .data = &grsec_enable_brute,
74552+ .maxlen = sizeof(int),
74553+ .mode = 0600,
74554+ .proc_handler = &proc_dointvec,
74555+ },
74556+#endif
74557+#ifdef CONFIG_GRKERNSEC_FIFO
74558+ {
74559+ .procname = "fifo_restrictions",
74560+ .data = &grsec_enable_fifo,
74561+ .maxlen = sizeof(int),
74562+ .mode = 0600,
74563+ .proc_handler = &proc_dointvec,
74564+ },
74565+#endif
74566+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
74567+ {
74568+ .procname = "ptrace_readexec",
74569+ .data = &grsec_enable_ptrace_readexec,
74570+ .maxlen = sizeof(int),
74571+ .mode = 0600,
74572+ .proc_handler = &proc_dointvec,
74573+ },
74574+#endif
74575+#ifdef CONFIG_GRKERNSEC_SETXID
74576+ {
74577+ .procname = "consistent_setxid",
74578+ .data = &grsec_enable_setxid,
74579+ .maxlen = sizeof(int),
74580+ .mode = 0600,
74581+ .proc_handler = &proc_dointvec,
74582+ },
74583+#endif
74584+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
74585+ {
74586+ .procname = "ip_blackhole",
74587+ .data = &grsec_enable_blackhole,
74588+ .maxlen = sizeof(int),
74589+ .mode = 0600,
74590+ .proc_handler = &proc_dointvec,
74591+ },
74592+ {
74593+ .procname = "lastack_retries",
74594+ .data = &grsec_lastack_retries,
74595+ .maxlen = sizeof(int),
74596+ .mode = 0600,
74597+ .proc_handler = &proc_dointvec,
74598+ },
74599+#endif
74600+#ifdef CONFIG_GRKERNSEC_EXECLOG
74601+ {
74602+ .procname = "exec_logging",
74603+ .data = &grsec_enable_execlog,
74604+ .maxlen = sizeof(int),
74605+ .mode = 0600,
74606+ .proc_handler = &proc_dointvec,
74607+ },
74608+#endif
74609+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74610+ {
74611+ .procname = "rwxmap_logging",
74612+ .data = &grsec_enable_log_rwxmaps,
74613+ .maxlen = sizeof(int),
74614+ .mode = 0600,
74615+ .proc_handler = &proc_dointvec,
74616+ },
74617+#endif
74618+#ifdef CONFIG_GRKERNSEC_SIGNAL
74619+ {
74620+ .procname = "signal_logging",
74621+ .data = &grsec_enable_signal,
74622+ .maxlen = sizeof(int),
74623+ .mode = 0600,
74624+ .proc_handler = &proc_dointvec,
74625+ },
74626+#endif
74627+#ifdef CONFIG_GRKERNSEC_FORKFAIL
74628+ {
74629+ .procname = "forkfail_logging",
74630+ .data = &grsec_enable_forkfail,
74631+ .maxlen = sizeof(int),
74632+ .mode = 0600,
74633+ .proc_handler = &proc_dointvec,
74634+ },
74635+#endif
74636+#ifdef CONFIG_GRKERNSEC_TIME
74637+ {
74638+ .procname = "timechange_logging",
74639+ .data = &grsec_enable_time,
74640+ .maxlen = sizeof(int),
74641+ .mode = 0600,
74642+ .proc_handler = &proc_dointvec,
74643+ },
74644+#endif
74645+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
74646+ {
74647+ .procname = "chroot_deny_shmat",
74648+ .data = &grsec_enable_chroot_shmat,
74649+ .maxlen = sizeof(int),
74650+ .mode = 0600,
74651+ .proc_handler = &proc_dointvec,
74652+ },
74653+#endif
74654+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
74655+ {
74656+ .procname = "chroot_deny_unix",
74657+ .data = &grsec_enable_chroot_unix,
74658+ .maxlen = sizeof(int),
74659+ .mode = 0600,
74660+ .proc_handler = &proc_dointvec,
74661+ },
74662+#endif
74663+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
74664+ {
74665+ .procname = "chroot_deny_mount",
74666+ .data = &grsec_enable_chroot_mount,
74667+ .maxlen = sizeof(int),
74668+ .mode = 0600,
74669+ .proc_handler = &proc_dointvec,
74670+ },
74671+#endif
74672+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
74673+ {
74674+ .procname = "chroot_deny_fchdir",
74675+ .data = &grsec_enable_chroot_fchdir,
74676+ .maxlen = sizeof(int),
74677+ .mode = 0600,
74678+ .proc_handler = &proc_dointvec,
74679+ },
74680+#endif
74681+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
74682+ {
74683+ .procname = "chroot_deny_chroot",
74684+ .data = &grsec_enable_chroot_double,
74685+ .maxlen = sizeof(int),
74686+ .mode = 0600,
74687+ .proc_handler = &proc_dointvec,
74688+ },
74689+#endif
74690+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
74691+ {
74692+ .procname = "chroot_deny_pivot",
74693+ .data = &grsec_enable_chroot_pivot,
74694+ .maxlen = sizeof(int),
74695+ .mode = 0600,
74696+ .proc_handler = &proc_dointvec,
74697+ },
74698+#endif
74699+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
74700+ {
74701+ .procname = "chroot_enforce_chdir",
74702+ .data = &grsec_enable_chroot_chdir,
74703+ .maxlen = sizeof(int),
74704+ .mode = 0600,
74705+ .proc_handler = &proc_dointvec,
74706+ },
74707+#endif
74708+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
74709+ {
74710+ .procname = "chroot_deny_chmod",
74711+ .data = &grsec_enable_chroot_chmod,
74712+ .maxlen = sizeof(int),
74713+ .mode = 0600,
74714+ .proc_handler = &proc_dointvec,
74715+ },
74716+#endif
74717+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
74718+ {
74719+ .procname = "chroot_deny_mknod",
74720+ .data = &grsec_enable_chroot_mknod,
74721+ .maxlen = sizeof(int),
74722+ .mode = 0600,
74723+ .proc_handler = &proc_dointvec,
74724+ },
74725+#endif
74726+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
74727+ {
74728+ .procname = "chroot_restrict_nice",
74729+ .data = &grsec_enable_chroot_nice,
74730+ .maxlen = sizeof(int),
74731+ .mode = 0600,
74732+ .proc_handler = &proc_dointvec,
74733+ },
74734+#endif
74735+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
74736+ {
74737+ .procname = "chroot_execlog",
74738+ .data = &grsec_enable_chroot_execlog,
74739+ .maxlen = sizeof(int),
74740+ .mode = 0600,
74741+ .proc_handler = &proc_dointvec,
74742+ },
74743+#endif
74744+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
74745+ {
74746+ .procname = "chroot_caps",
74747+ .data = &grsec_enable_chroot_caps,
74748+ .maxlen = sizeof(int),
74749+ .mode = 0600,
74750+ .proc_handler = &proc_dointvec,
74751+ },
74752+#endif
74753+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
74754+ {
74755+ .procname = "chroot_deny_sysctl",
74756+ .data = &grsec_enable_chroot_sysctl,
74757+ .maxlen = sizeof(int),
74758+ .mode = 0600,
74759+ .proc_handler = &proc_dointvec,
74760+ },
74761+#endif
74762+#ifdef CONFIG_GRKERNSEC_TPE
74763+ {
74764+ .procname = "tpe",
74765+ .data = &grsec_enable_tpe,
74766+ .maxlen = sizeof(int),
74767+ .mode = 0600,
74768+ .proc_handler = &proc_dointvec,
74769+ },
74770+ {
74771+ .procname = "tpe_gid",
74772+ .data = &grsec_tpe_gid,
74773+ .maxlen = sizeof(int),
74774+ .mode = 0600,
74775+ .proc_handler = &proc_dointvec,
74776+ },
74777+#endif
74778+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
74779+ {
74780+ .procname = "tpe_invert",
74781+ .data = &grsec_enable_tpe_invert,
74782+ .maxlen = sizeof(int),
74783+ .mode = 0600,
74784+ .proc_handler = &proc_dointvec,
74785+ },
74786+#endif
74787+#ifdef CONFIG_GRKERNSEC_TPE_ALL
74788+ {
74789+ .procname = "tpe_restrict_all",
74790+ .data = &grsec_enable_tpe_all,
74791+ .maxlen = sizeof(int),
74792+ .mode = 0600,
74793+ .proc_handler = &proc_dointvec,
74794+ },
74795+#endif
74796+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
74797+ {
74798+ .procname = "socket_all",
74799+ .data = &grsec_enable_socket_all,
74800+ .maxlen = sizeof(int),
74801+ .mode = 0600,
74802+ .proc_handler = &proc_dointvec,
74803+ },
74804+ {
74805+ .procname = "socket_all_gid",
74806+ .data = &grsec_socket_all_gid,
74807+ .maxlen = sizeof(int),
74808+ .mode = 0600,
74809+ .proc_handler = &proc_dointvec,
74810+ },
74811+#endif
74812+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
74813+ {
74814+ .procname = "socket_client",
74815+ .data = &grsec_enable_socket_client,
74816+ .maxlen = sizeof(int),
74817+ .mode = 0600,
74818+ .proc_handler = &proc_dointvec,
74819+ },
74820+ {
74821+ .procname = "socket_client_gid",
74822+ .data = &grsec_socket_client_gid,
74823+ .maxlen = sizeof(int),
74824+ .mode = 0600,
74825+ .proc_handler = &proc_dointvec,
74826+ },
74827+#endif
74828+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
74829+ {
74830+ .procname = "socket_server",
74831+ .data = &grsec_enable_socket_server,
74832+ .maxlen = sizeof(int),
74833+ .mode = 0600,
74834+ .proc_handler = &proc_dointvec,
74835+ },
74836+ {
74837+ .procname = "socket_server_gid",
74838+ .data = &grsec_socket_server_gid,
74839+ .maxlen = sizeof(int),
74840+ .mode = 0600,
74841+ .proc_handler = &proc_dointvec,
74842+ },
74843+#endif
74844+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
74845+ {
74846+ .procname = "audit_group",
74847+ .data = &grsec_enable_group,
74848+ .maxlen = sizeof(int),
74849+ .mode = 0600,
74850+ .proc_handler = &proc_dointvec,
74851+ },
74852+ {
74853+ .procname = "audit_gid",
74854+ .data = &grsec_audit_gid,
74855+ .maxlen = sizeof(int),
74856+ .mode = 0600,
74857+ .proc_handler = &proc_dointvec,
74858+ },
74859+#endif
74860+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
74861+ {
74862+ .procname = "audit_chdir",
74863+ .data = &grsec_enable_chdir,
74864+ .maxlen = sizeof(int),
74865+ .mode = 0600,
74866+ .proc_handler = &proc_dointvec,
74867+ },
74868+#endif
74869+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74870+ {
74871+ .procname = "audit_mount",
74872+ .data = &grsec_enable_mount,
74873+ .maxlen = sizeof(int),
74874+ .mode = 0600,
74875+ .proc_handler = &proc_dointvec,
74876+ },
74877+#endif
74878+#ifdef CONFIG_GRKERNSEC_DMESG
74879+ {
74880+ .procname = "dmesg",
74881+ .data = &grsec_enable_dmesg,
74882+ .maxlen = sizeof(int),
74883+ .mode = 0600,
74884+ .proc_handler = &proc_dointvec,
74885+ },
74886+#endif
74887+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74888+ {
74889+ .procname = "chroot_findtask",
74890+ .data = &grsec_enable_chroot_findtask,
74891+ .maxlen = sizeof(int),
74892+ .mode = 0600,
74893+ .proc_handler = &proc_dointvec,
74894+ },
74895+#endif
74896+#ifdef CONFIG_GRKERNSEC_RESLOG
74897+ {
74898+ .procname = "resource_logging",
74899+ .data = &grsec_resource_logging,
74900+ .maxlen = sizeof(int),
74901+ .mode = 0600,
74902+ .proc_handler = &proc_dointvec,
74903+ },
74904+#endif
74905+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
74906+ {
74907+ .procname = "audit_ptrace",
74908+ .data = &grsec_enable_audit_ptrace,
74909+ .maxlen = sizeof(int),
74910+ .mode = 0600,
74911+ .proc_handler = &proc_dointvec,
74912+ },
74913+#endif
74914+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
74915+ {
74916+ .procname = "harden_ptrace",
74917+ .data = &grsec_enable_harden_ptrace,
74918+ .maxlen = sizeof(int),
74919+ .mode = 0600,
74920+ .proc_handler = &proc_dointvec,
74921+ },
74922+#endif
74923+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
74924+ {
74925+ .procname = "harden_ipc",
74926+ .data = &grsec_enable_harden_ipc,
74927+ .maxlen = sizeof(int),
74928+ .mode = 0600,
74929+ .proc_handler = &proc_dointvec,
74930+ },
74931+#endif
74932+ {
74933+ .procname = "grsec_lock",
74934+ .data = &grsec_lock,
74935+ .maxlen = sizeof(int),
74936+ .mode = 0600,
74937+ .proc_handler = &proc_dointvec,
74938+ },
74939+#endif
74940+#ifdef CONFIG_GRKERNSEC_ROFS
74941+ {
74942+ .procname = "romount_protect",
74943+ .data = &grsec_enable_rofs,
74944+ .maxlen = sizeof(int),
74945+ .mode = 0600,
74946+ .proc_handler = &proc_dointvec_minmax,
74947+ .extra1 = &one,
74948+ .extra2 = &one,
74949+ },
74950+#endif
74951+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
74952+ {
74953+ .procname = "deny_new_usb",
74954+ .data = &grsec_deny_new_usb,
74955+ .maxlen = sizeof(int),
74956+ .mode = 0600,
74957+ .proc_handler = &proc_dointvec,
74958+ },
74959+#endif
74960+ { }
74961+};
74962+#endif
74963diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
74964new file mode 100644
74965index 0000000..61b514e
74966--- /dev/null
74967+++ b/grsecurity/grsec_time.c
74968@@ -0,0 +1,16 @@
74969+#include <linux/kernel.h>
74970+#include <linux/sched.h>
74971+#include <linux/grinternal.h>
74972+#include <linux/module.h>
74973+
74974+void
74975+gr_log_timechange(void)
74976+{
74977+#ifdef CONFIG_GRKERNSEC_TIME
74978+ if (grsec_enable_time)
74979+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
74980+#endif
74981+ return;
74982+}
74983+
74984+EXPORT_SYMBOL_GPL(gr_log_timechange);
74985diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
74986new file mode 100644
74987index 0000000..ee57dcf
74988--- /dev/null
74989+++ b/grsecurity/grsec_tpe.c
74990@@ -0,0 +1,73 @@
74991+#include <linux/kernel.h>
74992+#include <linux/sched.h>
74993+#include <linux/file.h>
74994+#include <linux/fs.h>
74995+#include <linux/grinternal.h>
74996+
74997+extern int gr_acl_tpe_check(void);
74998+
74999+int
75000+gr_tpe_allow(const struct file *file)
75001+{
75002+#ifdef CONFIG_GRKERNSEC
75003+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
75004+ const struct cred *cred = current_cred();
75005+ char *msg = NULL;
75006+ char *msg2 = NULL;
75007+
75008+ // never restrict root
75009+ if (gr_is_global_root(cred->uid))
75010+ return 1;
75011+
75012+ if (grsec_enable_tpe) {
75013+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
75014+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
75015+ msg = "not being in trusted group";
75016+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
75017+ msg = "being in untrusted group";
75018+#else
75019+ if (in_group_p(grsec_tpe_gid))
75020+ msg = "being in untrusted group";
75021+#endif
75022+ }
75023+ if (!msg && gr_acl_tpe_check())
75024+ msg = "being in untrusted role";
75025+
75026+ // not in any affected group/role
75027+ if (!msg)
75028+ goto next_check;
75029+
75030+ if (gr_is_global_nonroot(inode->i_uid))
75031+ msg2 = "file in non-root-owned directory";
75032+ else if (inode->i_mode & S_IWOTH)
75033+ msg2 = "file in world-writable directory";
75034+ else if (inode->i_mode & S_IWGRP)
75035+ msg2 = "file in group-writable directory";
75036+
75037+ if (msg && msg2) {
75038+ char fullmsg[70] = {0};
75039+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
75040+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
75041+ return 0;
75042+ }
75043+ msg = NULL;
75044+next_check:
75045+#ifdef CONFIG_GRKERNSEC_TPE_ALL
75046+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
75047+ return 1;
75048+
75049+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
75050+ msg = "directory not owned by user";
75051+ else if (inode->i_mode & S_IWOTH)
75052+ msg = "file in world-writable directory";
75053+ else if (inode->i_mode & S_IWGRP)
75054+ msg = "file in group-writable directory";
75055+
75056+ if (msg) {
75057+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
75058+ return 0;
75059+ }
75060+#endif
75061+#endif
75062+ return 1;
75063+}
75064diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
75065new file mode 100644
75066index 0000000..ae02d8e
75067--- /dev/null
75068+++ b/grsecurity/grsec_usb.c
75069@@ -0,0 +1,15 @@
75070+#include <linux/kernel.h>
75071+#include <linux/grinternal.h>
75072+#include <linux/module.h>
75073+
75074+int gr_handle_new_usb(void)
75075+{
75076+#ifdef CONFIG_GRKERNSEC_DENYUSB
75077+ if (grsec_deny_new_usb) {
75078+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
75079+ return 1;
75080+ }
75081+#endif
75082+ return 0;
75083+}
75084+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
75085diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
75086new file mode 100644
75087index 0000000..9f7b1ac
75088--- /dev/null
75089+++ b/grsecurity/grsum.c
75090@@ -0,0 +1,61 @@
75091+#include <linux/err.h>
75092+#include <linux/kernel.h>
75093+#include <linux/sched.h>
75094+#include <linux/mm.h>
75095+#include <linux/scatterlist.h>
75096+#include <linux/crypto.h>
75097+#include <linux/gracl.h>
75098+
75099+
75100+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
75101+#error "crypto and sha256 must be built into the kernel"
75102+#endif
75103+
75104+int
75105+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
75106+{
75107+ char *p;
75108+ struct crypto_hash *tfm;
75109+ struct hash_desc desc;
75110+ struct scatterlist sg;
75111+ unsigned char temp_sum[GR_SHA_LEN];
75112+ volatile int retval = 0;
75113+ volatile int dummy = 0;
75114+ unsigned int i;
75115+
75116+ sg_init_table(&sg, 1);
75117+
75118+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
75119+ if (IS_ERR(tfm)) {
75120+ /* should never happen, since sha256 should be built in */
75121+ return 1;
75122+ }
75123+
75124+ desc.tfm = tfm;
75125+ desc.flags = 0;
75126+
75127+ crypto_hash_init(&desc);
75128+
75129+ p = salt;
75130+ sg_set_buf(&sg, p, GR_SALT_LEN);
75131+ crypto_hash_update(&desc, &sg, sg.length);
75132+
75133+ p = entry->pw;
75134+ sg_set_buf(&sg, p, strlen(p));
75135+
75136+ crypto_hash_update(&desc, &sg, sg.length);
75137+
75138+ crypto_hash_final(&desc, temp_sum);
75139+
75140+ memset(entry->pw, 0, GR_PW_LEN);
75141+
75142+ for (i = 0; i < GR_SHA_LEN; i++)
75143+ if (sum[i] != temp_sum[i])
75144+ retval = 1;
75145+ else
75146+ dummy = 1; // waste a cycle
75147+
75148+ crypto_free_hash(tfm);
75149+
75150+ return retval;
75151+}
75152diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
75153index 77ff547..181834f 100644
75154--- a/include/asm-generic/4level-fixup.h
75155+++ b/include/asm-generic/4level-fixup.h
75156@@ -13,8 +13,10 @@
75157 #define pmd_alloc(mm, pud, address) \
75158 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
75159 NULL: pmd_offset(pud, address))
75160+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
75161
75162 #define pud_alloc(mm, pgd, address) (pgd)
75163+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
75164 #define pud_offset(pgd, start) (pgd)
75165 #define pud_none(pud) 0
75166 #define pud_bad(pud) 0
75167diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
75168index b7babf0..97f4c4f 100644
75169--- a/include/asm-generic/atomic-long.h
75170+++ b/include/asm-generic/atomic-long.h
75171@@ -22,6 +22,12 @@
75172
75173 typedef atomic64_t atomic_long_t;
75174
75175+#ifdef CONFIG_PAX_REFCOUNT
75176+typedef atomic64_unchecked_t atomic_long_unchecked_t;
75177+#else
75178+typedef atomic64_t atomic_long_unchecked_t;
75179+#endif
75180+
75181 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
75182
75183 static inline long atomic_long_read(atomic_long_t *l)
75184@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
75185 return (long)atomic64_read(v);
75186 }
75187
75188+#ifdef CONFIG_PAX_REFCOUNT
75189+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
75190+{
75191+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75192+
75193+ return (long)atomic64_read_unchecked(v);
75194+}
75195+#endif
75196+
75197 static inline void atomic_long_set(atomic_long_t *l, long i)
75198 {
75199 atomic64_t *v = (atomic64_t *)l;
75200@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
75201 atomic64_set(v, i);
75202 }
75203
75204+#ifdef CONFIG_PAX_REFCOUNT
75205+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
75206+{
75207+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75208+
75209+ atomic64_set_unchecked(v, i);
75210+}
75211+#endif
75212+
75213 static inline void atomic_long_inc(atomic_long_t *l)
75214 {
75215 atomic64_t *v = (atomic64_t *)l;
75216@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
75217 atomic64_inc(v);
75218 }
75219
75220+#ifdef CONFIG_PAX_REFCOUNT
75221+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
75222+{
75223+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75224+
75225+ atomic64_inc_unchecked(v);
75226+}
75227+#endif
75228+
75229 static inline void atomic_long_dec(atomic_long_t *l)
75230 {
75231 atomic64_t *v = (atomic64_t *)l;
75232@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
75233 atomic64_dec(v);
75234 }
75235
75236+#ifdef CONFIG_PAX_REFCOUNT
75237+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
75238+{
75239+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75240+
75241+ atomic64_dec_unchecked(v);
75242+}
75243+#endif
75244+
75245 static inline void atomic_long_add(long i, atomic_long_t *l)
75246 {
75247 atomic64_t *v = (atomic64_t *)l;
75248@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
75249 atomic64_add(i, v);
75250 }
75251
75252+#ifdef CONFIG_PAX_REFCOUNT
75253+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
75254+{
75255+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75256+
75257+ atomic64_add_unchecked(i, v);
75258+}
75259+#endif
75260+
75261 static inline void atomic_long_sub(long i, atomic_long_t *l)
75262 {
75263 atomic64_t *v = (atomic64_t *)l;
75264@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
75265 atomic64_sub(i, v);
75266 }
75267
75268+#ifdef CONFIG_PAX_REFCOUNT
75269+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
75270+{
75271+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75272+
75273+ atomic64_sub_unchecked(i, v);
75274+}
75275+#endif
75276+
75277 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
75278 {
75279 atomic64_t *v = (atomic64_t *)l;
75280@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
75281 return atomic64_add_negative(i, v);
75282 }
75283
75284-static inline long atomic_long_add_return(long i, atomic_long_t *l)
75285+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
75286 {
75287 atomic64_t *v = (atomic64_t *)l;
75288
75289 return (long)atomic64_add_return(i, v);
75290 }
75291
75292+#ifdef CONFIG_PAX_REFCOUNT
75293+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
75294+{
75295+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75296+
75297+ return (long)atomic64_add_return_unchecked(i, v);
75298+}
75299+#endif
75300+
75301 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
75302 {
75303 atomic64_t *v = (atomic64_t *)l;
75304@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
75305 return (long)atomic64_inc_return(v);
75306 }
75307
75308+#ifdef CONFIG_PAX_REFCOUNT
75309+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
75310+{
75311+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75312+
75313+ return (long)atomic64_inc_return_unchecked(v);
75314+}
75315+#endif
75316+
75317 static inline long atomic_long_dec_return(atomic_long_t *l)
75318 {
75319 atomic64_t *v = (atomic64_t *)l;
75320@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
75321
75322 typedef atomic_t atomic_long_t;
75323
75324+#ifdef CONFIG_PAX_REFCOUNT
75325+typedef atomic_unchecked_t atomic_long_unchecked_t;
75326+#else
75327+typedef atomic_t atomic_long_unchecked_t;
75328+#endif
75329+
75330 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
75331 static inline long atomic_long_read(atomic_long_t *l)
75332 {
75333@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
75334 return (long)atomic_read(v);
75335 }
75336
75337+#ifdef CONFIG_PAX_REFCOUNT
75338+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
75339+{
75340+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75341+
75342+ return (long)atomic_read_unchecked(v);
75343+}
75344+#endif
75345+
75346 static inline void atomic_long_set(atomic_long_t *l, long i)
75347 {
75348 atomic_t *v = (atomic_t *)l;
75349@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
75350 atomic_set(v, i);
75351 }
75352
75353+#ifdef CONFIG_PAX_REFCOUNT
75354+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
75355+{
75356+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75357+
75358+ atomic_set_unchecked(v, i);
75359+}
75360+#endif
75361+
75362 static inline void atomic_long_inc(atomic_long_t *l)
75363 {
75364 atomic_t *v = (atomic_t *)l;
75365@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
75366 atomic_inc(v);
75367 }
75368
75369+#ifdef CONFIG_PAX_REFCOUNT
75370+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
75371+{
75372+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75373+
75374+ atomic_inc_unchecked(v);
75375+}
75376+#endif
75377+
75378 static inline void atomic_long_dec(atomic_long_t *l)
75379 {
75380 atomic_t *v = (atomic_t *)l;
75381@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
75382 atomic_dec(v);
75383 }
75384
75385+#ifdef CONFIG_PAX_REFCOUNT
75386+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
75387+{
75388+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75389+
75390+ atomic_dec_unchecked(v);
75391+}
75392+#endif
75393+
75394 static inline void atomic_long_add(long i, atomic_long_t *l)
75395 {
75396 atomic_t *v = (atomic_t *)l;
75397@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
75398 atomic_add(i, v);
75399 }
75400
75401+#ifdef CONFIG_PAX_REFCOUNT
75402+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
75403+{
75404+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75405+
75406+ atomic_add_unchecked(i, v);
75407+}
75408+#endif
75409+
75410 static inline void atomic_long_sub(long i, atomic_long_t *l)
75411 {
75412 atomic_t *v = (atomic_t *)l;
75413@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
75414 atomic_sub(i, v);
75415 }
75416
75417+#ifdef CONFIG_PAX_REFCOUNT
75418+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
75419+{
75420+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75421+
75422+ atomic_sub_unchecked(i, v);
75423+}
75424+#endif
75425+
75426 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
75427 {
75428 atomic_t *v = (atomic_t *)l;
75429@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
75430 return (long)atomic_add_return(i, v);
75431 }
75432
75433+#ifdef CONFIG_PAX_REFCOUNT
75434+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
75435+{
75436+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75437+
75438+ return (long)atomic_add_return_unchecked(i, v);
75439+}
75440+
75441+#endif
75442+
75443 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
75444 {
75445 atomic_t *v = (atomic_t *)l;
75446@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
75447 return (long)atomic_inc_return(v);
75448 }
75449
75450+#ifdef CONFIG_PAX_REFCOUNT
75451+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
75452+{
75453+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
75454+
75455+ return (long)atomic_inc_return_unchecked(v);
75456+}
75457+#endif
75458+
75459 static inline long atomic_long_dec_return(atomic_long_t *l)
75460 {
75461 atomic_t *v = (atomic_t *)l;
75462@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
75463
75464 #endif /* BITS_PER_LONG == 64 */
75465
75466+#ifdef CONFIG_PAX_REFCOUNT
75467+static inline void pax_refcount_needs_these_functions(void)
75468+{
75469+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
75470+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
75471+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
75472+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
75473+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
75474+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
75475+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
75476+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
75477+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
75478+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
75479+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
75480+#ifdef CONFIG_X86
75481+ atomic_clear_mask_unchecked(0, NULL);
75482+ atomic_set_mask_unchecked(0, NULL);
75483+#endif
75484+
75485+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
75486+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
75487+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
75488+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
75489+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
75490+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
75491+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
75492+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
75493+}
75494+#else
75495+#define atomic_read_unchecked(v) atomic_read(v)
75496+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
75497+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
75498+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
75499+#define atomic_inc_unchecked(v) atomic_inc(v)
75500+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
75501+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
75502+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
75503+#define atomic_dec_unchecked(v) atomic_dec(v)
75504+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
75505+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
75506+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
75507+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
75508+
75509+#define atomic_long_read_unchecked(v) atomic_long_read(v)
75510+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
75511+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
75512+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
75513+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
75514+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
75515+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
75516+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
75517+#endif
75518+
75519 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
75520diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
75521index 33bd2de..f31bff97 100644
75522--- a/include/asm-generic/atomic.h
75523+++ b/include/asm-generic/atomic.h
75524@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
75525 * Atomically clears the bits set in @mask from @v
75526 */
75527 #ifndef atomic_clear_mask
75528-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
75529+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
75530 {
75531 unsigned long flags;
75532
75533diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
75534index b18ce4f..2ee2843 100644
75535--- a/include/asm-generic/atomic64.h
75536+++ b/include/asm-generic/atomic64.h
75537@@ -16,6 +16,8 @@ typedef struct {
75538 long long counter;
75539 } atomic64_t;
75540
75541+typedef atomic64_t atomic64_unchecked_t;
75542+
75543 #define ATOMIC64_INIT(i) { (i) }
75544
75545 extern long long atomic64_read(const atomic64_t *v);
75546@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
75547 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
75548 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
75549
75550+#define atomic64_read_unchecked(v) atomic64_read(v)
75551+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
75552+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
75553+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
75554+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
75555+#define atomic64_inc_unchecked(v) atomic64_inc(v)
75556+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
75557+#define atomic64_dec_unchecked(v) atomic64_dec(v)
75558+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
75559+
75560 #endif /* _ASM_GENERIC_ATOMIC64_H */
75561diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
75562index a60a7cc..0fe12f2 100644
75563--- a/include/asm-generic/bitops/__fls.h
75564+++ b/include/asm-generic/bitops/__fls.h
75565@@ -9,7 +9,7 @@
75566 *
75567 * Undefined if no set bit exists, so code should check against 0 first.
75568 */
75569-static __always_inline unsigned long __fls(unsigned long word)
75570+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
75571 {
75572 int num = BITS_PER_LONG - 1;
75573
75574diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
75575index 0576d1f..dad6c71 100644
75576--- a/include/asm-generic/bitops/fls.h
75577+++ b/include/asm-generic/bitops/fls.h
75578@@ -9,7 +9,7 @@
75579 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
75580 */
75581
75582-static __always_inline int fls(int x)
75583+static __always_inline int __intentional_overflow(-1) fls(int x)
75584 {
75585 int r = 32;
75586
75587diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
75588index b097cf8..3d40e14 100644
75589--- a/include/asm-generic/bitops/fls64.h
75590+++ b/include/asm-generic/bitops/fls64.h
75591@@ -15,7 +15,7 @@
75592 * at position 64.
75593 */
75594 #if BITS_PER_LONG == 32
75595-static __always_inline int fls64(__u64 x)
75596+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
75597 {
75598 __u32 h = x >> 32;
75599 if (h)
75600@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
75601 return fls(x);
75602 }
75603 #elif BITS_PER_LONG == 64
75604-static __always_inline int fls64(__u64 x)
75605+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
75606 {
75607 if (x == 0)
75608 return 0;
75609diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
75610index 1bfcfe5..e04c5c9 100644
75611--- a/include/asm-generic/cache.h
75612+++ b/include/asm-generic/cache.h
75613@@ -6,7 +6,7 @@
75614 * cache lines need to provide their own cache.h.
75615 */
75616
75617-#define L1_CACHE_SHIFT 5
75618-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
75619+#define L1_CACHE_SHIFT 5UL
75620+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
75621
75622 #endif /* __ASM_GENERIC_CACHE_H */
75623diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
75624index 0d68a1e..b74a761 100644
75625--- a/include/asm-generic/emergency-restart.h
75626+++ b/include/asm-generic/emergency-restart.h
75627@@ -1,7 +1,7 @@
75628 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
75629 #define _ASM_GENERIC_EMERGENCY_RESTART_H
75630
75631-static inline void machine_emergency_restart(void)
75632+static inline __noreturn void machine_emergency_restart(void)
75633 {
75634 machine_restart(NULL);
75635 }
75636diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
75637index 90f99c7..00ce236 100644
75638--- a/include/asm-generic/kmap_types.h
75639+++ b/include/asm-generic/kmap_types.h
75640@@ -2,9 +2,9 @@
75641 #define _ASM_GENERIC_KMAP_TYPES_H
75642
75643 #ifdef __WITH_KM_FENCE
75644-# define KM_TYPE_NR 41
75645+# define KM_TYPE_NR 42
75646 #else
75647-# define KM_TYPE_NR 20
75648+# define KM_TYPE_NR 21
75649 #endif
75650
75651 #endif
75652diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
75653index 9ceb03b..62b0b8f 100644
75654--- a/include/asm-generic/local.h
75655+++ b/include/asm-generic/local.h
75656@@ -23,24 +23,37 @@ typedef struct
75657 atomic_long_t a;
75658 } local_t;
75659
75660+typedef struct {
75661+ atomic_long_unchecked_t a;
75662+} local_unchecked_t;
75663+
75664 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
75665
75666 #define local_read(l) atomic_long_read(&(l)->a)
75667+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
75668 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
75669+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
75670 #define local_inc(l) atomic_long_inc(&(l)->a)
75671+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
75672 #define local_dec(l) atomic_long_dec(&(l)->a)
75673+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
75674 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
75675+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
75676 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
75677+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
75678
75679 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
75680 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
75681 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
75682 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
75683 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
75684+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
75685 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
75686 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
75687+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
75688
75689 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
75690+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
75691 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
75692 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
75693 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
75694diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
75695index 725612b..9cc513a 100644
75696--- a/include/asm-generic/pgtable-nopmd.h
75697+++ b/include/asm-generic/pgtable-nopmd.h
75698@@ -1,14 +1,19 @@
75699 #ifndef _PGTABLE_NOPMD_H
75700 #define _PGTABLE_NOPMD_H
75701
75702-#ifndef __ASSEMBLY__
75703-
75704 #include <asm-generic/pgtable-nopud.h>
75705
75706-struct mm_struct;
75707-
75708 #define __PAGETABLE_PMD_FOLDED
75709
75710+#define PMD_SHIFT PUD_SHIFT
75711+#define PTRS_PER_PMD 1
75712+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
75713+#define PMD_MASK (~(PMD_SIZE-1))
75714+
75715+#ifndef __ASSEMBLY__
75716+
75717+struct mm_struct;
75718+
75719 /*
75720 * Having the pmd type consist of a pud gets the size right, and allows
75721 * us to conceptually access the pud entry that this pmd is folded into
75722@@ -16,11 +21,6 @@ struct mm_struct;
75723 */
75724 typedef struct { pud_t pud; } pmd_t;
75725
75726-#define PMD_SHIFT PUD_SHIFT
75727-#define PTRS_PER_PMD 1
75728-#define PMD_SIZE (1UL << PMD_SHIFT)
75729-#define PMD_MASK (~(PMD_SIZE-1))
75730-
75731 /*
75732 * The "pud_xxx()" functions here are trivial for a folded two-level
75733 * setup: the pmd is never bad, and a pmd always exists (as it's folded
75734diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
75735index 810431d..0ec4804f 100644
75736--- a/include/asm-generic/pgtable-nopud.h
75737+++ b/include/asm-generic/pgtable-nopud.h
75738@@ -1,10 +1,15 @@
75739 #ifndef _PGTABLE_NOPUD_H
75740 #define _PGTABLE_NOPUD_H
75741
75742-#ifndef __ASSEMBLY__
75743-
75744 #define __PAGETABLE_PUD_FOLDED
75745
75746+#define PUD_SHIFT PGDIR_SHIFT
75747+#define PTRS_PER_PUD 1
75748+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
75749+#define PUD_MASK (~(PUD_SIZE-1))
75750+
75751+#ifndef __ASSEMBLY__
75752+
75753 /*
75754 * Having the pud type consist of a pgd gets the size right, and allows
75755 * us to conceptually access the pgd entry that this pud is folded into
75756@@ -12,11 +17,6 @@
75757 */
75758 typedef struct { pgd_t pgd; } pud_t;
75759
75760-#define PUD_SHIFT PGDIR_SHIFT
75761-#define PTRS_PER_PUD 1
75762-#define PUD_SIZE (1UL << PUD_SHIFT)
75763-#define PUD_MASK (~(PUD_SIZE-1))
75764-
75765 /*
75766 * The "pgd_xxx()" functions here are trivial for a folded two-level
75767 * setup: the pud is never bad, and a pud always exists (as it's folded
75768@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
75769 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
75770
75771 #define pgd_populate(mm, pgd, pud) do { } while (0)
75772+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
75773 /*
75774 * (puds are folded into pgds so this doesn't get actually called,
75775 * but the define is needed for a generic inline function.)
75776diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
75777index db09234..86683e3 100644
75778--- a/include/asm-generic/pgtable.h
75779+++ b/include/asm-generic/pgtable.h
75780@@ -736,6 +736,22 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
75781 }
75782 #endif /* CONFIG_NUMA_BALANCING */
75783
75784+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
75785+#ifdef CONFIG_PAX_KERNEXEC
75786+#error KERNEXEC requires pax_open_kernel
75787+#else
75788+static inline unsigned long pax_open_kernel(void) { return 0; }
75789+#endif
75790+#endif
75791+
75792+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
75793+#ifdef CONFIG_PAX_KERNEXEC
75794+#error KERNEXEC requires pax_close_kernel
75795+#else
75796+static inline unsigned long pax_close_kernel(void) { return 0; }
75797+#endif
75798+#endif
75799+
75800 #endif /* CONFIG_MMU */
75801
75802 #endif /* !__ASSEMBLY__ */
75803diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
75804index dc1269c..48a4f51 100644
75805--- a/include/asm-generic/uaccess.h
75806+++ b/include/asm-generic/uaccess.h
75807@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
75808 return __clear_user(to, n);
75809 }
75810
75811+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
75812+#ifdef CONFIG_PAX_MEMORY_UDEREF
75813+#error UDEREF requires pax_open_userland
75814+#else
75815+static inline unsigned long pax_open_userland(void) { return 0; }
75816+#endif
75817+#endif
75818+
75819+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
75820+#ifdef CONFIG_PAX_MEMORY_UDEREF
75821+#error UDEREF requires pax_close_userland
75822+#else
75823+static inline unsigned long pax_close_userland(void) { return 0; }
75824+#endif
75825+#endif
75826+
75827 #endif /* __ASM_GENERIC_UACCESS_H */
75828diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
75829index bc2121f..2f41f9a 100644
75830--- a/include/asm-generic/vmlinux.lds.h
75831+++ b/include/asm-generic/vmlinux.lds.h
75832@@ -232,6 +232,7 @@
75833 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
75834 VMLINUX_SYMBOL(__start_rodata) = .; \
75835 *(.rodata) *(.rodata.*) \
75836+ *(.data..read_only) \
75837 *(__vermagic) /* Kernel version magic */ \
75838 . = ALIGN(8); \
75839 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
75840@@ -716,17 +717,18 @@
75841 * section in the linker script will go there too. @phdr should have
75842 * a leading colon.
75843 *
75844- * Note that this macros defines __per_cpu_load as an absolute symbol.
75845+ * Note that this macros defines per_cpu_load as an absolute symbol.
75846 * If there is no need to put the percpu section at a predetermined
75847 * address, use PERCPU_SECTION.
75848 */
75849 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
75850- VMLINUX_SYMBOL(__per_cpu_load) = .; \
75851- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
75852+ per_cpu_load = .; \
75853+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
75854 - LOAD_OFFSET) { \
75855+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
75856 PERCPU_INPUT(cacheline) \
75857 } phdr \
75858- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
75859+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
75860
75861 /**
75862 * PERCPU_SECTION - define output section for percpu area, simple version
75863diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
75864index e73c19e..5b89e00 100644
75865--- a/include/crypto/algapi.h
75866+++ b/include/crypto/algapi.h
75867@@ -34,7 +34,7 @@ struct crypto_type {
75868 unsigned int maskclear;
75869 unsigned int maskset;
75870 unsigned int tfmsize;
75871-};
75872+} __do_const;
75873
75874 struct crypto_instance {
75875 struct crypto_alg alg;
75876diff --git a/include/drm/drmP.h b/include/drm/drmP.h
75877index 1d4a920..da65658 100644
75878--- a/include/drm/drmP.h
75879+++ b/include/drm/drmP.h
75880@@ -66,6 +66,7 @@
75881 #include <linux/workqueue.h>
75882 #include <linux/poll.h>
75883 #include <asm/pgalloc.h>
75884+#include <asm/local.h>
75885 #include <drm/drm.h>
75886 #include <drm/drm_sarea.h>
75887 #include <drm/drm_vma_manager.h>
75888@@ -278,10 +279,12 @@ do { \
75889 * \param cmd command.
75890 * \param arg argument.
75891 */
75892-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
75893+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
75894+ struct drm_file *file_priv);
75895+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
75896 struct drm_file *file_priv);
75897
75898-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
75899+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
75900 unsigned long arg);
75901
75902 #define DRM_IOCTL_NR(n) _IOC_NR(n)
75903@@ -297,10 +300,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
75904 struct drm_ioctl_desc {
75905 unsigned int cmd;
75906 int flags;
75907- drm_ioctl_t *func;
75908+ drm_ioctl_t func;
75909 unsigned int cmd_drv;
75910 const char *name;
75911-};
75912+} __do_const;
75913
75914 /**
75915 * Creates a driver or general drm_ioctl_desc array entry for the given
75916@@ -1013,7 +1016,8 @@ struct drm_info_list {
75917 int (*show)(struct seq_file*, void*); /** show callback */
75918 u32 driver_features; /**< Required driver features for this entry */
75919 void *data;
75920-};
75921+} __do_const;
75922+typedef struct drm_info_list __no_const drm_info_list_no_const;
75923
75924 /**
75925 * debugfs node structure. This structure represents a debugfs file.
75926@@ -1097,7 +1101,7 @@ struct drm_device {
75927
75928 /** \name Usage Counters */
75929 /*@{ */
75930- int open_count; /**< Outstanding files open */
75931+ local_t open_count; /**< Outstanding files open */
75932 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
75933 atomic_t vma_count; /**< Outstanding vma areas open */
75934 int buf_use; /**< Buffers in use -- cannot alloc */
75935diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
75936index ef6ad3a..be34b16 100644
75937--- a/include/drm/drm_crtc_helper.h
75938+++ b/include/drm/drm_crtc_helper.h
75939@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
75940 struct drm_connector *connector);
75941 /* disable encoder when not in use - more explicit than dpms off */
75942 void (*disable)(struct drm_encoder *encoder);
75943-};
75944+} __no_const;
75945
75946 /**
75947 * drm_connector_helper_funcs - helper operations for connectors
75948diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
75949index 940ece4..8cb727f 100644
75950--- a/include/drm/i915_pciids.h
75951+++ b/include/drm/i915_pciids.h
75952@@ -37,7 +37,7 @@
75953 */
75954 #define INTEL_VGA_DEVICE(id, info) { \
75955 0x8086, id, \
75956- ~0, ~0, \
75957+ PCI_ANY_ID, PCI_ANY_ID, \
75958 0x030000, 0xff0000, \
75959 (unsigned long) info }
75960
75961diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
75962index 72dcbe8..8db58d7 100644
75963--- a/include/drm/ttm/ttm_memory.h
75964+++ b/include/drm/ttm/ttm_memory.h
75965@@ -48,7 +48,7 @@
75966
75967 struct ttm_mem_shrink {
75968 int (*do_shrink) (struct ttm_mem_shrink *);
75969-};
75970+} __no_const;
75971
75972 /**
75973 * struct ttm_mem_global - Global memory accounting structure.
75974diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
75975index d1f61bf..2239439 100644
75976--- a/include/drm/ttm/ttm_page_alloc.h
75977+++ b/include/drm/ttm/ttm_page_alloc.h
75978@@ -78,6 +78,7 @@ void ttm_dma_page_alloc_fini(void);
75979 */
75980 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
75981
75982+struct device;
75983 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
75984 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
75985
75986diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
75987index 4b840e8..155d235 100644
75988--- a/include/keys/asymmetric-subtype.h
75989+++ b/include/keys/asymmetric-subtype.h
75990@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
75991 /* Verify the signature on a key of this subtype (optional) */
75992 int (*verify_signature)(const struct key *key,
75993 const struct public_key_signature *sig);
75994-};
75995+} __do_const;
75996
75997 /**
75998 * asymmetric_key_subtype - Get the subtype from an asymmetric key
75999diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
76000index c1da539..1dcec55 100644
76001--- a/include/linux/atmdev.h
76002+++ b/include/linux/atmdev.h
76003@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
76004 #endif
76005
76006 struct k_atm_aal_stats {
76007-#define __HANDLE_ITEM(i) atomic_t i
76008+#define __HANDLE_ITEM(i) atomic_unchecked_t i
76009 __AAL_STAT_ITEMS
76010 #undef __HANDLE_ITEM
76011 };
76012@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
76013 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
76014 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
76015 struct module *owner;
76016-};
76017+} __do_const ;
76018
76019 struct atmphy_ops {
76020 int (*start)(struct atm_dev *dev);
76021diff --git a/include/linux/audit.h b/include/linux/audit.h
76022index a406419..c2bb164 100644
76023--- a/include/linux/audit.h
76024+++ b/include/linux/audit.h
76025@@ -195,7 +195,7 @@ static inline void audit_ptrace(struct task_struct *t)
76026 extern unsigned int audit_serial(void);
76027 extern int auditsc_get_stamp(struct audit_context *ctx,
76028 struct timespec *t, unsigned int *serial);
76029-extern int audit_set_loginuid(kuid_t loginuid);
76030+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
76031
76032 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
76033 {
76034diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
76035index fd8bf32..2cccd5a 100644
76036--- a/include/linux/binfmts.h
76037+++ b/include/linux/binfmts.h
76038@@ -74,8 +74,10 @@ struct linux_binfmt {
76039 int (*load_binary)(struct linux_binprm *);
76040 int (*load_shlib)(struct file *);
76041 int (*core_dump)(struct coredump_params *cprm);
76042+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
76043+ void (*handle_mmap)(struct file *);
76044 unsigned long min_coredump; /* minimal dump size */
76045-};
76046+} __do_const;
76047
76048 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
76049
76050diff --git a/include/linux/bitops.h b/include/linux/bitops.h
76051index abc9ca7..e54ee27 100644
76052--- a/include/linux/bitops.h
76053+++ b/include/linux/bitops.h
76054@@ -102,7 +102,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
76055 * @word: value to rotate
76056 * @shift: bits to roll
76057 */
76058-static inline __u32 rol32(__u32 word, unsigned int shift)
76059+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
76060 {
76061 return (word << shift) | (word >> (32 - shift));
76062 }
76063@@ -112,7 +112,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
76064 * @word: value to rotate
76065 * @shift: bits to roll
76066 */
76067-static inline __u32 ror32(__u32 word, unsigned int shift)
76068+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
76069 {
76070 return (word >> shift) | (word << (32 - shift));
76071 }
76072@@ -168,7 +168,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
76073 return (__s32)(value << shift) >> shift;
76074 }
76075
76076-static inline unsigned fls_long(unsigned long l)
76077+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
76078 {
76079 if (sizeof(l) == 4)
76080 return fls(l);
76081diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
76082index 1b135d4..59fc876 100644
76083--- a/include/linux/blkdev.h
76084+++ b/include/linux/blkdev.h
76085@@ -1578,7 +1578,7 @@ struct block_device_operations {
76086 /* this callback is with swap_lock and sometimes page table lock held */
76087 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
76088 struct module *owner;
76089-};
76090+} __do_const;
76091
76092 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
76093 unsigned long);
76094diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
76095index afc1343..9735539 100644
76096--- a/include/linux/blktrace_api.h
76097+++ b/include/linux/blktrace_api.h
76098@@ -25,7 +25,7 @@ struct blk_trace {
76099 struct dentry *dropped_file;
76100 struct dentry *msg_file;
76101 struct list_head running_list;
76102- atomic_t dropped;
76103+ atomic_unchecked_t dropped;
76104 };
76105
76106 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
76107diff --git a/include/linux/cache.h b/include/linux/cache.h
76108index 4c57065..40346da 100644
76109--- a/include/linux/cache.h
76110+++ b/include/linux/cache.h
76111@@ -16,6 +16,14 @@
76112 #define __read_mostly
76113 #endif
76114
76115+#ifndef __read_only
76116+#ifdef CONFIG_PAX_KERNEXEC
76117+#error KERNEXEC requires __read_only
76118+#else
76119+#define __read_only __read_mostly
76120+#endif
76121+#endif
76122+
76123 #ifndef ____cacheline_aligned
76124 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
76125 #endif
76126diff --git a/include/linux/capability.h b/include/linux/capability.h
76127index a6ee1f9..e1ca49d 100644
76128--- a/include/linux/capability.h
76129+++ b/include/linux/capability.h
76130@@ -212,8 +212,13 @@ extern bool capable(int cap);
76131 extern bool ns_capable(struct user_namespace *ns, int cap);
76132 extern bool inode_capable(const struct inode *inode, int cap);
76133 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
76134+extern bool capable_nolog(int cap);
76135+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
76136+extern bool inode_capable_nolog(const struct inode *inode, int cap);
76137
76138 /* audit system wants to get cap info from files as well */
76139 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
76140
76141+extern int is_privileged_binary(const struct dentry *dentry);
76142+
76143 #endif /* !_LINUX_CAPABILITY_H */
76144diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
76145index 8609d57..86e4d79 100644
76146--- a/include/linux/cdrom.h
76147+++ b/include/linux/cdrom.h
76148@@ -87,7 +87,6 @@ struct cdrom_device_ops {
76149
76150 /* driver specifications */
76151 const int capability; /* capability flags */
76152- int n_minors; /* number of active minor devices */
76153 /* handle uniform packets for scsi type devices (scsi,atapi) */
76154 int (*generic_packet) (struct cdrom_device_info *,
76155 struct packet_command *);
76156diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
76157index 4ce9056..86caac6 100644
76158--- a/include/linux/cleancache.h
76159+++ b/include/linux/cleancache.h
76160@@ -31,7 +31,7 @@ struct cleancache_ops {
76161 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
76162 void (*invalidate_inode)(int, struct cleancache_filekey);
76163 void (*invalidate_fs)(int);
76164-};
76165+} __no_const;
76166
76167 extern struct cleancache_ops *
76168 cleancache_register_ops(struct cleancache_ops *ops);
76169diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
76170index 7e59253..d6e4cae 100644
76171--- a/include/linux/clk-provider.h
76172+++ b/include/linux/clk-provider.h
76173@@ -141,6 +141,7 @@ struct clk_ops {
76174 unsigned long);
76175 void (*init)(struct clk_hw *hw);
76176 };
76177+typedef struct clk_ops __no_const clk_ops_no_const;
76178
76179 /**
76180 * struct clk_init_data - holds init data that's common to all clocks and is
76181diff --git a/include/linux/compat.h b/include/linux/compat.h
76182index eb8a49d..6b66ed9 100644
76183--- a/include/linux/compat.h
76184+++ b/include/linux/compat.h
76185@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
76186 compat_size_t __user *len_ptr);
76187
76188 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
76189-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
76190+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
76191 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
76192 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
76193 compat_ssize_t msgsz, int msgflg);
76194@@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child,
76195 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
76196 compat_ulong_t addr, compat_ulong_t data);
76197 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
76198- compat_long_t addr, compat_long_t data);
76199+ compat_ulong_t addr, compat_ulong_t data);
76200
76201 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
76202 /*
76203diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
76204index ded4299..ddcbe31 100644
76205--- a/include/linux/compiler-gcc4.h
76206+++ b/include/linux/compiler-gcc4.h
76207@@ -39,9 +39,34 @@
76208 # define __compiletime_warning(message) __attribute__((warning(message)))
76209 # define __compiletime_error(message) __attribute__((error(message)))
76210 #endif /* __CHECKER__ */
76211+
76212+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
76213+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
76214+#define __bos0(ptr) __bos((ptr), 0)
76215+#define __bos1(ptr) __bos((ptr), 1)
76216 #endif /* GCC_VERSION >= 40300 */
76217
76218 #if GCC_VERSION >= 40500
76219+
76220+#ifdef RANDSTRUCT_PLUGIN
76221+#define __randomize_layout __attribute__((randomize_layout))
76222+#define __no_randomize_layout __attribute__((no_randomize_layout))
76223+#endif
76224+
76225+#ifdef CONSTIFY_PLUGIN
76226+#define __no_const __attribute__((no_const))
76227+#define __do_const __attribute__((do_const))
76228+#endif
76229+
76230+#ifdef SIZE_OVERFLOW_PLUGIN
76231+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
76232+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
76233+#endif
76234+
76235+#ifdef LATENT_ENTROPY_PLUGIN
76236+#define __latent_entropy __attribute__((latent_entropy))
76237+#endif
76238+
76239 /*
76240 * Mark a position in code as unreachable. This can be used to
76241 * suppress control flow warnings after asm blocks that transfer
76242diff --git a/include/linux/compiler.h b/include/linux/compiler.h
76243index 92669cd..cc564c0 100644
76244--- a/include/linux/compiler.h
76245+++ b/include/linux/compiler.h
76246@@ -5,11 +5,14 @@
76247
76248 #ifdef __CHECKER__
76249 # define __user __attribute__((noderef, address_space(1)))
76250+# define __force_user __force __user
76251 # define __kernel __attribute__((address_space(0)))
76252+# define __force_kernel __force __kernel
76253 # define __safe __attribute__((safe))
76254 # define __force __attribute__((force))
76255 # define __nocast __attribute__((nocast))
76256 # define __iomem __attribute__((noderef, address_space(2)))
76257+# define __force_iomem __force __iomem
76258 # define __must_hold(x) __attribute__((context(x,1,1)))
76259 # define __acquires(x) __attribute__((context(x,0,1)))
76260 # define __releases(x) __attribute__((context(x,1,0)))
76261@@ -17,20 +20,37 @@
76262 # define __release(x) __context__(x,-1)
76263 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
76264 # define __percpu __attribute__((noderef, address_space(3)))
76265+# define __force_percpu __force __percpu
76266 #ifdef CONFIG_SPARSE_RCU_POINTER
76267 # define __rcu __attribute__((noderef, address_space(4)))
76268+# define __force_rcu __force __rcu
76269 #else
76270 # define __rcu
76271+# define __force_rcu
76272 #endif
76273 extern void __chk_user_ptr(const volatile void __user *);
76274 extern void __chk_io_ptr(const volatile void __iomem *);
76275 #else
76276-# define __user
76277-# define __kernel
76278+# ifdef CHECKER_PLUGIN
76279+//# define __user
76280+//# define __force_user
76281+//# define __kernel
76282+//# define __force_kernel
76283+# else
76284+# ifdef STRUCTLEAK_PLUGIN
76285+# define __user __attribute__((user))
76286+# else
76287+# define __user
76288+# endif
76289+# define __force_user
76290+# define __kernel
76291+# define __force_kernel
76292+# endif
76293 # define __safe
76294 # define __force
76295 # define __nocast
76296 # define __iomem
76297+# define __force_iomem
76298 # define __chk_user_ptr(x) (void)0
76299 # define __chk_io_ptr(x) (void)0
76300 # define __builtin_warning(x, y...) (1)
76301@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
76302 # define __release(x) (void)0
76303 # define __cond_lock(x,c) (c)
76304 # define __percpu
76305+# define __force_percpu
76306 # define __rcu
76307+# define __force_rcu
76308 #endif
76309
76310 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
76311@@ -275,6 +297,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
76312 # define __attribute_const__ /* unimplemented */
76313 #endif
76314
76315+#ifndef __randomize_layout
76316+# define __randomize_layout
76317+#endif
76318+
76319+#ifndef __no_randomize_layout
76320+# define __no_randomize_layout
76321+#endif
76322+
76323+#ifndef __no_const
76324+# define __no_const
76325+#endif
76326+
76327+#ifndef __do_const
76328+# define __do_const
76329+#endif
76330+
76331+#ifndef __size_overflow
76332+# define __size_overflow(...)
76333+#endif
76334+
76335+#ifndef __intentional_overflow
76336+# define __intentional_overflow(...)
76337+#endif
76338+
76339+#ifndef __latent_entropy
76340+# define __latent_entropy
76341+#endif
76342+
76343 /*
76344 * Tell gcc if a function is cold. The compiler will assume any path
76345 * directly leading to the call is unlikely.
76346@@ -284,6 +334,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
76347 #define __cold
76348 #endif
76349
76350+#ifndef __alloc_size
76351+#define __alloc_size(...)
76352+#endif
76353+
76354+#ifndef __bos
76355+#define __bos(ptr, arg)
76356+#endif
76357+
76358+#ifndef __bos0
76359+#define __bos0(ptr)
76360+#endif
76361+
76362+#ifndef __bos1
76363+#define __bos1(ptr)
76364+#endif
76365+
76366 /* Simple shorthand for a section definition */
76367 #ifndef __section
76368 # define __section(S) __attribute__ ((__section__(#S)))
76369@@ -349,7 +415,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
76370 * use is to mediate communication between process-level code and irq/NMI
76371 * handlers, all running on the same CPU.
76372 */
76373-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
76374+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
76375+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
76376
76377 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
76378 #ifdef CONFIG_KPROBES
76379diff --git a/include/linux/completion.h b/include/linux/completion.h
76380index 5d5aaae..0ea9b84 100644
76381--- a/include/linux/completion.h
76382+++ b/include/linux/completion.h
76383@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
76384
76385 extern void wait_for_completion(struct completion *);
76386 extern void wait_for_completion_io(struct completion *);
76387-extern int wait_for_completion_interruptible(struct completion *x);
76388-extern int wait_for_completion_killable(struct completion *x);
76389+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
76390+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
76391 extern unsigned long wait_for_completion_timeout(struct completion *x,
76392- unsigned long timeout);
76393+ unsigned long timeout) __intentional_overflow(-1);
76394 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
76395- unsigned long timeout);
76396+ unsigned long timeout) __intentional_overflow(-1);
76397 extern long wait_for_completion_interruptible_timeout(
76398- struct completion *x, unsigned long timeout);
76399+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
76400 extern long wait_for_completion_killable_timeout(
76401- struct completion *x, unsigned long timeout);
76402+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
76403 extern bool try_wait_for_completion(struct completion *x);
76404 extern bool completion_done(struct completion *x);
76405
76406diff --git a/include/linux/configfs.h b/include/linux/configfs.h
76407index 34025df..d94bbbc 100644
76408--- a/include/linux/configfs.h
76409+++ b/include/linux/configfs.h
76410@@ -125,7 +125,7 @@ struct configfs_attribute {
76411 const char *ca_name;
76412 struct module *ca_owner;
76413 umode_t ca_mode;
76414-};
76415+} __do_const;
76416
76417 /*
76418 * Users often need to create attribute structures for their configurable
76419diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
76420index dc196bb..c55a50f 100644
76421--- a/include/linux/cpufreq.h
76422+++ b/include/linux/cpufreq.h
76423@@ -189,6 +189,7 @@ struct global_attr {
76424 ssize_t (*store)(struct kobject *a, struct attribute *b,
76425 const char *c, size_t count);
76426 };
76427+typedef struct global_attr __no_const global_attr_no_const;
76428
76429 #define define_one_global_ro(_name) \
76430 static struct global_attr _name = \
76431@@ -225,7 +226,7 @@ struct cpufreq_driver {
76432 int (*suspend) (struct cpufreq_policy *policy);
76433 int (*resume) (struct cpufreq_policy *policy);
76434 struct freq_attr **attr;
76435-};
76436+} __do_const;
76437
76438 /* flags */
76439 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
76440diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
76441index 50fcbb0..9d2dbd9 100644
76442--- a/include/linux/cpuidle.h
76443+++ b/include/linux/cpuidle.h
76444@@ -50,7 +50,8 @@ struct cpuidle_state {
76445 int index);
76446
76447 int (*enter_dead) (struct cpuidle_device *dev, int index);
76448-};
76449+} __do_const;
76450+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
76451
76452 /* Idle State Flags */
76453 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
76454@@ -192,7 +193,7 @@ struct cpuidle_governor {
76455 void (*reflect) (struct cpuidle_device *dev, int index);
76456
76457 struct module *owner;
76458-};
76459+} __do_const;
76460
76461 #ifdef CONFIG_CPU_IDLE
76462 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
76463diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
76464index d08e4d2..95fad61 100644
76465--- a/include/linux/cpumask.h
76466+++ b/include/linux/cpumask.h
76467@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
76468 }
76469
76470 /* Valid inputs for n are -1 and 0. */
76471-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
76472+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
76473 {
76474 return n+1;
76475 }
76476
76477-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
76478+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
76479 {
76480 return n+1;
76481 }
76482
76483-static inline unsigned int cpumask_next_and(int n,
76484+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
76485 const struct cpumask *srcp,
76486 const struct cpumask *andp)
76487 {
76488@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
76489 *
76490 * Returns >= nr_cpu_ids if no further cpus set.
76491 */
76492-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
76493+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
76494 {
76495 /* -1 is a legal arg here. */
76496 if (n != -1)
76497@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
76498 *
76499 * Returns >= nr_cpu_ids if no further cpus unset.
76500 */
76501-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
76502+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
76503 {
76504 /* -1 is a legal arg here. */
76505 if (n != -1)
76506@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
76507 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
76508 }
76509
76510-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
76511+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
76512 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
76513
76514 /**
76515diff --git a/include/linux/cred.h b/include/linux/cred.h
76516index 04421e8..117e17a 100644
76517--- a/include/linux/cred.h
76518+++ b/include/linux/cred.h
76519@@ -35,7 +35,7 @@ struct group_info {
76520 int nblocks;
76521 kgid_t small_block[NGROUPS_SMALL];
76522 kgid_t *blocks[0];
76523-};
76524+} __randomize_layout;
76525
76526 /**
76527 * get_group_info - Get a reference to a group info structure
76528@@ -136,7 +136,7 @@ struct cred {
76529 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
76530 struct group_info *group_info; /* supplementary groups for euid/fsgid */
76531 struct rcu_head rcu; /* RCU deletion hook */
76532-};
76533+} __randomize_layout;
76534
76535 extern void __put_cred(struct cred *);
76536 extern void exit_creds(struct task_struct *);
76537@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
76538 static inline void validate_process_creds(void)
76539 {
76540 }
76541+static inline void validate_task_creds(struct task_struct *task)
76542+{
76543+}
76544 #endif
76545
76546 /**
76547diff --git a/include/linux/crypto.h b/include/linux/crypto.h
76548index b92eadf..b4ecdc1 100644
76549--- a/include/linux/crypto.h
76550+++ b/include/linux/crypto.h
76551@@ -373,7 +373,7 @@ struct cipher_tfm {
76552 const u8 *key, unsigned int keylen);
76553 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
76554 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
76555-};
76556+} __no_const;
76557
76558 struct hash_tfm {
76559 int (*init)(struct hash_desc *desc);
76560@@ -394,13 +394,13 @@ struct compress_tfm {
76561 int (*cot_decompress)(struct crypto_tfm *tfm,
76562 const u8 *src, unsigned int slen,
76563 u8 *dst, unsigned int *dlen);
76564-};
76565+} __no_const;
76566
76567 struct rng_tfm {
76568 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
76569 unsigned int dlen);
76570 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
76571-};
76572+} __no_const;
76573
76574 #define crt_ablkcipher crt_u.ablkcipher
76575 #define crt_aead crt_u.aead
76576diff --git a/include/linux/ctype.h b/include/linux/ctype.h
76577index 653589e..4ef254a 100644
76578--- a/include/linux/ctype.h
76579+++ b/include/linux/ctype.h
76580@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
76581 * Fast implementation of tolower() for internal usage. Do not use in your
76582 * code.
76583 */
76584-static inline char _tolower(const char c)
76585+static inline unsigned char _tolower(const unsigned char c)
76586 {
76587 return c | 0x20;
76588 }
76589diff --git a/include/linux/dcache.h b/include/linux/dcache.h
76590index bf72e9a..4ca7927 100644
76591--- a/include/linux/dcache.h
76592+++ b/include/linux/dcache.h
76593@@ -133,7 +133,7 @@ struct dentry {
76594 } d_u;
76595 struct list_head d_subdirs; /* our children */
76596 struct hlist_node d_alias; /* inode alias list */
76597-};
76598+} __randomize_layout;
76599
76600 /*
76601 * dentry->d_lock spinlock nesting subclasses:
76602diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
76603index 7925bf0..d5143d2 100644
76604--- a/include/linux/decompress/mm.h
76605+++ b/include/linux/decompress/mm.h
76606@@ -77,7 +77,7 @@ static void free(void *where)
76607 * warnings when not needed (indeed large_malloc / large_free are not
76608 * needed by inflate */
76609
76610-#define malloc(a) kmalloc(a, GFP_KERNEL)
76611+#define malloc(a) kmalloc((a), GFP_KERNEL)
76612 #define free(a) kfree(a)
76613
76614 #define large_malloc(a) vmalloc(a)
76615diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
76616index d48dc00..211ee54 100644
76617--- a/include/linux/devfreq.h
76618+++ b/include/linux/devfreq.h
76619@@ -114,7 +114,7 @@ struct devfreq_governor {
76620 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
76621 int (*event_handler)(struct devfreq *devfreq,
76622 unsigned int event, void *data);
76623-};
76624+} __do_const;
76625
76626 /**
76627 * struct devfreq - Device devfreq structure
76628diff --git a/include/linux/device.h b/include/linux/device.h
76629index 952b010..d5b7691 100644
76630--- a/include/linux/device.h
76631+++ b/include/linux/device.h
76632@@ -310,7 +310,7 @@ struct subsys_interface {
76633 struct list_head node;
76634 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
76635 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
76636-};
76637+} __do_const;
76638
76639 int subsys_interface_register(struct subsys_interface *sif);
76640 void subsys_interface_unregister(struct subsys_interface *sif);
76641@@ -506,7 +506,7 @@ struct device_type {
76642 void (*release)(struct device *dev);
76643
76644 const struct dev_pm_ops *pm;
76645-};
76646+} __do_const;
76647
76648 /* interface for exporting device attributes */
76649 struct device_attribute {
76650@@ -516,11 +516,12 @@ struct device_attribute {
76651 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
76652 const char *buf, size_t count);
76653 };
76654+typedef struct device_attribute __no_const device_attribute_no_const;
76655
76656 struct dev_ext_attribute {
76657 struct device_attribute attr;
76658 void *var;
76659-};
76660+} __do_const;
76661
76662 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
76663 char *buf);
76664diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
76665index fd4aee2..1f28db9 100644
76666--- a/include/linux/dma-mapping.h
76667+++ b/include/linux/dma-mapping.h
76668@@ -54,7 +54,7 @@ struct dma_map_ops {
76669 u64 (*get_required_mask)(struct device *dev);
76670 #endif
76671 int is_phys;
76672-};
76673+} __do_const;
76674
76675 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
76676
76677diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
76678index 41cf0c3..f3b771c 100644
76679--- a/include/linux/dmaengine.h
76680+++ b/include/linux/dmaengine.h
76681@@ -1114,9 +1114,9 @@ struct dma_pinned_list {
76682 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
76683 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
76684
76685-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
76686+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
76687 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
76688-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
76689+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
76690 struct dma_pinned_list *pinned_list, struct page *page,
76691 unsigned int offset, size_t len);
76692
76693diff --git a/include/linux/efi.h b/include/linux/efi.h
76694index 11ce678..7b8c69c 100644
76695--- a/include/linux/efi.h
76696+++ b/include/linux/efi.h
76697@@ -764,6 +764,7 @@ struct efivar_operations {
76698 efi_set_variable_t *set_variable;
76699 efi_query_variable_store_t *query_variable_store;
76700 };
76701+typedef struct efivar_operations __no_const efivar_operations_no_const;
76702
76703 struct efivars {
76704 /*
76705diff --git a/include/linux/elf.h b/include/linux/elf.h
76706index 67a5fa7..b817372 100644
76707--- a/include/linux/elf.h
76708+++ b/include/linux/elf.h
76709@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
76710 #define elf_note elf32_note
76711 #define elf_addr_t Elf32_Off
76712 #define Elf_Half Elf32_Half
76713+#define elf_dyn Elf32_Dyn
76714
76715 #else
76716
76717@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
76718 #define elf_note elf64_note
76719 #define elf_addr_t Elf64_Off
76720 #define Elf_Half Elf64_Half
76721+#define elf_dyn Elf64_Dyn
76722
76723 #endif
76724
76725diff --git a/include/linux/err.h b/include/linux/err.h
76726index 15f92e0..e825a8e 100644
76727--- a/include/linux/err.h
76728+++ b/include/linux/err.h
76729@@ -19,12 +19,12 @@
76730
76731 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
76732
76733-static inline void * __must_check ERR_PTR(long error)
76734+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
76735 {
76736 return (void *) error;
76737 }
76738
76739-static inline long __must_check PTR_ERR(__force const void *ptr)
76740+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
76741 {
76742 return (long) ptr;
76743 }
76744diff --git a/include/linux/extcon.h b/include/linux/extcon.h
76745index 21c59af..6057a03 100644
76746--- a/include/linux/extcon.h
76747+++ b/include/linux/extcon.h
76748@@ -135,7 +135,7 @@ struct extcon_dev {
76749 /* /sys/class/extcon/.../mutually_exclusive/... */
76750 struct attribute_group attr_g_muex;
76751 struct attribute **attrs_muex;
76752- struct device_attribute *d_attrs_muex;
76753+ device_attribute_no_const *d_attrs_muex;
76754 };
76755
76756 /**
76757diff --git a/include/linux/fb.h b/include/linux/fb.h
76758index 70c4836..ff3daec 100644
76759--- a/include/linux/fb.h
76760+++ b/include/linux/fb.h
76761@@ -304,7 +304,7 @@ struct fb_ops {
76762 /* called at KDB enter and leave time to prepare the console */
76763 int (*fb_debug_enter)(struct fb_info *info);
76764 int (*fb_debug_leave)(struct fb_info *info);
76765-};
76766+} __do_const;
76767
76768 #ifdef CONFIG_FB_TILEBLITTING
76769 #define FB_TILE_CURSOR_NONE 0
76770diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
76771index 085197b..0fa6f0b 100644
76772--- a/include/linux/fdtable.h
76773+++ b/include/linux/fdtable.h
76774@@ -95,7 +95,7 @@ struct files_struct *get_files_struct(struct task_struct *);
76775 void put_files_struct(struct files_struct *fs);
76776 void reset_files_struct(struct files_struct *);
76777 int unshare_files(struct files_struct **);
76778-struct files_struct *dup_fd(struct files_struct *, int *);
76779+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
76780 void do_close_on_exec(struct files_struct *);
76781 int iterate_fd(struct files_struct *, unsigned,
76782 int (*)(const void *, struct file *, unsigned),
76783diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
76784index 8293262..2b3b8bd 100644
76785--- a/include/linux/frontswap.h
76786+++ b/include/linux/frontswap.h
76787@@ -11,7 +11,7 @@ struct frontswap_ops {
76788 int (*load)(unsigned, pgoff_t, struct page *);
76789 void (*invalidate_page)(unsigned, pgoff_t);
76790 void (*invalidate_area)(unsigned);
76791-};
76792+} __no_const;
76793
76794 extern bool frontswap_enabled;
76795 extern struct frontswap_ops *
76796diff --git a/include/linux/fs.h b/include/linux/fs.h
76797index 121f11f..0f2a863 100644
76798--- a/include/linux/fs.h
76799+++ b/include/linux/fs.h
76800@@ -423,7 +423,7 @@ struct address_space {
76801 spinlock_t private_lock; /* for use by the address_space */
76802 struct list_head private_list; /* ditto */
76803 void *private_data; /* ditto */
76804-} __attribute__((aligned(sizeof(long))));
76805+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
76806 /*
76807 * On most architectures that alignment is already the case; but
76808 * must be enforced here for CRIS, to let the least significant bit
76809@@ -466,7 +466,7 @@ struct block_device {
76810 int bd_fsfreeze_count;
76811 /* Mutex for freeze */
76812 struct mutex bd_fsfreeze_mutex;
76813-};
76814+} __randomize_layout;
76815
76816 /*
76817 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
76818@@ -610,7 +610,7 @@ struct inode {
76819 atomic_t i_readcount; /* struct files open RO */
76820 #endif
76821 void *i_private; /* fs or device private pointer */
76822-};
76823+} __randomize_layout;
76824
76825 static inline int inode_unhashed(struct inode *inode)
76826 {
76827@@ -808,7 +808,7 @@ struct file {
76828 #ifdef CONFIG_DEBUG_WRITECOUNT
76829 unsigned long f_mnt_write_state;
76830 #endif
76831-};
76832+} __randomize_layout;
76833
76834 struct file_handle {
76835 __u32 handle_bytes;
76836@@ -978,7 +978,7 @@ struct file_lock {
76837 int state; /* state of grant or error if -ve */
76838 } afs;
76839 } fl_u;
76840-};
76841+} __randomize_layout;
76842
76843 /* The following constant reflects the upper bound of the file/locking space */
76844 #ifndef OFFSET_MAX
76845@@ -1325,7 +1325,7 @@ struct super_block {
76846 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
76847 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
76848 struct rcu_head rcu;
76849-};
76850+} __randomize_layout;
76851
76852 extern struct timespec current_fs_time(struct super_block *sb);
76853
76854@@ -1547,7 +1547,8 @@ struct file_operations {
76855 long (*fallocate)(struct file *file, int mode, loff_t offset,
76856 loff_t len);
76857 int (*show_fdinfo)(struct seq_file *m, struct file *f);
76858-};
76859+} __do_const __randomize_layout;
76860+typedef struct file_operations __no_const file_operations_no_const;
76861
76862 struct inode_operations {
76863 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
76864@@ -2808,4 +2809,14 @@ static inline bool dir_relax(struct inode *inode)
76865 return !IS_DEADDIR(inode);
76866 }
76867
76868+static inline bool is_sidechannel_device(const struct inode *inode)
76869+{
76870+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
76871+ umode_t mode = inode->i_mode;
76872+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
76873+#else
76874+ return false;
76875+#endif
76876+}
76877+
76878 #endif /* _LINUX_FS_H */
76879diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
76880index 0efc3e6..fd23610 100644
76881--- a/include/linux/fs_struct.h
76882+++ b/include/linux/fs_struct.h
76883@@ -6,13 +6,13 @@
76884 #include <linux/seqlock.h>
76885
76886 struct fs_struct {
76887- int users;
76888+ atomic_t users;
76889 spinlock_t lock;
76890 seqcount_t seq;
76891 int umask;
76892 int in_exec;
76893 struct path root, pwd;
76894-};
76895+} __randomize_layout;
76896
76897 extern struct kmem_cache *fs_cachep;
76898
76899diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
76900index 7714849..a4a5c7a 100644
76901--- a/include/linux/fscache-cache.h
76902+++ b/include/linux/fscache-cache.h
76903@@ -113,7 +113,7 @@ struct fscache_operation {
76904 fscache_operation_release_t release;
76905 };
76906
76907-extern atomic_t fscache_op_debug_id;
76908+extern atomic_unchecked_t fscache_op_debug_id;
76909 extern void fscache_op_work_func(struct work_struct *work);
76910
76911 extern void fscache_enqueue_operation(struct fscache_operation *);
76912@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
76913 INIT_WORK(&op->work, fscache_op_work_func);
76914 atomic_set(&op->usage, 1);
76915 op->state = FSCACHE_OP_ST_INITIALISED;
76916- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
76917+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
76918 op->processor = processor;
76919 op->release = release;
76920 INIT_LIST_HEAD(&op->pend_link);
76921diff --git a/include/linux/fscache.h b/include/linux/fscache.h
76922index 115bb81..e7b812b 100644
76923--- a/include/linux/fscache.h
76924+++ b/include/linux/fscache.h
76925@@ -152,7 +152,7 @@ struct fscache_cookie_def {
76926 * - this is mandatory for any object that may have data
76927 */
76928 void (*now_uncached)(void *cookie_netfs_data);
76929-};
76930+} __do_const;
76931
76932 /*
76933 * fscache cached network filesystem type
76934diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
76935index 1c804b0..1432c2b 100644
76936--- a/include/linux/fsnotify.h
76937+++ b/include/linux/fsnotify.h
76938@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
76939 struct inode *inode = file_inode(file);
76940 __u32 mask = FS_ACCESS;
76941
76942+ if (is_sidechannel_device(inode))
76943+ return;
76944+
76945 if (S_ISDIR(inode->i_mode))
76946 mask |= FS_ISDIR;
76947
76948@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
76949 struct inode *inode = file_inode(file);
76950 __u32 mask = FS_MODIFY;
76951
76952+ if (is_sidechannel_device(inode))
76953+ return;
76954+
76955 if (S_ISDIR(inode->i_mode))
76956 mask |= FS_ISDIR;
76957
76958@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
76959 */
76960 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
76961 {
76962- return kstrdup(name, GFP_KERNEL);
76963+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
76964 }
76965
76966 /*
76967diff --git a/include/linux/genhd.h b/include/linux/genhd.h
76968index 9f3c275..8bdff5d 100644
76969--- a/include/linux/genhd.h
76970+++ b/include/linux/genhd.h
76971@@ -194,7 +194,7 @@ struct gendisk {
76972 struct kobject *slave_dir;
76973
76974 struct timer_rand_state *random;
76975- atomic_t sync_io; /* RAID */
76976+ atomic_unchecked_t sync_io; /* RAID */
76977 struct disk_events *ev;
76978 #ifdef CONFIG_BLK_DEV_INTEGRITY
76979 struct blk_integrity *integrity;
76980@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
76981 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
76982
76983 /* drivers/char/random.c */
76984-extern void add_disk_randomness(struct gendisk *disk);
76985+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
76986 extern void rand_initialize_disk(struct gendisk *disk);
76987
76988 static inline sector_t get_start_sect(struct block_device *bdev)
76989diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
76990index c0894dd..2fbf10c 100644
76991--- a/include/linux/genl_magic_func.h
76992+++ b/include/linux/genl_magic_func.h
76993@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
76994 },
76995
76996 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
76997-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
76998+static struct genl_ops ZZZ_genl_ops[] = {
76999 #include GENL_MAGIC_INCLUDE_FILE
77000 };
77001
77002diff --git a/include/linux/gfp.h b/include/linux/gfp.h
77003index 9b4dd49..61fd41d 100644
77004--- a/include/linux/gfp.h
77005+++ b/include/linux/gfp.h
77006@@ -35,6 +35,13 @@ struct vm_area_struct;
77007 #define ___GFP_NO_KSWAPD 0x400000u
77008 #define ___GFP_OTHER_NODE 0x800000u
77009 #define ___GFP_WRITE 0x1000000u
77010+
77011+#ifdef CONFIG_PAX_USERCOPY_SLABS
77012+#define ___GFP_USERCOPY 0x2000000u
77013+#else
77014+#define ___GFP_USERCOPY 0
77015+#endif
77016+
77017 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
77018
77019 /*
77020@@ -92,6 +99,7 @@ struct vm_area_struct;
77021 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
77022 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
77023 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
77024+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
77025
77026 /*
77027 * This may seem redundant, but it's a way of annotating false positives vs.
77028@@ -99,7 +107,7 @@ struct vm_area_struct;
77029 */
77030 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
77031
77032-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
77033+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
77034 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
77035
77036 /* This equals 0, but use constants in case they ever change */
77037@@ -153,6 +161,8 @@ struct vm_area_struct;
77038 /* 4GB DMA on some platforms */
77039 #define GFP_DMA32 __GFP_DMA32
77040
77041+#define GFP_USERCOPY __GFP_USERCOPY
77042+
77043 /* Convert GFP flags to their corresponding migrate type */
77044 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
77045 {
77046diff --git a/include/linux/gracl.h b/include/linux/gracl.h
77047new file mode 100644
77048index 0000000..edb2cb6
77049--- /dev/null
77050+++ b/include/linux/gracl.h
77051@@ -0,0 +1,340 @@
77052+#ifndef GR_ACL_H
77053+#define GR_ACL_H
77054+
77055+#include <linux/grdefs.h>
77056+#include <linux/resource.h>
77057+#include <linux/capability.h>
77058+#include <linux/dcache.h>
77059+#include <asm/resource.h>
77060+
77061+/* Major status information */
77062+
77063+#define GR_VERSION "grsecurity 3.0"
77064+#define GRSECURITY_VERSION 0x3000
77065+
77066+enum {
77067+ GR_SHUTDOWN = 0,
77068+ GR_ENABLE = 1,
77069+ GR_SPROLE = 2,
77070+ GR_OLDRELOAD = 3,
77071+ GR_SEGVMOD = 4,
77072+ GR_STATUS = 5,
77073+ GR_UNSPROLE = 6,
77074+ GR_PASSSET = 7,
77075+ GR_SPROLEPAM = 8,
77076+ GR_RELOAD = 9,
77077+};
77078+
77079+/* Password setup definitions
77080+ * kernel/grhash.c */
77081+enum {
77082+ GR_PW_LEN = 128,
77083+ GR_SALT_LEN = 16,
77084+ GR_SHA_LEN = 32,
77085+};
77086+
77087+enum {
77088+ GR_SPROLE_LEN = 64,
77089+};
77090+
77091+enum {
77092+ GR_NO_GLOB = 0,
77093+ GR_REG_GLOB,
77094+ GR_CREATE_GLOB
77095+};
77096+
77097+#define GR_NLIMITS 32
77098+
77099+/* Begin Data Structures */
77100+
77101+struct sprole_pw {
77102+ unsigned char *rolename;
77103+ unsigned char salt[GR_SALT_LEN];
77104+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
77105+};
77106+
77107+struct name_entry {
77108+ __u32 key;
77109+ ino_t inode;
77110+ dev_t device;
77111+ char *name;
77112+ __u16 len;
77113+ __u8 deleted;
77114+ struct name_entry *prev;
77115+ struct name_entry *next;
77116+};
77117+
77118+struct inodev_entry {
77119+ struct name_entry *nentry;
77120+ struct inodev_entry *prev;
77121+ struct inodev_entry *next;
77122+};
77123+
77124+struct acl_role_db {
77125+ struct acl_role_label **r_hash;
77126+ __u32 r_size;
77127+};
77128+
77129+struct inodev_db {
77130+ struct inodev_entry **i_hash;
77131+ __u32 i_size;
77132+};
77133+
77134+struct name_db {
77135+ struct name_entry **n_hash;
77136+ __u32 n_size;
77137+};
77138+
77139+struct crash_uid {
77140+ uid_t uid;
77141+ unsigned long expires;
77142+};
77143+
77144+struct gr_hash_struct {
77145+ void **table;
77146+ void **nametable;
77147+ void *first;
77148+ __u32 table_size;
77149+ __u32 used_size;
77150+ int type;
77151+};
77152+
77153+/* Userspace Grsecurity ACL data structures */
77154+
77155+struct acl_subject_label {
77156+ char *filename;
77157+ ino_t inode;
77158+ dev_t device;
77159+ __u32 mode;
77160+ kernel_cap_t cap_mask;
77161+ kernel_cap_t cap_lower;
77162+ kernel_cap_t cap_invert_audit;
77163+
77164+ struct rlimit res[GR_NLIMITS];
77165+ __u32 resmask;
77166+
77167+ __u8 user_trans_type;
77168+ __u8 group_trans_type;
77169+ uid_t *user_transitions;
77170+ gid_t *group_transitions;
77171+ __u16 user_trans_num;
77172+ __u16 group_trans_num;
77173+
77174+ __u32 sock_families[2];
77175+ __u32 ip_proto[8];
77176+ __u32 ip_type;
77177+ struct acl_ip_label **ips;
77178+ __u32 ip_num;
77179+ __u32 inaddr_any_override;
77180+
77181+ __u32 crashes;
77182+ unsigned long expires;
77183+
77184+ struct acl_subject_label *parent_subject;
77185+ struct gr_hash_struct *hash;
77186+ struct acl_subject_label *prev;
77187+ struct acl_subject_label *next;
77188+
77189+ struct acl_object_label **obj_hash;
77190+ __u32 obj_hash_size;
77191+ __u16 pax_flags;
77192+};
77193+
77194+struct role_allowed_ip {
77195+ __u32 addr;
77196+ __u32 netmask;
77197+
77198+ struct role_allowed_ip *prev;
77199+ struct role_allowed_ip *next;
77200+};
77201+
77202+struct role_transition {
77203+ char *rolename;
77204+
77205+ struct role_transition *prev;
77206+ struct role_transition *next;
77207+};
77208+
77209+struct acl_role_label {
77210+ char *rolename;
77211+ uid_t uidgid;
77212+ __u16 roletype;
77213+
77214+ __u16 auth_attempts;
77215+ unsigned long expires;
77216+
77217+ struct acl_subject_label *root_label;
77218+ struct gr_hash_struct *hash;
77219+
77220+ struct acl_role_label *prev;
77221+ struct acl_role_label *next;
77222+
77223+ struct role_transition *transitions;
77224+ struct role_allowed_ip *allowed_ips;
77225+ uid_t *domain_children;
77226+ __u16 domain_child_num;
77227+
77228+ umode_t umask;
77229+
77230+ struct acl_subject_label **subj_hash;
77231+ __u32 subj_hash_size;
77232+};
77233+
77234+struct user_acl_role_db {
77235+ struct acl_role_label **r_table;
77236+ __u32 num_pointers; /* Number of allocations to track */
77237+ __u32 num_roles; /* Number of roles */
77238+ __u32 num_domain_children; /* Number of domain children */
77239+ __u32 num_subjects; /* Number of subjects */
77240+ __u32 num_objects; /* Number of objects */
77241+};
77242+
77243+struct acl_object_label {
77244+ char *filename;
77245+ ino_t inode;
77246+ dev_t device;
77247+ __u32 mode;
77248+
77249+ struct acl_subject_label *nested;
77250+ struct acl_object_label *globbed;
77251+
77252+ /* next two structures not used */
77253+
77254+ struct acl_object_label *prev;
77255+ struct acl_object_label *next;
77256+};
77257+
77258+struct acl_ip_label {
77259+ char *iface;
77260+ __u32 addr;
77261+ __u32 netmask;
77262+ __u16 low, high;
77263+ __u8 mode;
77264+ __u32 type;
77265+ __u32 proto[8];
77266+
77267+ /* next two structures not used */
77268+
77269+ struct acl_ip_label *prev;
77270+ struct acl_ip_label *next;
77271+};
77272+
77273+struct gr_arg {
77274+ struct user_acl_role_db role_db;
77275+ unsigned char pw[GR_PW_LEN];
77276+ unsigned char salt[GR_SALT_LEN];
77277+ unsigned char sum[GR_SHA_LEN];
77278+ unsigned char sp_role[GR_SPROLE_LEN];
77279+ struct sprole_pw *sprole_pws;
77280+ dev_t segv_device;
77281+ ino_t segv_inode;
77282+ uid_t segv_uid;
77283+ __u16 num_sprole_pws;
77284+ __u16 mode;
77285+};
77286+
77287+struct gr_arg_wrapper {
77288+ struct gr_arg *arg;
77289+ __u32 version;
77290+ __u32 size;
77291+};
77292+
77293+struct subject_map {
77294+ struct acl_subject_label *user;
77295+ struct acl_subject_label *kernel;
77296+ struct subject_map *prev;
77297+ struct subject_map *next;
77298+};
77299+
77300+struct acl_subj_map_db {
77301+ struct subject_map **s_hash;
77302+ __u32 s_size;
77303+};
77304+
77305+struct gr_policy_state {
77306+ struct sprole_pw **acl_special_roles;
77307+ __u16 num_sprole_pws;
77308+ struct acl_role_label *kernel_role;
77309+ struct acl_role_label *role_list;
77310+ struct acl_role_label *default_role;
77311+ struct acl_role_db acl_role_set;
77312+ struct acl_subj_map_db subj_map_set;
77313+ struct name_db name_set;
77314+ struct inodev_db inodev_set;
77315+};
77316+
77317+struct gr_alloc_state {
77318+ unsigned long alloc_stack_next;
77319+ unsigned long alloc_stack_size;
77320+ void **alloc_stack;
77321+};
77322+
77323+struct gr_reload_state {
77324+ struct gr_policy_state oldpolicy;
77325+ struct gr_alloc_state oldalloc;
77326+ struct gr_policy_state newpolicy;
77327+ struct gr_alloc_state newalloc;
77328+ struct gr_policy_state *oldpolicy_ptr;
77329+ struct gr_alloc_state *oldalloc_ptr;
77330+ unsigned char oldmode;
77331+};
77332+
77333+/* End Data Structures Section */
77334+
77335+/* Hash functions generated by empirical testing by Brad Spengler
77336+ Makes good use of the low bits of the inode. Generally 0-1 times
77337+ in loop for successful match. 0-3 for unsuccessful match.
77338+ Shift/add algorithm with modulus of table size and an XOR*/
77339+
77340+static __inline__ unsigned int
77341+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
77342+{
77343+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
77344+}
77345+
77346+ static __inline__ unsigned int
77347+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
77348+{
77349+ return ((const unsigned long)userp % sz);
77350+}
77351+
77352+static __inline__ unsigned int
77353+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
77354+{
77355+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
77356+}
77357+
77358+static __inline__ unsigned int
77359+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
77360+{
77361+ return full_name_hash((const unsigned char *)name, len) % sz;
77362+}
77363+
77364+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
77365+ subj = NULL; \
77366+ iter = 0; \
77367+ while (iter < role->subj_hash_size) { \
77368+ if (subj == NULL) \
77369+ subj = role->subj_hash[iter]; \
77370+ if (subj == NULL) { \
77371+ iter++; \
77372+ continue; \
77373+ }
77374+
77375+#define FOR_EACH_SUBJECT_END(subj,iter) \
77376+ subj = subj->next; \
77377+ if (subj == NULL) \
77378+ iter++; \
77379+ }
77380+
77381+
77382+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
77383+ subj = role->hash->first; \
77384+ while (subj != NULL) {
77385+
77386+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
77387+ subj = subj->next; \
77388+ }
77389+
77390+#endif
77391+
77392diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
77393new file mode 100644
77394index 0000000..33ebd1f
77395--- /dev/null
77396+++ b/include/linux/gracl_compat.h
77397@@ -0,0 +1,156 @@
77398+#ifndef GR_ACL_COMPAT_H
77399+#define GR_ACL_COMPAT_H
77400+
77401+#include <linux/resource.h>
77402+#include <asm/resource.h>
77403+
77404+struct sprole_pw_compat {
77405+ compat_uptr_t rolename;
77406+ unsigned char salt[GR_SALT_LEN];
77407+ unsigned char sum[GR_SHA_LEN];
77408+};
77409+
77410+struct gr_hash_struct_compat {
77411+ compat_uptr_t table;
77412+ compat_uptr_t nametable;
77413+ compat_uptr_t first;
77414+ __u32 table_size;
77415+ __u32 used_size;
77416+ int type;
77417+};
77418+
77419+struct acl_subject_label_compat {
77420+ compat_uptr_t filename;
77421+ compat_ino_t inode;
77422+ __u32 device;
77423+ __u32 mode;
77424+ kernel_cap_t cap_mask;
77425+ kernel_cap_t cap_lower;
77426+ kernel_cap_t cap_invert_audit;
77427+
77428+ struct compat_rlimit res[GR_NLIMITS];
77429+ __u32 resmask;
77430+
77431+ __u8 user_trans_type;
77432+ __u8 group_trans_type;
77433+ compat_uptr_t user_transitions;
77434+ compat_uptr_t group_transitions;
77435+ __u16 user_trans_num;
77436+ __u16 group_trans_num;
77437+
77438+ __u32 sock_families[2];
77439+ __u32 ip_proto[8];
77440+ __u32 ip_type;
77441+ compat_uptr_t ips;
77442+ __u32 ip_num;
77443+ __u32 inaddr_any_override;
77444+
77445+ __u32 crashes;
77446+ compat_ulong_t expires;
77447+
77448+ compat_uptr_t parent_subject;
77449+ compat_uptr_t hash;
77450+ compat_uptr_t prev;
77451+ compat_uptr_t next;
77452+
77453+ compat_uptr_t obj_hash;
77454+ __u32 obj_hash_size;
77455+ __u16 pax_flags;
77456+};
77457+
77458+struct role_allowed_ip_compat {
77459+ __u32 addr;
77460+ __u32 netmask;
77461+
77462+ compat_uptr_t prev;
77463+ compat_uptr_t next;
77464+};
77465+
77466+struct role_transition_compat {
77467+ compat_uptr_t rolename;
77468+
77469+ compat_uptr_t prev;
77470+ compat_uptr_t next;
77471+};
77472+
77473+struct acl_role_label_compat {
77474+ compat_uptr_t rolename;
77475+ uid_t uidgid;
77476+ __u16 roletype;
77477+
77478+ __u16 auth_attempts;
77479+ compat_ulong_t expires;
77480+
77481+ compat_uptr_t root_label;
77482+ compat_uptr_t hash;
77483+
77484+ compat_uptr_t prev;
77485+ compat_uptr_t next;
77486+
77487+ compat_uptr_t transitions;
77488+ compat_uptr_t allowed_ips;
77489+ compat_uptr_t domain_children;
77490+ __u16 domain_child_num;
77491+
77492+ umode_t umask;
77493+
77494+ compat_uptr_t subj_hash;
77495+ __u32 subj_hash_size;
77496+};
77497+
77498+struct user_acl_role_db_compat {
77499+ compat_uptr_t r_table;
77500+ __u32 num_pointers;
77501+ __u32 num_roles;
77502+ __u32 num_domain_children;
77503+ __u32 num_subjects;
77504+ __u32 num_objects;
77505+};
77506+
77507+struct acl_object_label_compat {
77508+ compat_uptr_t filename;
77509+ compat_ino_t inode;
77510+ __u32 device;
77511+ __u32 mode;
77512+
77513+ compat_uptr_t nested;
77514+ compat_uptr_t globbed;
77515+
77516+ compat_uptr_t prev;
77517+ compat_uptr_t next;
77518+};
77519+
77520+struct acl_ip_label_compat {
77521+ compat_uptr_t iface;
77522+ __u32 addr;
77523+ __u32 netmask;
77524+ __u16 low, high;
77525+ __u8 mode;
77526+ __u32 type;
77527+ __u32 proto[8];
77528+
77529+ compat_uptr_t prev;
77530+ compat_uptr_t next;
77531+};
77532+
77533+struct gr_arg_compat {
77534+ struct user_acl_role_db_compat role_db;
77535+ unsigned char pw[GR_PW_LEN];
77536+ unsigned char salt[GR_SALT_LEN];
77537+ unsigned char sum[GR_SHA_LEN];
77538+ unsigned char sp_role[GR_SPROLE_LEN];
77539+ compat_uptr_t sprole_pws;
77540+ __u32 segv_device;
77541+ compat_ino_t segv_inode;
77542+ uid_t segv_uid;
77543+ __u16 num_sprole_pws;
77544+ __u16 mode;
77545+};
77546+
77547+struct gr_arg_wrapper_compat {
77548+ compat_uptr_t arg;
77549+ __u32 version;
77550+ __u32 size;
77551+};
77552+
77553+#endif
77554diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
77555new file mode 100644
77556index 0000000..323ecf2
77557--- /dev/null
77558+++ b/include/linux/gralloc.h
77559@@ -0,0 +1,9 @@
77560+#ifndef __GRALLOC_H
77561+#define __GRALLOC_H
77562+
77563+void acl_free_all(void);
77564+int acl_alloc_stack_init(unsigned long size);
77565+void *acl_alloc(unsigned long len);
77566+void *acl_alloc_num(unsigned long num, unsigned long len);
77567+
77568+#endif
77569diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
77570new file mode 100644
77571index 0000000..be66033
77572--- /dev/null
77573+++ b/include/linux/grdefs.h
77574@@ -0,0 +1,140 @@
77575+#ifndef GRDEFS_H
77576+#define GRDEFS_H
77577+
77578+/* Begin grsecurity status declarations */
77579+
77580+enum {
77581+ GR_READY = 0x01,
77582+ GR_STATUS_INIT = 0x00 // disabled state
77583+};
77584+
77585+/* Begin ACL declarations */
77586+
77587+/* Role flags */
77588+
77589+enum {
77590+ GR_ROLE_USER = 0x0001,
77591+ GR_ROLE_GROUP = 0x0002,
77592+ GR_ROLE_DEFAULT = 0x0004,
77593+ GR_ROLE_SPECIAL = 0x0008,
77594+ GR_ROLE_AUTH = 0x0010,
77595+ GR_ROLE_NOPW = 0x0020,
77596+ GR_ROLE_GOD = 0x0040,
77597+ GR_ROLE_LEARN = 0x0080,
77598+ GR_ROLE_TPE = 0x0100,
77599+ GR_ROLE_DOMAIN = 0x0200,
77600+ GR_ROLE_PAM = 0x0400,
77601+ GR_ROLE_PERSIST = 0x0800
77602+};
77603+
77604+/* ACL Subject and Object mode flags */
77605+enum {
77606+ GR_DELETED = 0x80000000
77607+};
77608+
77609+/* ACL Object-only mode flags */
77610+enum {
77611+ GR_READ = 0x00000001,
77612+ GR_APPEND = 0x00000002,
77613+ GR_WRITE = 0x00000004,
77614+ GR_EXEC = 0x00000008,
77615+ GR_FIND = 0x00000010,
77616+ GR_INHERIT = 0x00000020,
77617+ GR_SETID = 0x00000040,
77618+ GR_CREATE = 0x00000080,
77619+ GR_DELETE = 0x00000100,
77620+ GR_LINK = 0x00000200,
77621+ GR_AUDIT_READ = 0x00000400,
77622+ GR_AUDIT_APPEND = 0x00000800,
77623+ GR_AUDIT_WRITE = 0x00001000,
77624+ GR_AUDIT_EXEC = 0x00002000,
77625+ GR_AUDIT_FIND = 0x00004000,
77626+ GR_AUDIT_INHERIT= 0x00008000,
77627+ GR_AUDIT_SETID = 0x00010000,
77628+ GR_AUDIT_CREATE = 0x00020000,
77629+ GR_AUDIT_DELETE = 0x00040000,
77630+ GR_AUDIT_LINK = 0x00080000,
77631+ GR_PTRACERD = 0x00100000,
77632+ GR_NOPTRACE = 0x00200000,
77633+ GR_SUPPRESS = 0x00400000,
77634+ GR_NOLEARN = 0x00800000,
77635+ GR_INIT_TRANSFER= 0x01000000
77636+};
77637+
77638+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
77639+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
77640+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
77641+
77642+/* ACL subject-only mode flags */
77643+enum {
77644+ GR_KILL = 0x00000001,
77645+ GR_VIEW = 0x00000002,
77646+ GR_PROTECTED = 0x00000004,
77647+ GR_LEARN = 0x00000008,
77648+ GR_OVERRIDE = 0x00000010,
77649+ /* just a placeholder, this mode is only used in userspace */
77650+ GR_DUMMY = 0x00000020,
77651+ GR_PROTSHM = 0x00000040,
77652+ GR_KILLPROC = 0x00000080,
77653+ GR_KILLIPPROC = 0x00000100,
77654+ /* just a placeholder, this mode is only used in userspace */
77655+ GR_NOTROJAN = 0x00000200,
77656+ GR_PROTPROCFD = 0x00000400,
77657+ GR_PROCACCT = 0x00000800,
77658+ GR_RELAXPTRACE = 0x00001000,
77659+ //GR_NESTED = 0x00002000,
77660+ GR_INHERITLEARN = 0x00004000,
77661+ GR_PROCFIND = 0x00008000,
77662+ GR_POVERRIDE = 0x00010000,
77663+ GR_KERNELAUTH = 0x00020000,
77664+ GR_ATSECURE = 0x00040000,
77665+ GR_SHMEXEC = 0x00080000
77666+};
77667+
77668+enum {
77669+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
77670+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
77671+ GR_PAX_ENABLE_MPROTECT = 0x0004,
77672+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
77673+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
77674+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
77675+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
77676+ GR_PAX_DISABLE_MPROTECT = 0x0400,
77677+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
77678+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
77679+};
77680+
77681+enum {
77682+ GR_ID_USER = 0x01,
77683+ GR_ID_GROUP = 0x02,
77684+};
77685+
77686+enum {
77687+ GR_ID_ALLOW = 0x01,
77688+ GR_ID_DENY = 0x02,
77689+};
77690+
77691+#define GR_CRASH_RES 31
77692+#define GR_UIDTABLE_MAX 500
77693+
77694+/* begin resource learning section */
77695+enum {
77696+ GR_RLIM_CPU_BUMP = 60,
77697+ GR_RLIM_FSIZE_BUMP = 50000,
77698+ GR_RLIM_DATA_BUMP = 10000,
77699+ GR_RLIM_STACK_BUMP = 1000,
77700+ GR_RLIM_CORE_BUMP = 10000,
77701+ GR_RLIM_RSS_BUMP = 500000,
77702+ GR_RLIM_NPROC_BUMP = 1,
77703+ GR_RLIM_NOFILE_BUMP = 5,
77704+ GR_RLIM_MEMLOCK_BUMP = 50000,
77705+ GR_RLIM_AS_BUMP = 500000,
77706+ GR_RLIM_LOCKS_BUMP = 2,
77707+ GR_RLIM_SIGPENDING_BUMP = 5,
77708+ GR_RLIM_MSGQUEUE_BUMP = 10000,
77709+ GR_RLIM_NICE_BUMP = 1,
77710+ GR_RLIM_RTPRIO_BUMP = 1,
77711+ GR_RLIM_RTTIME_BUMP = 1000000
77712+};
77713+
77714+#endif
77715diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
77716new file mode 100644
77717index 0000000..d25522e
77718--- /dev/null
77719+++ b/include/linux/grinternal.h
77720@@ -0,0 +1,229 @@
77721+#ifndef __GRINTERNAL_H
77722+#define __GRINTERNAL_H
77723+
77724+#ifdef CONFIG_GRKERNSEC
77725+
77726+#include <linux/fs.h>
77727+#include <linux/mnt_namespace.h>
77728+#include <linux/nsproxy.h>
77729+#include <linux/gracl.h>
77730+#include <linux/grdefs.h>
77731+#include <linux/grmsg.h>
77732+
77733+void gr_add_learn_entry(const char *fmt, ...)
77734+ __attribute__ ((format (printf, 1, 2)));
77735+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
77736+ const struct vfsmount *mnt);
77737+__u32 gr_check_create(const struct dentry *new_dentry,
77738+ const struct dentry *parent,
77739+ const struct vfsmount *mnt, const __u32 mode);
77740+int gr_check_protected_task(const struct task_struct *task);
77741+__u32 to_gr_audit(const __u32 reqmode);
77742+int gr_set_acls(const int type);
77743+int gr_acl_is_enabled(void);
77744+char gr_roletype_to_char(void);
77745+
77746+void gr_handle_alertkill(struct task_struct *task);
77747+char *gr_to_filename(const struct dentry *dentry,
77748+ const struct vfsmount *mnt);
77749+char *gr_to_filename1(const struct dentry *dentry,
77750+ const struct vfsmount *mnt);
77751+char *gr_to_filename2(const struct dentry *dentry,
77752+ const struct vfsmount *mnt);
77753+char *gr_to_filename3(const struct dentry *dentry,
77754+ const struct vfsmount *mnt);
77755+
77756+extern int grsec_enable_ptrace_readexec;
77757+extern int grsec_enable_harden_ptrace;
77758+extern int grsec_enable_link;
77759+extern int grsec_enable_fifo;
77760+extern int grsec_enable_execve;
77761+extern int grsec_enable_shm;
77762+extern int grsec_enable_execlog;
77763+extern int grsec_enable_signal;
77764+extern int grsec_enable_audit_ptrace;
77765+extern int grsec_enable_forkfail;
77766+extern int grsec_enable_time;
77767+extern int grsec_enable_rofs;
77768+extern int grsec_deny_new_usb;
77769+extern int grsec_enable_chroot_shmat;
77770+extern int grsec_enable_chroot_mount;
77771+extern int grsec_enable_chroot_double;
77772+extern int grsec_enable_chroot_pivot;
77773+extern int grsec_enable_chroot_chdir;
77774+extern int grsec_enable_chroot_chmod;
77775+extern int grsec_enable_chroot_mknod;
77776+extern int grsec_enable_chroot_fchdir;
77777+extern int grsec_enable_chroot_nice;
77778+extern int grsec_enable_chroot_execlog;
77779+extern int grsec_enable_chroot_caps;
77780+extern int grsec_enable_chroot_sysctl;
77781+extern int grsec_enable_chroot_unix;
77782+extern int grsec_enable_symlinkown;
77783+extern kgid_t grsec_symlinkown_gid;
77784+extern int grsec_enable_tpe;
77785+extern kgid_t grsec_tpe_gid;
77786+extern int grsec_enable_tpe_all;
77787+extern int grsec_enable_tpe_invert;
77788+extern int grsec_enable_socket_all;
77789+extern kgid_t grsec_socket_all_gid;
77790+extern int grsec_enable_socket_client;
77791+extern kgid_t grsec_socket_client_gid;
77792+extern int grsec_enable_socket_server;
77793+extern kgid_t grsec_socket_server_gid;
77794+extern kgid_t grsec_audit_gid;
77795+extern int grsec_enable_group;
77796+extern int grsec_enable_log_rwxmaps;
77797+extern int grsec_enable_mount;
77798+extern int grsec_enable_chdir;
77799+extern int grsec_resource_logging;
77800+extern int grsec_enable_blackhole;
77801+extern int grsec_lastack_retries;
77802+extern int grsec_enable_brute;
77803+extern int grsec_enable_harden_ipc;
77804+extern int grsec_lock;
77805+
77806+extern spinlock_t grsec_alert_lock;
77807+extern unsigned long grsec_alert_wtime;
77808+extern unsigned long grsec_alert_fyet;
77809+
77810+extern spinlock_t grsec_audit_lock;
77811+
77812+extern rwlock_t grsec_exec_file_lock;
77813+
77814+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
77815+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
77816+ (tsk)->exec_file->f_path.mnt) : "/")
77817+
77818+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
77819+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
77820+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
77821+
77822+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
77823+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
77824+ (tsk)->exec_file->f_path.mnt) : "/")
77825+
77826+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
77827+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
77828+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
77829+
77830+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
77831+
77832+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
77833+
77834+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
77835+{
77836+ if (file1 && file2) {
77837+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
77838+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
77839+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
77840+ return true;
77841+ }
77842+
77843+ return false;
77844+}
77845+
77846+#define GR_CHROOT_CAPS {{ \
77847+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
77848+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
77849+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
77850+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
77851+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
77852+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
77853+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
77854+
77855+#define security_learn(normal_msg,args...) \
77856+({ \
77857+ read_lock(&grsec_exec_file_lock); \
77858+ gr_add_learn_entry(normal_msg "\n", ## args); \
77859+ read_unlock(&grsec_exec_file_lock); \
77860+})
77861+
77862+enum {
77863+ GR_DO_AUDIT,
77864+ GR_DONT_AUDIT,
77865+ /* used for non-audit messages that we shouldn't kill the task on */
77866+ GR_DONT_AUDIT_GOOD
77867+};
77868+
77869+enum {
77870+ GR_TTYSNIFF,
77871+ GR_RBAC,
77872+ GR_RBAC_STR,
77873+ GR_STR_RBAC,
77874+ GR_RBAC_MODE2,
77875+ GR_RBAC_MODE3,
77876+ GR_FILENAME,
77877+ GR_SYSCTL_HIDDEN,
77878+ GR_NOARGS,
77879+ GR_ONE_INT,
77880+ GR_ONE_INT_TWO_STR,
77881+ GR_ONE_STR,
77882+ GR_STR_INT,
77883+ GR_TWO_STR_INT,
77884+ GR_TWO_INT,
77885+ GR_TWO_U64,
77886+ GR_THREE_INT,
77887+ GR_FIVE_INT_TWO_STR,
77888+ GR_TWO_STR,
77889+ GR_THREE_STR,
77890+ GR_FOUR_STR,
77891+ GR_STR_FILENAME,
77892+ GR_FILENAME_STR,
77893+ GR_FILENAME_TWO_INT,
77894+ GR_FILENAME_TWO_INT_STR,
77895+ GR_TEXTREL,
77896+ GR_PTRACE,
77897+ GR_RESOURCE,
77898+ GR_CAP,
77899+ GR_SIG,
77900+ GR_SIG2,
77901+ GR_CRASH1,
77902+ GR_CRASH2,
77903+ GR_PSACCT,
77904+ GR_RWXMAP,
77905+ GR_RWXMAPVMA
77906+};
77907+
77908+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
77909+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
77910+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
77911+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
77912+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
77913+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
77914+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
77915+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
77916+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
77917+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
77918+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
77919+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
77920+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
77921+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
77922+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
77923+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
77924+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
77925+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
77926+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
77927+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
77928+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
77929+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
77930+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
77931+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
77932+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
77933+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
77934+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
77935+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
77936+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
77937+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
77938+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
77939+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
77940+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
77941+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
77942+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
77943+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
77944+
77945+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
77946+
77947+#endif
77948+
77949+#endif
77950diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
77951new file mode 100644
77952index 0000000..ba93581
77953--- /dev/null
77954+++ b/include/linux/grmsg.h
77955@@ -0,0 +1,116 @@
77956+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
77957+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
77958+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
77959+#define GR_STOPMOD_MSG "denied modification of module state by "
77960+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
77961+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
77962+#define GR_IOPERM_MSG "denied use of ioperm() by "
77963+#define GR_IOPL_MSG "denied use of iopl() by "
77964+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
77965+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
77966+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
77967+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
77968+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
77969+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
77970+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
77971+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
77972+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
77973+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
77974+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
77975+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
77976+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
77977+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
77978+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
77979+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
77980+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
77981+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
77982+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
77983+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
77984+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
77985+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
77986+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
77987+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
77988+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
77989+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
77990+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
77991+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
77992+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
77993+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
77994+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
77995+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
77996+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
77997+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
77998+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
77999+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
78000+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
78001+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
78002+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
78003+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
78004+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
78005+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
78006+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
78007+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
78008+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
78009+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
78010+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
78011+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
78012+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
78013+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
78014+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
78015+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
78016+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
78017+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
78018+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
78019+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
78020+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
78021+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
78022+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
78023+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
78024+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
78025+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
78026+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
78027+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
78028+#define GR_FAILFORK_MSG "failed fork with errno %s by "
78029+#define GR_NICE_CHROOT_MSG "denied priority change by "
78030+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
78031+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
78032+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
78033+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
78034+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
78035+#define GR_TIME_MSG "time set by "
78036+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
78037+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
78038+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
78039+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
78040+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
78041+#define GR_BIND_MSG "denied bind() by "
78042+#define GR_CONNECT_MSG "denied connect() by "
78043+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
78044+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
78045+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
78046+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
78047+#define GR_CAP_ACL_MSG "use of %s denied for "
78048+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
78049+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
78050+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
78051+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
78052+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
78053+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
78054+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
78055+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
78056+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
78057+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
78058+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
78059+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
78060+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
78061+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
78062+#define GR_VM86_MSG "denied use of vm86 by "
78063+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
78064+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
78065+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
78066+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
78067+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
78068+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
78069+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
78070+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
78071+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
78072diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
78073new file mode 100644
78074index 0000000..8108301
78075--- /dev/null
78076+++ b/include/linux/grsecurity.h
78077@@ -0,0 +1,246 @@
78078+#ifndef GR_SECURITY_H
78079+#define GR_SECURITY_H
78080+#include <linux/fs.h>
78081+#include <linux/fs_struct.h>
78082+#include <linux/binfmts.h>
78083+#include <linux/gracl.h>
78084+
78085+/* notify of brain-dead configs */
78086+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78087+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
78088+#endif
78089+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
78090+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
78091+#endif
78092+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
78093+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
78094+#endif
78095+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
78096+#error "CONFIG_PAX enabled, but no PaX options are enabled."
78097+#endif
78098+
78099+int gr_handle_new_usb(void);
78100+
78101+void gr_handle_brute_attach(int dumpable);
78102+void gr_handle_brute_check(void);
78103+void gr_handle_kernel_exploit(void);
78104+
78105+char gr_roletype_to_char(void);
78106+
78107+int gr_acl_enable_at_secure(void);
78108+
78109+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
78110+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
78111+
78112+void gr_del_task_from_ip_table(struct task_struct *p);
78113+
78114+int gr_pid_is_chrooted(struct task_struct *p);
78115+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
78116+int gr_handle_chroot_nice(void);
78117+int gr_handle_chroot_sysctl(const int op);
78118+int gr_handle_chroot_setpriority(struct task_struct *p,
78119+ const int niceval);
78120+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
78121+int gr_handle_chroot_chroot(const struct dentry *dentry,
78122+ const struct vfsmount *mnt);
78123+void gr_handle_chroot_chdir(const struct path *path);
78124+int gr_handle_chroot_chmod(const struct dentry *dentry,
78125+ const struct vfsmount *mnt, const int mode);
78126+int gr_handle_chroot_mknod(const struct dentry *dentry,
78127+ const struct vfsmount *mnt, const int mode);
78128+int gr_handle_chroot_mount(const struct dentry *dentry,
78129+ const struct vfsmount *mnt,
78130+ const char *dev_name);
78131+int gr_handle_chroot_pivot(void);
78132+int gr_handle_chroot_unix(const pid_t pid);
78133+
78134+int gr_handle_rawio(const struct inode *inode);
78135+
78136+void gr_handle_ioperm(void);
78137+void gr_handle_iopl(void);
78138+void gr_handle_msr_write(void);
78139+
78140+umode_t gr_acl_umask(void);
78141+
78142+int gr_tpe_allow(const struct file *file);
78143+
78144+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
78145+void gr_clear_chroot_entries(struct task_struct *task);
78146+
78147+void gr_log_forkfail(const int retval);
78148+void gr_log_timechange(void);
78149+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
78150+void gr_log_chdir(const struct dentry *dentry,
78151+ const struct vfsmount *mnt);
78152+void gr_log_chroot_exec(const struct dentry *dentry,
78153+ const struct vfsmount *mnt);
78154+void gr_log_remount(const char *devname, const int retval);
78155+void gr_log_unmount(const char *devname, const int retval);
78156+void gr_log_mount(const char *from, const char *to, const int retval);
78157+void gr_log_textrel(struct vm_area_struct *vma);
78158+void gr_log_ptgnustack(struct file *file);
78159+void gr_log_rwxmmap(struct file *file);
78160+void gr_log_rwxmprotect(struct vm_area_struct *vma);
78161+
78162+int gr_handle_follow_link(const struct inode *parent,
78163+ const struct inode *inode,
78164+ const struct dentry *dentry,
78165+ const struct vfsmount *mnt);
78166+int gr_handle_fifo(const struct dentry *dentry,
78167+ const struct vfsmount *mnt,
78168+ const struct dentry *dir, const int flag,
78169+ const int acc_mode);
78170+int gr_handle_hardlink(const struct dentry *dentry,
78171+ const struct vfsmount *mnt,
78172+ struct inode *inode,
78173+ const int mode, const struct filename *to);
78174+
78175+int gr_is_capable(const int cap);
78176+int gr_is_capable_nolog(const int cap);
78177+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78178+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
78179+
78180+void gr_copy_label(struct task_struct *tsk);
78181+void gr_handle_crash(struct task_struct *task, const int sig);
78182+int gr_handle_signal(const struct task_struct *p, const int sig);
78183+int gr_check_crash_uid(const kuid_t uid);
78184+int gr_check_protected_task(const struct task_struct *task);
78185+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
78186+int gr_acl_handle_mmap(const struct file *file,
78187+ const unsigned long prot);
78188+int gr_acl_handle_mprotect(const struct file *file,
78189+ const unsigned long prot);
78190+int gr_check_hidden_task(const struct task_struct *tsk);
78191+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
78192+ const struct vfsmount *mnt);
78193+__u32 gr_acl_handle_utime(const struct dentry *dentry,
78194+ const struct vfsmount *mnt);
78195+__u32 gr_acl_handle_access(const struct dentry *dentry,
78196+ const struct vfsmount *mnt, const int fmode);
78197+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
78198+ const struct vfsmount *mnt, umode_t *mode);
78199+__u32 gr_acl_handle_chown(const struct dentry *dentry,
78200+ const struct vfsmount *mnt);
78201+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
78202+ const struct vfsmount *mnt);
78203+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
78204+ const struct vfsmount *mnt);
78205+int gr_handle_ptrace(struct task_struct *task, const long request);
78206+int gr_handle_proc_ptrace(struct task_struct *task);
78207+__u32 gr_acl_handle_execve(const struct dentry *dentry,
78208+ const struct vfsmount *mnt);
78209+int gr_check_crash_exec(const struct file *filp);
78210+int gr_acl_is_enabled(void);
78211+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
78212+ const kgid_t gid);
78213+int gr_set_proc_label(const struct dentry *dentry,
78214+ const struct vfsmount *mnt,
78215+ const int unsafe_flags);
78216+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
78217+ const struct vfsmount *mnt);
78218+__u32 gr_acl_handle_open(const struct dentry *dentry,
78219+ const struct vfsmount *mnt, int acc_mode);
78220+__u32 gr_acl_handle_creat(const struct dentry *dentry,
78221+ const struct dentry *p_dentry,
78222+ const struct vfsmount *p_mnt,
78223+ int open_flags, int acc_mode, const int imode);
78224+void gr_handle_create(const struct dentry *dentry,
78225+ const struct vfsmount *mnt);
78226+void gr_handle_proc_create(const struct dentry *dentry,
78227+ const struct inode *inode);
78228+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
78229+ const struct dentry *parent_dentry,
78230+ const struct vfsmount *parent_mnt,
78231+ const int mode);
78232+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
78233+ const struct dentry *parent_dentry,
78234+ const struct vfsmount *parent_mnt);
78235+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
78236+ const struct vfsmount *mnt);
78237+void gr_handle_delete(const ino_t ino, const dev_t dev);
78238+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
78239+ const struct vfsmount *mnt);
78240+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
78241+ const struct dentry *parent_dentry,
78242+ const struct vfsmount *parent_mnt,
78243+ const struct filename *from);
78244+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
78245+ const struct dentry *parent_dentry,
78246+ const struct vfsmount *parent_mnt,
78247+ const struct dentry *old_dentry,
78248+ const struct vfsmount *old_mnt, const struct filename *to);
78249+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
78250+int gr_acl_handle_rename(struct dentry *new_dentry,
78251+ struct dentry *parent_dentry,
78252+ const struct vfsmount *parent_mnt,
78253+ struct dentry *old_dentry,
78254+ struct inode *old_parent_inode,
78255+ struct vfsmount *old_mnt, const struct filename *newname);
78256+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
78257+ struct dentry *old_dentry,
78258+ struct dentry *new_dentry,
78259+ struct vfsmount *mnt, const __u8 replace);
78260+__u32 gr_check_link(const struct dentry *new_dentry,
78261+ const struct dentry *parent_dentry,
78262+ const struct vfsmount *parent_mnt,
78263+ const struct dentry *old_dentry,
78264+ const struct vfsmount *old_mnt);
78265+int gr_acl_handle_filldir(const struct file *file, const char *name,
78266+ const unsigned int namelen, const ino_t ino);
78267+
78268+__u32 gr_acl_handle_unix(const struct dentry *dentry,
78269+ const struct vfsmount *mnt);
78270+void gr_acl_handle_exit(void);
78271+void gr_acl_handle_psacct(struct task_struct *task, const long code);
78272+int gr_acl_handle_procpidmem(const struct task_struct *task);
78273+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
78274+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
78275+void gr_audit_ptrace(struct task_struct *task);
78276+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
78277+void gr_put_exec_file(struct task_struct *task);
78278+
78279+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
78280+
78281+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
78282+extern void gr_learn_resource(const struct task_struct *task, const int res,
78283+ const unsigned long wanted, const int gt);
78284+#else
78285+static inline void gr_learn_resource(const struct task_struct *task, const int res,
78286+ const unsigned long wanted, const int gt)
78287+{
78288+}
78289+#endif
78290+
78291+#ifdef CONFIG_GRKERNSEC_RESLOG
78292+extern void gr_log_resource(const struct task_struct *task, const int res,
78293+ const unsigned long wanted, const int gt);
78294+#else
78295+static inline void gr_log_resource(const struct task_struct *task, const int res,
78296+ const unsigned long wanted, const int gt)
78297+{
78298+}
78299+#endif
78300+
78301+#ifdef CONFIG_GRKERNSEC
78302+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
78303+void gr_handle_vm86(void);
78304+void gr_handle_mem_readwrite(u64 from, u64 to);
78305+
78306+void gr_log_badprocpid(const char *entry);
78307+
78308+extern int grsec_enable_dmesg;
78309+extern int grsec_disable_privio;
78310+
78311+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
78312+extern kgid_t grsec_proc_gid;
78313+#endif
78314+
78315+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
78316+extern int grsec_enable_chroot_findtask;
78317+#endif
78318+#ifdef CONFIG_GRKERNSEC_SETXID
78319+extern int grsec_enable_setxid;
78320+#endif
78321+#endif
78322+
78323+#endif
78324diff --git a/include/linux/grsock.h b/include/linux/grsock.h
78325new file mode 100644
78326index 0000000..e7ffaaf
78327--- /dev/null
78328+++ b/include/linux/grsock.h
78329@@ -0,0 +1,19 @@
78330+#ifndef __GRSOCK_H
78331+#define __GRSOCK_H
78332+
78333+extern void gr_attach_curr_ip(const struct sock *sk);
78334+extern int gr_handle_sock_all(const int family, const int type,
78335+ const int protocol);
78336+extern int gr_handle_sock_server(const struct sockaddr *sck);
78337+extern int gr_handle_sock_server_other(const struct sock *sck);
78338+extern int gr_handle_sock_client(const struct sockaddr *sck);
78339+extern int gr_search_connect(struct socket * sock,
78340+ struct sockaddr_in * addr);
78341+extern int gr_search_bind(struct socket * sock,
78342+ struct sockaddr_in * addr);
78343+extern int gr_search_listen(struct socket * sock);
78344+extern int gr_search_accept(struct socket * sock);
78345+extern int gr_search_socket(const int domain, const int type,
78346+ const int protocol);
78347+
78348+#endif
78349diff --git a/include/linux/highmem.h b/include/linux/highmem.h
78350index 7fb31da..08b5114 100644
78351--- a/include/linux/highmem.h
78352+++ b/include/linux/highmem.h
78353@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
78354 kunmap_atomic(kaddr);
78355 }
78356
78357+static inline void sanitize_highpage(struct page *page)
78358+{
78359+ void *kaddr;
78360+ unsigned long flags;
78361+
78362+ local_irq_save(flags);
78363+ kaddr = kmap_atomic(page);
78364+ clear_page(kaddr);
78365+ kunmap_atomic(kaddr);
78366+ local_irq_restore(flags);
78367+}
78368+
78369 static inline void zero_user_segments(struct page *page,
78370 unsigned start1, unsigned end1,
78371 unsigned start2, unsigned end2)
78372diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
78373index 1c7b89a..7dda400 100644
78374--- a/include/linux/hwmon-sysfs.h
78375+++ b/include/linux/hwmon-sysfs.h
78376@@ -25,7 +25,8 @@
78377 struct sensor_device_attribute{
78378 struct device_attribute dev_attr;
78379 int index;
78380-};
78381+} __do_const;
78382+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
78383 #define to_sensor_dev_attr(_dev_attr) \
78384 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
78385
78386@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
78387 struct device_attribute dev_attr;
78388 u8 index;
78389 u8 nr;
78390-};
78391+} __do_const;
78392+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
78393 #define to_sensor_dev_attr_2(_dev_attr) \
78394 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
78395
78396diff --git a/include/linux/i2c.h b/include/linux/i2c.h
78397index d9c8dbd3..def6e5a 100644
78398--- a/include/linux/i2c.h
78399+++ b/include/linux/i2c.h
78400@@ -364,6 +364,7 @@ struct i2c_algorithm {
78401 /* To determine what the adapter supports */
78402 u32 (*functionality) (struct i2c_adapter *);
78403 };
78404+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
78405
78406 /**
78407 * struct i2c_bus_recovery_info - I2C bus recovery information
78408diff --git a/include/linux/i2o.h b/include/linux/i2o.h
78409index d23c3c2..eb63c81 100644
78410--- a/include/linux/i2o.h
78411+++ b/include/linux/i2o.h
78412@@ -565,7 +565,7 @@ struct i2o_controller {
78413 struct i2o_device *exec; /* Executive */
78414 #if BITS_PER_LONG == 64
78415 spinlock_t context_list_lock; /* lock for context_list */
78416- atomic_t context_list_counter; /* needed for unique contexts */
78417+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
78418 struct list_head context_list; /* list of context id's
78419 and pointers */
78420 #endif
78421diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
78422index aff7ad8..3942bbd 100644
78423--- a/include/linux/if_pppox.h
78424+++ b/include/linux/if_pppox.h
78425@@ -76,7 +76,7 @@ struct pppox_proto {
78426 int (*ioctl)(struct socket *sock, unsigned int cmd,
78427 unsigned long arg);
78428 struct module *owner;
78429-};
78430+} __do_const;
78431
78432 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
78433 extern void unregister_pppox_proto(int proto_num);
78434diff --git a/include/linux/init.h b/include/linux/init.h
78435index 8e68a64..3f977a0 100644
78436--- a/include/linux/init.h
78437+++ b/include/linux/init.h
78438@@ -37,9 +37,17 @@
78439 * section.
78440 */
78441
78442+#define add_init_latent_entropy __latent_entropy
78443+
78444+#ifdef CONFIG_MEMORY_HOTPLUG
78445+#define add_meminit_latent_entropy
78446+#else
78447+#define add_meminit_latent_entropy __latent_entropy
78448+#endif
78449+
78450 /* These are for everybody (although not all archs will actually
78451 discard it in modules) */
78452-#define __init __section(.init.text) __cold notrace
78453+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
78454 #define __initdata __section(.init.data)
78455 #define __initconst __constsection(.init.rodata)
78456 #define __exitdata __section(.exit.data)
78457@@ -100,7 +108,7 @@
78458 #define __cpuexitconst
78459
78460 /* Used for MEMORY_HOTPLUG */
78461-#define __meminit __section(.meminit.text) __cold notrace
78462+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
78463 #define __meminitdata __section(.meminit.data)
78464 #define __meminitconst __constsection(.meminit.rodata)
78465 #define __memexit __section(.memexit.text) __exitused __cold notrace
78466diff --git a/include/linux/init_task.h b/include/linux/init_task.h
78467index b0ed422..d79ea23 100644
78468--- a/include/linux/init_task.h
78469+++ b/include/linux/init_task.h
78470@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
78471
78472 #define INIT_TASK_COMM "swapper"
78473
78474+#ifdef CONFIG_X86
78475+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
78476+#else
78477+#define INIT_TASK_THREAD_INFO
78478+#endif
78479+
78480 /*
78481 * INIT_TASK is used to set up the first task table, touch at
78482 * your own risk!. Base=0, limit=0x1fffff (=2MB)
78483@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
78484 RCU_POINTER_INITIALIZER(cred, &init_cred), \
78485 .comm = INIT_TASK_COMM, \
78486 .thread = INIT_THREAD, \
78487+ INIT_TASK_THREAD_INFO \
78488 .fs = &init_fs, \
78489 .files = &init_files, \
78490 .signal = &init_signals, \
78491diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
78492index db43b58..5d5084b 100644
78493--- a/include/linux/interrupt.h
78494+++ b/include/linux/interrupt.h
78495@@ -360,7 +360,7 @@ enum
78496 /* map softirq index to softirq name. update 'softirq_to_name' in
78497 * kernel/softirq.c when adding a new softirq.
78498 */
78499-extern char *softirq_to_name[NR_SOFTIRQS];
78500+extern const char * const softirq_to_name[NR_SOFTIRQS];
78501
78502 /* softirq mask and active fields moved to irq_cpustat_t in
78503 * asm/hardirq.h to get better cache usage. KAO
78504@@ -368,8 +368,8 @@ extern char *softirq_to_name[NR_SOFTIRQS];
78505
78506 struct softirq_action
78507 {
78508- void (*action)(struct softirq_action *);
78509-};
78510+ void (*action)(void);
78511+} __no_const;
78512
78513 asmlinkage void do_softirq(void);
78514 asmlinkage void __do_softirq(void);
78515@@ -383,7 +383,7 @@ static inline void do_softirq_own_stack(void)
78516 }
78517 #endif
78518
78519-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
78520+extern void open_softirq(int nr, void (*action)(void));
78521 extern void softirq_init(void);
78522 extern void __raise_softirq_irqoff(unsigned int nr);
78523
78524diff --git a/include/linux/iommu.h b/include/linux/iommu.h
78525index a444c79..8c41ea9 100644
78526--- a/include/linux/iommu.h
78527+++ b/include/linux/iommu.h
78528@@ -130,7 +130,7 @@ struct iommu_ops {
78529 u32 (*domain_get_windows)(struct iommu_domain *domain);
78530
78531 unsigned long pgsize_bitmap;
78532-};
78533+} __do_const;
78534
78535 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
78536 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
78537diff --git a/include/linux/ioport.h b/include/linux/ioport.h
78538index 89b7c24..382af74 100644
78539--- a/include/linux/ioport.h
78540+++ b/include/linux/ioport.h
78541@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
78542 int adjust_resource(struct resource *res, resource_size_t start,
78543 resource_size_t size);
78544 resource_size_t resource_alignment(struct resource *res);
78545-static inline resource_size_t resource_size(const struct resource *res)
78546+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
78547 {
78548 return res->end - res->start + 1;
78549 }
78550diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
78551index f6c82de..de8619e 100644
78552--- a/include/linux/ipc_namespace.h
78553+++ b/include/linux/ipc_namespace.h
78554@@ -70,7 +70,7 @@ struct ipc_namespace {
78555 struct user_namespace *user_ns;
78556
78557 unsigned int proc_inum;
78558-};
78559+} __randomize_layout;
78560
78561 extern struct ipc_namespace init_ipc_ns;
78562 extern atomic_t nr_ipc_ns;
78563diff --git a/include/linux/irq.h b/include/linux/irq.h
78564index 7dc1003..407327b 100644
78565--- a/include/linux/irq.h
78566+++ b/include/linux/irq.h
78567@@ -338,7 +338,8 @@ struct irq_chip {
78568 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
78569
78570 unsigned long flags;
78571-};
78572+} __do_const;
78573+typedef struct irq_chip __no_const irq_chip_no_const;
78574
78575 /*
78576 * irq_chip specific flags
78577diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
78578index cac496b..ffa0567 100644
78579--- a/include/linux/irqchip/arm-gic.h
78580+++ b/include/linux/irqchip/arm-gic.h
78581@@ -61,9 +61,11 @@
78582
78583 #ifndef __ASSEMBLY__
78584
78585+#include <linux/irq.h>
78586+
78587 struct device_node;
78588
78589-extern struct irq_chip gic_arch_extn;
78590+extern irq_chip_no_const gic_arch_extn;
78591
78592 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
78593 u32 offset, struct device_node *);
78594diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
78595index d235e88..8ccbe74 100644
78596--- a/include/linux/jiffies.h
78597+++ b/include/linux/jiffies.h
78598@@ -292,14 +292,14 @@ extern unsigned long preset_lpj;
78599 /*
78600 * Convert various time units to each other:
78601 */
78602-extern unsigned int jiffies_to_msecs(const unsigned long j);
78603-extern unsigned int jiffies_to_usecs(const unsigned long j);
78604-extern unsigned long msecs_to_jiffies(const unsigned int m);
78605-extern unsigned long usecs_to_jiffies(const unsigned int u);
78606-extern unsigned long timespec_to_jiffies(const struct timespec *value);
78607+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
78608+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
78609+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
78610+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
78611+extern unsigned long timespec_to_jiffies(const struct timespec *value) __intentional_overflow(-1);
78612 extern void jiffies_to_timespec(const unsigned long jiffies,
78613 struct timespec *value);
78614-extern unsigned long timeval_to_jiffies(const struct timeval *value);
78615+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
78616 extern void jiffies_to_timeval(const unsigned long jiffies,
78617 struct timeval *value);
78618
78619diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
78620index 6883e19..e854fcb 100644
78621--- a/include/linux/kallsyms.h
78622+++ b/include/linux/kallsyms.h
78623@@ -15,7 +15,8 @@
78624
78625 struct module;
78626
78627-#ifdef CONFIG_KALLSYMS
78628+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
78629+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
78630 /* Lookup the address for a symbol. Returns 0 if not found. */
78631 unsigned long kallsyms_lookup_name(const char *name);
78632
78633@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
78634 /* Stupid that this does nothing, but I didn't create this mess. */
78635 #define __print_symbol(fmt, addr)
78636 #endif /*CONFIG_KALLSYMS*/
78637+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
78638+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
78639+extern unsigned long kallsyms_lookup_name(const char *name);
78640+extern void __print_symbol(const char *fmt, unsigned long address);
78641+extern int sprint_backtrace(char *buffer, unsigned long address);
78642+extern int sprint_symbol(char *buffer, unsigned long address);
78643+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
78644+const char *kallsyms_lookup(unsigned long addr,
78645+ unsigned long *symbolsize,
78646+ unsigned long *offset,
78647+ char **modname, char *namebuf);
78648+extern int kallsyms_lookup_size_offset(unsigned long addr,
78649+ unsigned long *symbolsize,
78650+ unsigned long *offset);
78651+#endif
78652
78653 /* This macro allows us to keep printk typechecking */
78654 static __printf(1, 2)
78655diff --git a/include/linux/key-type.h b/include/linux/key-type.h
78656index a74c3a8..28d3f21 100644
78657--- a/include/linux/key-type.h
78658+++ b/include/linux/key-type.h
78659@@ -131,7 +131,7 @@ struct key_type {
78660 /* internal fields */
78661 struct list_head link; /* link in types list */
78662 struct lock_class_key lock_class; /* key->sem lock class */
78663-};
78664+} __do_const;
78665
78666 extern struct key_type key_type_keyring;
78667
78668diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
78669index dfb4f2f..7927e62 100644
78670--- a/include/linux/kgdb.h
78671+++ b/include/linux/kgdb.h
78672@@ -52,7 +52,7 @@ extern int kgdb_connected;
78673 extern int kgdb_io_module_registered;
78674
78675 extern atomic_t kgdb_setting_breakpoint;
78676-extern atomic_t kgdb_cpu_doing_single_step;
78677+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
78678
78679 extern struct task_struct *kgdb_usethread;
78680 extern struct task_struct *kgdb_contthread;
78681@@ -254,7 +254,7 @@ struct kgdb_arch {
78682 void (*correct_hw_break)(void);
78683
78684 void (*enable_nmi)(bool on);
78685-};
78686+} __do_const;
78687
78688 /**
78689 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
78690@@ -279,7 +279,7 @@ struct kgdb_io {
78691 void (*pre_exception) (void);
78692 void (*post_exception) (void);
78693 int is_console;
78694-};
78695+} __do_const;
78696
78697 extern struct kgdb_arch arch_kgdb_ops;
78698
78699diff --git a/include/linux/kmod.h b/include/linux/kmod.h
78700index 0555cc6..40116ce 100644
78701--- a/include/linux/kmod.h
78702+++ b/include/linux/kmod.h
78703@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
78704 * usually useless though. */
78705 extern __printf(2, 3)
78706 int __request_module(bool wait, const char *name, ...);
78707+extern __printf(3, 4)
78708+int ___request_module(bool wait, char *param_name, const char *name, ...);
78709 #define request_module(mod...) __request_module(true, mod)
78710 #define request_module_nowait(mod...) __request_module(false, mod)
78711 #define try_then_request_module(x, mod...) \
78712@@ -57,6 +59,9 @@ struct subprocess_info {
78713 struct work_struct work;
78714 struct completion *complete;
78715 char *path;
78716+#ifdef CONFIG_GRKERNSEC
78717+ char *origpath;
78718+#endif
78719 char **argv;
78720 char **envp;
78721 int wait;
78722diff --git a/include/linux/kobject.h b/include/linux/kobject.h
78723index e7ba650..0af3acb 100644
78724--- a/include/linux/kobject.h
78725+++ b/include/linux/kobject.h
78726@@ -116,7 +116,7 @@ struct kobj_type {
78727 struct attribute **default_attrs;
78728 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
78729 const void *(*namespace)(struct kobject *kobj);
78730-};
78731+} __do_const;
78732
78733 struct kobj_uevent_env {
78734 char *envp[UEVENT_NUM_ENVP];
78735@@ -139,6 +139,7 @@ struct kobj_attribute {
78736 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
78737 const char *buf, size_t count);
78738 };
78739+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
78740
78741 extern const struct sysfs_ops kobj_sysfs_ops;
78742
78743@@ -166,7 +167,7 @@ struct kset {
78744 spinlock_t list_lock;
78745 struct kobject kobj;
78746 const struct kset_uevent_ops *uevent_ops;
78747-};
78748+} __randomize_layout;
78749
78750 extern void kset_init(struct kset *kset);
78751 extern int __must_check kset_register(struct kset *kset);
78752diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
78753index df32d25..fb52e27 100644
78754--- a/include/linux/kobject_ns.h
78755+++ b/include/linux/kobject_ns.h
78756@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
78757 const void *(*netlink_ns)(struct sock *sk);
78758 const void *(*initial_ns)(void);
78759 void (*drop_ns)(void *);
78760-};
78761+} __do_const;
78762
78763 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
78764 int kobj_ns_type_registered(enum kobj_ns_type type);
78765diff --git a/include/linux/kref.h b/include/linux/kref.h
78766index 484604d..0f6c5b6 100644
78767--- a/include/linux/kref.h
78768+++ b/include/linux/kref.h
78769@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
78770 static inline int kref_sub(struct kref *kref, unsigned int count,
78771 void (*release)(struct kref *kref))
78772 {
78773- WARN_ON(release == NULL);
78774+ BUG_ON(release == NULL);
78775
78776 if (atomic_sub_and_test((int) count, &kref->refcount)) {
78777 release(kref);
78778diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
78779index 9523d2a..16c0424 100644
78780--- a/include/linux/kvm_host.h
78781+++ b/include/linux/kvm_host.h
78782@@ -457,7 +457,7 @@ static inline void kvm_irqfd_exit(void)
78783 {
78784 }
78785 #endif
78786-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78787+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
78788 struct module *module);
78789 void kvm_exit(void);
78790
78791@@ -632,7 +632,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
78792 struct kvm_guest_debug *dbg);
78793 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
78794
78795-int kvm_arch_init(void *opaque);
78796+int kvm_arch_init(const void *opaque);
78797 void kvm_arch_exit(void);
78798
78799 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
78800diff --git a/include/linux/libata.h b/include/linux/libata.h
78801index bec6dbe..2873d64 100644
78802--- a/include/linux/libata.h
78803+++ b/include/linux/libata.h
78804@@ -975,7 +975,7 @@ struct ata_port_operations {
78805 * fields must be pointers.
78806 */
78807 const struct ata_port_operations *inherits;
78808-};
78809+} __do_const;
78810
78811 struct ata_port_info {
78812 unsigned long flags;
78813diff --git a/include/linux/linkage.h b/include/linux/linkage.h
78814index d3e8ad2..a949f68 100644
78815--- a/include/linux/linkage.h
78816+++ b/include/linux/linkage.h
78817@@ -31,6 +31,7 @@
78818 #endif
78819
78820 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
78821+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
78822 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
78823
78824 /*
78825diff --git a/include/linux/list.h b/include/linux/list.h
78826index ef95941..82db65a 100644
78827--- a/include/linux/list.h
78828+++ b/include/linux/list.h
78829@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
78830 extern void list_del(struct list_head *entry);
78831 #endif
78832
78833+extern void __pax_list_add(struct list_head *new,
78834+ struct list_head *prev,
78835+ struct list_head *next);
78836+static inline void pax_list_add(struct list_head *new, struct list_head *head)
78837+{
78838+ __pax_list_add(new, head, head->next);
78839+}
78840+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
78841+{
78842+ __pax_list_add(new, head->prev, head);
78843+}
78844+extern void pax_list_del(struct list_head *entry);
78845+
78846 /**
78847 * list_replace - replace old entry by new one
78848 * @old : the element to be replaced
78849@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
78850 INIT_LIST_HEAD(entry);
78851 }
78852
78853+extern void pax_list_del_init(struct list_head *entry);
78854+
78855 /**
78856 * list_move - delete from one list and add as another's head
78857 * @list: the entry to move
78858diff --git a/include/linux/math64.h b/include/linux/math64.h
78859index c45c089..298841c 100644
78860--- a/include/linux/math64.h
78861+++ b/include/linux/math64.h
78862@@ -15,7 +15,7 @@
78863 * This is commonly provided by 32bit archs to provide an optimized 64bit
78864 * divide.
78865 */
78866-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
78867+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
78868 {
78869 *remainder = dividend % divisor;
78870 return dividend / divisor;
78871@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
78872 /**
78873 * div64_u64 - unsigned 64bit divide with 64bit divisor
78874 */
78875-static inline u64 div64_u64(u64 dividend, u64 divisor)
78876+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
78877 {
78878 return dividend / divisor;
78879 }
78880@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
78881 #define div64_ul(x, y) div_u64((x), (y))
78882
78883 #ifndef div_u64_rem
78884-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
78885+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
78886 {
78887 *remainder = do_div(dividend, divisor);
78888 return dividend;
78889@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
78890 #endif
78891
78892 #ifndef div64_u64
78893-extern u64 div64_u64(u64 dividend, u64 divisor);
78894+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
78895 #endif
78896
78897 #ifndef div64_s64
78898@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
78899 * divide.
78900 */
78901 #ifndef div_u64
78902-static inline u64 div_u64(u64 dividend, u32 divisor)
78903+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
78904 {
78905 u32 remainder;
78906 return div_u64_rem(dividend, divisor, &remainder);
78907diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
78908index 9fe426b..8148be6 100644
78909--- a/include/linux/mempolicy.h
78910+++ b/include/linux/mempolicy.h
78911@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
78912 }
78913
78914 #define vma_policy(vma) ((vma)->vm_policy)
78915+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
78916+{
78917+ vma->vm_policy = pol;
78918+}
78919
78920 static inline void mpol_get(struct mempolicy *pol)
78921 {
78922@@ -241,6 +245,9 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
78923 }
78924
78925 #define vma_policy(vma) NULL
78926+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
78927+{
78928+}
78929
78930 static inline int
78931 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
78932diff --git a/include/linux/mm.h b/include/linux/mm.h
78933index 9fac6dd..158ca43 100644
78934--- a/include/linux/mm.h
78935+++ b/include/linux/mm.h
78936@@ -117,6 +117,11 @@ extern unsigned int kobjsize(const void *objp);
78937 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
78938 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
78939 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
78940+
78941+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78942+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
78943+#endif
78944+
78945 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
78946
78947 #ifdef CONFIG_MEM_SOFT_DIRTY
78948@@ -219,8 +224,8 @@ struct vm_operations_struct {
78949 /* called by access_process_vm when get_user_pages() fails, typically
78950 * for use by special VMAs that can switch between memory and hardware
78951 */
78952- int (*access)(struct vm_area_struct *vma, unsigned long addr,
78953- void *buf, int len, int write);
78954+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
78955+ void *buf, size_t len, int write);
78956 #ifdef CONFIG_NUMA
78957 /*
78958 * set_policy() op must add a reference to any non-NULL @new mempolicy
78959@@ -250,6 +255,7 @@ struct vm_operations_struct {
78960 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
78961 unsigned long size, pgoff_t pgoff);
78962 };
78963+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
78964
78965 struct mmu_gather;
78966 struct inode;
78967@@ -1064,8 +1070,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
78968 unsigned long *pfn);
78969 int follow_phys(struct vm_area_struct *vma, unsigned long address,
78970 unsigned int flags, unsigned long *prot, resource_size_t *phys);
78971-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
78972- void *buf, int len, int write);
78973+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
78974+ void *buf, size_t len, int write);
78975
78976 static inline void unmap_shared_mapping_range(struct address_space *mapping,
78977 loff_t const holebegin, loff_t const holelen)
78978@@ -1104,9 +1110,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
78979 }
78980 #endif
78981
78982-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
78983-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
78984- void *buf, int len, int write);
78985+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
78986+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
78987+ void *buf, size_t len, int write);
78988
78989 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
78990 unsigned long start, unsigned long nr_pages,
78991@@ -1138,34 +1144,6 @@ int set_page_dirty(struct page *page);
78992 int set_page_dirty_lock(struct page *page);
78993 int clear_page_dirty_for_io(struct page *page);
78994
78995-/* Is the vma a continuation of the stack vma above it? */
78996-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
78997-{
78998- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
78999-}
79000-
79001-static inline int stack_guard_page_start(struct vm_area_struct *vma,
79002- unsigned long addr)
79003-{
79004- return (vma->vm_flags & VM_GROWSDOWN) &&
79005- (vma->vm_start == addr) &&
79006- !vma_growsdown(vma->vm_prev, addr);
79007-}
79008-
79009-/* Is the vma a continuation of the stack vma below it? */
79010-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
79011-{
79012- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
79013-}
79014-
79015-static inline int stack_guard_page_end(struct vm_area_struct *vma,
79016- unsigned long addr)
79017-{
79018- return (vma->vm_flags & VM_GROWSUP) &&
79019- (vma->vm_end == addr) &&
79020- !vma_growsup(vma->vm_next, addr);
79021-}
79022-
79023 extern pid_t
79024 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
79025
79026@@ -1265,6 +1243,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
79027 }
79028 #endif
79029
79030+#ifdef CONFIG_MMU
79031+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
79032+#else
79033+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
79034+{
79035+ return __pgprot(0);
79036+}
79037+#endif
79038+
79039 int vma_wants_writenotify(struct vm_area_struct *vma);
79040
79041 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
79042@@ -1283,8 +1270,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
79043 {
79044 return 0;
79045 }
79046+
79047+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
79048+ unsigned long address)
79049+{
79050+ return 0;
79051+}
79052 #else
79053 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
79054+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
79055 #endif
79056
79057 #ifdef __PAGETABLE_PMD_FOLDED
79058@@ -1293,8 +1287,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
79059 {
79060 return 0;
79061 }
79062+
79063+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
79064+ unsigned long address)
79065+{
79066+ return 0;
79067+}
79068 #else
79069 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
79070+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
79071 #endif
79072
79073 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
79074@@ -1312,11 +1313,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
79075 NULL: pud_offset(pgd, address);
79076 }
79077
79078+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
79079+{
79080+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
79081+ NULL: pud_offset(pgd, address);
79082+}
79083+
79084 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
79085 {
79086 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
79087 NULL: pmd_offset(pud, address);
79088 }
79089+
79090+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
79091+{
79092+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
79093+ NULL: pmd_offset(pud, address);
79094+}
79095 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
79096
79097 #if USE_SPLIT_PTE_PTLOCKS
79098@@ -1694,7 +1707,7 @@ extern int install_special_mapping(struct mm_struct *mm,
79099 unsigned long addr, unsigned long len,
79100 unsigned long flags, struct page **pages);
79101
79102-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
79103+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
79104
79105 extern unsigned long mmap_region(struct file *file, unsigned long addr,
79106 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
79107@@ -1702,6 +1715,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
79108 unsigned long len, unsigned long prot, unsigned long flags,
79109 unsigned long pgoff, unsigned long *populate);
79110 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
79111+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
79112
79113 #ifdef CONFIG_MMU
79114 extern int __mm_populate(unsigned long addr, unsigned long len,
79115@@ -1730,10 +1744,11 @@ struct vm_unmapped_area_info {
79116 unsigned long high_limit;
79117 unsigned long align_mask;
79118 unsigned long align_offset;
79119+ unsigned long threadstack_offset;
79120 };
79121
79122-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
79123-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
79124+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
79125+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
79126
79127 /*
79128 * Search for an unmapped address range.
79129@@ -1745,7 +1760,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
79130 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
79131 */
79132 static inline unsigned long
79133-vm_unmapped_area(struct vm_unmapped_area_info *info)
79134+vm_unmapped_area(const struct vm_unmapped_area_info *info)
79135 {
79136 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
79137 return unmapped_area(info);
79138@@ -1808,6 +1823,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
79139 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
79140 struct vm_area_struct **pprev);
79141
79142+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
79143+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
79144+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
79145+
79146 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
79147 NULL if none. Assume start_addr < end_addr. */
79148 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
79149@@ -1836,15 +1855,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
79150 return vma;
79151 }
79152
79153-#ifdef CONFIG_MMU
79154-pgprot_t vm_get_page_prot(unsigned long vm_flags);
79155-#else
79156-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
79157-{
79158- return __pgprot(0);
79159-}
79160-#endif
79161-
79162 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
79163 unsigned long change_prot_numa(struct vm_area_struct *vma,
79164 unsigned long start, unsigned long end);
79165@@ -1896,6 +1906,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
79166 static inline void vm_stat_account(struct mm_struct *mm,
79167 unsigned long flags, struct file *file, long pages)
79168 {
79169+
79170+#ifdef CONFIG_PAX_RANDMMAP
79171+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
79172+#endif
79173+
79174 mm->total_vm += pages;
79175 }
79176 #endif /* CONFIG_PROC_FS */
79177@@ -1977,7 +1992,7 @@ extern int unpoison_memory(unsigned long pfn);
79178 extern int sysctl_memory_failure_early_kill;
79179 extern int sysctl_memory_failure_recovery;
79180 extern void shake_page(struct page *p, int access);
79181-extern atomic_long_t num_poisoned_pages;
79182+extern atomic_long_unchecked_t num_poisoned_pages;
79183 extern int soft_offline_page(struct page *page, int flags);
79184
79185 extern void dump_page(struct page *page);
79186@@ -2014,5 +2029,11 @@ void __init setup_nr_node_ids(void);
79187 static inline void setup_nr_node_ids(void) {}
79188 #endif
79189
79190+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79191+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
79192+#else
79193+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
79194+#endif
79195+
79196 #endif /* __KERNEL__ */
79197 #endif /* _LINUX_MM_H */
79198diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
79199index 290901a..e99b01c 100644
79200--- a/include/linux/mm_types.h
79201+++ b/include/linux/mm_types.h
79202@@ -307,7 +307,9 @@ struct vm_area_struct {
79203 #ifdef CONFIG_NUMA
79204 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
79205 #endif
79206-};
79207+
79208+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
79209+} __randomize_layout;
79210
79211 struct core_thread {
79212 struct task_struct *task;
79213@@ -453,7 +455,25 @@ struct mm_struct {
79214 bool tlb_flush_pending;
79215 #endif
79216 struct uprobes_state uprobes_state;
79217-};
79218+
79219+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
79220+ unsigned long pax_flags;
79221+#endif
79222+
79223+#ifdef CONFIG_PAX_DLRESOLVE
79224+ unsigned long call_dl_resolve;
79225+#endif
79226+
79227+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
79228+ unsigned long call_syscall;
79229+#endif
79230+
79231+#ifdef CONFIG_PAX_ASLR
79232+ unsigned long delta_mmap; /* randomized offset */
79233+ unsigned long delta_stack; /* randomized offset */
79234+#endif
79235+
79236+} __randomize_layout;
79237
79238 static inline void mm_init_cpumask(struct mm_struct *mm)
79239 {
79240diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
79241index c5d5278..f0b68c8 100644
79242--- a/include/linux/mmiotrace.h
79243+++ b/include/linux/mmiotrace.h
79244@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
79245 /* Called from ioremap.c */
79246 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
79247 void __iomem *addr);
79248-extern void mmiotrace_iounmap(volatile void __iomem *addr);
79249+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
79250
79251 /* For anyone to insert markers. Remember trailing newline. */
79252 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
79253@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
79254 {
79255 }
79256
79257-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
79258+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
79259 {
79260 }
79261
79262diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
79263index bd791e4..8617c34f 100644
79264--- a/include/linux/mmzone.h
79265+++ b/include/linux/mmzone.h
79266@@ -396,7 +396,7 @@ struct zone {
79267 unsigned long flags; /* zone flags, see below */
79268
79269 /* Zone statistics */
79270- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79271+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79272
79273 /*
79274 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
79275diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
79276index 45e9214..a7227d6 100644
79277--- a/include/linux/mod_devicetable.h
79278+++ b/include/linux/mod_devicetable.h
79279@@ -13,7 +13,7 @@
79280 typedef unsigned long kernel_ulong_t;
79281 #endif
79282
79283-#define PCI_ANY_ID (~0)
79284+#define PCI_ANY_ID ((__u16)~0)
79285
79286 struct pci_device_id {
79287 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
79288@@ -139,7 +139,7 @@ struct usb_device_id {
79289 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
79290 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
79291
79292-#define HID_ANY_ID (~0)
79293+#define HID_ANY_ID (~0U)
79294 #define HID_BUS_ANY 0xffff
79295 #define HID_GROUP_ANY 0x0000
79296
79297@@ -467,7 +467,7 @@ struct dmi_system_id {
79298 const char *ident;
79299 struct dmi_strmatch matches[4];
79300 void *driver_data;
79301-};
79302+} __do_const;
79303 /*
79304 * struct dmi_device_id appears during expansion of
79305 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
79306diff --git a/include/linux/module.h b/include/linux/module.h
79307index 15cd6b1..f6e2e6a 100644
79308--- a/include/linux/module.h
79309+++ b/include/linux/module.h
79310@@ -17,9 +17,11 @@
79311 #include <linux/moduleparam.h>
79312 #include <linux/tracepoint.h>
79313 #include <linux/export.h>
79314+#include <linux/fs.h>
79315
79316 #include <linux/percpu.h>
79317 #include <asm/module.h>
79318+#include <asm/pgtable.h>
79319
79320 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
79321 #define MODULE_SIG_STRING "~Module signature appended~\n"
79322@@ -43,7 +45,7 @@ struct module_kobject {
79323 struct kobject *drivers_dir;
79324 struct module_param_attrs *mp;
79325 struct completion *kobj_completion;
79326-};
79327+} __randomize_layout;
79328
79329 struct module_attribute {
79330 struct attribute attr;
79331@@ -55,12 +57,13 @@ struct module_attribute {
79332 int (*test)(struct module *);
79333 void (*free)(struct module *);
79334 };
79335+typedef struct module_attribute __no_const module_attribute_no_const;
79336
79337 struct module_version_attribute {
79338 struct module_attribute mattr;
79339 const char *module_name;
79340 const char *version;
79341-} __attribute__ ((__aligned__(sizeof(void *))));
79342+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
79343
79344 extern ssize_t __modver_version_show(struct module_attribute *,
79345 struct module_kobject *, char *);
79346@@ -238,7 +241,7 @@ struct module
79347
79348 /* Sysfs stuff. */
79349 struct module_kobject mkobj;
79350- struct module_attribute *modinfo_attrs;
79351+ module_attribute_no_const *modinfo_attrs;
79352 const char *version;
79353 const char *srcversion;
79354 struct kobject *holders_dir;
79355@@ -287,19 +290,16 @@ struct module
79356 int (*init)(void);
79357
79358 /* If this is non-NULL, vfree after init() returns */
79359- void *module_init;
79360+ void *module_init_rx, *module_init_rw;
79361
79362 /* Here is the actual code + data, vfree'd on unload. */
79363- void *module_core;
79364+ void *module_core_rx, *module_core_rw;
79365
79366 /* Here are the sizes of the init and core sections */
79367- unsigned int init_size, core_size;
79368+ unsigned int init_size_rw, core_size_rw;
79369
79370 /* The size of the executable code in each section. */
79371- unsigned int init_text_size, core_text_size;
79372-
79373- /* Size of RO sections of the module (text+rodata) */
79374- unsigned int init_ro_size, core_ro_size;
79375+ unsigned int init_size_rx, core_size_rx;
79376
79377 /* Arch-specific module values */
79378 struct mod_arch_specific arch;
79379@@ -355,6 +355,10 @@ struct module
79380 #ifdef CONFIG_EVENT_TRACING
79381 struct ftrace_event_call **trace_events;
79382 unsigned int num_trace_events;
79383+ struct file_operations trace_id;
79384+ struct file_operations trace_enable;
79385+ struct file_operations trace_format;
79386+ struct file_operations trace_filter;
79387 #endif
79388 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
79389 unsigned int num_ftrace_callsites;
79390@@ -378,7 +382,7 @@ struct module
79391 ctor_fn_t *ctors;
79392 unsigned int num_ctors;
79393 #endif
79394-};
79395+} __randomize_layout;
79396 #ifndef MODULE_ARCH_INIT
79397 #define MODULE_ARCH_INIT {}
79398 #endif
79399@@ -399,16 +403,46 @@ bool is_module_address(unsigned long addr);
79400 bool is_module_percpu_address(unsigned long addr);
79401 bool is_module_text_address(unsigned long addr);
79402
79403+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
79404+{
79405+
79406+#ifdef CONFIG_PAX_KERNEXEC
79407+ if (ktla_ktva(addr) >= (unsigned long)start &&
79408+ ktla_ktva(addr) < (unsigned long)start + size)
79409+ return 1;
79410+#endif
79411+
79412+ return ((void *)addr >= start && (void *)addr < start + size);
79413+}
79414+
79415+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
79416+{
79417+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
79418+}
79419+
79420+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
79421+{
79422+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
79423+}
79424+
79425+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
79426+{
79427+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
79428+}
79429+
79430+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
79431+{
79432+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
79433+}
79434+
79435 static inline int within_module_core(unsigned long addr, const struct module *mod)
79436 {
79437- return (unsigned long)mod->module_core <= addr &&
79438- addr < (unsigned long)mod->module_core + mod->core_size;
79439+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
79440 }
79441
79442 static inline int within_module_init(unsigned long addr, const struct module *mod)
79443 {
79444- return (unsigned long)mod->module_init <= addr &&
79445- addr < (unsigned long)mod->module_init + mod->init_size;
79446+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
79447 }
79448
79449 /* Search for module by name: must hold module_mutex. */
79450diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
79451index 560ca53..ef621ef 100644
79452--- a/include/linux/moduleloader.h
79453+++ b/include/linux/moduleloader.h
79454@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
79455 sections. Returns NULL on failure. */
79456 void *module_alloc(unsigned long size);
79457
79458+#ifdef CONFIG_PAX_KERNEXEC
79459+void *module_alloc_exec(unsigned long size);
79460+#else
79461+#define module_alloc_exec(x) module_alloc(x)
79462+#endif
79463+
79464 /* Free memory returned from module_alloc. */
79465 void module_free(struct module *mod, void *module_region);
79466
79467+#ifdef CONFIG_PAX_KERNEXEC
79468+void module_free_exec(struct module *mod, void *module_region);
79469+#else
79470+#define module_free_exec(x, y) module_free((x), (y))
79471+#endif
79472+
79473 /*
79474 * Apply the given relocation to the (simplified) ELF. Return -error
79475 * or 0.
79476@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
79477 unsigned int relsec,
79478 struct module *me)
79479 {
79480+#ifdef CONFIG_MODULES
79481 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
79482+#endif
79483 return -ENOEXEC;
79484 }
79485 #endif
79486@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
79487 unsigned int relsec,
79488 struct module *me)
79489 {
79490+#ifdef CONFIG_MODULES
79491 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
79492+#endif
79493 return -ENOEXEC;
79494 }
79495 #endif
79496diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
79497index c3eb102..073c4a6 100644
79498--- a/include/linux/moduleparam.h
79499+++ b/include/linux/moduleparam.h
79500@@ -295,7 +295,7 @@ static inline void __kernel_param_unlock(void)
79501 * @len is usually just sizeof(string).
79502 */
79503 #define module_param_string(name, string, len, perm) \
79504- static const struct kparam_string __param_string_##name \
79505+ static const struct kparam_string __param_string_##name __used \
79506 = { len, string }; \
79507 __module_param_call(MODULE_PARAM_PREFIX, name, \
79508 &param_ops_string, \
79509@@ -434,7 +434,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
79510 */
79511 #define module_param_array_named(name, array, type, nump, perm) \
79512 param_check_##type(name, &(array)[0]); \
79513- static const struct kparam_array __param_arr_##name \
79514+ static const struct kparam_array __param_arr_##name __used \
79515 = { .max = ARRAY_SIZE(array), .num = nump, \
79516 .ops = &param_ops_##type, \
79517 .elemsize = sizeof(array[0]), .elem = array }; \
79518diff --git a/include/linux/mount.h b/include/linux/mount.h
79519index 371d346..fba2819 100644
79520--- a/include/linux/mount.h
79521+++ b/include/linux/mount.h
79522@@ -56,7 +56,7 @@ struct vfsmount {
79523 struct dentry *mnt_root; /* root of the mounted tree */
79524 struct super_block *mnt_sb; /* pointer to superblock */
79525 int mnt_flags;
79526-};
79527+} __randomize_layout;
79528
79529 struct file; /* forward dec */
79530
79531diff --git a/include/linux/namei.h b/include/linux/namei.h
79532index 492de72..1bddcd4 100644
79533--- a/include/linux/namei.h
79534+++ b/include/linux/namei.h
79535@@ -19,7 +19,7 @@ struct nameidata {
79536 unsigned seq, m_seq;
79537 int last_type;
79538 unsigned depth;
79539- char *saved_names[MAX_NESTED_LINKS + 1];
79540+ const char *saved_names[MAX_NESTED_LINKS + 1];
79541 };
79542
79543 /*
79544@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
79545
79546 extern void nd_jump_link(struct nameidata *nd, struct path *path);
79547
79548-static inline void nd_set_link(struct nameidata *nd, char *path)
79549+static inline void nd_set_link(struct nameidata *nd, const char *path)
79550 {
79551 nd->saved_names[nd->depth] = path;
79552 }
79553
79554-static inline char *nd_get_link(struct nameidata *nd)
79555+static inline const char *nd_get_link(const struct nameidata *nd)
79556 {
79557 return nd->saved_names[nd->depth];
79558 }
79559diff --git a/include/linux/net.h b/include/linux/net.h
79560index 69be3e6..0fb422d 100644
79561--- a/include/linux/net.h
79562+++ b/include/linux/net.h
79563@@ -192,7 +192,7 @@ struct net_proto_family {
79564 int (*create)(struct net *net, struct socket *sock,
79565 int protocol, int kern);
79566 struct module *owner;
79567-};
79568+} __do_const;
79569
79570 struct iovec;
79571 struct kvec;
79572diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
79573index ce2a1f5..cb9bc8c 100644
79574--- a/include/linux/netdevice.h
79575+++ b/include/linux/netdevice.h
79576@@ -1129,6 +1129,7 @@ struct net_device_ops {
79577 struct net_device *dev,
79578 void *priv);
79579 };
79580+typedef struct net_device_ops __no_const net_device_ops_no_const;
79581
79582 /*
79583 * The DEVICE structure.
79584@@ -1211,7 +1212,7 @@ struct net_device {
79585 int iflink;
79586
79587 struct net_device_stats stats;
79588- atomic_long_t rx_dropped; /* dropped packets by core network
79589+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
79590 * Do not use this in drivers.
79591 */
79592
79593diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
79594index 2077489..a15e561 100644
79595--- a/include/linux/netfilter.h
79596+++ b/include/linux/netfilter.h
79597@@ -84,7 +84,7 @@ struct nf_sockopt_ops {
79598 #endif
79599 /* Use the module struct to lock set/get code in place */
79600 struct module *owner;
79601-};
79602+} __do_const;
79603
79604 /* Function to register/unregister hook points. */
79605 int nf_register_hook(struct nf_hook_ops *reg);
79606diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
79607index 28c7436..2d6156a 100644
79608--- a/include/linux/netfilter/nfnetlink.h
79609+++ b/include/linux/netfilter/nfnetlink.h
79610@@ -19,7 +19,7 @@ struct nfnl_callback {
79611 const struct nlattr * const cda[]);
79612 const struct nla_policy *policy; /* netlink attribute policy */
79613 const u_int16_t attr_count; /* number of nlattr's */
79614-};
79615+} __do_const;
79616
79617 struct nfnetlink_subsystem {
79618 const char *name;
79619diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
79620new file mode 100644
79621index 0000000..33f4af8
79622--- /dev/null
79623+++ b/include/linux/netfilter/xt_gradm.h
79624@@ -0,0 +1,9 @@
79625+#ifndef _LINUX_NETFILTER_XT_GRADM_H
79626+#define _LINUX_NETFILTER_XT_GRADM_H 1
79627+
79628+struct xt_gradm_mtinfo {
79629+ __u16 flags;
79630+ __u16 invflags;
79631+};
79632+
79633+#endif
79634diff --git a/include/linux/nls.h b/include/linux/nls.h
79635index 5dc635f..35f5e11 100644
79636--- a/include/linux/nls.h
79637+++ b/include/linux/nls.h
79638@@ -31,7 +31,7 @@ struct nls_table {
79639 const unsigned char *charset2upper;
79640 struct module *owner;
79641 struct nls_table *next;
79642-};
79643+} __do_const;
79644
79645 /* this value hold the maximum octet of charset */
79646 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
79647diff --git a/include/linux/notifier.h b/include/linux/notifier.h
79648index d14a4c3..a078786 100644
79649--- a/include/linux/notifier.h
79650+++ b/include/linux/notifier.h
79651@@ -54,7 +54,8 @@ struct notifier_block {
79652 notifier_fn_t notifier_call;
79653 struct notifier_block __rcu *next;
79654 int priority;
79655-};
79656+} __do_const;
79657+typedef struct notifier_block __no_const notifier_block_no_const;
79658
79659 struct atomic_notifier_head {
79660 spinlock_t lock;
79661diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
79662index b2a0f15..4d7da32 100644
79663--- a/include/linux/oprofile.h
79664+++ b/include/linux/oprofile.h
79665@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
79666 int oprofilefs_create_ro_ulong(struct dentry * root,
79667 char const * name, ulong * val);
79668
79669-/** Create a file for read-only access to an atomic_t. */
79670+/** Create a file for read-only access to an atomic_unchecked_t. */
79671 int oprofilefs_create_ro_atomic(struct dentry * root,
79672- char const * name, atomic_t * val);
79673+ char const * name, atomic_unchecked_t * val);
79674
79675 /** create a directory */
79676 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
79677diff --git a/include/linux/padata.h b/include/linux/padata.h
79678index 4386946..f50c615 100644
79679--- a/include/linux/padata.h
79680+++ b/include/linux/padata.h
79681@@ -129,7 +129,7 @@ struct parallel_data {
79682 struct padata_serial_queue __percpu *squeue;
79683 atomic_t reorder_objects;
79684 atomic_t refcnt;
79685- atomic_t seq_nr;
79686+ atomic_unchecked_t seq_nr;
79687 struct padata_cpumask cpumask;
79688 spinlock_t lock ____cacheline_aligned;
79689 unsigned int processed;
79690diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
79691index a2e2f1d..8a391d2 100644
79692--- a/include/linux/pci_hotplug.h
79693+++ b/include/linux/pci_hotplug.h
79694@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
79695 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
79696 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
79697 int (*reset_slot) (struct hotplug_slot *slot, int probe);
79698-};
79699+} __do_const;
79700+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
79701
79702 /**
79703 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
79704diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
79705index 2e069d1..27054b8 100644
79706--- a/include/linux/perf_event.h
79707+++ b/include/linux/perf_event.h
79708@@ -327,8 +327,8 @@ struct perf_event {
79709
79710 enum perf_event_active_state state;
79711 unsigned int attach_state;
79712- local64_t count;
79713- atomic64_t child_count;
79714+ local64_t count; /* PaX: fix it one day */
79715+ atomic64_unchecked_t child_count;
79716
79717 /*
79718 * These are the total time in nanoseconds that the event
79719@@ -379,8 +379,8 @@ struct perf_event {
79720 * These accumulate total time (in nanoseconds) that children
79721 * events have been enabled and running, respectively.
79722 */
79723- atomic64_t child_total_time_enabled;
79724- atomic64_t child_total_time_running;
79725+ atomic64_unchecked_t child_total_time_enabled;
79726+ atomic64_unchecked_t child_total_time_running;
79727
79728 /*
79729 * Protect attach/detach and child_list:
79730@@ -707,7 +707,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
79731 entry->ip[entry->nr++] = ip;
79732 }
79733
79734-extern int sysctl_perf_event_paranoid;
79735+extern int sysctl_perf_event_legitimately_concerned;
79736 extern int sysctl_perf_event_mlock;
79737 extern int sysctl_perf_event_sample_rate;
79738 extern int sysctl_perf_cpu_time_max_percent;
79739@@ -722,19 +722,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
79740 loff_t *ppos);
79741
79742
79743+static inline bool perf_paranoid_any(void)
79744+{
79745+ return sysctl_perf_event_legitimately_concerned > 2;
79746+}
79747+
79748 static inline bool perf_paranoid_tracepoint_raw(void)
79749 {
79750- return sysctl_perf_event_paranoid > -1;
79751+ return sysctl_perf_event_legitimately_concerned > -1;
79752 }
79753
79754 static inline bool perf_paranoid_cpu(void)
79755 {
79756- return sysctl_perf_event_paranoid > 0;
79757+ return sysctl_perf_event_legitimately_concerned > 0;
79758 }
79759
79760 static inline bool perf_paranoid_kernel(void)
79761 {
79762- return sysctl_perf_event_paranoid > 1;
79763+ return sysctl_perf_event_legitimately_concerned > 1;
79764 }
79765
79766 extern void perf_event_init(void);
79767@@ -850,7 +855,7 @@ struct perf_pmu_events_attr {
79768 struct device_attribute attr;
79769 u64 id;
79770 const char *event_str;
79771-};
79772+} __do_const;
79773
79774 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
79775 static struct perf_pmu_events_attr _var = { \
79776diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
79777index 7246ef3..1539ea4 100644
79778--- a/include/linux/pid_namespace.h
79779+++ b/include/linux/pid_namespace.h
79780@@ -43,7 +43,7 @@ struct pid_namespace {
79781 int hide_pid;
79782 int reboot; /* group exit code if this pidns was rebooted */
79783 unsigned int proc_inum;
79784-};
79785+} __randomize_layout;
79786
79787 extern struct pid_namespace init_pid_ns;
79788
79789diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
79790index b8809fe..ae4ccd0 100644
79791--- a/include/linux/pipe_fs_i.h
79792+++ b/include/linux/pipe_fs_i.h
79793@@ -47,10 +47,10 @@ struct pipe_inode_info {
79794 struct mutex mutex;
79795 wait_queue_head_t wait;
79796 unsigned int nrbufs, curbuf, buffers;
79797- unsigned int readers;
79798- unsigned int writers;
79799- unsigned int files;
79800- unsigned int waiting_writers;
79801+ atomic_t readers;
79802+ atomic_t writers;
79803+ atomic_t files;
79804+ atomic_t waiting_writers;
79805 unsigned int r_counter;
79806 unsigned int w_counter;
79807 struct page *tmp_page;
79808diff --git a/include/linux/pm.h b/include/linux/pm.h
79809index a224c7f..92d8a97 100644
79810--- a/include/linux/pm.h
79811+++ b/include/linux/pm.h
79812@@ -576,6 +576,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
79813 struct dev_pm_domain {
79814 struct dev_pm_ops ops;
79815 };
79816+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
79817
79818 /*
79819 * The PM_EVENT_ messages are also used by drivers implementing the legacy
79820diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
79821index 7c1d252..c5c773e 100644
79822--- a/include/linux/pm_domain.h
79823+++ b/include/linux/pm_domain.h
79824@@ -48,7 +48,7 @@ struct gpd_dev_ops {
79825
79826 struct gpd_cpu_data {
79827 unsigned int saved_exit_latency;
79828- struct cpuidle_state *idle_state;
79829+ cpuidle_state_no_const *idle_state;
79830 };
79831
79832 struct generic_pm_domain {
79833diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
79834index 6fa7cea..7bf6415 100644
79835--- a/include/linux/pm_runtime.h
79836+++ b/include/linux/pm_runtime.h
79837@@ -103,7 +103,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
79838
79839 static inline void pm_runtime_mark_last_busy(struct device *dev)
79840 {
79841- ACCESS_ONCE(dev->power.last_busy) = jiffies;
79842+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
79843 }
79844
79845 #else /* !CONFIG_PM_RUNTIME */
79846diff --git a/include/linux/pnp.h b/include/linux/pnp.h
79847index 195aafc..49a7bc2 100644
79848--- a/include/linux/pnp.h
79849+++ b/include/linux/pnp.h
79850@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
79851 struct pnp_fixup {
79852 char id[7];
79853 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
79854-};
79855+} __do_const;
79856
79857 /* config parameters */
79858 #define PNP_CONFIG_NORMAL 0x0001
79859diff --git a/include/linux/poison.h b/include/linux/poison.h
79860index 2110a81..13a11bb 100644
79861--- a/include/linux/poison.h
79862+++ b/include/linux/poison.h
79863@@ -19,8 +19,8 @@
79864 * under normal circumstances, used to verify that nobody uses
79865 * non-initialized list entries.
79866 */
79867-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
79868-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
79869+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
79870+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
79871
79872 /********** include/linux/timer.h **********/
79873 /*
79874diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
79875index d8b187c3..9a9257a 100644
79876--- a/include/linux/power/smartreflex.h
79877+++ b/include/linux/power/smartreflex.h
79878@@ -238,7 +238,7 @@ struct omap_sr_class_data {
79879 int (*notify)(struct omap_sr *sr, u32 status);
79880 u8 notify_flags;
79881 u8 class_type;
79882-};
79883+} __do_const;
79884
79885 /**
79886 * struct omap_sr_nvalue_table - Smartreflex n-target value info
79887diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
79888index 4ea1d37..80f4b33 100644
79889--- a/include/linux/ppp-comp.h
79890+++ b/include/linux/ppp-comp.h
79891@@ -84,7 +84,7 @@ struct compressor {
79892 struct module *owner;
79893 /* Extra skb space needed by the compressor algorithm */
79894 unsigned int comp_extra;
79895-};
79896+} __do_const;
79897
79898 /*
79899 * The return value from decompress routine is the length of the
79900diff --git a/include/linux/preempt.h b/include/linux/preempt.h
79901index a3d9dc8..8af9922 100644
79902--- a/include/linux/preempt.h
79903+++ b/include/linux/preempt.h
79904@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
79905 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
79906 #endif
79907
79908+#define raw_preempt_count_add(val) __preempt_count_add(val)
79909+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
79910+
79911 #define __preempt_count_inc() __preempt_count_add(1)
79912 #define __preempt_count_dec() __preempt_count_sub(1)
79913
79914 #define preempt_count_inc() preempt_count_add(1)
79915+#define raw_preempt_count_inc() raw_preempt_count_add(1)
79916 #define preempt_count_dec() preempt_count_sub(1)
79917+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
79918
79919 #ifdef CONFIG_PREEMPT_COUNT
79920
79921@@ -41,6 +46,12 @@ do { \
79922 barrier(); \
79923 } while (0)
79924
79925+#define raw_preempt_disable() \
79926+do { \
79927+ raw_preempt_count_inc(); \
79928+ barrier(); \
79929+} while (0)
79930+
79931 #define sched_preempt_enable_no_resched() \
79932 do { \
79933 barrier(); \
79934@@ -49,6 +60,12 @@ do { \
79935
79936 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
79937
79938+#define raw_preempt_enable_no_resched() \
79939+do { \
79940+ barrier(); \
79941+ raw_preempt_count_dec(); \
79942+} while (0)
79943+
79944 #ifdef CONFIG_PREEMPT
79945 #define preempt_enable() \
79946 do { \
79947@@ -105,8 +122,10 @@ do { \
79948 * region.
79949 */
79950 #define preempt_disable() barrier()
79951+#define raw_preempt_disable() barrier()
79952 #define sched_preempt_enable_no_resched() barrier()
79953 #define preempt_enable_no_resched() barrier()
79954+#define raw_preempt_enable_no_resched() barrier()
79955 #define preempt_enable() barrier()
79956 #define preempt_check_resched() do { } while (0)
79957
79958diff --git a/include/linux/printk.h b/include/linux/printk.h
79959index 6949258..7c4730e 100644
79960--- a/include/linux/printk.h
79961+++ b/include/linux/printk.h
79962@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
79963 void early_printk(const char *s, ...) { }
79964 #endif
79965
79966+extern int kptr_restrict;
79967+
79968 #ifdef CONFIG_PRINTK
79969 asmlinkage __printf(5, 0)
79970 int vprintk_emit(int facility, int level,
79971@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
79972
79973 extern int printk_delay_msec;
79974 extern int dmesg_restrict;
79975-extern int kptr_restrict;
79976
79977 extern void wake_up_klogd(void);
79978
79979diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
79980index 608e60a..c26f864 100644
79981--- a/include/linux/proc_fs.h
79982+++ b/include/linux/proc_fs.h
79983@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
79984 return proc_create_data(name, mode, parent, proc_fops, NULL);
79985 }
79986
79987+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
79988+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
79989+{
79990+#ifdef CONFIG_GRKERNSEC_PROC_USER
79991+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
79992+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79993+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
79994+#else
79995+ return proc_create_data(name, mode, parent, proc_fops, NULL);
79996+#endif
79997+}
79998+
79999+
80000 extern void proc_set_size(struct proc_dir_entry *, loff_t);
80001 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
80002 extern void *PDE_DATA(const struct inode *);
80003diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
80004index 34a1e10..70f6bde 100644
80005--- a/include/linux/proc_ns.h
80006+++ b/include/linux/proc_ns.h
80007@@ -14,7 +14,7 @@ struct proc_ns_operations {
80008 void (*put)(void *ns);
80009 int (*install)(struct nsproxy *nsproxy, void *ns);
80010 unsigned int (*inum)(void *ns);
80011-};
80012+} __do_const __randomize_layout;
80013
80014 struct proc_ns {
80015 void *ns;
80016diff --git a/include/linux/quota.h b/include/linux/quota.h
80017index cc7494a..1e27036 100644
80018--- a/include/linux/quota.h
80019+++ b/include/linux/quota.h
80020@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
80021
80022 extern bool qid_eq(struct kqid left, struct kqid right);
80023 extern bool qid_lt(struct kqid left, struct kqid right);
80024-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
80025+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
80026 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
80027 extern bool qid_valid(struct kqid qid);
80028
80029diff --git a/include/linux/random.h b/include/linux/random.h
80030index 4002b3d..d5ad855 100644
80031--- a/include/linux/random.h
80032+++ b/include/linux/random.h
80033@@ -10,9 +10,19 @@
80034
80035
80036 extern void add_device_randomness(const void *, unsigned int);
80037+
80038+static inline void add_latent_entropy(void)
80039+{
80040+
80041+#ifdef LATENT_ENTROPY_PLUGIN
80042+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
80043+#endif
80044+
80045+}
80046+
80047 extern void add_input_randomness(unsigned int type, unsigned int code,
80048- unsigned int value);
80049-extern void add_interrupt_randomness(int irq, int irq_flags);
80050+ unsigned int value) __latent_entropy;
80051+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
80052
80053 extern void get_random_bytes(void *buf, int nbytes);
80054 extern void get_random_bytes_arch(void *buf, int nbytes);
80055@@ -23,10 +33,10 @@ extern int random_int_secret_init(void);
80056 extern const struct file_operations random_fops, urandom_fops;
80057 #endif
80058
80059-unsigned int get_random_int(void);
80060+unsigned int __intentional_overflow(-1) get_random_int(void);
80061 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
80062
80063-u32 prandom_u32(void);
80064+u32 prandom_u32(void) __intentional_overflow(-1);
80065 void prandom_bytes(void *buf, int nbytes);
80066 void prandom_seed(u32 seed);
80067 void prandom_reseed_late(void);
80068@@ -38,6 +48,11 @@ struct rnd_state {
80069 u32 prandom_u32_state(struct rnd_state *state);
80070 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
80071
80072+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
80073+{
80074+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
80075+}
80076+
80077 /*
80078 * Handle minimum values for seeds
80079 */
80080diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
80081index fea49b5..2ac22bb 100644
80082--- a/include/linux/rbtree_augmented.h
80083+++ b/include/linux/rbtree_augmented.h
80084@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
80085 old->rbaugmented = rbcompute(old); \
80086 } \
80087 rbstatic const struct rb_augment_callbacks rbname = { \
80088- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
80089+ .propagate = rbname ## _propagate, \
80090+ .copy = rbname ## _copy, \
80091+ .rotate = rbname ## _rotate \
80092 };
80093
80094
80095diff --git a/include/linux/rculist.h b/include/linux/rculist.h
80096index 45a0a9e..e83788e 100644
80097--- a/include/linux/rculist.h
80098+++ b/include/linux/rculist.h
80099@@ -29,8 +29,8 @@
80100 */
80101 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
80102 {
80103- ACCESS_ONCE(list->next) = list;
80104- ACCESS_ONCE(list->prev) = list;
80105+ ACCESS_ONCE_RW(list->next) = list;
80106+ ACCESS_ONCE_RW(list->prev) = list;
80107 }
80108
80109 /*
80110@@ -59,6 +59,9 @@ extern void __list_add_rcu(struct list_head *new,
80111 struct list_head *prev, struct list_head *next);
80112 #endif
80113
80114+extern void __pax_list_add_rcu(struct list_head *new,
80115+ struct list_head *prev, struct list_head *next);
80116+
80117 /**
80118 * list_add_rcu - add a new entry to rcu-protected list
80119 * @new: new entry to be added
80120@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
80121 __list_add_rcu(new, head, head->next);
80122 }
80123
80124+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
80125+{
80126+ __pax_list_add_rcu(new, head, head->next);
80127+}
80128+
80129 /**
80130 * list_add_tail_rcu - add a new entry to rcu-protected list
80131 * @new: new entry to be added
80132@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
80133 __list_add_rcu(new, head->prev, head);
80134 }
80135
80136+static inline void pax_list_add_tail_rcu(struct list_head *new,
80137+ struct list_head *head)
80138+{
80139+ __pax_list_add_rcu(new, head->prev, head);
80140+}
80141+
80142 /**
80143 * list_del_rcu - deletes entry from list without re-initialization
80144 * @entry: the element to delete from the list.
80145@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
80146 entry->prev = LIST_POISON2;
80147 }
80148
80149+extern void pax_list_del_rcu(struct list_head *entry);
80150+
80151 /**
80152 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
80153 * @n: the element to delete from the hash list.
80154diff --git a/include/linux/reboot.h b/include/linux/reboot.h
80155index 9e7db9e..7d4fd72 100644
80156--- a/include/linux/reboot.h
80157+++ b/include/linux/reboot.h
80158@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
80159 */
80160
80161 extern void migrate_to_reboot_cpu(void);
80162-extern void machine_restart(char *cmd);
80163-extern void machine_halt(void);
80164-extern void machine_power_off(void);
80165+extern void machine_restart(char *cmd) __noreturn;
80166+extern void machine_halt(void) __noreturn;
80167+extern void machine_power_off(void) __noreturn;
80168
80169 extern void machine_shutdown(void);
80170 struct pt_regs;
80171@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
80172 */
80173
80174 extern void kernel_restart_prepare(char *cmd);
80175-extern void kernel_restart(char *cmd);
80176-extern void kernel_halt(void);
80177-extern void kernel_power_off(void);
80178+extern void kernel_restart(char *cmd) __noreturn;
80179+extern void kernel_halt(void) __noreturn;
80180+extern void kernel_power_off(void) __noreturn;
80181
80182 extern int C_A_D; /* for sysctl */
80183 void ctrl_alt_del(void);
80184@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
80185 * Emergency restart, callable from an interrupt handler.
80186 */
80187
80188-extern void emergency_restart(void);
80189+extern void emergency_restart(void) __noreturn;
80190 #include <asm/emergency-restart.h>
80191
80192 #endif /* _LINUX_REBOOT_H */
80193diff --git a/include/linux/regset.h b/include/linux/regset.h
80194index 8e0c9fe..ac4d221 100644
80195--- a/include/linux/regset.h
80196+++ b/include/linux/regset.h
80197@@ -161,7 +161,8 @@ struct user_regset {
80198 unsigned int align;
80199 unsigned int bias;
80200 unsigned int core_note_type;
80201-};
80202+} __do_const;
80203+typedef struct user_regset __no_const user_regset_no_const;
80204
80205 /**
80206 * struct user_regset_view - available regsets
80207diff --git a/include/linux/relay.h b/include/linux/relay.h
80208index d7c8359..818daf5 100644
80209--- a/include/linux/relay.h
80210+++ b/include/linux/relay.h
80211@@ -157,7 +157,7 @@ struct rchan_callbacks
80212 * The callback should return 0 if successful, negative if not.
80213 */
80214 int (*remove_buf_file)(struct dentry *dentry);
80215-};
80216+} __no_const;
80217
80218 /*
80219 * CONFIG_RELAY kernel API, kernel/relay.c
80220diff --git a/include/linux/rio.h b/include/linux/rio.h
80221index b71d573..2f940bd 100644
80222--- a/include/linux/rio.h
80223+++ b/include/linux/rio.h
80224@@ -355,7 +355,7 @@ struct rio_ops {
80225 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
80226 u64 rstart, u32 size, u32 flags);
80227 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
80228-};
80229+} __no_const;
80230
80231 #define RIO_RESOURCE_MEM 0x00000100
80232 #define RIO_RESOURCE_DOORBELL 0x00000200
80233diff --git a/include/linux/rmap.h b/include/linux/rmap.h
80234index 6dacb93..6174423 100644
80235--- a/include/linux/rmap.h
80236+++ b/include/linux/rmap.h
80237@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
80238 void anon_vma_init(void); /* create anon_vma_cachep */
80239 int anon_vma_prepare(struct vm_area_struct *);
80240 void unlink_anon_vmas(struct vm_area_struct *);
80241-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
80242-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
80243+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
80244+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
80245
80246 static inline void anon_vma_merge(struct vm_area_struct *vma,
80247 struct vm_area_struct *next)
80248diff --git a/include/linux/sched.h b/include/linux/sched.h
80249index 53f97eb..1d90705 100644
80250--- a/include/linux/sched.h
80251+++ b/include/linux/sched.h
80252@@ -63,6 +63,7 @@ struct bio_list;
80253 struct fs_struct;
80254 struct perf_event_context;
80255 struct blk_plug;
80256+struct linux_binprm;
80257
80258 /*
80259 * List of flags we want to share for kernel threads,
80260@@ -304,7 +305,7 @@ extern char __sched_text_start[], __sched_text_end[];
80261 extern int in_sched_functions(unsigned long addr);
80262
80263 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
80264-extern signed long schedule_timeout(signed long timeout);
80265+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
80266 extern signed long schedule_timeout_interruptible(signed long timeout);
80267 extern signed long schedule_timeout_killable(signed long timeout);
80268 extern signed long schedule_timeout_uninterruptible(signed long timeout);
80269@@ -315,6 +316,19 @@ struct nsproxy;
80270 struct user_namespace;
80271
80272 #ifdef CONFIG_MMU
80273+
80274+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
80275+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
80276+#else
80277+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
80278+{
80279+ return 0;
80280+}
80281+#endif
80282+
80283+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
80284+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
80285+
80286 extern void arch_pick_mmap_layout(struct mm_struct *mm);
80287 extern unsigned long
80288 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
80289@@ -600,6 +614,17 @@ struct signal_struct {
80290 #ifdef CONFIG_TASKSTATS
80291 struct taskstats *stats;
80292 #endif
80293+
80294+#ifdef CONFIG_GRKERNSEC
80295+ u32 curr_ip;
80296+ u32 saved_ip;
80297+ u32 gr_saddr;
80298+ u32 gr_daddr;
80299+ u16 gr_sport;
80300+ u16 gr_dport;
80301+ u8 used_accept:1;
80302+#endif
80303+
80304 #ifdef CONFIG_AUDIT
80305 unsigned audit_tty;
80306 unsigned audit_tty_log_passwd;
80307@@ -626,7 +651,7 @@ struct signal_struct {
80308 struct mutex cred_guard_mutex; /* guard against foreign influences on
80309 * credential calculations
80310 * (notably. ptrace) */
80311-};
80312+} __randomize_layout;
80313
80314 /*
80315 * Bits in flags field of signal_struct.
80316@@ -680,6 +705,14 @@ struct user_struct {
80317 struct key *session_keyring; /* UID's default session keyring */
80318 #endif
80319
80320+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
80321+ unsigned char kernel_banned;
80322+#endif
80323+#ifdef CONFIG_GRKERNSEC_BRUTE
80324+ unsigned char suid_banned;
80325+ unsigned long suid_ban_expires;
80326+#endif
80327+
80328 /* Hash table maintenance information */
80329 struct hlist_node uidhash_node;
80330 kuid_t uid;
80331@@ -687,7 +720,7 @@ struct user_struct {
80332 #ifdef CONFIG_PERF_EVENTS
80333 atomic_long_t locked_vm;
80334 #endif
80335-};
80336+} __randomize_layout;
80337
80338 extern int uids_sysfs_init(void);
80339
80340@@ -1162,8 +1195,8 @@ struct task_struct {
80341 struct list_head thread_group;
80342
80343 struct completion *vfork_done; /* for vfork() */
80344- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
80345- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
80346+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
80347+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
80348
80349 cputime_t utime, stime, utimescaled, stimescaled;
80350 cputime_t gtime;
80351@@ -1188,11 +1221,6 @@ struct task_struct {
80352 struct task_cputime cputime_expires;
80353 struct list_head cpu_timers[3];
80354
80355-/* process credentials */
80356- const struct cred __rcu *real_cred; /* objective and real subjective task
80357- * credentials (COW) */
80358- const struct cred __rcu *cred; /* effective (overridable) subjective task
80359- * credentials (COW) */
80360 char comm[TASK_COMM_LEN]; /* executable name excluding path
80361 - access with [gs]et_task_comm (which lock
80362 it with task_lock())
80363@@ -1209,6 +1237,10 @@ struct task_struct {
80364 #endif
80365 /* CPU-specific state of this task */
80366 struct thread_struct thread;
80367+/* thread_info moved to task_struct */
80368+#ifdef CONFIG_X86
80369+ struct thread_info tinfo;
80370+#endif
80371 /* filesystem information */
80372 struct fs_struct *fs;
80373 /* open file information */
80374@@ -1282,6 +1314,10 @@ struct task_struct {
80375 gfp_t lockdep_reclaim_gfp;
80376 #endif
80377
80378+/* process credentials */
80379+ const struct cred __rcu *real_cred; /* objective and real subjective task
80380+ * credentials (COW) */
80381+
80382 /* journalling filesystem info */
80383 void *journal_info;
80384
80385@@ -1320,6 +1356,10 @@ struct task_struct {
80386 /* cg_list protected by css_set_lock and tsk->alloc_lock */
80387 struct list_head cg_list;
80388 #endif
80389+
80390+ const struct cred __rcu *cred; /* effective (overridable) subjective task
80391+ * credentials (COW) */
80392+
80393 #ifdef CONFIG_FUTEX
80394 struct robust_list_head __user *robust_list;
80395 #ifdef CONFIG_COMPAT
80396@@ -1454,7 +1494,78 @@ struct task_struct {
80397 unsigned int sequential_io;
80398 unsigned int sequential_io_avg;
80399 #endif
80400-};
80401+
80402+#ifdef CONFIG_GRKERNSEC
80403+ /* grsecurity */
80404+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
80405+ u64 exec_id;
80406+#endif
80407+#ifdef CONFIG_GRKERNSEC_SETXID
80408+ const struct cred *delayed_cred;
80409+#endif
80410+ struct dentry *gr_chroot_dentry;
80411+ struct acl_subject_label *acl;
80412+ struct acl_subject_label *tmpacl;
80413+ struct acl_role_label *role;
80414+ struct file *exec_file;
80415+ unsigned long brute_expires;
80416+ u16 acl_role_id;
80417+ u8 inherited;
80418+ /* is this the task that authenticated to the special role */
80419+ u8 acl_sp_role;
80420+ u8 is_writable;
80421+ u8 brute;
80422+ u8 gr_is_chrooted;
80423+#endif
80424+
80425+} __randomize_layout;
80426+
80427+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
80428+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
80429+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
80430+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
80431+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
80432+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
80433+
80434+#ifdef CONFIG_PAX_SOFTMODE
80435+extern int pax_softmode;
80436+#endif
80437+
80438+extern int pax_check_flags(unsigned long *);
80439+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
80440+
80441+/* if tsk != current then task_lock must be held on it */
80442+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
80443+static inline unsigned long pax_get_flags(struct task_struct *tsk)
80444+{
80445+ if (likely(tsk->mm))
80446+ return tsk->mm->pax_flags;
80447+ else
80448+ return 0UL;
80449+}
80450+
80451+/* if tsk != current then task_lock must be held on it */
80452+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
80453+{
80454+ if (likely(tsk->mm)) {
80455+ tsk->mm->pax_flags = flags;
80456+ return 0;
80457+ }
80458+ return -EINVAL;
80459+}
80460+#endif
80461+
80462+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
80463+extern void pax_set_initial_flags(struct linux_binprm *bprm);
80464+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
80465+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
80466+#endif
80467+
80468+struct path;
80469+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
80470+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
80471+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
80472+extern void pax_report_refcount_overflow(struct pt_regs *regs);
80473
80474 /* Future-safe accessor for struct task_struct's cpus_allowed. */
80475 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
80476@@ -1531,7 +1642,7 @@ struct pid_namespace;
80477 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
80478 struct pid_namespace *ns);
80479
80480-static inline pid_t task_pid_nr(struct task_struct *tsk)
80481+static inline pid_t task_pid_nr(const struct task_struct *tsk)
80482 {
80483 return tsk->pid;
80484 }
80485@@ -1981,7 +2092,9 @@ void yield(void);
80486 extern struct exec_domain default_exec_domain;
80487
80488 union thread_union {
80489+#ifndef CONFIG_X86
80490 struct thread_info thread_info;
80491+#endif
80492 unsigned long stack[THREAD_SIZE/sizeof(long)];
80493 };
80494
80495@@ -2014,6 +2127,7 @@ extern struct pid_namespace init_pid_ns;
80496 */
80497
80498 extern struct task_struct *find_task_by_vpid(pid_t nr);
80499+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
80500 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
80501 struct pid_namespace *ns);
80502
80503@@ -2178,7 +2292,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
80504 extern void exit_itimers(struct signal_struct *);
80505 extern void flush_itimer_signals(void);
80506
80507-extern void do_group_exit(int);
80508+extern __noreturn void do_group_exit(int);
80509
80510 extern int allow_signal(int);
80511 extern int disallow_signal(int);
80512@@ -2369,9 +2483,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
80513
80514 #endif
80515
80516-static inline int object_is_on_stack(void *obj)
80517+static inline int object_starts_on_stack(void *obj)
80518 {
80519- void *stack = task_stack_page(current);
80520+ const void *stack = task_stack_page(current);
80521
80522 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
80523 }
80524diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
80525index 41467f8..1e4253d 100644
80526--- a/include/linux/sched/sysctl.h
80527+++ b/include/linux/sched/sysctl.h
80528@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
80529 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
80530
80531 extern int sysctl_max_map_count;
80532+extern unsigned long sysctl_heap_stack_gap;
80533
80534 extern unsigned int sysctl_sched_latency;
80535 extern unsigned int sysctl_sched_min_granularity;
80536diff --git a/include/linux/security.h b/include/linux/security.h
80537index 5623a7f..b352409 100644
80538--- a/include/linux/security.h
80539+++ b/include/linux/security.h
80540@@ -27,6 +27,7 @@
80541 #include <linux/slab.h>
80542 #include <linux/err.h>
80543 #include <linux/string.h>
80544+#include <linux/grsecurity.h>
80545
80546 struct linux_binprm;
80547 struct cred;
80548@@ -116,8 +117,6 @@ struct seq_file;
80549
80550 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
80551
80552-void reset_security_ops(void);
80553-
80554 #ifdef CONFIG_MMU
80555 extern unsigned long mmap_min_addr;
80556 extern unsigned long dac_mmap_min_addr;
80557diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
80558index dc368b8..e895209 100644
80559--- a/include/linux/semaphore.h
80560+++ b/include/linux/semaphore.h
80561@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
80562 }
80563
80564 extern void down(struct semaphore *sem);
80565-extern int __must_check down_interruptible(struct semaphore *sem);
80566+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
80567 extern int __must_check down_killable(struct semaphore *sem);
80568 extern int __must_check down_trylock(struct semaphore *sem);
80569 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
80570diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
80571index 52e0097..09625ef 100644
80572--- a/include/linux/seq_file.h
80573+++ b/include/linux/seq_file.h
80574@@ -27,6 +27,9 @@ struct seq_file {
80575 struct mutex lock;
80576 const struct seq_operations *op;
80577 int poll_event;
80578+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
80579+ u64 exec_id;
80580+#endif
80581 #ifdef CONFIG_USER_NS
80582 struct user_namespace *user_ns;
80583 #endif
80584@@ -39,6 +42,7 @@ struct seq_operations {
80585 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
80586 int (*show) (struct seq_file *m, void *v);
80587 };
80588+typedef struct seq_operations __no_const seq_operations_no_const;
80589
80590 #define SEQ_SKIP 1
80591
80592diff --git a/include/linux/shm.h b/include/linux/shm.h
80593index 429c199..4d42e38 100644
80594--- a/include/linux/shm.h
80595+++ b/include/linux/shm.h
80596@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
80597
80598 /* The task created the shm object. NULL if the task is dead. */
80599 struct task_struct *shm_creator;
80600+#ifdef CONFIG_GRKERNSEC
80601+ time_t shm_createtime;
80602+ pid_t shm_lapid;
80603+#endif
80604 };
80605
80606 /* shm_mode upper byte flags */
80607diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
80608index 6f69b3f..71ac613 100644
80609--- a/include/linux/skbuff.h
80610+++ b/include/linux/skbuff.h
80611@@ -643,7 +643,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
80612 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
80613 int node);
80614 struct sk_buff *build_skb(void *data, unsigned int frag_size);
80615-static inline struct sk_buff *alloc_skb(unsigned int size,
80616+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
80617 gfp_t priority)
80618 {
80619 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
80620@@ -750,7 +750,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
80621 */
80622 static inline int skb_queue_empty(const struct sk_buff_head *list)
80623 {
80624- return list->next == (struct sk_buff *)list;
80625+ return list->next == (const struct sk_buff *)list;
80626 }
80627
80628 /**
80629@@ -763,7 +763,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
80630 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
80631 const struct sk_buff *skb)
80632 {
80633- return skb->next == (struct sk_buff *)list;
80634+ return skb->next == (const struct sk_buff *)list;
80635 }
80636
80637 /**
80638@@ -776,7 +776,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
80639 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
80640 const struct sk_buff *skb)
80641 {
80642- return skb->prev == (struct sk_buff *)list;
80643+ return skb->prev == (const struct sk_buff *)list;
80644 }
80645
80646 /**
80647@@ -1686,7 +1686,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
80648 return skb->inner_transport_header - skb->inner_network_header;
80649 }
80650
80651-static inline int skb_network_offset(const struct sk_buff *skb)
80652+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
80653 {
80654 return skb_network_header(skb) - skb->data;
80655 }
80656@@ -1746,7 +1746,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
80657 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
80658 */
80659 #ifndef NET_SKB_PAD
80660-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
80661+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
80662 #endif
80663
80664 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
80665@@ -2345,7 +2345,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
80666 int *err);
80667 unsigned int datagram_poll(struct file *file, struct socket *sock,
80668 struct poll_table_struct *wait);
80669-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
80670+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
80671 struct iovec *to, int size);
80672 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
80673 struct iovec *iov);
80674@@ -2617,6 +2617,9 @@ static inline void nf_reset(struct sk_buff *skb)
80675 nf_bridge_put(skb->nf_bridge);
80676 skb->nf_bridge = NULL;
80677 #endif
80678+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
80679+ skb->nf_trace = 0;
80680+#endif
80681 }
80682
80683 static inline void nf_reset_trace(struct sk_buff *skb)
80684diff --git a/include/linux/slab.h b/include/linux/slab.h
80685index 1e2f4fe..df49ca6 100644
80686--- a/include/linux/slab.h
80687+++ b/include/linux/slab.h
80688@@ -14,15 +14,29 @@
80689 #include <linux/gfp.h>
80690 #include <linux/types.h>
80691 #include <linux/workqueue.h>
80692-
80693+#include <linux/err.h>
80694
80695 /*
80696 * Flags to pass to kmem_cache_create().
80697 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
80698 */
80699 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
80700+
80701+#ifdef CONFIG_PAX_USERCOPY_SLABS
80702+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
80703+#else
80704+#define SLAB_USERCOPY 0x00000000UL
80705+#endif
80706+
80707 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
80708 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
80709+
80710+#ifdef CONFIG_PAX_MEMORY_SANITIZE
80711+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
80712+#else
80713+#define SLAB_NO_SANITIZE 0x00000000UL
80714+#endif
80715+
80716 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
80717 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
80718 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
80719@@ -98,10 +112,13 @@
80720 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
80721 * Both make kfree a no-op.
80722 */
80723-#define ZERO_SIZE_PTR ((void *)16)
80724+#define ZERO_SIZE_PTR \
80725+({ \
80726+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
80727+ (void *)(-MAX_ERRNO-1L); \
80728+})
80729
80730-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
80731- (unsigned long)ZERO_SIZE_PTR)
80732+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
80733
80734 #include <linux/kmemleak.h>
80735
80736@@ -142,6 +159,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
80737 void kfree(const void *);
80738 void kzfree(const void *);
80739 size_t ksize(const void *);
80740+const char *check_heap_object(const void *ptr, unsigned long n);
80741+bool is_usercopy_object(const void *ptr);
80742
80743 /*
80744 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
80745@@ -174,7 +193,7 @@ struct kmem_cache {
80746 unsigned int align; /* Alignment as calculated */
80747 unsigned long flags; /* Active flags on the slab */
80748 const char *name; /* Slab name for sysfs */
80749- int refcount; /* Use counter */
80750+ atomic_t refcount; /* Use counter */
80751 void (*ctor)(void *); /* Called on object slot creation */
80752 struct list_head list; /* List of all slab caches on the system */
80753 };
80754@@ -248,6 +267,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
80755 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
80756 #endif
80757
80758+#ifdef CONFIG_PAX_USERCOPY_SLABS
80759+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
80760+#endif
80761+
80762 /*
80763 * Figure out which kmalloc slab an allocation of a certain size
80764 * belongs to.
80765@@ -256,7 +279,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
80766 * 2 = 120 .. 192 bytes
80767 * n = 2^(n-1) .. 2^n -1
80768 */
80769-static __always_inline int kmalloc_index(size_t size)
80770+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
80771 {
80772 if (!size)
80773 return 0;
80774@@ -299,11 +322,11 @@ static __always_inline int kmalloc_index(size_t size)
80775 }
80776 #endif /* !CONFIG_SLOB */
80777
80778-void *__kmalloc(size_t size, gfp_t flags);
80779+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
80780 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
80781
80782 #ifdef CONFIG_NUMA
80783-void *__kmalloc_node(size_t size, gfp_t flags, int node);
80784+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
80785 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
80786 #else
80787 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
80788diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
80789index 09bfffb..4fc80fb 100644
80790--- a/include/linux/slab_def.h
80791+++ b/include/linux/slab_def.h
80792@@ -36,7 +36,7 @@ struct kmem_cache {
80793 /* 4) cache creation/removal */
80794 const char *name;
80795 struct list_head list;
80796- int refcount;
80797+ atomic_t refcount;
80798 int object_size;
80799 int align;
80800
80801@@ -52,10 +52,14 @@ struct kmem_cache {
80802 unsigned long node_allocs;
80803 unsigned long node_frees;
80804 unsigned long node_overflow;
80805- atomic_t allochit;
80806- atomic_t allocmiss;
80807- atomic_t freehit;
80808- atomic_t freemiss;
80809+ atomic_unchecked_t allochit;
80810+ atomic_unchecked_t allocmiss;
80811+ atomic_unchecked_t freehit;
80812+ atomic_unchecked_t freemiss;
80813+#ifdef CONFIG_PAX_MEMORY_SANITIZE
80814+ atomic_unchecked_t sanitized;
80815+ atomic_unchecked_t not_sanitized;
80816+#endif
80817
80818 /*
80819 * If debugging is enabled, then the allocator can add additional
80820diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
80821index f56bfa9..8378a26 100644
80822--- a/include/linux/slub_def.h
80823+++ b/include/linux/slub_def.h
80824@@ -74,7 +74,7 @@ struct kmem_cache {
80825 struct kmem_cache_order_objects max;
80826 struct kmem_cache_order_objects min;
80827 gfp_t allocflags; /* gfp flags to use on each alloc */
80828- int refcount; /* Refcount for slab cache destroy */
80829+ atomic_t refcount; /* Refcount for slab cache destroy */
80830 void (*ctor)(void *);
80831 int inuse; /* Offset to metadata */
80832 int align; /* Alignment */
80833diff --git a/include/linux/smp.h b/include/linux/smp.h
80834index 5da22ee..71d8a28 100644
80835--- a/include/linux/smp.h
80836+++ b/include/linux/smp.h
80837@@ -176,7 +176,9 @@ static inline void kick_all_cpus_sync(void) { }
80838 #endif
80839
80840 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
80841+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
80842 #define put_cpu() preempt_enable()
80843+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
80844
80845 /*
80846 * Callback to arch code if there's nosmp or maxcpus=0 on the
80847diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
80848index 54f91d3..be2c379 100644
80849--- a/include/linux/sock_diag.h
80850+++ b/include/linux/sock_diag.h
80851@@ -11,7 +11,7 @@ struct sock;
80852 struct sock_diag_handler {
80853 __u8 family;
80854 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
80855-};
80856+} __do_const;
80857
80858 int sock_diag_register(const struct sock_diag_handler *h);
80859 void sock_diag_unregister(const struct sock_diag_handler *h);
80860diff --git a/include/linux/sonet.h b/include/linux/sonet.h
80861index 680f9a3..f13aeb0 100644
80862--- a/include/linux/sonet.h
80863+++ b/include/linux/sonet.h
80864@@ -7,7 +7,7 @@
80865 #include <uapi/linux/sonet.h>
80866
80867 struct k_sonet_stats {
80868-#define __HANDLE_ITEM(i) atomic_t i
80869+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80870 __SONET_ITEMS
80871 #undef __HANDLE_ITEM
80872 };
80873diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
80874index 07d8e53..dc934c9 100644
80875--- a/include/linux/sunrpc/addr.h
80876+++ b/include/linux/sunrpc/addr.h
80877@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
80878 {
80879 switch (sap->sa_family) {
80880 case AF_INET:
80881- return ntohs(((struct sockaddr_in *)sap)->sin_port);
80882+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
80883 case AF_INET6:
80884- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
80885+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
80886 }
80887 return 0;
80888 }
80889@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
80890 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
80891 const struct sockaddr *src)
80892 {
80893- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
80894+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
80895 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
80896
80897 dsin->sin_family = ssin->sin_family;
80898@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
80899 if (sa->sa_family != AF_INET6)
80900 return 0;
80901
80902- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
80903+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
80904 }
80905
80906 #endif /* _LINUX_SUNRPC_ADDR_H */
80907diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
80908index 8af2804..c7414ef 100644
80909--- a/include/linux/sunrpc/clnt.h
80910+++ b/include/linux/sunrpc/clnt.h
80911@@ -97,7 +97,7 @@ struct rpc_procinfo {
80912 unsigned int p_timer; /* Which RTT timer to use */
80913 u32 p_statidx; /* Which procedure to account */
80914 const char * p_name; /* name of procedure */
80915-};
80916+} __do_const;
80917
80918 #ifdef __KERNEL__
80919
80920diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
80921index 6eecfc2..7ada79d 100644
80922--- a/include/linux/sunrpc/svc.h
80923+++ b/include/linux/sunrpc/svc.h
80924@@ -410,7 +410,7 @@ struct svc_procedure {
80925 unsigned int pc_count; /* call count */
80926 unsigned int pc_cachetype; /* cache info (NFS) */
80927 unsigned int pc_xdrressize; /* maximum size of XDR reply */
80928-};
80929+} __do_const;
80930
80931 /*
80932 * Function prototypes.
80933diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
80934index 0b8e3e6..33e0a01 100644
80935--- a/include/linux/sunrpc/svc_rdma.h
80936+++ b/include/linux/sunrpc/svc_rdma.h
80937@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
80938 extern unsigned int svcrdma_max_requests;
80939 extern unsigned int svcrdma_max_req_size;
80940
80941-extern atomic_t rdma_stat_recv;
80942-extern atomic_t rdma_stat_read;
80943-extern atomic_t rdma_stat_write;
80944-extern atomic_t rdma_stat_sq_starve;
80945-extern atomic_t rdma_stat_rq_starve;
80946-extern atomic_t rdma_stat_rq_poll;
80947-extern atomic_t rdma_stat_rq_prod;
80948-extern atomic_t rdma_stat_sq_poll;
80949-extern atomic_t rdma_stat_sq_prod;
80950+extern atomic_unchecked_t rdma_stat_recv;
80951+extern atomic_unchecked_t rdma_stat_read;
80952+extern atomic_unchecked_t rdma_stat_write;
80953+extern atomic_unchecked_t rdma_stat_sq_starve;
80954+extern atomic_unchecked_t rdma_stat_rq_starve;
80955+extern atomic_unchecked_t rdma_stat_rq_poll;
80956+extern atomic_unchecked_t rdma_stat_rq_prod;
80957+extern atomic_unchecked_t rdma_stat_sq_poll;
80958+extern atomic_unchecked_t rdma_stat_sq_prod;
80959
80960 #define RPCRDMA_VERSION 1
80961
80962diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
80963index 8d71d65..f79586e 100644
80964--- a/include/linux/sunrpc/svcauth.h
80965+++ b/include/linux/sunrpc/svcauth.h
80966@@ -120,7 +120,7 @@ struct auth_ops {
80967 int (*release)(struct svc_rqst *rq);
80968 void (*domain_release)(struct auth_domain *);
80969 int (*set_client)(struct svc_rqst *rq);
80970-};
80971+} __do_const;
80972
80973 #define SVC_GARBAGE 1
80974 #define SVC_SYSERR 2
80975diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
80976index a5ffd32..0935dea 100644
80977--- a/include/linux/swiotlb.h
80978+++ b/include/linux/swiotlb.h
80979@@ -60,7 +60,8 @@ extern void
80980
80981 extern void
80982 swiotlb_free_coherent(struct device *hwdev, size_t size,
80983- void *vaddr, dma_addr_t dma_handle);
80984+ void *vaddr, dma_addr_t dma_handle,
80985+ struct dma_attrs *attrs);
80986
80987 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
80988 unsigned long offset, size_t size,
80989diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
80990index 94273bb..c2e05fc 100644
80991--- a/include/linux/syscalls.h
80992+++ b/include/linux/syscalls.h
80993@@ -97,8 +97,14 @@ struct sigaltstack;
80994 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
80995
80996 #define __SC_DECL(t, a) t a
80997+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
80998 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
80999-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
81000+#define __SC_LONG(t, a) __typeof( \
81001+ __builtin_choose_expr( \
81002+ sizeof(t) > sizeof(int), \
81003+ (t) 0, \
81004+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
81005+ )) a
81006 #define __SC_CAST(t, a) (t) a
81007 #define __SC_ARGS(t, a) a
81008 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
81009@@ -363,11 +369,11 @@ asmlinkage long sys_sync(void);
81010 asmlinkage long sys_fsync(unsigned int fd);
81011 asmlinkage long sys_fdatasync(unsigned int fd);
81012 asmlinkage long sys_bdflush(int func, long data);
81013-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
81014- char __user *type, unsigned long flags,
81015+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
81016+ const char __user *type, unsigned long flags,
81017 void __user *data);
81018-asmlinkage long sys_umount(char __user *name, int flags);
81019-asmlinkage long sys_oldumount(char __user *name);
81020+asmlinkage long sys_umount(const char __user *name, int flags);
81021+asmlinkage long sys_oldumount(const char __user *name);
81022 asmlinkage long sys_truncate(const char __user *path, long length);
81023 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
81024 asmlinkage long sys_stat(const char __user *filename,
81025@@ -579,7 +585,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
81026 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
81027 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
81028 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
81029- struct sockaddr __user *, int);
81030+ struct sockaddr __user *, int) __intentional_overflow(0);
81031 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
81032 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
81033 unsigned int vlen, unsigned flags);
81034diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
81035index 27b3b0b..e093dd9 100644
81036--- a/include/linux/syscore_ops.h
81037+++ b/include/linux/syscore_ops.h
81038@@ -16,7 +16,7 @@ struct syscore_ops {
81039 int (*suspend)(void);
81040 void (*resume)(void);
81041 void (*shutdown)(void);
81042-};
81043+} __do_const;
81044
81045 extern void register_syscore_ops(struct syscore_ops *ops);
81046 extern void unregister_syscore_ops(struct syscore_ops *ops);
81047diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
81048index 14a8ff2..fa95f3a 100644
81049--- a/include/linux/sysctl.h
81050+++ b/include/linux/sysctl.h
81051@@ -34,13 +34,13 @@ struct ctl_table_root;
81052 struct ctl_table_header;
81053 struct ctl_dir;
81054
81055-typedef struct ctl_table ctl_table;
81056-
81057 typedef int proc_handler (struct ctl_table *ctl, int write,
81058 void __user *buffer, size_t *lenp, loff_t *ppos);
81059
81060 extern int proc_dostring(struct ctl_table *, int,
81061 void __user *, size_t *, loff_t *);
81062+extern int proc_dostring_modpriv(struct ctl_table *, int,
81063+ void __user *, size_t *, loff_t *);
81064 extern int proc_dointvec(struct ctl_table *, int,
81065 void __user *, size_t *, loff_t *);
81066 extern int proc_dointvec_minmax(struct ctl_table *, int,
81067@@ -115,7 +115,9 @@ struct ctl_table
81068 struct ctl_table_poll *poll;
81069 void *extra1;
81070 void *extra2;
81071-};
81072+} __do_const __randomize_layout;
81073+typedef struct ctl_table __no_const ctl_table_no_const;
81074+typedef struct ctl_table ctl_table;
81075
81076 struct ctl_node {
81077 struct rb_node node;
81078diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
81079index 6695040..3d4192d 100644
81080--- a/include/linux/sysfs.h
81081+++ b/include/linux/sysfs.h
81082@@ -33,7 +33,8 @@ struct attribute {
81083 struct lock_class_key *key;
81084 struct lock_class_key skey;
81085 #endif
81086-};
81087+} __do_const;
81088+typedef struct attribute __no_const attribute_no_const;
81089
81090 /**
81091 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
81092@@ -62,7 +63,8 @@ struct attribute_group {
81093 struct attribute *, int);
81094 struct attribute **attrs;
81095 struct bin_attribute **bin_attrs;
81096-};
81097+} __do_const;
81098+typedef struct attribute_group __no_const attribute_group_no_const;
81099
81100 /**
81101 * Use these macros to make defining attributes easier. See include/linux/device.h
81102@@ -126,7 +128,8 @@ struct bin_attribute {
81103 char *, loff_t, size_t);
81104 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
81105 struct vm_area_struct *vma);
81106-};
81107+} __do_const;
81108+typedef struct bin_attribute __no_const bin_attribute_no_const;
81109
81110 /**
81111 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
81112diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
81113index 387fa7d..3fcde6b 100644
81114--- a/include/linux/sysrq.h
81115+++ b/include/linux/sysrq.h
81116@@ -16,6 +16,7 @@
81117
81118 #include <linux/errno.h>
81119 #include <linux/types.h>
81120+#include <linux/compiler.h>
81121
81122 /* Possible values of bitmask for enabling sysrq functions */
81123 /* 0x0001 is reserved for enable everything */
81124@@ -33,7 +34,7 @@ struct sysrq_key_op {
81125 char *help_msg;
81126 char *action_msg;
81127 int enable_mask;
81128-};
81129+} __do_const;
81130
81131 #ifdef CONFIG_MAGIC_SYSRQ
81132
81133diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
81134index fddbe20..0312de8 100644
81135--- a/include/linux/thread_info.h
81136+++ b/include/linux/thread_info.h
81137@@ -161,6 +161,15 @@ static inline bool test_and_clear_restore_sigmask(void)
81138 #error "no set_restore_sigmask() provided and default one won't work"
81139 #endif
81140
81141+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
81142+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
81143+{
81144+#ifndef CONFIG_PAX_USERCOPY_DEBUG
81145+ if (!__builtin_constant_p(n))
81146+#endif
81147+ __check_object_size(ptr, n, to_user);
81148+}
81149+
81150 #endif /* __KERNEL__ */
81151
81152 #endif /* _LINUX_THREAD_INFO_H */
81153diff --git a/include/linux/tty.h b/include/linux/tty.h
81154index 97d660e..6356755 100644
81155--- a/include/linux/tty.h
81156+++ b/include/linux/tty.h
81157@@ -196,7 +196,7 @@ struct tty_port {
81158 const struct tty_port_operations *ops; /* Port operations */
81159 spinlock_t lock; /* Lock protecting tty field */
81160 int blocked_open; /* Waiting to open */
81161- int count; /* Usage count */
81162+ atomic_t count; /* Usage count */
81163 wait_queue_head_t open_wait; /* Open waiters */
81164 wait_queue_head_t close_wait; /* Close waiters */
81165 wait_queue_head_t delta_msr_wait; /* Modem status change */
81166@@ -278,7 +278,7 @@ struct tty_struct {
81167 /* If the tty has a pending do_SAK, queue it here - akpm */
81168 struct work_struct SAK_work;
81169 struct tty_port *port;
81170-};
81171+} __randomize_layout;
81172
81173 /* Each of a tty's open files has private_data pointing to tty_file_private */
81174 struct tty_file_private {
81175@@ -545,7 +545,7 @@ extern int tty_port_open(struct tty_port *port,
81176 struct tty_struct *tty, struct file *filp);
81177 static inline int tty_port_users(struct tty_port *port)
81178 {
81179- return port->count + port->blocked_open;
81180+ return atomic_read(&port->count) + port->blocked_open;
81181 }
81182
81183 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
81184diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
81185index 756a609..f61242d 100644
81186--- a/include/linux/tty_driver.h
81187+++ b/include/linux/tty_driver.h
81188@@ -285,7 +285,7 @@ struct tty_operations {
81189 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
81190 #endif
81191 const struct file_operations *proc_fops;
81192-};
81193+} __do_const;
81194
81195 struct tty_driver {
81196 int magic; /* magic number for this structure */
81197@@ -319,7 +319,7 @@ struct tty_driver {
81198
81199 const struct tty_operations *ops;
81200 struct list_head tty_drivers;
81201-};
81202+} __randomize_layout;
81203
81204 extern struct list_head tty_drivers;
81205
81206diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
81207index f15c898..207b7d1 100644
81208--- a/include/linux/tty_ldisc.h
81209+++ b/include/linux/tty_ldisc.h
81210@@ -211,7 +211,7 @@ struct tty_ldisc_ops {
81211
81212 struct module *owner;
81213
81214- int refcount;
81215+ atomic_t refcount;
81216 };
81217
81218 struct tty_ldisc {
81219diff --git a/include/linux/types.h b/include/linux/types.h
81220index 4d118ba..c3ee9bf 100644
81221--- a/include/linux/types.h
81222+++ b/include/linux/types.h
81223@@ -176,10 +176,26 @@ typedef struct {
81224 int counter;
81225 } atomic_t;
81226
81227+#ifdef CONFIG_PAX_REFCOUNT
81228+typedef struct {
81229+ int counter;
81230+} atomic_unchecked_t;
81231+#else
81232+typedef atomic_t atomic_unchecked_t;
81233+#endif
81234+
81235 #ifdef CONFIG_64BIT
81236 typedef struct {
81237 long counter;
81238 } atomic64_t;
81239+
81240+#ifdef CONFIG_PAX_REFCOUNT
81241+typedef struct {
81242+ long counter;
81243+} atomic64_unchecked_t;
81244+#else
81245+typedef atomic64_t atomic64_unchecked_t;
81246+#endif
81247 #endif
81248
81249 struct list_head {
81250diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
81251index 9d8cf05..0ed74dd 100644
81252--- a/include/linux/uaccess.h
81253+++ b/include/linux/uaccess.h
81254@@ -72,11 +72,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
81255 long ret; \
81256 mm_segment_t old_fs = get_fs(); \
81257 \
81258- set_fs(KERNEL_DS); \
81259 pagefault_disable(); \
81260- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
81261- pagefault_enable(); \
81262+ set_fs(KERNEL_DS); \
81263+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
81264 set_fs(old_fs); \
81265+ pagefault_enable(); \
81266 ret; \
81267 })
81268
81269diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
81270index 8e522cbc..aa8572d 100644
81271--- a/include/linux/uidgid.h
81272+++ b/include/linux/uidgid.h
81273@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
81274
81275 #endif /* CONFIG_USER_NS */
81276
81277+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
81278+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
81279+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
81280+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
81281+
81282 #endif /* _LINUX_UIDGID_H */
81283diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
81284index 99c1b4d..562e6f3 100644
81285--- a/include/linux/unaligned/access_ok.h
81286+++ b/include/linux/unaligned/access_ok.h
81287@@ -4,34 +4,34 @@
81288 #include <linux/kernel.h>
81289 #include <asm/byteorder.h>
81290
81291-static inline u16 get_unaligned_le16(const void *p)
81292+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
81293 {
81294- return le16_to_cpup((__le16 *)p);
81295+ return le16_to_cpup((const __le16 *)p);
81296 }
81297
81298-static inline u32 get_unaligned_le32(const void *p)
81299+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
81300 {
81301- return le32_to_cpup((__le32 *)p);
81302+ return le32_to_cpup((const __le32 *)p);
81303 }
81304
81305-static inline u64 get_unaligned_le64(const void *p)
81306+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
81307 {
81308- return le64_to_cpup((__le64 *)p);
81309+ return le64_to_cpup((const __le64 *)p);
81310 }
81311
81312-static inline u16 get_unaligned_be16(const void *p)
81313+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
81314 {
81315- return be16_to_cpup((__be16 *)p);
81316+ return be16_to_cpup((const __be16 *)p);
81317 }
81318
81319-static inline u32 get_unaligned_be32(const void *p)
81320+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
81321 {
81322- return be32_to_cpup((__be32 *)p);
81323+ return be32_to_cpup((const __be32 *)p);
81324 }
81325
81326-static inline u64 get_unaligned_be64(const void *p)
81327+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
81328 {
81329- return be64_to_cpup((__be64 *)p);
81330+ return be64_to_cpup((const __be64 *)p);
81331 }
81332
81333 static inline void put_unaligned_le16(u16 val, void *p)
81334diff --git a/include/linux/usb.h b/include/linux/usb.h
81335index 512ab16..f53e1bf 100644
81336--- a/include/linux/usb.h
81337+++ b/include/linux/usb.h
81338@@ -563,7 +563,7 @@ struct usb_device {
81339 int maxchild;
81340
81341 u32 quirks;
81342- atomic_t urbnum;
81343+ atomic_unchecked_t urbnum;
81344
81345 unsigned long active_duration;
81346
81347@@ -1643,7 +1643,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
81348
81349 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
81350 __u8 request, __u8 requesttype, __u16 value, __u16 index,
81351- void *data, __u16 size, int timeout);
81352+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
81353 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
81354 void *data, int len, int *actual_length, int timeout);
81355 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
81356diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
81357index e452ba6..78f8e80 100644
81358--- a/include/linux/usb/renesas_usbhs.h
81359+++ b/include/linux/usb/renesas_usbhs.h
81360@@ -39,7 +39,7 @@ enum {
81361 */
81362 struct renesas_usbhs_driver_callback {
81363 int (*notify_hotplug)(struct platform_device *pdev);
81364-};
81365+} __no_const;
81366
81367 /*
81368 * callback functions for platform
81369diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
81370index 4836ba3..603f6ee 100644
81371--- a/include/linux/user_namespace.h
81372+++ b/include/linux/user_namespace.h
81373@@ -33,7 +33,7 @@ struct user_namespace {
81374 struct key *persistent_keyring_register;
81375 struct rw_semaphore persistent_keyring_register_sem;
81376 #endif
81377-};
81378+} __randomize_layout;
81379
81380 extern struct user_namespace init_user_ns;
81381
81382diff --git a/include/linux/utsname.h b/include/linux/utsname.h
81383index 239e277..22a5cf5 100644
81384--- a/include/linux/utsname.h
81385+++ b/include/linux/utsname.h
81386@@ -24,7 +24,7 @@ struct uts_namespace {
81387 struct new_utsname name;
81388 struct user_namespace *user_ns;
81389 unsigned int proc_inum;
81390-};
81391+} __randomize_layout;
81392 extern struct uts_namespace init_uts_ns;
81393
81394 #ifdef CONFIG_UTS_NS
81395diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
81396index 6f8fbcf..4efc177 100644
81397--- a/include/linux/vermagic.h
81398+++ b/include/linux/vermagic.h
81399@@ -25,9 +25,42 @@
81400 #define MODULE_ARCH_VERMAGIC ""
81401 #endif
81402
81403+#ifdef CONFIG_PAX_REFCOUNT
81404+#define MODULE_PAX_REFCOUNT "REFCOUNT "
81405+#else
81406+#define MODULE_PAX_REFCOUNT ""
81407+#endif
81408+
81409+#ifdef CONSTIFY_PLUGIN
81410+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
81411+#else
81412+#define MODULE_CONSTIFY_PLUGIN ""
81413+#endif
81414+
81415+#ifdef STACKLEAK_PLUGIN
81416+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
81417+#else
81418+#define MODULE_STACKLEAK_PLUGIN ""
81419+#endif
81420+
81421+#ifdef RANDSTRUCT_PLUGIN
81422+#include <generated/randomize_layout_hash.h>
81423+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
81424+#else
81425+#define MODULE_RANDSTRUCT_PLUGIN
81426+#endif
81427+
81428+#ifdef CONFIG_GRKERNSEC
81429+#define MODULE_GRSEC "GRSEC "
81430+#else
81431+#define MODULE_GRSEC ""
81432+#endif
81433+
81434 #define VERMAGIC_STRING \
81435 UTS_RELEASE " " \
81436 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
81437 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
81438- MODULE_ARCH_VERMAGIC
81439+ MODULE_ARCH_VERMAGIC \
81440+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
81441+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
81442
81443diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
81444index 502073a..a7de024 100644
81445--- a/include/linux/vga_switcheroo.h
81446+++ b/include/linux/vga_switcheroo.h
81447@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
81448
81449 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
81450
81451-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
81452-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
81453+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
81454+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
81455 #else
81456
81457 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
81458@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
81459
81460 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
81461
81462-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
81463-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
81464+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
81465+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
81466
81467 #endif
81468 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
81469diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
81470index 4b8a891..cb8df6e 100644
81471--- a/include/linux/vmalloc.h
81472+++ b/include/linux/vmalloc.h
81473@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
81474 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
81475 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
81476 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
81477+
81478+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81479+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
81480+#endif
81481+
81482 /* bits [20..32] reserved for arch specific ioremap internals */
81483
81484 /*
81485@@ -142,7 +147,7 @@ extern void free_vm_area(struct vm_struct *area);
81486
81487 /* for /dev/kmem */
81488 extern long vread(char *buf, char *addr, unsigned long count);
81489-extern long vwrite(char *buf, char *addr, unsigned long count);
81490+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
81491
81492 /*
81493 * Internals. Dont't use..
81494diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
81495index e4b9480..5a5f65a 100644
81496--- a/include/linux/vmstat.h
81497+++ b/include/linux/vmstat.h
81498@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
81499 /*
81500 * Zone based page accounting with per cpu differentials.
81501 */
81502-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81503+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81504
81505 static inline void zone_page_state_add(long x, struct zone *zone,
81506 enum zone_stat_item item)
81507 {
81508- atomic_long_add(x, &zone->vm_stat[item]);
81509- atomic_long_add(x, &vm_stat[item]);
81510+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
81511+ atomic_long_add_unchecked(x, &vm_stat[item]);
81512 }
81513
81514-static inline unsigned long global_page_state(enum zone_stat_item item)
81515+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
81516 {
81517- long x = atomic_long_read(&vm_stat[item]);
81518+ long x = atomic_long_read_unchecked(&vm_stat[item]);
81519 #ifdef CONFIG_SMP
81520 if (x < 0)
81521 x = 0;
81522@@ -109,10 +109,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
81523 return x;
81524 }
81525
81526-static inline unsigned long zone_page_state(struct zone *zone,
81527+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
81528 enum zone_stat_item item)
81529 {
81530- long x = atomic_long_read(&zone->vm_stat[item]);
81531+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
81532 #ifdef CONFIG_SMP
81533 if (x < 0)
81534 x = 0;
81535@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
81536 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
81537 enum zone_stat_item item)
81538 {
81539- long x = atomic_long_read(&zone->vm_stat[item]);
81540+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
81541
81542 #ifdef CONFIG_SMP
81543 int cpu;
81544@@ -220,8 +220,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
81545
81546 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
81547 {
81548- atomic_long_inc(&zone->vm_stat[item]);
81549- atomic_long_inc(&vm_stat[item]);
81550+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
81551+ atomic_long_inc_unchecked(&vm_stat[item]);
81552 }
81553
81554 static inline void __inc_zone_page_state(struct page *page,
81555@@ -232,8 +232,8 @@ static inline void __inc_zone_page_state(struct page *page,
81556
81557 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
81558 {
81559- atomic_long_dec(&zone->vm_stat[item]);
81560- atomic_long_dec(&vm_stat[item]);
81561+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
81562+ atomic_long_dec_unchecked(&vm_stat[item]);
81563 }
81564
81565 static inline void __dec_zone_page_state(struct page *page,
81566diff --git a/include/linux/xattr.h b/include/linux/xattr.h
81567index 91b0a68..0e9adf6 100644
81568--- a/include/linux/xattr.h
81569+++ b/include/linux/xattr.h
81570@@ -28,7 +28,7 @@ struct xattr_handler {
81571 size_t size, int handler_flags);
81572 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
81573 size_t size, int flags, int handler_flags);
81574-};
81575+} __do_const;
81576
81577 struct xattr {
81578 const char *name;
81579@@ -37,6 +37,9 @@ struct xattr {
81580 };
81581
81582 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
81583+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
81584+ssize_t pax_getxattr(struct dentry *, void *, size_t);
81585+#endif
81586 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
81587 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
81588 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
81589diff --git a/include/linux/zlib.h b/include/linux/zlib.h
81590index 9c5a6b4..09c9438 100644
81591--- a/include/linux/zlib.h
81592+++ b/include/linux/zlib.h
81593@@ -31,6 +31,7 @@
81594 #define _ZLIB_H
81595
81596 #include <linux/zconf.h>
81597+#include <linux/compiler.h>
81598
81599 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
81600 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
81601@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
81602
81603 /* basic functions */
81604
81605-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
81606+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
81607 /*
81608 Returns the number of bytes that needs to be allocated for a per-
81609 stream workspace with the specified parameters. A pointer to this
81610diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
81611index c768c9f..bdcaa5a 100644
81612--- a/include/media/v4l2-dev.h
81613+++ b/include/media/v4l2-dev.h
81614@@ -76,7 +76,7 @@ struct v4l2_file_operations {
81615 int (*mmap) (struct file *, struct vm_area_struct *);
81616 int (*open) (struct file *);
81617 int (*release) (struct file *);
81618-};
81619+} __do_const;
81620
81621 /*
81622 * Newer version of video_device, handled by videodev2.c
81623diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
81624index c9b1593..a572459 100644
81625--- a/include/media/v4l2-device.h
81626+++ b/include/media/v4l2-device.h
81627@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
81628 this function returns 0. If the name ends with a digit (e.g. cx18),
81629 then the name will be set to cx18-0 since cx180 looks really odd. */
81630 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
81631- atomic_t *instance);
81632+ atomic_unchecked_t *instance);
81633
81634 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
81635 Since the parent disappears this ensures that v4l2_dev doesn't have an
81636diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
81637index 9a36d92..0aafe2a 100644
81638--- a/include/net/9p/transport.h
81639+++ b/include/net/9p/transport.h
81640@@ -60,7 +60,7 @@ struct p9_trans_module {
81641 int (*cancel) (struct p9_client *, struct p9_req_t *req);
81642 int (*zc_request)(struct p9_client *, struct p9_req_t *,
81643 char *, char *, int , int, int, int);
81644-};
81645+} __do_const;
81646
81647 void v9fs_register_trans(struct p9_trans_module *m);
81648 void v9fs_unregister_trans(struct p9_trans_module *m);
81649diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
81650index c853b16d..37fccb7 100644
81651--- a/include/net/bluetooth/l2cap.h
81652+++ b/include/net/bluetooth/l2cap.h
81653@@ -557,7 +557,7 @@ struct l2cap_ops {
81654 long (*get_sndtimeo) (struct l2cap_chan *chan);
81655 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
81656 unsigned long len, int nb);
81657-};
81658+} __do_const;
81659
81660 struct l2cap_conn {
81661 struct hci_conn *hcon;
81662diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
81663index f2ae33d..c457cf0 100644
81664--- a/include/net/caif/cfctrl.h
81665+++ b/include/net/caif/cfctrl.h
81666@@ -52,7 +52,7 @@ struct cfctrl_rsp {
81667 void (*radioset_rsp)(void);
81668 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
81669 struct cflayer *client_layer);
81670-};
81671+} __no_const;
81672
81673 /* Link Setup Parameters for CAIF-Links. */
81674 struct cfctrl_link_param {
81675@@ -101,8 +101,8 @@ struct cfctrl_request_info {
81676 struct cfctrl {
81677 struct cfsrvl serv;
81678 struct cfctrl_rsp res;
81679- atomic_t req_seq_no;
81680- atomic_t rsp_seq_no;
81681+ atomic_unchecked_t req_seq_no;
81682+ atomic_unchecked_t rsp_seq_no;
81683 struct list_head list;
81684 /* Protects from simultaneous access to first_req list */
81685 spinlock_t info_list_lock;
81686diff --git a/include/net/flow.h b/include/net/flow.h
81687index 65ce471..b7bbe9b 100644
81688--- a/include/net/flow.h
81689+++ b/include/net/flow.h
81690@@ -222,6 +222,6 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
81691
81692 void flow_cache_flush(void);
81693 void flow_cache_flush_deferred(void);
81694-extern atomic_t flow_cache_genid;
81695+extern atomic_unchecked_t flow_cache_genid;
81696
81697 #endif
81698diff --git a/include/net/genetlink.h b/include/net/genetlink.h
81699index 1b177ed..a24a138 100644
81700--- a/include/net/genetlink.h
81701+++ b/include/net/genetlink.h
81702@@ -118,7 +118,7 @@ struct genl_ops {
81703 u8 cmd;
81704 u8 internal_flags;
81705 u8 flags;
81706-};
81707+} __do_const;
81708
81709 int __genl_register_family(struct genl_family *family);
81710
81711diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
81712index 734d9b5..48a9a4b 100644
81713--- a/include/net/gro_cells.h
81714+++ b/include/net/gro_cells.h
81715@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
81716 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
81717
81718 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
81719- atomic_long_inc(&dev->rx_dropped);
81720+ atomic_long_inc_unchecked(&dev->rx_dropped);
81721 kfree_skb(skb);
81722 return;
81723 }
81724diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
81725index c55aeed..b3393f4 100644
81726--- a/include/net/inet_connection_sock.h
81727+++ b/include/net/inet_connection_sock.h
81728@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
81729 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
81730 int (*bind_conflict)(const struct sock *sk,
81731 const struct inet_bind_bucket *tb, bool relax);
81732-};
81733+} __do_const;
81734
81735 /** inet_connection_sock - INET connection oriented sock
81736 *
81737diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
81738index f4e127a..c3d5e9c 100644
81739--- a/include/net/inetpeer.h
81740+++ b/include/net/inetpeer.h
81741@@ -47,8 +47,8 @@ struct inet_peer {
81742 */
81743 union {
81744 struct {
81745- atomic_t rid; /* Frag reception counter */
81746- atomic_t ip_id_count; /* IP ID for the next packet */
81747+ atomic_unchecked_t rid; /* Frag reception counter */
81748+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
81749 };
81750 struct rcu_head rcu;
81751 struct inet_peer *gc_next;
81752@@ -178,16 +178,13 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
81753 /* can be called with or without local BH being disabled */
81754 static inline int inet_getid(struct inet_peer *p, int more)
81755 {
81756- int old, new;
81757+ int id;
81758 more++;
81759 inet_peer_refcheck(p);
81760- do {
81761- old = atomic_read(&p->ip_id_count);
81762- new = old + more;
81763- if (!new)
81764- new = 1;
81765- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
81766- return new;
81767+ id = atomic_add_return_unchecked(more, &p->ip_id_count);
81768+ if (!id)
81769+ id = atomic_inc_return_unchecked(&p->ip_id_count);
81770+ return id;
81771 }
81772
81773 #endif /* _NET_INETPEER_H */
81774diff --git a/include/net/ip.h b/include/net/ip.h
81775index 5a25f36..2e73203 100644
81776--- a/include/net/ip.h
81777+++ b/include/net/ip.h
81778@@ -219,7 +219,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
81779
81780 void inet_get_local_port_range(struct net *net, int *low, int *high);
81781
81782-extern unsigned long *sysctl_local_reserved_ports;
81783+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
81784 static inline int inet_is_reserved_local_port(int port)
81785 {
81786 return test_bit(port, sysctl_local_reserved_ports);
81787diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
81788index 9922093..a1755d6 100644
81789--- a/include/net/ip_fib.h
81790+++ b/include/net/ip_fib.h
81791@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
81792
81793 #define FIB_RES_SADDR(net, res) \
81794 ((FIB_RES_NH(res).nh_saddr_genid == \
81795- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
81796+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
81797 FIB_RES_NH(res).nh_saddr : \
81798 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
81799 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
81800diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
81801index 5679d92..2e7a690 100644
81802--- a/include/net/ip_vs.h
81803+++ b/include/net/ip_vs.h
81804@@ -558,7 +558,7 @@ struct ip_vs_conn {
81805 struct ip_vs_conn *control; /* Master control connection */
81806 atomic_t n_control; /* Number of controlled ones */
81807 struct ip_vs_dest *dest; /* real server */
81808- atomic_t in_pkts; /* incoming packet counter */
81809+ atomic_unchecked_t in_pkts; /* incoming packet counter */
81810
81811 /* packet transmitter for different forwarding methods. If it
81812 mangles the packet, it must return NF_DROP or better NF_STOLEN,
81813@@ -705,7 +705,7 @@ struct ip_vs_dest {
81814 __be16 port; /* port number of the server */
81815 union nf_inet_addr addr; /* IP address of the server */
81816 volatile unsigned int flags; /* dest status flags */
81817- atomic_t conn_flags; /* flags to copy to conn */
81818+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
81819 atomic_t weight; /* server weight */
81820
81821 atomic_t refcnt; /* reference counter */
81822@@ -960,11 +960,11 @@ struct netns_ipvs {
81823 /* ip_vs_lblc */
81824 int sysctl_lblc_expiration;
81825 struct ctl_table_header *lblc_ctl_header;
81826- struct ctl_table *lblc_ctl_table;
81827+ ctl_table_no_const *lblc_ctl_table;
81828 /* ip_vs_lblcr */
81829 int sysctl_lblcr_expiration;
81830 struct ctl_table_header *lblcr_ctl_header;
81831- struct ctl_table *lblcr_ctl_table;
81832+ ctl_table_no_const *lblcr_ctl_table;
81833 /* ip_vs_est */
81834 struct list_head est_list; /* estimator list */
81835 spinlock_t est_lock;
81836diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
81837index 0224402..dafaf94a 100644
81838--- a/include/net/irda/ircomm_tty.h
81839+++ b/include/net/irda/ircomm_tty.h
81840@@ -35,6 +35,7 @@
81841 #include <linux/termios.h>
81842 #include <linux/timer.h>
81843 #include <linux/tty.h> /* struct tty_struct */
81844+#include <asm/local.h>
81845
81846 #include <net/irda/irias_object.h>
81847 #include <net/irda/ircomm_core.h>
81848diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
81849index 714cc9a..ea05f3e 100644
81850--- a/include/net/iucv/af_iucv.h
81851+++ b/include/net/iucv/af_iucv.h
81852@@ -149,7 +149,7 @@ struct iucv_skb_cb {
81853 struct iucv_sock_list {
81854 struct hlist_head head;
81855 rwlock_t lock;
81856- atomic_t autobind_name;
81857+ atomic_unchecked_t autobind_name;
81858 };
81859
81860 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
81861diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
81862index f3be818..bf46196 100644
81863--- a/include/net/llc_c_ac.h
81864+++ b/include/net/llc_c_ac.h
81865@@ -87,7 +87,7 @@
81866 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
81867 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
81868
81869-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
81870+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
81871
81872 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
81873 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
81874diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
81875index 3948cf1..83b28c4 100644
81876--- a/include/net/llc_c_ev.h
81877+++ b/include/net/llc_c_ev.h
81878@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
81879 return (struct llc_conn_state_ev *)skb->cb;
81880 }
81881
81882-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
81883-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
81884+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
81885+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
81886
81887 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
81888 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
81889diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
81890index 0e79cfb..f46db31 100644
81891--- a/include/net/llc_c_st.h
81892+++ b/include/net/llc_c_st.h
81893@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
81894 u8 next_state;
81895 llc_conn_ev_qfyr_t *ev_qualifiers;
81896 llc_conn_action_t *ev_actions;
81897-};
81898+} __do_const;
81899
81900 struct llc_conn_state {
81901 u8 current_state;
81902diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
81903index a61b98c..aade1eb 100644
81904--- a/include/net/llc_s_ac.h
81905+++ b/include/net/llc_s_ac.h
81906@@ -23,7 +23,7 @@
81907 #define SAP_ACT_TEST_IND 9
81908
81909 /* All action functions must look like this */
81910-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
81911+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
81912
81913 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
81914 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
81915diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
81916index 567c681..cd73ac0 100644
81917--- a/include/net/llc_s_st.h
81918+++ b/include/net/llc_s_st.h
81919@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
81920 llc_sap_ev_t ev;
81921 u8 next_state;
81922 llc_sap_action_t *ev_actions;
81923-};
81924+} __do_const;
81925
81926 struct llc_sap_state {
81927 u8 curr_state;
81928diff --git a/include/net/mac80211.h b/include/net/mac80211.h
81929index 7ceed99..d3ffaa2 100644
81930--- a/include/net/mac80211.h
81931+++ b/include/net/mac80211.h
81932@@ -4407,7 +4407,7 @@ struct rate_control_ops {
81933 void (*add_sta_debugfs)(void *priv, void *priv_sta,
81934 struct dentry *dir);
81935 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
81936-};
81937+} __do_const;
81938
81939 static inline int rate_supported(struct ieee80211_sta *sta,
81940 enum ieee80211_band band,
81941diff --git a/include/net/neighbour.h b/include/net/neighbour.h
81942index 536501a..74ad02bc 100644
81943--- a/include/net/neighbour.h
81944+++ b/include/net/neighbour.h
81945@@ -123,7 +123,7 @@ struct neigh_ops {
81946 void (*error_report)(struct neighbour *, struct sk_buff *);
81947 int (*output)(struct neighbour *, struct sk_buff *);
81948 int (*connected_output)(struct neighbour *, struct sk_buff *);
81949-};
81950+} __do_const;
81951
81952 struct pneigh_entry {
81953 struct pneigh_entry *next;
81954@@ -178,7 +178,7 @@ struct neigh_table {
81955 struct neigh_statistics __percpu *stats;
81956 struct neigh_hash_table __rcu *nht;
81957 struct pneigh_entry **phash_buckets;
81958-};
81959+} __randomize_layout;
81960
81961 #define NEIGH_PRIV_ALIGN sizeof(long long)
81962 #define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
81963diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
81964index da68c9a..c4a0720 100644
81965--- a/include/net/net_namespace.h
81966+++ b/include/net/net_namespace.h
81967@@ -124,8 +124,8 @@ struct net {
81968 struct netns_ipvs *ipvs;
81969 #endif
81970 struct sock *diag_nlsk;
81971- atomic_t fnhe_genid;
81972-};
81973+ atomic_unchecked_t fnhe_genid;
81974+} __randomize_layout;
81975
81976 /*
81977 * ifindex generation is per-net namespace, and loopback is
81978@@ -281,7 +281,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
81979 #define __net_init __init
81980 #define __net_exit __exit_refok
81981 #define __net_initdata __initdata
81982+#ifdef CONSTIFY_PLUGIN
81983 #define __net_initconst __initconst
81984+#else
81985+#define __net_initconst __initdata
81986+#endif
81987 #endif
81988
81989 struct pernet_operations {
81990@@ -291,7 +295,7 @@ struct pernet_operations {
81991 void (*exit_batch)(struct list_head *net_exit_list);
81992 int *id;
81993 size_t size;
81994-};
81995+} __do_const;
81996
81997 /*
81998 * Use these carefully. If you implement a network device and it
81999@@ -339,23 +343,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
82000
82001 static inline int rt_genid_ipv4(struct net *net)
82002 {
82003- return atomic_read(&net->ipv4.rt_genid);
82004+ return atomic_read_unchecked(&net->ipv4.rt_genid);
82005 }
82006
82007 static inline void rt_genid_bump_ipv4(struct net *net)
82008 {
82009- atomic_inc(&net->ipv4.rt_genid);
82010+ atomic_inc_unchecked(&net->ipv4.rt_genid);
82011 }
82012
82013 #if IS_ENABLED(CONFIG_IPV6)
82014 static inline int rt_genid_ipv6(struct net *net)
82015 {
82016- return atomic_read(&net->ipv6.rt_genid);
82017+ return atomic_read_unchecked(&net->ipv6.rt_genid);
82018 }
82019
82020 static inline void rt_genid_bump_ipv6(struct net *net)
82021 {
82022- atomic_inc(&net->ipv6.rt_genid);
82023+ atomic_inc_unchecked(&net->ipv6.rt_genid);
82024 }
82025 #else
82026 static inline int rt_genid_ipv6(struct net *net)
82027@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
82028
82029 static inline int fnhe_genid(struct net *net)
82030 {
82031- return atomic_read(&net->fnhe_genid);
82032+ return atomic_read_unchecked(&net->fnhe_genid);
82033 }
82034
82035 static inline void fnhe_genid_bump(struct net *net)
82036 {
82037- atomic_inc(&net->fnhe_genid);
82038+ atomic_inc_unchecked(&net->fnhe_genid);
82039 }
82040
82041 #endif /* __NET_NET_NAMESPACE_H */
82042diff --git a/include/net/netdma.h b/include/net/netdma.h
82043index 8ba8ce2..99b7fff 100644
82044--- a/include/net/netdma.h
82045+++ b/include/net/netdma.h
82046@@ -24,7 +24,7 @@
82047 #include <linux/dmaengine.h>
82048 #include <linux/skbuff.h>
82049
82050-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
82051+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
82052 struct sk_buff *skb, int offset, struct iovec *to,
82053 size_t len, struct dma_pinned_list *pinned_list);
82054
82055diff --git a/include/net/netlink.h b/include/net/netlink.h
82056index 2b47eaa..6d5bcc2 100644
82057--- a/include/net/netlink.h
82058+++ b/include/net/netlink.h
82059@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
82060 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
82061 {
82062 if (mark)
82063- skb_trim(skb, (unsigned char *) mark - skb->data);
82064+ skb_trim(skb, (const unsigned char *) mark - skb->data);
82065 }
82066
82067 /**
82068diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
82069index c9c0c53..53f24c3 100644
82070--- a/include/net/netns/conntrack.h
82071+++ b/include/net/netns/conntrack.h
82072@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
82073 struct nf_proto_net {
82074 #ifdef CONFIG_SYSCTL
82075 struct ctl_table_header *ctl_table_header;
82076- struct ctl_table *ctl_table;
82077+ ctl_table_no_const *ctl_table;
82078 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
82079 struct ctl_table_header *ctl_compat_header;
82080- struct ctl_table *ctl_compat_table;
82081+ ctl_table_no_const *ctl_compat_table;
82082 #endif
82083 #endif
82084 unsigned int users;
82085@@ -58,7 +58,7 @@ struct nf_ip_net {
82086 struct nf_icmp_net icmpv6;
82087 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
82088 struct ctl_table_header *ctl_table_header;
82089- struct ctl_table *ctl_table;
82090+ ctl_table_no_const *ctl_table;
82091 #endif
82092 };
82093
82094diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
82095index ee520cb..9a0fd88 100644
82096--- a/include/net/netns/ipv4.h
82097+++ b/include/net/netns/ipv4.h
82098@@ -72,7 +72,7 @@ struct netns_ipv4 {
82099
82100 kgid_t sysctl_ping_group_range[2];
82101
82102- atomic_t dev_addr_genid;
82103+ atomic_unchecked_t dev_addr_genid;
82104
82105 #ifdef CONFIG_IP_MROUTE
82106 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
82107@@ -82,6 +82,6 @@ struct netns_ipv4 {
82108 struct fib_rules_ops *mr_rules_ops;
82109 #endif
82110 #endif
82111- atomic_t rt_genid;
82112+ atomic_unchecked_t rt_genid;
82113 };
82114 #endif
82115diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
82116index 0fb2401..477d81c 100644
82117--- a/include/net/netns/ipv6.h
82118+++ b/include/net/netns/ipv6.h
82119@@ -71,8 +71,8 @@ struct netns_ipv6 {
82120 struct fib_rules_ops *mr6_rules_ops;
82121 #endif
82122 #endif
82123- atomic_t dev_addr_genid;
82124- atomic_t rt_genid;
82125+ atomic_unchecked_t dev_addr_genid;
82126+ atomic_unchecked_t rt_genid;
82127 };
82128
82129 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
82130diff --git a/include/net/ping.h b/include/net/ping.h
82131index 90f4841..74446a8 100644
82132--- a/include/net/ping.h
82133+++ b/include/net/ping.h
82134@@ -56,7 +56,7 @@ struct ping_iter_state {
82135 extern struct proto ping_prot;
82136 extern struct ping_table ping_table;
82137 #if IS_ENABLED(CONFIG_IPV6)
82138-extern struct pingv6_ops pingv6_ops;
82139+extern struct pingv6_ops *pingv6_ops;
82140 #endif
82141
82142 struct pingfakehdr {
82143diff --git a/include/net/protocol.h b/include/net/protocol.h
82144index fbf7676..a5e21c3 100644
82145--- a/include/net/protocol.h
82146+++ b/include/net/protocol.h
82147@@ -44,7 +44,7 @@ struct net_protocol {
82148 void (*err_handler)(struct sk_buff *skb, u32 info);
82149 unsigned int no_policy:1,
82150 netns_ok:1;
82151-};
82152+} __do_const;
82153
82154 #if IS_ENABLED(CONFIG_IPV6)
82155 struct inet6_protocol {
82156@@ -57,7 +57,7 @@ struct inet6_protocol {
82157 u8 type, u8 code, int offset,
82158 __be32 info);
82159 unsigned int flags; /* INET6_PROTO_xxx */
82160-};
82161+} __do_const;
82162
82163 #define INET6_PROTO_NOPOLICY 0x1
82164 #define INET6_PROTO_FINAL 0x2
82165diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
82166index bb13a18..e734116 100644
82167--- a/include/net/rtnetlink.h
82168+++ b/include/net/rtnetlink.h
82169@@ -79,7 +79,7 @@ struct rtnl_link_ops {
82170 const struct net_device *dev);
82171 unsigned int (*get_num_tx_queues)(void);
82172 unsigned int (*get_num_rx_queues)(void);
82173-};
82174+} __do_const;
82175
82176 int __rtnl_link_register(struct rtnl_link_ops *ops);
82177 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
82178diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
82179index 6bd44fe..96f364e 100644
82180--- a/include/net/sctp/checksum.h
82181+++ b/include/net/sctp/checksum.h
82182@@ -62,8 +62,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
82183 unsigned int offset)
82184 {
82185 struct sctphdr *sh = sctp_hdr(skb);
82186- __le32 ret, old = sh->checksum;
82187- const struct skb_checksum_ops ops = {
82188+ __le32 ret, old = sh->checksum;
82189+ static const struct skb_checksum_ops ops = {
82190 .update = sctp_csum_update,
82191 .combine = sctp_csum_combine,
82192 };
82193diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
82194index 4ef75af..5aa073a 100644
82195--- a/include/net/sctp/sm.h
82196+++ b/include/net/sctp/sm.h
82197@@ -81,7 +81,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
82198 typedef struct {
82199 sctp_state_fn_t *fn;
82200 const char *name;
82201-} sctp_sm_table_entry_t;
82202+} __do_const sctp_sm_table_entry_t;
82203
82204 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
82205 * currently in use.
82206@@ -293,7 +293,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
82207 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
82208
82209 /* Extern declarations for major data structures. */
82210-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
82211+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
82212
82213
82214 /* Get the size of a DATA chunk payload. */
82215diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
82216index 0a248b3..4dcbe5c 100644
82217--- a/include/net/sctp/structs.h
82218+++ b/include/net/sctp/structs.h
82219@@ -508,7 +508,7 @@ struct sctp_pf {
82220 struct sctp_association *asoc);
82221 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
82222 struct sctp_af *af;
82223-};
82224+} __do_const;
82225
82226
82227 /* Structure to track chunk fragments that have been acked, but peer
82228diff --git a/include/net/sock.h b/include/net/sock.h
82229index 2ef3c3e..e02013e 100644
82230--- a/include/net/sock.h
82231+++ b/include/net/sock.h
82232@@ -348,7 +348,7 @@ struct sock {
82233 unsigned int sk_napi_id;
82234 unsigned int sk_ll_usec;
82235 #endif
82236- atomic_t sk_drops;
82237+ atomic_unchecked_t sk_drops;
82238 int sk_rcvbuf;
82239
82240 struct sk_filter __rcu *sk_filter;
82241@@ -1209,7 +1209,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
82242 return ret >> PAGE_SHIFT;
82243 }
82244
82245-static inline long
82246+static inline long __intentional_overflow(-1)
82247 sk_memory_allocated(const struct sock *sk)
82248 {
82249 struct proto *prot = sk->sk_prot;
82250@@ -1813,7 +1813,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
82251 }
82252
82253 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
82254- char __user *from, char *to,
82255+ char __user *from, unsigned char *to,
82256 int copy, int offset)
82257 {
82258 if (skb->ip_summed == CHECKSUM_NONE) {
82259@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
82260 }
82261 }
82262
82263-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
82264+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
82265
82266 /**
82267 * sk_page_frag - return an appropriate page_frag
82268diff --git a/include/net/tcp.h b/include/net/tcp.h
82269index 70e55d2..c5d8d53 100644
82270--- a/include/net/tcp.h
82271+++ b/include/net/tcp.h
82272@@ -540,7 +540,7 @@ void tcp_retransmit_timer(struct sock *sk);
82273 void tcp_xmit_retransmit_queue(struct sock *);
82274 void tcp_simple_retransmit(struct sock *);
82275 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
82276-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
82277+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
82278
82279 void tcp_send_probe0(struct sock *);
82280 void tcp_send_partial(struct sock *);
82281@@ -711,8 +711,8 @@ struct tcp_skb_cb {
82282 struct inet6_skb_parm h6;
82283 #endif
82284 } header; /* For incoming frames */
82285- __u32 seq; /* Starting sequence number */
82286- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
82287+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
82288+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
82289 __u32 when; /* used to compute rtt's */
82290 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
82291
82292@@ -726,7 +726,7 @@ struct tcp_skb_cb {
82293
82294 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
82295 /* 1 byte hole */
82296- __u32 ack_seq; /* Sequence number ACK'd */
82297+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
82298 };
82299
82300 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
82301diff --git a/include/net/xfrm.h b/include/net/xfrm.h
82302index 6b82fdf..14d74d2 100644
82303--- a/include/net/xfrm.h
82304+++ b/include/net/xfrm.h
82305@@ -287,7 +287,6 @@ struct xfrm_dst;
82306 struct xfrm_policy_afinfo {
82307 unsigned short family;
82308 struct dst_ops *dst_ops;
82309- void (*garbage_collect)(struct net *net);
82310 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
82311 const xfrm_address_t *saddr,
82312 const xfrm_address_t *daddr);
82313@@ -305,7 +304,7 @@ struct xfrm_policy_afinfo {
82314 struct net_device *dev,
82315 const struct flowi *fl);
82316 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
82317-};
82318+} __do_const;
82319
82320 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
82321 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
82322@@ -344,7 +343,7 @@ struct xfrm_state_afinfo {
82323 int (*transport_finish)(struct sk_buff *skb,
82324 int async);
82325 void (*local_error)(struct sk_buff *skb, u32 mtu);
82326-};
82327+} __do_const;
82328
82329 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
82330 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
82331@@ -429,7 +428,7 @@ struct xfrm_mode {
82332 struct module *owner;
82333 unsigned int encap;
82334 int flags;
82335-};
82336+} __do_const;
82337
82338 /* Flags for xfrm_mode. */
82339 enum {
82340@@ -526,7 +525,7 @@ struct xfrm_policy {
82341 struct timer_list timer;
82342
82343 struct flow_cache_object flo;
82344- atomic_t genid;
82345+ atomic_unchecked_t genid;
82346 u32 priority;
82347 u32 index;
82348 struct xfrm_mark mark;
82349@@ -1166,6 +1165,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
82350 }
82351
82352 void xfrm_garbage_collect(struct net *net);
82353+void xfrm_garbage_collect_deferred(struct net *net);
82354
82355 #else
82356
82357@@ -1204,6 +1204,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
82358 static inline void xfrm_garbage_collect(struct net *net)
82359 {
82360 }
82361+static inline void xfrm_garbage_collect_deferred(struct net *net)
82362+{
82363+}
82364 #endif
82365
82366 static __inline__
82367diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
82368index 1017e0b..227aa4d 100644
82369--- a/include/rdma/iw_cm.h
82370+++ b/include/rdma/iw_cm.h
82371@@ -122,7 +122,7 @@ struct iw_cm_verbs {
82372 int backlog);
82373
82374 int (*destroy_listen)(struct iw_cm_id *cm_id);
82375-};
82376+} __no_const;
82377
82378 /**
82379 * iw_create_cm_id - Create an IW CM identifier.
82380diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
82381index 52beadf..598734c 100644
82382--- a/include/scsi/libfc.h
82383+++ b/include/scsi/libfc.h
82384@@ -771,6 +771,7 @@ struct libfc_function_template {
82385 */
82386 void (*disc_stop_final) (struct fc_lport *);
82387 };
82388+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
82389
82390 /**
82391 * struct fc_disc - Discovery context
82392@@ -875,7 +876,7 @@ struct fc_lport {
82393 struct fc_vport *vport;
82394
82395 /* Operational Information */
82396- struct libfc_function_template tt;
82397+ libfc_function_template_no_const tt;
82398 u8 link_up;
82399 u8 qfull;
82400 enum fc_lport_state state;
82401diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
82402index d65fbec..f80fef2 100644
82403--- a/include/scsi/scsi_device.h
82404+++ b/include/scsi/scsi_device.h
82405@@ -180,9 +180,9 @@ struct scsi_device {
82406 unsigned int max_device_blocked; /* what device_blocked counts down from */
82407 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
82408
82409- atomic_t iorequest_cnt;
82410- atomic_t iodone_cnt;
82411- atomic_t ioerr_cnt;
82412+ atomic_unchecked_t iorequest_cnt;
82413+ atomic_unchecked_t iodone_cnt;
82414+ atomic_unchecked_t ioerr_cnt;
82415
82416 struct device sdev_gendev,
82417 sdev_dev;
82418diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
82419index b797e8f..8e2c3aa 100644
82420--- a/include/scsi/scsi_transport_fc.h
82421+++ b/include/scsi/scsi_transport_fc.h
82422@@ -751,7 +751,8 @@ struct fc_function_template {
82423 unsigned long show_host_system_hostname:1;
82424
82425 unsigned long disable_target_scan:1;
82426-};
82427+} __do_const;
82428+typedef struct fc_function_template __no_const fc_function_template_no_const;
82429
82430
82431 /**
82432diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
82433index ae6c3b8..fd748ac 100644
82434--- a/include/sound/compress_driver.h
82435+++ b/include/sound/compress_driver.h
82436@@ -128,7 +128,7 @@ struct snd_compr_ops {
82437 struct snd_compr_caps *caps);
82438 int (*get_codec_caps) (struct snd_compr_stream *stream,
82439 struct snd_compr_codec_caps *codec);
82440-};
82441+} __no_const;
82442
82443 /**
82444 * struct snd_compr: Compressed device
82445diff --git a/include/sound/soc.h b/include/sound/soc.h
82446index 1f741cb..8cefc08 100644
82447--- a/include/sound/soc.h
82448+++ b/include/sound/soc.h
82449@@ -763,7 +763,7 @@ struct snd_soc_codec_driver {
82450 /* probe ordering - for components with runtime dependencies */
82451 int probe_order;
82452 int remove_order;
82453-};
82454+} __do_const;
82455
82456 /* SoC platform interface */
82457 struct snd_soc_platform_driver {
82458@@ -809,7 +809,7 @@ struct snd_soc_platform_driver {
82459 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
82460 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
82461 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
82462-};
82463+} __do_const;
82464
82465 struct snd_soc_platform {
82466 const char *name;
82467diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
82468index 321301c..2ae5cb0 100644
82469--- a/include/target/target_core_base.h
82470+++ b/include/target/target_core_base.h
82471@@ -687,7 +687,7 @@ struct se_device {
82472 atomic_long_t write_bytes;
82473 /* Active commands on this virtual SE device */
82474 atomic_t simple_cmds;
82475- atomic_t dev_ordered_id;
82476+ atomic_unchecked_t dev_ordered_id;
82477 atomic_t dev_ordered_sync;
82478 atomic_t dev_qf_count;
82479 int export_count;
82480diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
82481new file mode 100644
82482index 0000000..fb634b7
82483--- /dev/null
82484+++ b/include/trace/events/fs.h
82485@@ -0,0 +1,53 @@
82486+#undef TRACE_SYSTEM
82487+#define TRACE_SYSTEM fs
82488+
82489+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
82490+#define _TRACE_FS_H
82491+
82492+#include <linux/fs.h>
82493+#include <linux/tracepoint.h>
82494+
82495+TRACE_EVENT(do_sys_open,
82496+
82497+ TP_PROTO(const char *filename, int flags, int mode),
82498+
82499+ TP_ARGS(filename, flags, mode),
82500+
82501+ TP_STRUCT__entry(
82502+ __string( filename, filename )
82503+ __field( int, flags )
82504+ __field( int, mode )
82505+ ),
82506+
82507+ TP_fast_assign(
82508+ __assign_str(filename, filename);
82509+ __entry->flags = flags;
82510+ __entry->mode = mode;
82511+ ),
82512+
82513+ TP_printk("\"%s\" %x %o",
82514+ __get_str(filename), __entry->flags, __entry->mode)
82515+);
82516+
82517+TRACE_EVENT(open_exec,
82518+
82519+ TP_PROTO(const char *filename),
82520+
82521+ TP_ARGS(filename),
82522+
82523+ TP_STRUCT__entry(
82524+ __string( filename, filename )
82525+ ),
82526+
82527+ TP_fast_assign(
82528+ __assign_str(filename, filename);
82529+ ),
82530+
82531+ TP_printk("\"%s\"",
82532+ __get_str(filename))
82533+);
82534+
82535+#endif /* _TRACE_FS_H */
82536+
82537+/* This part must be outside protection */
82538+#include <trace/define_trace.h>
82539diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
82540index 1c09820..7f5ec79 100644
82541--- a/include/trace/events/irq.h
82542+++ b/include/trace/events/irq.h
82543@@ -36,7 +36,7 @@ struct softirq_action;
82544 */
82545 TRACE_EVENT(irq_handler_entry,
82546
82547- TP_PROTO(int irq, struct irqaction *action),
82548+ TP_PROTO(int irq, const struct irqaction *action),
82549
82550 TP_ARGS(irq, action),
82551
82552@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
82553 */
82554 TRACE_EVENT(irq_handler_exit,
82555
82556- TP_PROTO(int irq, struct irqaction *action, int ret),
82557+ TP_PROTO(int irq, const struct irqaction *action, int ret),
82558
82559 TP_ARGS(irq, action, ret),
82560
82561diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
82562index 7caf44c..23c6f27 100644
82563--- a/include/uapi/linux/a.out.h
82564+++ b/include/uapi/linux/a.out.h
82565@@ -39,6 +39,14 @@ enum machine_type {
82566 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
82567 };
82568
82569+/* Constants for the N_FLAGS field */
82570+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
82571+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
82572+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
82573+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
82574+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
82575+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
82576+
82577 #if !defined (N_MAGIC)
82578 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
82579 #endif
82580diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
82581index d876736..ccce5c0 100644
82582--- a/include/uapi/linux/byteorder/little_endian.h
82583+++ b/include/uapi/linux/byteorder/little_endian.h
82584@@ -42,51 +42,51 @@
82585
82586 static inline __le64 __cpu_to_le64p(const __u64 *p)
82587 {
82588- return (__force __le64)*p;
82589+ return (__force const __le64)*p;
82590 }
82591-static inline __u64 __le64_to_cpup(const __le64 *p)
82592+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
82593 {
82594- return (__force __u64)*p;
82595+ return (__force const __u64)*p;
82596 }
82597 static inline __le32 __cpu_to_le32p(const __u32 *p)
82598 {
82599- return (__force __le32)*p;
82600+ return (__force const __le32)*p;
82601 }
82602 static inline __u32 __le32_to_cpup(const __le32 *p)
82603 {
82604- return (__force __u32)*p;
82605+ return (__force const __u32)*p;
82606 }
82607 static inline __le16 __cpu_to_le16p(const __u16 *p)
82608 {
82609- return (__force __le16)*p;
82610+ return (__force const __le16)*p;
82611 }
82612 static inline __u16 __le16_to_cpup(const __le16 *p)
82613 {
82614- return (__force __u16)*p;
82615+ return (__force const __u16)*p;
82616 }
82617 static inline __be64 __cpu_to_be64p(const __u64 *p)
82618 {
82619- return (__force __be64)__swab64p(p);
82620+ return (__force const __be64)__swab64p(p);
82621 }
82622 static inline __u64 __be64_to_cpup(const __be64 *p)
82623 {
82624- return __swab64p((__u64 *)p);
82625+ return __swab64p((const __u64 *)p);
82626 }
82627 static inline __be32 __cpu_to_be32p(const __u32 *p)
82628 {
82629- return (__force __be32)__swab32p(p);
82630+ return (__force const __be32)__swab32p(p);
82631 }
82632-static inline __u32 __be32_to_cpup(const __be32 *p)
82633+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
82634 {
82635- return __swab32p((__u32 *)p);
82636+ return __swab32p((const __u32 *)p);
82637 }
82638 static inline __be16 __cpu_to_be16p(const __u16 *p)
82639 {
82640- return (__force __be16)__swab16p(p);
82641+ return (__force const __be16)__swab16p(p);
82642 }
82643 static inline __u16 __be16_to_cpup(const __be16 *p)
82644 {
82645- return __swab16p((__u16 *)p);
82646+ return __swab16p((const __u16 *)p);
82647 }
82648 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
82649 #define __le64_to_cpus(x) do { (void)(x); } while (0)
82650diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
82651index ef6103b..d4e65dd 100644
82652--- a/include/uapi/linux/elf.h
82653+++ b/include/uapi/linux/elf.h
82654@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
82655 #define PT_GNU_EH_FRAME 0x6474e550
82656
82657 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
82658+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
82659+
82660+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
82661+
82662+/* Constants for the e_flags field */
82663+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
82664+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
82665+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
82666+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
82667+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
82668+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
82669
82670 /*
82671 * Extended Numbering
82672@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
82673 #define DT_DEBUG 21
82674 #define DT_TEXTREL 22
82675 #define DT_JMPREL 23
82676+#define DT_FLAGS 30
82677+ #define DF_TEXTREL 0x00000004
82678 #define DT_ENCODING 32
82679 #define OLD_DT_LOOS 0x60000000
82680 #define DT_LOOS 0x6000000d
82681@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
82682 #define PF_W 0x2
82683 #define PF_X 0x1
82684
82685+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
82686+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
82687+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
82688+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
82689+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
82690+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
82691+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
82692+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
82693+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
82694+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
82695+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
82696+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
82697+
82698 typedef struct elf32_phdr{
82699 Elf32_Word p_type;
82700 Elf32_Off p_offset;
82701@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
82702 #define EI_OSABI 7
82703 #define EI_PAD 8
82704
82705+#define EI_PAX 14
82706+
82707 #define ELFMAG0 0x7f /* EI_MAG */
82708 #define ELFMAG1 'E'
82709 #define ELFMAG2 'L'
82710diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
82711index aa169c4..6a2771d 100644
82712--- a/include/uapi/linux/personality.h
82713+++ b/include/uapi/linux/personality.h
82714@@ -30,6 +30,7 @@ enum {
82715 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
82716 ADDR_NO_RANDOMIZE | \
82717 ADDR_COMPAT_LAYOUT | \
82718+ ADDR_LIMIT_3GB | \
82719 MMAP_PAGE_ZERO)
82720
82721 /*
82722diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
82723index 7530e74..e714828 100644
82724--- a/include/uapi/linux/screen_info.h
82725+++ b/include/uapi/linux/screen_info.h
82726@@ -43,7 +43,8 @@ struct screen_info {
82727 __u16 pages; /* 0x32 */
82728 __u16 vesa_attributes; /* 0x34 */
82729 __u32 capabilities; /* 0x36 */
82730- __u8 _reserved[6]; /* 0x3a */
82731+ __u16 vesapm_size; /* 0x3a */
82732+ __u8 _reserved[4]; /* 0x3c */
82733 } __attribute__((packed));
82734
82735 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
82736diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
82737index 0e011eb..82681b1 100644
82738--- a/include/uapi/linux/swab.h
82739+++ b/include/uapi/linux/swab.h
82740@@ -43,7 +43,7 @@
82741 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
82742 */
82743
82744-static inline __attribute_const__ __u16 __fswab16(__u16 val)
82745+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
82746 {
82747 #ifdef __HAVE_BUILTIN_BSWAP16__
82748 return __builtin_bswap16(val);
82749@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
82750 #endif
82751 }
82752
82753-static inline __attribute_const__ __u32 __fswab32(__u32 val)
82754+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
82755 {
82756 #ifdef __HAVE_BUILTIN_BSWAP32__
82757 return __builtin_bswap32(val);
82758@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
82759 #endif
82760 }
82761
82762-static inline __attribute_const__ __u64 __fswab64(__u64 val)
82763+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
82764 {
82765 #ifdef __HAVE_BUILTIN_BSWAP64__
82766 return __builtin_bswap64(val);
82767diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
82768index 6d67213..552fdd9 100644
82769--- a/include/uapi/linux/sysctl.h
82770+++ b/include/uapi/linux/sysctl.h
82771@@ -155,8 +155,6 @@ enum
82772 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
82773 };
82774
82775-
82776-
82777 /* CTL_VM names: */
82778 enum
82779 {
82780diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
82781index 437f1b0..0eeb38d 100644
82782--- a/include/uapi/linux/videodev2.h
82783+++ b/include/uapi/linux/videodev2.h
82784@@ -1227,7 +1227,7 @@ struct v4l2_ext_control {
82785 union {
82786 __s32 value;
82787 __s64 value64;
82788- char *string;
82789+ char __user *string;
82790 };
82791 } __attribute__ ((packed));
82792
82793diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
82794index e4629b9..6958086 100644
82795--- a/include/uapi/linux/xattr.h
82796+++ b/include/uapi/linux/xattr.h
82797@@ -63,5 +63,9 @@
82798 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
82799 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
82800
82801+/* User namespace */
82802+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
82803+#define XATTR_PAX_FLAGS_SUFFIX "flags"
82804+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
82805
82806 #endif /* _UAPI_LINUX_XATTR_H */
82807diff --git a/include/video/udlfb.h b/include/video/udlfb.h
82808index f9466fa..f4e2b81 100644
82809--- a/include/video/udlfb.h
82810+++ b/include/video/udlfb.h
82811@@ -53,10 +53,10 @@ struct dlfb_data {
82812 u32 pseudo_palette[256];
82813 int blank_mode; /*one of FB_BLANK_ */
82814 /* blit-only rendering path metrics, exposed through sysfs */
82815- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
82816- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
82817- atomic_t bytes_sent; /* to usb, after compression including overhead */
82818- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
82819+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
82820+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
82821+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
82822+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
82823 };
82824
82825 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
82826diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
82827index 30f5362..8ed8ac9 100644
82828--- a/include/video/uvesafb.h
82829+++ b/include/video/uvesafb.h
82830@@ -122,6 +122,7 @@ struct uvesafb_par {
82831 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
82832 u8 pmi_setpal; /* PMI for palette changes */
82833 u16 *pmi_base; /* protected mode interface location */
82834+ u8 *pmi_code; /* protected mode code location */
82835 void *pmi_start;
82836 void *pmi_pal;
82837 u8 *vbe_state_orig; /*
82838diff --git a/init/Kconfig b/init/Kconfig
82839index 4e5d96a..93cd8a1 100644
82840--- a/init/Kconfig
82841+++ b/init/Kconfig
82842@@ -1079,6 +1079,7 @@ endif # CGROUPS
82843
82844 config CHECKPOINT_RESTORE
82845 bool "Checkpoint/restore support" if EXPERT
82846+ depends on !GRKERNSEC
82847 default n
82848 help
82849 Enables additional kernel features in a sake of checkpoint/restore.
82850@@ -1550,7 +1551,7 @@ config SLUB_DEBUG
82851
82852 config COMPAT_BRK
82853 bool "Disable heap randomization"
82854- default y
82855+ default n
82856 help
82857 Randomizing heap placement makes heap exploits harder, but it
82858 also breaks ancient binaries (including anything libc5 based).
82859@@ -1838,7 +1839,7 @@ config INIT_ALL_POSSIBLE
82860 config STOP_MACHINE
82861 bool
82862 default y
82863- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
82864+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
82865 help
82866 Need stop_machine() primitive.
82867
82868diff --git a/init/Makefile b/init/Makefile
82869index 7bc47ee..6da2dc7 100644
82870--- a/init/Makefile
82871+++ b/init/Makefile
82872@@ -2,6 +2,9 @@
82873 # Makefile for the linux kernel.
82874 #
82875
82876+ccflags-y := $(GCC_PLUGINS_CFLAGS)
82877+asflags-y := $(GCC_PLUGINS_AFLAGS)
82878+
82879 obj-y := main.o version.o mounts.o
82880 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
82881 obj-y += noinitramfs.o
82882diff --git a/init/do_mounts.c b/init/do_mounts.c
82883index 8e5addc..c96ea61 100644
82884--- a/init/do_mounts.c
82885+++ b/init/do_mounts.c
82886@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
82887 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
82888 {
82889 struct super_block *s;
82890- int err = sys_mount(name, "/root", fs, flags, data);
82891+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
82892 if (err)
82893 return err;
82894
82895- sys_chdir("/root");
82896+ sys_chdir((const char __force_user *)"/root");
82897 s = current->fs->pwd.dentry->d_sb;
82898 ROOT_DEV = s->s_dev;
82899 printk(KERN_INFO
82900@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
82901 va_start(args, fmt);
82902 vsprintf(buf, fmt, args);
82903 va_end(args);
82904- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
82905+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
82906 if (fd >= 0) {
82907 sys_ioctl(fd, FDEJECT, 0);
82908 sys_close(fd);
82909 }
82910 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
82911- fd = sys_open("/dev/console", O_RDWR, 0);
82912+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
82913 if (fd >= 0) {
82914 sys_ioctl(fd, TCGETS, (long)&termios);
82915 termios.c_lflag &= ~ICANON;
82916 sys_ioctl(fd, TCSETSF, (long)&termios);
82917- sys_read(fd, &c, 1);
82918+ sys_read(fd, (char __user *)&c, 1);
82919 termios.c_lflag |= ICANON;
82920 sys_ioctl(fd, TCSETSF, (long)&termios);
82921 sys_close(fd);
82922@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
82923 mount_root();
82924 out:
82925 devtmpfs_mount("dev");
82926- sys_mount(".", "/", NULL, MS_MOVE, NULL);
82927- sys_chroot(".");
82928+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
82929+ sys_chroot((const char __force_user *)".");
82930 }
82931
82932 static bool is_tmpfs;
82933diff --git a/init/do_mounts.h b/init/do_mounts.h
82934index f5b978a..69dbfe8 100644
82935--- a/init/do_mounts.h
82936+++ b/init/do_mounts.h
82937@@ -15,15 +15,15 @@ extern int root_mountflags;
82938
82939 static inline int create_dev(char *name, dev_t dev)
82940 {
82941- sys_unlink(name);
82942- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
82943+ sys_unlink((char __force_user *)name);
82944+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
82945 }
82946
82947 #if BITS_PER_LONG == 32
82948 static inline u32 bstat(char *name)
82949 {
82950 struct stat64 stat;
82951- if (sys_stat64(name, &stat) != 0)
82952+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
82953 return 0;
82954 if (!S_ISBLK(stat.st_mode))
82955 return 0;
82956@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
82957 static inline u32 bstat(char *name)
82958 {
82959 struct stat stat;
82960- if (sys_newstat(name, &stat) != 0)
82961+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
82962 return 0;
82963 if (!S_ISBLK(stat.st_mode))
82964 return 0;
82965diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
82966index 3e0878e..8a9d7a0 100644
82967--- a/init/do_mounts_initrd.c
82968+++ b/init/do_mounts_initrd.c
82969@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
82970 {
82971 sys_unshare(CLONE_FS | CLONE_FILES);
82972 /* stdin/stdout/stderr for /linuxrc */
82973- sys_open("/dev/console", O_RDWR, 0);
82974+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
82975 sys_dup(0);
82976 sys_dup(0);
82977 /* move initrd over / and chdir/chroot in initrd root */
82978- sys_chdir("/root");
82979- sys_mount(".", "/", NULL, MS_MOVE, NULL);
82980- sys_chroot(".");
82981+ sys_chdir((const char __force_user *)"/root");
82982+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
82983+ sys_chroot((const char __force_user *)".");
82984 sys_setsid();
82985 return 0;
82986 }
82987@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
82988 create_dev("/dev/root.old", Root_RAM0);
82989 /* mount initrd on rootfs' /root */
82990 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
82991- sys_mkdir("/old", 0700);
82992- sys_chdir("/old");
82993+ sys_mkdir((const char __force_user *)"/old", 0700);
82994+ sys_chdir((const char __force_user *)"/old");
82995
82996 /* try loading default modules from initrd */
82997 load_default_modules();
82998@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
82999 current->flags &= ~PF_FREEZER_SKIP;
83000
83001 /* move initrd to rootfs' /old */
83002- sys_mount("..", ".", NULL, MS_MOVE, NULL);
83003+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
83004 /* switch root and cwd back to / of rootfs */
83005- sys_chroot("..");
83006+ sys_chroot((const char __force_user *)"..");
83007
83008 if (new_decode_dev(real_root_dev) == Root_RAM0) {
83009- sys_chdir("/old");
83010+ sys_chdir((const char __force_user *)"/old");
83011 return;
83012 }
83013
83014- sys_chdir("/");
83015+ sys_chdir((const char __force_user *)"/");
83016 ROOT_DEV = new_decode_dev(real_root_dev);
83017 mount_root();
83018
83019 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
83020- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
83021+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
83022 if (!error)
83023 printk("okay\n");
83024 else {
83025- int fd = sys_open("/dev/root.old", O_RDWR, 0);
83026+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
83027 if (error == -ENOENT)
83028 printk("/initrd does not exist. Ignored.\n");
83029 else
83030 printk("failed\n");
83031 printk(KERN_NOTICE "Unmounting old root\n");
83032- sys_umount("/old", MNT_DETACH);
83033+ sys_umount((char __force_user *)"/old", MNT_DETACH);
83034 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
83035 if (fd < 0) {
83036 error = fd;
83037@@ -127,11 +127,11 @@ int __init initrd_load(void)
83038 * mounted in the normal path.
83039 */
83040 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
83041- sys_unlink("/initrd.image");
83042+ sys_unlink((const char __force_user *)"/initrd.image");
83043 handle_initrd();
83044 return 1;
83045 }
83046 }
83047- sys_unlink("/initrd.image");
83048+ sys_unlink((const char __force_user *)"/initrd.image");
83049 return 0;
83050 }
83051diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
83052index 8cb6db5..d729f50 100644
83053--- a/init/do_mounts_md.c
83054+++ b/init/do_mounts_md.c
83055@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
83056 partitioned ? "_d" : "", minor,
83057 md_setup_args[ent].device_names);
83058
83059- fd = sys_open(name, 0, 0);
83060+ fd = sys_open((char __force_user *)name, 0, 0);
83061 if (fd < 0) {
83062 printk(KERN_ERR "md: open failed - cannot start "
83063 "array %s\n", name);
83064@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
83065 * array without it
83066 */
83067 sys_close(fd);
83068- fd = sys_open(name, 0, 0);
83069+ fd = sys_open((char __force_user *)name, 0, 0);
83070 sys_ioctl(fd, BLKRRPART, 0);
83071 }
83072 sys_close(fd);
83073@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
83074
83075 wait_for_device_probe();
83076
83077- fd = sys_open("/dev/md0", 0, 0);
83078+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
83079 if (fd >= 0) {
83080 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
83081 sys_close(fd);
83082diff --git a/init/init_task.c b/init/init_task.c
83083index ba0a7f36..2bcf1d5 100644
83084--- a/init/init_task.c
83085+++ b/init/init_task.c
83086@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
83087 * Initial thread structure. Alignment of this is handled by a special
83088 * linker map entry.
83089 */
83090+#ifdef CONFIG_X86
83091+union thread_union init_thread_union __init_task_data;
83092+#else
83093 union thread_union init_thread_union __init_task_data =
83094 { INIT_THREAD_INFO(init_task) };
83095+#endif
83096diff --git a/init/initramfs.c b/init/initramfs.c
83097index a67ef9d..2d17ed9 100644
83098--- a/init/initramfs.c
83099+++ b/init/initramfs.c
83100@@ -84,7 +84,7 @@ static void __init free_hash(void)
83101 }
83102 }
83103
83104-static long __init do_utime(char *filename, time_t mtime)
83105+static long __init do_utime(char __force_user *filename, time_t mtime)
83106 {
83107 struct timespec t[2];
83108
83109@@ -119,7 +119,7 @@ static void __init dir_utime(void)
83110 struct dir_entry *de, *tmp;
83111 list_for_each_entry_safe(de, tmp, &dir_list, list) {
83112 list_del(&de->list);
83113- do_utime(de->name, de->mtime);
83114+ do_utime((char __force_user *)de->name, de->mtime);
83115 kfree(de->name);
83116 kfree(de);
83117 }
83118@@ -281,7 +281,7 @@ static int __init maybe_link(void)
83119 if (nlink >= 2) {
83120 char *old = find_link(major, minor, ino, mode, collected);
83121 if (old)
83122- return (sys_link(old, collected) < 0) ? -1 : 1;
83123+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
83124 }
83125 return 0;
83126 }
83127@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
83128 {
83129 struct stat st;
83130
83131- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
83132+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
83133 if (S_ISDIR(st.st_mode))
83134- sys_rmdir(path);
83135+ sys_rmdir((char __force_user *)path);
83136 else
83137- sys_unlink(path);
83138+ sys_unlink((char __force_user *)path);
83139 }
83140 }
83141
83142@@ -315,7 +315,7 @@ static int __init do_name(void)
83143 int openflags = O_WRONLY|O_CREAT;
83144 if (ml != 1)
83145 openflags |= O_TRUNC;
83146- wfd = sys_open(collected, openflags, mode);
83147+ wfd = sys_open((char __force_user *)collected, openflags, mode);
83148
83149 if (wfd >= 0) {
83150 sys_fchown(wfd, uid, gid);
83151@@ -327,17 +327,17 @@ static int __init do_name(void)
83152 }
83153 }
83154 } else if (S_ISDIR(mode)) {
83155- sys_mkdir(collected, mode);
83156- sys_chown(collected, uid, gid);
83157- sys_chmod(collected, mode);
83158+ sys_mkdir((char __force_user *)collected, mode);
83159+ sys_chown((char __force_user *)collected, uid, gid);
83160+ sys_chmod((char __force_user *)collected, mode);
83161 dir_add(collected, mtime);
83162 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
83163 S_ISFIFO(mode) || S_ISSOCK(mode)) {
83164 if (maybe_link() == 0) {
83165- sys_mknod(collected, mode, rdev);
83166- sys_chown(collected, uid, gid);
83167- sys_chmod(collected, mode);
83168- do_utime(collected, mtime);
83169+ sys_mknod((char __force_user *)collected, mode, rdev);
83170+ sys_chown((char __force_user *)collected, uid, gid);
83171+ sys_chmod((char __force_user *)collected, mode);
83172+ do_utime((char __force_user *)collected, mtime);
83173 }
83174 }
83175 return 0;
83176@@ -346,15 +346,15 @@ static int __init do_name(void)
83177 static int __init do_copy(void)
83178 {
83179 if (count >= body_len) {
83180- sys_write(wfd, victim, body_len);
83181+ sys_write(wfd, (char __force_user *)victim, body_len);
83182 sys_close(wfd);
83183- do_utime(vcollected, mtime);
83184+ do_utime((char __force_user *)vcollected, mtime);
83185 kfree(vcollected);
83186 eat(body_len);
83187 state = SkipIt;
83188 return 0;
83189 } else {
83190- sys_write(wfd, victim, count);
83191+ sys_write(wfd, (char __force_user *)victim, count);
83192 body_len -= count;
83193 eat(count);
83194 return 1;
83195@@ -365,9 +365,9 @@ static int __init do_symlink(void)
83196 {
83197 collected[N_ALIGN(name_len) + body_len] = '\0';
83198 clean_path(collected, 0);
83199- sys_symlink(collected + N_ALIGN(name_len), collected);
83200- sys_lchown(collected, uid, gid);
83201- do_utime(collected, mtime);
83202+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
83203+ sys_lchown((char __force_user *)collected, uid, gid);
83204+ do_utime((char __force_user *)collected, mtime);
83205 state = SkipIt;
83206 next_state = Reset;
83207 return 0;
83208@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
83209 {
83210 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
83211 if (err)
83212- panic(err); /* Failed to decompress INTERNAL initramfs */
83213+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
83214 if (initrd_start) {
83215 #ifdef CONFIG_BLK_DEV_RAM
83216 int fd;
83217diff --git a/init/main.c b/init/main.c
83218index febc511..f0851763 100644
83219--- a/init/main.c
83220+++ b/init/main.c
83221@@ -103,6 +103,8 @@ static inline void mark_rodata_ro(void) { }
83222 extern void tc_init(void);
83223 #endif
83224
83225+extern void grsecurity_init(void);
83226+
83227 /*
83228 * Debug helper: via this flag we know that we are in 'early bootup code'
83229 * where only the boot processor is running with IRQ disabled. This means
83230@@ -164,6 +166,75 @@ static int __init set_reset_devices(char *str)
83231
83232 __setup("reset_devices", set_reset_devices);
83233
83234+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83235+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
83236+static int __init setup_grsec_proc_gid(char *str)
83237+{
83238+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
83239+ return 1;
83240+}
83241+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
83242+#endif
83243+
83244+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
83245+unsigned long pax_user_shadow_base __read_only;
83246+EXPORT_SYMBOL(pax_user_shadow_base);
83247+extern char pax_enter_kernel_user[];
83248+extern char pax_exit_kernel_user[];
83249+#endif
83250+
83251+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
83252+static int __init setup_pax_nouderef(char *str)
83253+{
83254+#ifdef CONFIG_X86_32
83255+ unsigned int cpu;
83256+ struct desc_struct *gdt;
83257+
83258+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
83259+ gdt = get_cpu_gdt_table(cpu);
83260+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
83261+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
83262+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
83263+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
83264+ }
83265+ loadsegment(ds, __KERNEL_DS);
83266+ loadsegment(es, __KERNEL_DS);
83267+ loadsegment(ss, __KERNEL_DS);
83268+#else
83269+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
83270+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
83271+ clone_pgd_mask = ~(pgdval_t)0UL;
83272+ pax_user_shadow_base = 0UL;
83273+ setup_clear_cpu_cap(X86_FEATURE_PCID);
83274+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
83275+#endif
83276+
83277+ return 0;
83278+}
83279+early_param("pax_nouderef", setup_pax_nouderef);
83280+
83281+#ifdef CONFIG_X86_64
83282+static int __init setup_pax_weakuderef(char *str)
83283+{
83284+ if (clone_pgd_mask != ~(pgdval_t)0UL)
83285+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
83286+ return 1;
83287+}
83288+__setup("pax_weakuderef", setup_pax_weakuderef);
83289+#endif
83290+#endif
83291+
83292+#ifdef CONFIG_PAX_SOFTMODE
83293+int pax_softmode;
83294+
83295+static int __init setup_pax_softmode(char *str)
83296+{
83297+ get_option(&str, &pax_softmode);
83298+ return 1;
83299+}
83300+__setup("pax_softmode=", setup_pax_softmode);
83301+#endif
83302+
83303 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
83304 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
83305 static const char *panic_later, *panic_param;
83306@@ -691,25 +762,24 @@ int __init_or_module do_one_initcall(initcall_t fn)
83307 {
83308 int count = preempt_count();
83309 int ret;
83310- char msgbuf[64];
83311+ const char *msg1 = "", *msg2 = "";
83312
83313 if (initcall_debug)
83314 ret = do_one_initcall_debug(fn);
83315 else
83316 ret = fn();
83317
83318- msgbuf[0] = 0;
83319-
83320 if (preempt_count() != count) {
83321- sprintf(msgbuf, "preemption imbalance ");
83322+ msg1 = " preemption imbalance";
83323 preempt_count_set(count);
83324 }
83325 if (irqs_disabled()) {
83326- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
83327+ msg2 = " disabled interrupts";
83328 local_irq_enable();
83329 }
83330- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
83331+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
83332
83333+ add_latent_entropy();
83334 return ret;
83335 }
83336
83337@@ -816,8 +886,8 @@ static int run_init_process(const char *init_filename)
83338 {
83339 argv_init[0] = init_filename;
83340 return do_execve(init_filename,
83341- (const char __user *const __user *)argv_init,
83342- (const char __user *const __user *)envp_init);
83343+ (const char __user *const __force_user *)argv_init,
83344+ (const char __user *const __force_user *)envp_init);
83345 }
83346
83347 static int try_to_run_init_process(const char *init_filename)
83348@@ -834,6 +904,10 @@ static int try_to_run_init_process(const char *init_filename)
83349 return ret;
83350 }
83351
83352+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
83353+extern int gr_init_ran;
83354+#endif
83355+
83356 static noinline void __init kernel_init_freeable(void);
83357
83358 static int __ref kernel_init(void *unused)
83359@@ -858,6 +932,11 @@ static int __ref kernel_init(void *unused)
83360 ramdisk_execute_command, ret);
83361 }
83362
83363+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
83364+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
83365+ gr_init_ran = 1;
83366+#endif
83367+
83368 /*
83369 * We try each of these until one succeeds.
83370 *
83371@@ -913,7 +992,7 @@ static noinline void __init kernel_init_freeable(void)
83372 do_basic_setup();
83373
83374 /* Open the /dev/console on the rootfs, this should never fail */
83375- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
83376+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
83377 pr_err("Warning: unable to open an initial console.\n");
83378
83379 (void) sys_dup(0);
83380@@ -926,11 +1005,13 @@ static noinline void __init kernel_init_freeable(void)
83381 if (!ramdisk_execute_command)
83382 ramdisk_execute_command = "/init";
83383
83384- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
83385+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
83386 ramdisk_execute_command = NULL;
83387 prepare_namespace();
83388 }
83389
83390+ grsecurity_init();
83391+
83392 /*
83393 * Ok, we have completed the initial bootup, and
83394 * we're essentially up and running. Get rid of the
83395diff --git a/ipc/compat.c b/ipc/compat.c
83396index 892f658..e7c6320 100644
83397--- a/ipc/compat.c
83398+++ b/ipc/compat.c
83399@@ -399,7 +399,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
83400 COMPAT_SHMLBA);
83401 if (err < 0)
83402 return err;
83403- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
83404+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
83405 }
83406 case SHMDT:
83407 return sys_shmdt(compat_ptr(ptr));
83408diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
83409index b0e99de..09f385c 100644
83410--- a/ipc/ipc_sysctl.c
83411+++ b/ipc/ipc_sysctl.c
83412@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
83413 static int proc_ipc_dointvec(ctl_table *table, int write,
83414 void __user *buffer, size_t *lenp, loff_t *ppos)
83415 {
83416- struct ctl_table ipc_table;
83417+ ctl_table_no_const ipc_table;
83418
83419 memcpy(&ipc_table, table, sizeof(ipc_table));
83420 ipc_table.data = get_ipc(table);
83421@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
83422 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
83423 void __user *buffer, size_t *lenp, loff_t *ppos)
83424 {
83425- struct ctl_table ipc_table;
83426+ ctl_table_no_const ipc_table;
83427
83428 memcpy(&ipc_table, table, sizeof(ipc_table));
83429 ipc_table.data = get_ipc(table);
83430@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
83431 static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
83432 void __user *buffer, size_t *lenp, loff_t *ppos)
83433 {
83434- struct ctl_table ipc_table;
83435+ ctl_table_no_const ipc_table;
83436 size_t lenp_bef = *lenp;
83437 int rc;
83438
83439@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
83440 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
83441 void __user *buffer, size_t *lenp, loff_t *ppos)
83442 {
83443- struct ctl_table ipc_table;
83444+ ctl_table_no_const ipc_table;
83445 memcpy(&ipc_table, table, sizeof(ipc_table));
83446 ipc_table.data = get_ipc(table);
83447
83448@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
83449 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
83450 void __user *buffer, size_t *lenp, loff_t *ppos)
83451 {
83452- struct ctl_table ipc_table;
83453+ ctl_table_no_const ipc_table;
83454 size_t lenp_bef = *lenp;
83455 int oldval;
83456 int rc;
83457diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
83458index 383d638..943fdbb 100644
83459--- a/ipc/mq_sysctl.c
83460+++ b/ipc/mq_sysctl.c
83461@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
83462 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
83463 void __user *buffer, size_t *lenp, loff_t *ppos)
83464 {
83465- struct ctl_table mq_table;
83466+ ctl_table_no_const mq_table;
83467 memcpy(&mq_table, table, sizeof(mq_table));
83468 mq_table.data = get_mq(table);
83469
83470diff --git a/ipc/mqueue.c b/ipc/mqueue.c
83471index 95827ce..09e6d38 100644
83472--- a/ipc/mqueue.c
83473+++ b/ipc/mqueue.c
83474@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
83475 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
83476 info->attr.mq_msgsize);
83477
83478+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
83479 spin_lock(&mq_lock);
83480 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
83481 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
83482diff --git a/ipc/msg.c b/ipc/msg.c
83483index 558aa91..359e718 100644
83484--- a/ipc/msg.c
83485+++ b/ipc/msg.c
83486@@ -297,18 +297,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
83487 return security_msg_queue_associate(msq, msgflg);
83488 }
83489
83490+static struct ipc_ops msg_ops = {
83491+ .getnew = newque,
83492+ .associate = msg_security,
83493+ .more_checks = NULL
83494+};
83495+
83496 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
83497 {
83498 struct ipc_namespace *ns;
83499- struct ipc_ops msg_ops;
83500 struct ipc_params msg_params;
83501
83502 ns = current->nsproxy->ipc_ns;
83503
83504- msg_ops.getnew = newque;
83505- msg_ops.associate = msg_security;
83506- msg_ops.more_checks = NULL;
83507-
83508 msg_params.key = key;
83509 msg_params.flg = msgflg;
83510
83511diff --git a/ipc/sem.c b/ipc/sem.c
83512index db9d241..bc8427c 100644
83513--- a/ipc/sem.c
83514+++ b/ipc/sem.c
83515@@ -562,10 +562,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
83516 return 0;
83517 }
83518
83519+static struct ipc_ops sem_ops = {
83520+ .getnew = newary,
83521+ .associate = sem_security,
83522+ .more_checks = sem_more_checks
83523+};
83524+
83525 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
83526 {
83527 struct ipc_namespace *ns;
83528- struct ipc_ops sem_ops;
83529 struct ipc_params sem_params;
83530
83531 ns = current->nsproxy->ipc_ns;
83532@@ -573,10 +578,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
83533 if (nsems < 0 || nsems > ns->sc_semmsl)
83534 return -EINVAL;
83535
83536- sem_ops.getnew = newary;
83537- sem_ops.associate = sem_security;
83538- sem_ops.more_checks = sem_more_checks;
83539-
83540 sem_params.key = key;
83541 sem_params.flg = semflg;
83542 sem_params.u.nsems = nsems;
83543diff --git a/ipc/shm.c b/ipc/shm.c
83544index 7a51443..3a257d8 100644
83545--- a/ipc/shm.c
83546+++ b/ipc/shm.c
83547@@ -72,6 +72,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
83548 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
83549 #endif
83550
83551+#ifdef CONFIG_GRKERNSEC
83552+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
83553+ const time_t shm_createtime, const kuid_t cuid,
83554+ const int shmid);
83555+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
83556+ const time_t shm_createtime);
83557+#endif
83558+
83559 void shm_init_ns(struct ipc_namespace *ns)
83560 {
83561 ns->shm_ctlmax = SHMMAX;
83562@@ -554,6 +562,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
83563 shp->shm_lprid = 0;
83564 shp->shm_atim = shp->shm_dtim = 0;
83565 shp->shm_ctim = get_seconds();
83566+#ifdef CONFIG_GRKERNSEC
83567+ {
83568+ struct timespec timeval;
83569+ do_posix_clock_monotonic_gettime(&timeval);
83570+
83571+ shp->shm_createtime = timeval.tv_sec;
83572+ }
83573+#endif
83574 shp->shm_segsz = size;
83575 shp->shm_nattch = 0;
83576 shp->shm_file = file;
83577@@ -607,18 +623,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
83578 return 0;
83579 }
83580
83581+static struct ipc_ops shm_ops = {
83582+ .getnew = newseg,
83583+ .associate = shm_security,
83584+ .more_checks = shm_more_checks
83585+};
83586+
83587 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
83588 {
83589 struct ipc_namespace *ns;
83590- struct ipc_ops shm_ops;
83591 struct ipc_params shm_params;
83592
83593 ns = current->nsproxy->ipc_ns;
83594
83595- shm_ops.getnew = newseg;
83596- shm_ops.associate = shm_security;
83597- shm_ops.more_checks = shm_more_checks;
83598-
83599 shm_params.key = key;
83600 shm_params.flg = shmflg;
83601 shm_params.u.size = size;
83602@@ -1089,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
83603 f_mode = FMODE_READ | FMODE_WRITE;
83604 }
83605 if (shmflg & SHM_EXEC) {
83606+
83607+#ifdef CONFIG_PAX_MPROTECT
83608+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
83609+ goto out;
83610+#endif
83611+
83612 prot |= PROT_EXEC;
83613 acc_mode |= S_IXUGO;
83614 }
83615@@ -1113,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
83616 if (err)
83617 goto out_unlock;
83618
83619+#ifdef CONFIG_GRKERNSEC
83620+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
83621+ shp->shm_perm.cuid, shmid) ||
83622+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
83623+ err = -EACCES;
83624+ goto out_unlock;
83625+ }
83626+#endif
83627+
83628 ipc_lock_object(&shp->shm_perm);
83629
83630 /* check if shm_destroy() is tearing down shp */
83631@@ -1125,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
83632 path = shp->shm_file->f_path;
83633 path_get(&path);
83634 shp->shm_nattch++;
83635+#ifdef CONFIG_GRKERNSEC
83636+ shp->shm_lapid = current->pid;
83637+#endif
83638 size = i_size_read(path.dentry->d_inode);
83639 ipc_unlock_object(&shp->shm_perm);
83640 rcu_read_unlock();
83641diff --git a/ipc/util.c b/ipc/util.c
83642index 3ae17a4..d67c32f 100644
83643--- a/ipc/util.c
83644+++ b/ipc/util.c
83645@@ -71,6 +71,8 @@ struct ipc_proc_iface {
83646 int (*show)(struct seq_file *, void *);
83647 };
83648
83649+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
83650+
83651 static void ipc_memory_notifier(struct work_struct *work)
83652 {
83653 ipcns_notify(IPCNS_MEMCHANGED);
83654@@ -558,6 +560,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
83655 granted_mode >>= 6;
83656 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
83657 granted_mode >>= 3;
83658+
83659+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
83660+ return -1;
83661+
83662 /* is there some bit set in requested_mode but not in granted_mode? */
83663 if ((requested_mode & ~granted_mode & 0007) &&
83664 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
83665diff --git a/kernel/acct.c b/kernel/acct.c
83666index 8d6e145..33e0b1e 100644
83667--- a/kernel/acct.c
83668+++ b/kernel/acct.c
83669@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
83670 */
83671 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
83672 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
83673- file->f_op->write(file, (char *)&ac,
83674+ file->f_op->write(file, (char __force_user *)&ac,
83675 sizeof(acct_t), &file->f_pos);
83676 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
83677 set_fs(fs);
83678diff --git a/kernel/audit.c b/kernel/audit.c
83679index 906ae5a0..a7ad0b4 100644
83680--- a/kernel/audit.c
83681+++ b/kernel/audit.c
83682@@ -117,7 +117,7 @@ u32 audit_sig_sid = 0;
83683 3) suppressed due to audit_rate_limit
83684 4) suppressed due to audit_backlog_limit
83685 */
83686-static atomic_t audit_lost = ATOMIC_INIT(0);
83687+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
83688
83689 /* The netlink socket. */
83690 static struct sock *audit_sock;
83691@@ -250,7 +250,7 @@ void audit_log_lost(const char *message)
83692 unsigned long now;
83693 int print;
83694
83695- atomic_inc(&audit_lost);
83696+ atomic_inc_unchecked(&audit_lost);
83697
83698 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
83699
83700@@ -269,7 +269,7 @@ void audit_log_lost(const char *message)
83701 printk(KERN_WARNING
83702 "audit: audit_lost=%d audit_rate_limit=%d "
83703 "audit_backlog_limit=%d\n",
83704- atomic_read(&audit_lost),
83705+ atomic_read_unchecked(&audit_lost),
83706 audit_rate_limit,
83707 audit_backlog_limit);
83708 audit_panic(message);
83709@@ -765,7 +765,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
83710 status_set.pid = audit_pid;
83711 status_set.rate_limit = audit_rate_limit;
83712 status_set.backlog_limit = audit_backlog_limit;
83713- status_set.lost = atomic_read(&audit_lost);
83714+ status_set.lost = atomic_read_unchecked(&audit_lost);
83715 status_set.backlog = skb_queue_len(&audit_skb_queue);
83716 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
83717 &status_set, sizeof(status_set));
83718@@ -1356,7 +1356,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
83719 int i, avail, new_len;
83720 unsigned char *ptr;
83721 struct sk_buff *skb;
83722- static const unsigned char *hex = "0123456789ABCDEF";
83723+ static const unsigned char hex[] = "0123456789ABCDEF";
83724
83725 if (!ab)
83726 return;
83727diff --git a/kernel/auditsc.c b/kernel/auditsc.c
83728index 90594c9..abbeed7 100644
83729--- a/kernel/auditsc.c
83730+++ b/kernel/auditsc.c
83731@@ -1945,7 +1945,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
83732 }
83733
83734 /* global counter which is incremented every time something logs in */
83735-static atomic_t session_id = ATOMIC_INIT(0);
83736+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
83737
83738 static int audit_set_loginuid_perm(kuid_t loginuid)
83739 {
83740@@ -2008,7 +2008,7 @@ int audit_set_loginuid(kuid_t loginuid)
83741
83742 /* are we setting or clearing? */
83743 if (uid_valid(loginuid))
83744- sessionid = atomic_inc_return(&session_id);
83745+ sessionid = atomic_inc_return_unchecked(&session_id);
83746
83747 task->sessionid = sessionid;
83748 task->loginuid = loginuid;
83749diff --git a/kernel/capability.c b/kernel/capability.c
83750index 4e66bf9..cdccecf 100644
83751--- a/kernel/capability.c
83752+++ b/kernel/capability.c
83753@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
83754 * before modification is attempted and the application
83755 * fails.
83756 */
83757+ if (tocopy > ARRAY_SIZE(kdata))
83758+ return -EFAULT;
83759+
83760 if (copy_to_user(dataptr, kdata, tocopy
83761 * sizeof(struct __user_cap_data_struct))) {
83762 return -EFAULT;
83763@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
83764 int ret;
83765
83766 rcu_read_lock();
83767- ret = security_capable(__task_cred(t), ns, cap);
83768+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
83769+ gr_task_is_capable(t, __task_cred(t), cap);
83770 rcu_read_unlock();
83771
83772- return (ret == 0);
83773+ return ret;
83774 }
83775
83776 /**
83777@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
83778 int ret;
83779
83780 rcu_read_lock();
83781- ret = security_capable_noaudit(__task_cred(t), ns, cap);
83782+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
83783 rcu_read_unlock();
83784
83785- return (ret == 0);
83786+ return ret;
83787 }
83788
83789 /**
83790@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
83791 BUG();
83792 }
83793
83794- if (security_capable(current_cred(), ns, cap) == 0) {
83795+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
83796 current->flags |= PF_SUPERPRIV;
83797 return true;
83798 }
83799@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
83800 }
83801 EXPORT_SYMBOL(ns_capable);
83802
83803+bool ns_capable_nolog(struct user_namespace *ns, int cap)
83804+{
83805+ if (unlikely(!cap_valid(cap))) {
83806+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
83807+ BUG();
83808+ }
83809+
83810+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
83811+ current->flags |= PF_SUPERPRIV;
83812+ return true;
83813+ }
83814+ return false;
83815+}
83816+EXPORT_SYMBOL(ns_capable_nolog);
83817+
83818 /**
83819 * file_ns_capable - Determine if the file's opener had a capability in effect
83820 * @file: The file we want to check
83821@@ -432,6 +451,12 @@ bool capable(int cap)
83822 }
83823 EXPORT_SYMBOL(capable);
83824
83825+bool capable_nolog(int cap)
83826+{
83827+ return ns_capable_nolog(&init_user_ns, cap);
83828+}
83829+EXPORT_SYMBOL(capable_nolog);
83830+
83831 /**
83832 * inode_capable - Check superior capability over inode
83833 * @inode: The inode in question
83834@@ -453,3 +478,11 @@ bool inode_capable(const struct inode *inode, int cap)
83835 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
83836 }
83837 EXPORT_SYMBOL(inode_capable);
83838+
83839+bool inode_capable_nolog(const struct inode *inode, int cap)
83840+{
83841+ struct user_namespace *ns = current_user_ns();
83842+
83843+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
83844+}
83845+EXPORT_SYMBOL(inode_capable_nolog);
83846diff --git a/kernel/cgroup.c b/kernel/cgroup.c
83847index bc1dcab..f3a6b42 100644
83848--- a/kernel/cgroup.c
83849+++ b/kernel/cgroup.c
83850@@ -5607,7 +5607,7 @@ static int cgroup_css_links_read(struct cgroup_subsys_state *css,
83851 struct css_set *cset = link->cset;
83852 struct task_struct *task;
83853 int count = 0;
83854- seq_printf(seq, "css_set %p\n", cset);
83855+ seq_printf(seq, "css_set %pK\n", cset);
83856 list_for_each_entry(task, &cset->tasks, cg_list) {
83857 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
83858 seq_puts(seq, " ...\n");
83859diff --git a/kernel/compat.c b/kernel/compat.c
83860index 0a09e48..b46b3d78 100644
83861--- a/kernel/compat.c
83862+++ b/kernel/compat.c
83863@@ -13,6 +13,7 @@
83864
83865 #include <linux/linkage.h>
83866 #include <linux/compat.h>
83867+#include <linux/module.h>
83868 #include <linux/errno.h>
83869 #include <linux/time.h>
83870 #include <linux/signal.h>
83871@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
83872 mm_segment_t oldfs;
83873 long ret;
83874
83875- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
83876+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
83877 oldfs = get_fs();
83878 set_fs(KERNEL_DS);
83879 ret = hrtimer_nanosleep_restart(restart);
83880@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
83881 oldfs = get_fs();
83882 set_fs(KERNEL_DS);
83883 ret = hrtimer_nanosleep(&tu,
83884- rmtp ? (struct timespec __user *)&rmt : NULL,
83885+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
83886 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
83887 set_fs(oldfs);
83888
83889@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
83890 mm_segment_t old_fs = get_fs();
83891
83892 set_fs(KERNEL_DS);
83893- ret = sys_sigpending((old_sigset_t __user *) &s);
83894+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
83895 set_fs(old_fs);
83896 if (ret == 0)
83897 ret = put_user(s, set);
83898@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
83899 mm_segment_t old_fs = get_fs();
83900
83901 set_fs(KERNEL_DS);
83902- ret = sys_old_getrlimit(resource, &r);
83903+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
83904 set_fs(old_fs);
83905
83906 if (!ret) {
83907@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
83908 set_fs (KERNEL_DS);
83909 ret = sys_wait4(pid,
83910 (stat_addr ?
83911- (unsigned int __user *) &status : NULL),
83912- options, (struct rusage __user *) &r);
83913+ (unsigned int __force_user *) &status : NULL),
83914+ options, (struct rusage __force_user *) &r);
83915 set_fs (old_fs);
83916
83917 if (ret > 0) {
83918@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
83919 memset(&info, 0, sizeof(info));
83920
83921 set_fs(KERNEL_DS);
83922- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
83923- uru ? (struct rusage __user *)&ru : NULL);
83924+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
83925+ uru ? (struct rusage __force_user *)&ru : NULL);
83926 set_fs(old_fs);
83927
83928 if ((ret < 0) || (info.si_signo == 0))
83929@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
83930 oldfs = get_fs();
83931 set_fs(KERNEL_DS);
83932 err = sys_timer_settime(timer_id, flags,
83933- (struct itimerspec __user *) &newts,
83934- (struct itimerspec __user *) &oldts);
83935+ (struct itimerspec __force_user *) &newts,
83936+ (struct itimerspec __force_user *) &oldts);
83937 set_fs(oldfs);
83938 if (!err && old && put_compat_itimerspec(old, &oldts))
83939 return -EFAULT;
83940@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
83941 oldfs = get_fs();
83942 set_fs(KERNEL_DS);
83943 err = sys_timer_gettime(timer_id,
83944- (struct itimerspec __user *) &ts);
83945+ (struct itimerspec __force_user *) &ts);
83946 set_fs(oldfs);
83947 if (!err && put_compat_itimerspec(setting, &ts))
83948 return -EFAULT;
83949@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
83950 oldfs = get_fs();
83951 set_fs(KERNEL_DS);
83952 err = sys_clock_settime(which_clock,
83953- (struct timespec __user *) &ts);
83954+ (struct timespec __force_user *) &ts);
83955 set_fs(oldfs);
83956 return err;
83957 }
83958@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
83959 oldfs = get_fs();
83960 set_fs(KERNEL_DS);
83961 err = sys_clock_gettime(which_clock,
83962- (struct timespec __user *) &ts);
83963+ (struct timespec __force_user *) &ts);
83964 set_fs(oldfs);
83965 if (!err && put_compat_timespec(&ts, tp))
83966 return -EFAULT;
83967@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
83968
83969 oldfs = get_fs();
83970 set_fs(KERNEL_DS);
83971- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
83972+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
83973 set_fs(oldfs);
83974
83975 err = compat_put_timex(utp, &txc);
83976@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
83977 oldfs = get_fs();
83978 set_fs(KERNEL_DS);
83979 err = sys_clock_getres(which_clock,
83980- (struct timespec __user *) &ts);
83981+ (struct timespec __force_user *) &ts);
83982 set_fs(oldfs);
83983 if (!err && tp && put_compat_timespec(&ts, tp))
83984 return -EFAULT;
83985@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
83986 long err;
83987 mm_segment_t oldfs;
83988 struct timespec tu;
83989- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
83990+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
83991
83992- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
83993+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
83994 oldfs = get_fs();
83995 set_fs(KERNEL_DS);
83996 err = clock_nanosleep_restart(restart);
83997@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
83998 oldfs = get_fs();
83999 set_fs(KERNEL_DS);
84000 err = sys_clock_nanosleep(which_clock, flags,
84001- (struct timespec __user *) &in,
84002- (struct timespec __user *) &out);
84003+ (struct timespec __force_user *) &in,
84004+ (struct timespec __force_user *) &out);
84005 set_fs(oldfs);
84006
84007 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
84008@@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
84009 mm_segment_t old_fs = get_fs();
84010
84011 set_fs(KERNEL_DS);
84012- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
84013+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
84014 set_fs(old_fs);
84015 if (put_compat_timespec(&t, interval))
84016 return -EFAULT;
84017diff --git a/kernel/configs.c b/kernel/configs.c
84018index c18b1f1..b9a0132 100644
84019--- a/kernel/configs.c
84020+++ b/kernel/configs.c
84021@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
84022 struct proc_dir_entry *entry;
84023
84024 /* create the current config file */
84025+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
84026+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
84027+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
84028+ &ikconfig_file_ops);
84029+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84030+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
84031+ &ikconfig_file_ops);
84032+#endif
84033+#else
84034 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
84035 &ikconfig_file_ops);
84036+#endif
84037+
84038 if (!entry)
84039 return -ENOMEM;
84040
84041diff --git a/kernel/cred.c b/kernel/cred.c
84042index e0573a4..3874e41 100644
84043--- a/kernel/cred.c
84044+++ b/kernel/cred.c
84045@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
84046 validate_creds(cred);
84047 alter_cred_subscribers(cred, -1);
84048 put_cred(cred);
84049+
84050+#ifdef CONFIG_GRKERNSEC_SETXID
84051+ cred = (struct cred *) tsk->delayed_cred;
84052+ if (cred != NULL) {
84053+ tsk->delayed_cred = NULL;
84054+ validate_creds(cred);
84055+ alter_cred_subscribers(cred, -1);
84056+ put_cred(cred);
84057+ }
84058+#endif
84059 }
84060
84061 /**
84062@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
84063 * Always returns 0 thus allowing this function to be tail-called at the end
84064 * of, say, sys_setgid().
84065 */
84066-int commit_creds(struct cred *new)
84067+static int __commit_creds(struct cred *new)
84068 {
84069 struct task_struct *task = current;
84070 const struct cred *old = task->real_cred;
84071@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
84072
84073 get_cred(new); /* we will require a ref for the subj creds too */
84074
84075+ gr_set_role_label(task, new->uid, new->gid);
84076+
84077 /* dumpability changes */
84078 if (!uid_eq(old->euid, new->euid) ||
84079 !gid_eq(old->egid, new->egid) ||
84080@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
84081 put_cred(old);
84082 return 0;
84083 }
84084+#ifdef CONFIG_GRKERNSEC_SETXID
84085+extern int set_user(struct cred *new);
84086+
84087+void gr_delayed_cred_worker(void)
84088+{
84089+ const struct cred *new = current->delayed_cred;
84090+ struct cred *ncred;
84091+
84092+ current->delayed_cred = NULL;
84093+
84094+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
84095+ // from doing get_cred on it when queueing this
84096+ put_cred(new);
84097+ return;
84098+ } else if (new == NULL)
84099+ return;
84100+
84101+ ncred = prepare_creds();
84102+ if (!ncred)
84103+ goto die;
84104+ // uids
84105+ ncred->uid = new->uid;
84106+ ncred->euid = new->euid;
84107+ ncred->suid = new->suid;
84108+ ncred->fsuid = new->fsuid;
84109+ // gids
84110+ ncred->gid = new->gid;
84111+ ncred->egid = new->egid;
84112+ ncred->sgid = new->sgid;
84113+ ncred->fsgid = new->fsgid;
84114+ // groups
84115+ if (set_groups(ncred, new->group_info) < 0) {
84116+ abort_creds(ncred);
84117+ goto die;
84118+ }
84119+ // caps
84120+ ncred->securebits = new->securebits;
84121+ ncred->cap_inheritable = new->cap_inheritable;
84122+ ncred->cap_permitted = new->cap_permitted;
84123+ ncred->cap_effective = new->cap_effective;
84124+ ncred->cap_bset = new->cap_bset;
84125+
84126+ if (set_user(ncred)) {
84127+ abort_creds(ncred);
84128+ goto die;
84129+ }
84130+
84131+ // from doing get_cred on it when queueing this
84132+ put_cred(new);
84133+
84134+ __commit_creds(ncred);
84135+ return;
84136+die:
84137+ // from doing get_cred on it when queueing this
84138+ put_cred(new);
84139+ do_group_exit(SIGKILL);
84140+}
84141+#endif
84142+
84143+int commit_creds(struct cred *new)
84144+{
84145+#ifdef CONFIG_GRKERNSEC_SETXID
84146+ int ret;
84147+ int schedule_it = 0;
84148+ struct task_struct *t;
84149+
84150+ /* we won't get called with tasklist_lock held for writing
84151+ and interrupts disabled as the cred struct in that case is
84152+ init_cred
84153+ */
84154+ if (grsec_enable_setxid && !current_is_single_threaded() &&
84155+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
84156+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
84157+ schedule_it = 1;
84158+ }
84159+ ret = __commit_creds(new);
84160+ if (schedule_it) {
84161+ rcu_read_lock();
84162+ read_lock(&tasklist_lock);
84163+ for (t = next_thread(current); t != current;
84164+ t = next_thread(t)) {
84165+ if (t->delayed_cred == NULL) {
84166+ t->delayed_cred = get_cred(new);
84167+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
84168+ set_tsk_need_resched(t);
84169+ }
84170+ }
84171+ read_unlock(&tasklist_lock);
84172+ rcu_read_unlock();
84173+ }
84174+ return ret;
84175+#else
84176+ return __commit_creds(new);
84177+#endif
84178+}
84179+
84180 EXPORT_SYMBOL(commit_creds);
84181
84182 /**
84183diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
84184index 7d2f35e..1bafcd0 100644
84185--- a/kernel/debug/debug_core.c
84186+++ b/kernel/debug/debug_core.c
84187@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
84188 */
84189 static atomic_t masters_in_kgdb;
84190 static atomic_t slaves_in_kgdb;
84191-static atomic_t kgdb_break_tasklet_var;
84192+static atomic_unchecked_t kgdb_break_tasklet_var;
84193 atomic_t kgdb_setting_breakpoint;
84194
84195 struct task_struct *kgdb_usethread;
84196@@ -133,7 +133,7 @@ int kgdb_single_step;
84197 static pid_t kgdb_sstep_pid;
84198
84199 /* to keep track of the CPU which is doing the single stepping*/
84200-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
84201+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
84202
84203 /*
84204 * If you are debugging a problem where roundup (the collection of
84205@@ -541,7 +541,7 @@ return_normal:
84206 * kernel will only try for the value of sstep_tries before
84207 * giving up and continuing on.
84208 */
84209- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
84210+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
84211 (kgdb_info[cpu].task &&
84212 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
84213 atomic_set(&kgdb_active, -1);
84214@@ -639,8 +639,8 @@ cpu_master_loop:
84215 }
84216
84217 kgdb_restore:
84218- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
84219- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
84220+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
84221+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
84222 if (kgdb_info[sstep_cpu].task)
84223 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
84224 else
84225@@ -916,18 +916,18 @@ static void kgdb_unregister_callbacks(void)
84226 static void kgdb_tasklet_bpt(unsigned long ing)
84227 {
84228 kgdb_breakpoint();
84229- atomic_set(&kgdb_break_tasklet_var, 0);
84230+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
84231 }
84232
84233 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
84234
84235 void kgdb_schedule_breakpoint(void)
84236 {
84237- if (atomic_read(&kgdb_break_tasklet_var) ||
84238+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
84239 atomic_read(&kgdb_active) != -1 ||
84240 atomic_read(&kgdb_setting_breakpoint))
84241 return;
84242- atomic_inc(&kgdb_break_tasklet_var);
84243+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
84244 tasklet_schedule(&kgdb_tasklet_breakpoint);
84245 }
84246 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
84247diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
84248index 0b097c8..11dd5c5 100644
84249--- a/kernel/debug/kdb/kdb_main.c
84250+++ b/kernel/debug/kdb/kdb_main.c
84251@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
84252 continue;
84253
84254 kdb_printf("%-20s%8u 0x%p ", mod->name,
84255- mod->core_size, (void *)mod);
84256+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
84257 #ifdef CONFIG_MODULE_UNLOAD
84258 kdb_printf("%4ld ", module_refcount(mod));
84259 #endif
84260@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
84261 kdb_printf(" (Loading)");
84262 else
84263 kdb_printf(" (Live)");
84264- kdb_printf(" 0x%p", mod->module_core);
84265+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
84266
84267 #ifdef CONFIG_MODULE_UNLOAD
84268 {
84269diff --git a/kernel/events/core.c b/kernel/events/core.c
84270index f574401..11b21f0 100644
84271--- a/kernel/events/core.c
84272+++ b/kernel/events/core.c
84273@@ -157,8 +157,15 @@ static struct srcu_struct pmus_srcu;
84274 * 0 - disallow raw tracepoint access for unpriv
84275 * 1 - disallow cpu events for unpriv
84276 * 2 - disallow kernel profiling for unpriv
84277+ * 3 - disallow all unpriv perf event use
84278 */
84279-int sysctl_perf_event_paranoid __read_mostly = 1;
84280+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
84281+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
84282+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
84283+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
84284+#else
84285+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
84286+#endif
84287
84288 /* Minimum for 512 kiB + 1 user control page */
84289 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
84290@@ -184,7 +191,7 @@ void update_perf_cpu_limits(void)
84291
84292 tmp *= sysctl_perf_cpu_time_max_percent;
84293 do_div(tmp, 100);
84294- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
84295+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
84296 }
84297
84298 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
84299@@ -271,7 +278,7 @@ void perf_sample_event_took(u64 sample_len_ns)
84300 update_perf_cpu_limits();
84301 }
84302
84303-static atomic64_t perf_event_id;
84304+static atomic64_unchecked_t perf_event_id;
84305
84306 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
84307 enum event_type_t event_type);
84308@@ -2985,7 +2992,7 @@ static void __perf_event_read(void *info)
84309
84310 static inline u64 perf_event_count(struct perf_event *event)
84311 {
84312- return local64_read(&event->count) + atomic64_read(&event->child_count);
84313+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
84314 }
84315
84316 static u64 perf_event_read(struct perf_event *event)
84317@@ -3353,9 +3360,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
84318 mutex_lock(&event->child_mutex);
84319 total += perf_event_read(event);
84320 *enabled += event->total_time_enabled +
84321- atomic64_read(&event->child_total_time_enabled);
84322+ atomic64_read_unchecked(&event->child_total_time_enabled);
84323 *running += event->total_time_running +
84324- atomic64_read(&event->child_total_time_running);
84325+ atomic64_read_unchecked(&event->child_total_time_running);
84326
84327 list_for_each_entry(child, &event->child_list, child_list) {
84328 total += perf_event_read(child);
84329@@ -3770,10 +3777,10 @@ void perf_event_update_userpage(struct perf_event *event)
84330 userpg->offset -= local64_read(&event->hw.prev_count);
84331
84332 userpg->time_enabled = enabled +
84333- atomic64_read(&event->child_total_time_enabled);
84334+ atomic64_read_unchecked(&event->child_total_time_enabled);
84335
84336 userpg->time_running = running +
84337- atomic64_read(&event->child_total_time_running);
84338+ atomic64_read_unchecked(&event->child_total_time_running);
84339
84340 arch_perf_update_userpage(userpg, now);
84341
84342@@ -4324,7 +4331,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
84343
84344 /* Data. */
84345 sp = perf_user_stack_pointer(regs);
84346- rem = __output_copy_user(handle, (void *) sp, dump_size);
84347+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
84348 dyn_size = dump_size - rem;
84349
84350 perf_output_skip(handle, rem);
84351@@ -4415,11 +4422,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
84352 values[n++] = perf_event_count(event);
84353 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
84354 values[n++] = enabled +
84355- atomic64_read(&event->child_total_time_enabled);
84356+ atomic64_read_unchecked(&event->child_total_time_enabled);
84357 }
84358 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
84359 values[n++] = running +
84360- atomic64_read(&event->child_total_time_running);
84361+ atomic64_read_unchecked(&event->child_total_time_running);
84362 }
84363 if (read_format & PERF_FORMAT_ID)
84364 values[n++] = primary_event_id(event);
84365@@ -6686,7 +6693,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
84366 event->parent = parent_event;
84367
84368 event->ns = get_pid_ns(task_active_pid_ns(current));
84369- event->id = atomic64_inc_return(&perf_event_id);
84370+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
84371
84372 event->state = PERF_EVENT_STATE_INACTIVE;
84373
84374@@ -6985,6 +6992,11 @@ SYSCALL_DEFINE5(perf_event_open,
84375 if (flags & ~PERF_FLAG_ALL)
84376 return -EINVAL;
84377
84378+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
84379+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
84380+ return -EACCES;
84381+#endif
84382+
84383 err = perf_copy_attr(attr_uptr, &attr);
84384 if (err)
84385 return err;
84386@@ -7316,10 +7328,10 @@ static void sync_child_event(struct perf_event *child_event,
84387 /*
84388 * Add back the child's count to the parent's count:
84389 */
84390- atomic64_add(child_val, &parent_event->child_count);
84391- atomic64_add(child_event->total_time_enabled,
84392+ atomic64_add_unchecked(child_val, &parent_event->child_count);
84393+ atomic64_add_unchecked(child_event->total_time_enabled,
84394 &parent_event->child_total_time_enabled);
84395- atomic64_add(child_event->total_time_running,
84396+ atomic64_add_unchecked(child_event->total_time_running,
84397 &parent_event->child_total_time_running);
84398
84399 /*
84400diff --git a/kernel/events/internal.h b/kernel/events/internal.h
84401index 569b2187..19940d9 100644
84402--- a/kernel/events/internal.h
84403+++ b/kernel/events/internal.h
84404@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
84405 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
84406 }
84407
84408-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
84409+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
84410 static inline unsigned long \
84411 func_name(struct perf_output_handle *handle, \
84412- const void *buf, unsigned long len) \
84413+ const void user *buf, unsigned long len) \
84414 { \
84415 unsigned long size, written; \
84416 \
84417@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
84418 return 0;
84419 }
84420
84421-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
84422+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
84423
84424 static inline unsigned long
84425 memcpy_skip(void *dst, const void *src, unsigned long n)
84426@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
84427 return 0;
84428 }
84429
84430-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
84431+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
84432
84433 #ifndef arch_perf_out_copy_user
84434 #define arch_perf_out_copy_user arch_perf_out_copy_user
84435@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
84436 }
84437 #endif
84438
84439-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
84440+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
84441
84442 /* Callchain handling */
84443 extern struct perf_callchain_entry *
84444diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
84445index 24b7d6c..40cf797 100644
84446--- a/kernel/events/uprobes.c
84447+++ b/kernel/events/uprobes.c
84448@@ -1640,7 +1640,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
84449 {
84450 struct page *page;
84451 uprobe_opcode_t opcode;
84452- int result;
84453+ long result;
84454
84455 pagefault_disable();
84456 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
84457diff --git a/kernel/exit.c b/kernel/exit.c
84458index a949819..a5f127d 100644
84459--- a/kernel/exit.c
84460+++ b/kernel/exit.c
84461@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
84462 struct task_struct *leader;
84463 int zap_leader;
84464 repeat:
84465+#ifdef CONFIG_NET
84466+ gr_del_task_from_ip_table(p);
84467+#endif
84468+
84469 /* don't need to get the RCU readlock here - the process is dead and
84470 * can't be modifying its own credentials. But shut RCU-lockdep up */
84471 rcu_read_lock();
84472@@ -329,7 +333,7 @@ int allow_signal(int sig)
84473 * know it'll be handled, so that they don't get converted to
84474 * SIGKILL or just silently dropped.
84475 */
84476- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
84477+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
84478 recalc_sigpending();
84479 spin_unlock_irq(&current->sighand->siglock);
84480 return 0;
84481@@ -698,6 +702,8 @@ void do_exit(long code)
84482 struct task_struct *tsk = current;
84483 int group_dead;
84484
84485+ set_fs(USER_DS);
84486+
84487 profile_task_exit(tsk);
84488
84489 WARN_ON(blk_needs_flush_plug(tsk));
84490@@ -714,7 +720,6 @@ void do_exit(long code)
84491 * mm_release()->clear_child_tid() from writing to a user-controlled
84492 * kernel address.
84493 */
84494- set_fs(USER_DS);
84495
84496 ptrace_event(PTRACE_EVENT_EXIT, code);
84497
84498@@ -773,6 +778,9 @@ void do_exit(long code)
84499 tsk->exit_code = code;
84500 taskstats_exit(tsk, group_dead);
84501
84502+ gr_acl_handle_psacct(tsk, code);
84503+ gr_acl_handle_exit();
84504+
84505 exit_mm(tsk);
84506
84507 if (group_dead)
84508@@ -894,7 +902,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
84509 * Take down every thread in the group. This is called by fatal signals
84510 * as well as by sys_exit_group (below).
84511 */
84512-void
84513+__noreturn void
84514 do_group_exit(int exit_code)
84515 {
84516 struct signal_struct *sig = current->signal;
84517diff --git a/kernel/fork.c b/kernel/fork.c
84518index dfa736c..d170f9b 100644
84519--- a/kernel/fork.c
84520+++ b/kernel/fork.c
84521@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
84522 *stackend = STACK_END_MAGIC; /* for overflow detection */
84523
84524 #ifdef CONFIG_CC_STACKPROTECTOR
84525- tsk->stack_canary = get_random_int();
84526+ tsk->stack_canary = pax_get_random_long();
84527 #endif
84528
84529 /*
84530@@ -345,12 +345,80 @@ free_tsk:
84531 }
84532
84533 #ifdef CONFIG_MMU
84534-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
84535+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
84536+{
84537+ struct vm_area_struct *tmp;
84538+ unsigned long charge;
84539+ struct file *file;
84540+ int retval;
84541+
84542+ charge = 0;
84543+ if (mpnt->vm_flags & VM_ACCOUNT) {
84544+ unsigned long len = vma_pages(mpnt);
84545+
84546+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
84547+ goto fail_nomem;
84548+ charge = len;
84549+ }
84550+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
84551+ if (!tmp)
84552+ goto fail_nomem;
84553+ *tmp = *mpnt;
84554+ tmp->vm_mm = mm;
84555+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
84556+ retval = vma_dup_policy(mpnt, tmp);
84557+ if (retval)
84558+ goto fail_nomem_policy;
84559+ if (anon_vma_fork(tmp, mpnt))
84560+ goto fail_nomem_anon_vma_fork;
84561+ tmp->vm_flags &= ~VM_LOCKED;
84562+ tmp->vm_next = tmp->vm_prev = NULL;
84563+ tmp->vm_mirror = NULL;
84564+ file = tmp->vm_file;
84565+ if (file) {
84566+ struct inode *inode = file_inode(file);
84567+ struct address_space *mapping = file->f_mapping;
84568+
84569+ get_file(file);
84570+ if (tmp->vm_flags & VM_DENYWRITE)
84571+ atomic_dec(&inode->i_writecount);
84572+ mutex_lock(&mapping->i_mmap_mutex);
84573+ if (tmp->vm_flags & VM_SHARED)
84574+ mapping->i_mmap_writable++;
84575+ flush_dcache_mmap_lock(mapping);
84576+ /* insert tmp into the share list, just after mpnt */
84577+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
84578+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
84579+ else
84580+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
84581+ flush_dcache_mmap_unlock(mapping);
84582+ mutex_unlock(&mapping->i_mmap_mutex);
84583+ }
84584+
84585+ /*
84586+ * Clear hugetlb-related page reserves for children. This only
84587+ * affects MAP_PRIVATE mappings. Faults generated by the child
84588+ * are not guaranteed to succeed, even if read-only
84589+ */
84590+ if (is_vm_hugetlb_page(tmp))
84591+ reset_vma_resv_huge_pages(tmp);
84592+
84593+ return tmp;
84594+
84595+fail_nomem_anon_vma_fork:
84596+ mpol_put(vma_policy(tmp));
84597+fail_nomem_policy:
84598+ kmem_cache_free(vm_area_cachep, tmp);
84599+fail_nomem:
84600+ vm_unacct_memory(charge);
84601+ return NULL;
84602+}
84603+
84604+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
84605 {
84606 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
84607 struct rb_node **rb_link, *rb_parent;
84608 int retval;
84609- unsigned long charge;
84610
84611 uprobe_start_dup_mmap();
84612 down_write(&oldmm->mmap_sem);
84613@@ -379,55 +447,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
84614
84615 prev = NULL;
84616 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
84617- struct file *file;
84618-
84619 if (mpnt->vm_flags & VM_DONTCOPY) {
84620 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
84621 -vma_pages(mpnt));
84622 continue;
84623 }
84624- charge = 0;
84625- if (mpnt->vm_flags & VM_ACCOUNT) {
84626- unsigned long len = vma_pages(mpnt);
84627-
84628- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
84629- goto fail_nomem;
84630- charge = len;
84631- }
84632- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
84633- if (!tmp)
84634- goto fail_nomem;
84635- *tmp = *mpnt;
84636- INIT_LIST_HEAD(&tmp->anon_vma_chain);
84637- retval = vma_dup_policy(mpnt, tmp);
84638- if (retval)
84639- goto fail_nomem_policy;
84640- tmp->vm_mm = mm;
84641- if (anon_vma_fork(tmp, mpnt))
84642- goto fail_nomem_anon_vma_fork;
84643- tmp->vm_flags &= ~VM_LOCKED;
84644- tmp->vm_next = tmp->vm_prev = NULL;
84645- file = tmp->vm_file;
84646- if (file) {
84647- struct inode *inode = file_inode(file);
84648- struct address_space *mapping = file->f_mapping;
84649-
84650- get_file(file);
84651- if (tmp->vm_flags & VM_DENYWRITE)
84652- atomic_dec(&inode->i_writecount);
84653- mutex_lock(&mapping->i_mmap_mutex);
84654- if (tmp->vm_flags & VM_SHARED)
84655- mapping->i_mmap_writable++;
84656- flush_dcache_mmap_lock(mapping);
84657- /* insert tmp into the share list, just after mpnt */
84658- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
84659- vma_nonlinear_insert(tmp,
84660- &mapping->i_mmap_nonlinear);
84661- else
84662- vma_interval_tree_insert_after(tmp, mpnt,
84663- &mapping->i_mmap);
84664- flush_dcache_mmap_unlock(mapping);
84665- mutex_unlock(&mapping->i_mmap_mutex);
84666+ tmp = dup_vma(mm, oldmm, mpnt);
84667+ if (!tmp) {
84668+ retval = -ENOMEM;
84669+ goto out;
84670 }
84671
84672 /*
84673@@ -459,6 +487,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
84674 if (retval)
84675 goto out;
84676 }
84677+
84678+#ifdef CONFIG_PAX_SEGMEXEC
84679+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
84680+ struct vm_area_struct *mpnt_m;
84681+
84682+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
84683+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
84684+
84685+ if (!mpnt->vm_mirror)
84686+ continue;
84687+
84688+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
84689+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
84690+ mpnt->vm_mirror = mpnt_m;
84691+ } else {
84692+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
84693+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
84694+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
84695+ mpnt->vm_mirror->vm_mirror = mpnt;
84696+ }
84697+ }
84698+ BUG_ON(mpnt_m);
84699+ }
84700+#endif
84701+
84702 /* a new mm has just been created */
84703 arch_dup_mmap(oldmm, mm);
84704 retval = 0;
84705@@ -468,14 +521,6 @@ out:
84706 up_write(&oldmm->mmap_sem);
84707 uprobe_end_dup_mmap();
84708 return retval;
84709-fail_nomem_anon_vma_fork:
84710- mpol_put(vma_policy(tmp));
84711-fail_nomem_policy:
84712- kmem_cache_free(vm_area_cachep, tmp);
84713-fail_nomem:
84714- retval = -ENOMEM;
84715- vm_unacct_memory(charge);
84716- goto out;
84717 }
84718
84719 static inline int mm_alloc_pgd(struct mm_struct *mm)
84720@@ -689,8 +734,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
84721 return ERR_PTR(err);
84722
84723 mm = get_task_mm(task);
84724- if (mm && mm != current->mm &&
84725- !ptrace_may_access(task, mode)) {
84726+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
84727+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
84728 mmput(mm);
84729 mm = ERR_PTR(-EACCES);
84730 }
84731@@ -909,13 +954,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
84732 spin_unlock(&fs->lock);
84733 return -EAGAIN;
84734 }
84735- fs->users++;
84736+ atomic_inc(&fs->users);
84737 spin_unlock(&fs->lock);
84738 return 0;
84739 }
84740 tsk->fs = copy_fs_struct(fs);
84741 if (!tsk->fs)
84742 return -ENOMEM;
84743+ /* Carry through gr_chroot_dentry and is_chrooted instead
84744+ of recomputing it here. Already copied when the task struct
84745+ is duplicated. This allows pivot_root to not be treated as
84746+ a chroot
84747+ */
84748+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
84749+
84750 return 0;
84751 }
84752
84753@@ -1126,7 +1178,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
84754 * parts of the process environment (as per the clone
84755 * flags). The actual kick-off is left to the caller.
84756 */
84757-static struct task_struct *copy_process(unsigned long clone_flags,
84758+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
84759 unsigned long stack_start,
84760 unsigned long stack_size,
84761 int __user *child_tidptr,
84762@@ -1198,6 +1250,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
84763 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
84764 #endif
84765 retval = -EAGAIN;
84766+
84767+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
84768+
84769 if (atomic_read(&p->real_cred->user->processes) >=
84770 task_rlimit(p, RLIMIT_NPROC)) {
84771 if (p->real_cred->user != INIT_USER &&
84772@@ -1446,6 +1501,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
84773 goto bad_fork_free_pid;
84774 }
84775
84776+ /* synchronizes with gr_set_acls()
84777+ we need to call this past the point of no return for fork()
84778+ */
84779+ gr_copy_label(p);
84780+
84781 if (likely(p->pid)) {
84782 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
84783
84784@@ -1532,6 +1592,8 @@ bad_fork_cleanup_count:
84785 bad_fork_free:
84786 free_task(p);
84787 fork_out:
84788+ gr_log_forkfail(retval);
84789+
84790 return ERR_PTR(retval);
84791 }
84792
84793@@ -1593,6 +1655,7 @@ long do_fork(unsigned long clone_flags,
84794
84795 p = copy_process(clone_flags, stack_start, stack_size,
84796 child_tidptr, NULL, trace);
84797+ add_latent_entropy();
84798 /*
84799 * Do this prior waking up the new thread - the thread pointer
84800 * might get invalid after that point, if the thread exits quickly.
84801@@ -1607,6 +1670,8 @@ long do_fork(unsigned long clone_flags,
84802 if (clone_flags & CLONE_PARENT_SETTID)
84803 put_user(nr, parent_tidptr);
84804
84805+ gr_handle_brute_check();
84806+
84807 if (clone_flags & CLONE_VFORK) {
84808 p->vfork_done = &vfork;
84809 init_completion(&vfork);
84810@@ -1723,7 +1788,7 @@ void __init proc_caches_init(void)
84811 mm_cachep = kmem_cache_create("mm_struct",
84812 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
84813 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
84814- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
84815+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
84816 mmap_init();
84817 nsproxy_cache_init();
84818 }
84819@@ -1763,7 +1828,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
84820 return 0;
84821
84822 /* don't need lock here; in the worst case we'll do useless copy */
84823- if (fs->users == 1)
84824+ if (atomic_read(&fs->users) == 1)
84825 return 0;
84826
84827 *new_fsp = copy_fs_struct(fs);
84828@@ -1870,7 +1935,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
84829 fs = current->fs;
84830 spin_lock(&fs->lock);
84831 current->fs = new_fs;
84832- if (--fs->users)
84833+ gr_set_chroot_entries(current, &current->fs->root);
84834+ if (atomic_dec_return(&fs->users))
84835 new_fs = NULL;
84836 else
84837 new_fs = fs;
84838diff --git a/kernel/futex.c b/kernel/futex.c
84839index f6ff019..ac53307 100644
84840--- a/kernel/futex.c
84841+++ b/kernel/futex.c
84842@@ -54,6 +54,7 @@
84843 #include <linux/mount.h>
84844 #include <linux/pagemap.h>
84845 #include <linux/syscalls.h>
84846+#include <linux/ptrace.h>
84847 #include <linux/signal.h>
84848 #include <linux/export.h>
84849 #include <linux/magic.h>
84850@@ -243,6 +244,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
84851 struct page *page, *page_head;
84852 int err, ro = 0;
84853
84854+#ifdef CONFIG_PAX_SEGMEXEC
84855+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
84856+ return -EFAULT;
84857+#endif
84858+
84859 /*
84860 * The futex address must be "naturally" aligned.
84861 */
84862@@ -442,7 +448,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
84863
84864 static int get_futex_value_locked(u32 *dest, u32 __user *from)
84865 {
84866- int ret;
84867+ unsigned long ret;
84868
84869 pagefault_disable();
84870 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
84871@@ -2735,6 +2741,7 @@ static int __init futex_init(void)
84872 {
84873 u32 curval;
84874 int i;
84875+ mm_segment_t oldfs;
84876
84877 /*
84878 * This will fail and we want it. Some arch implementations do
84879@@ -2746,8 +2753,11 @@ static int __init futex_init(void)
84880 * implementation, the non-functional ones will return
84881 * -ENOSYS.
84882 */
84883+ oldfs = get_fs();
84884+ set_fs(USER_DS);
84885 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
84886 futex_cmpxchg_enabled = 1;
84887+ set_fs(oldfs);
84888
84889 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
84890 plist_head_init(&futex_queues[i].chain);
84891diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
84892index f9f44fd..29885e4 100644
84893--- a/kernel/futex_compat.c
84894+++ b/kernel/futex_compat.c
84895@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
84896 return 0;
84897 }
84898
84899-static void __user *futex_uaddr(struct robust_list __user *entry,
84900+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
84901 compat_long_t futex_offset)
84902 {
84903 compat_uptr_t base = ptr_to_compat(entry);
84904diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
84905index f45b75b..bfac6d5 100644
84906--- a/kernel/gcov/base.c
84907+++ b/kernel/gcov/base.c
84908@@ -108,11 +108,6 @@ void gcov_enable_events(void)
84909 }
84910
84911 #ifdef CONFIG_MODULES
84912-static inline int within(void *addr, void *start, unsigned long size)
84913-{
84914- return ((addr >= start) && (addr < start + size));
84915-}
84916-
84917 /* Update list and generate events when modules are unloaded. */
84918 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
84919 void *data)
84920@@ -127,7 +122,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
84921
84922 /* Remove entries located in module from linked list. */
84923 while ((info = gcov_info_next(info))) {
84924- if (within(info, mod->module_core, mod->core_size)) {
84925+ if (within_module_core_rw((unsigned long)info, mod)) {
84926 gcov_info_unlink(prev, info);
84927 if (gcov_events_enabled)
84928 gcov_event(GCOV_REMOVE, info);
84929diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
84930index 383319b..56ebb13 100644
84931--- a/kernel/hrtimer.c
84932+++ b/kernel/hrtimer.c
84933@@ -1438,7 +1438,7 @@ void hrtimer_peek_ahead_timers(void)
84934 local_irq_restore(flags);
84935 }
84936
84937-static void run_hrtimer_softirq(struct softirq_action *h)
84938+static __latent_entropy void run_hrtimer_softirq(void)
84939 {
84940 hrtimer_peek_ahead_timers();
84941 }
84942diff --git a/kernel/irq_work.c b/kernel/irq_work.c
84943index 55fcce6..0e4cf34 100644
84944--- a/kernel/irq_work.c
84945+++ b/kernel/irq_work.c
84946@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
84947 return NOTIFY_OK;
84948 }
84949
84950-static struct notifier_block cpu_notify;
84951+static struct notifier_block cpu_notify = {
84952+ .notifier_call = irq_work_cpu_notify,
84953+ .priority = 0,
84954+};
84955
84956 static __init int irq_work_init_cpu_notifier(void)
84957 {
84958- cpu_notify.notifier_call = irq_work_cpu_notify;
84959- cpu_notify.priority = 0;
84960 register_cpu_notifier(&cpu_notify);
84961 return 0;
84962 }
84963diff --git a/kernel/jump_label.c b/kernel/jump_label.c
84964index 9019f15..9a3c42e 100644
84965--- a/kernel/jump_label.c
84966+++ b/kernel/jump_label.c
84967@@ -14,6 +14,7 @@
84968 #include <linux/err.h>
84969 #include <linux/static_key.h>
84970 #include <linux/jump_label_ratelimit.h>
84971+#include <linux/mm.h>
84972
84973 #ifdef HAVE_JUMP_LABEL
84974
84975@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
84976
84977 size = (((unsigned long)stop - (unsigned long)start)
84978 / sizeof(struct jump_entry));
84979+ pax_open_kernel();
84980 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
84981+ pax_close_kernel();
84982 }
84983
84984 static void jump_label_update(struct static_key *key, int enable);
84985@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
84986 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
84987 struct jump_entry *iter;
84988
84989+ pax_open_kernel();
84990 for (iter = iter_start; iter < iter_stop; iter++) {
84991 if (within_module_init(iter->code, mod))
84992 iter->code = 0;
84993 }
84994+ pax_close_kernel();
84995 }
84996
84997 static int
84998diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
84999index 3127ad5..159d880 100644
85000--- a/kernel/kallsyms.c
85001+++ b/kernel/kallsyms.c
85002@@ -11,6 +11,9 @@
85003 * Changed the compression method from stem compression to "table lookup"
85004 * compression (see scripts/kallsyms.c for a more complete description)
85005 */
85006+#ifdef CONFIG_GRKERNSEC_HIDESYM
85007+#define __INCLUDED_BY_HIDESYM 1
85008+#endif
85009 #include <linux/kallsyms.h>
85010 #include <linux/module.h>
85011 #include <linux/init.h>
85012@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
85013
85014 static inline int is_kernel_inittext(unsigned long addr)
85015 {
85016+ if (system_state != SYSTEM_BOOTING)
85017+ return 0;
85018+
85019 if (addr >= (unsigned long)_sinittext
85020 && addr <= (unsigned long)_einittext)
85021 return 1;
85022 return 0;
85023 }
85024
85025+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85026+#ifdef CONFIG_MODULES
85027+static inline int is_module_text(unsigned long addr)
85028+{
85029+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
85030+ return 1;
85031+
85032+ addr = ktla_ktva(addr);
85033+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
85034+}
85035+#else
85036+static inline int is_module_text(unsigned long addr)
85037+{
85038+ return 0;
85039+}
85040+#endif
85041+#endif
85042+
85043 static inline int is_kernel_text(unsigned long addr)
85044 {
85045 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
85046@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
85047
85048 static inline int is_kernel(unsigned long addr)
85049 {
85050+
85051+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85052+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
85053+ return 1;
85054+
85055+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
85056+#else
85057 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
85058+#endif
85059+
85060 return 1;
85061 return in_gate_area_no_mm(addr);
85062 }
85063
85064 static int is_ksym_addr(unsigned long addr)
85065 {
85066+
85067+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85068+ if (is_module_text(addr))
85069+ return 0;
85070+#endif
85071+
85072 if (all_var)
85073 return is_kernel(addr);
85074
85075@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
85076
85077 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
85078 {
85079- iter->name[0] = '\0';
85080 iter->nameoff = get_symbol_offset(new_pos);
85081 iter->pos = new_pos;
85082 }
85083@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
85084 {
85085 struct kallsym_iter *iter = m->private;
85086
85087+#ifdef CONFIG_GRKERNSEC_HIDESYM
85088+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
85089+ return 0;
85090+#endif
85091+
85092 /* Some debugging symbols have no name. Ignore them. */
85093 if (!iter->name[0])
85094 return 0;
85095@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
85096 */
85097 type = iter->exported ? toupper(iter->type) :
85098 tolower(iter->type);
85099+
85100 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
85101 type, iter->name, iter->module_name);
85102 } else
85103@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
85104 struct kallsym_iter *iter;
85105 int ret;
85106
85107- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
85108+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
85109 if (!iter)
85110 return -ENOMEM;
85111 reset_iter(iter, 0);
85112diff --git a/kernel/kcmp.c b/kernel/kcmp.c
85113index e30ac0f..3528cac 100644
85114--- a/kernel/kcmp.c
85115+++ b/kernel/kcmp.c
85116@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
85117 struct task_struct *task1, *task2;
85118 int ret;
85119
85120+#ifdef CONFIG_GRKERNSEC
85121+ return -ENOSYS;
85122+#endif
85123+
85124 rcu_read_lock();
85125
85126 /*
85127diff --git a/kernel/kexec.c b/kernel/kexec.c
85128index 9c97016..df438f8 100644
85129--- a/kernel/kexec.c
85130+++ b/kernel/kexec.c
85131@@ -1044,7 +1044,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
85132 unsigned long flags)
85133 {
85134 struct compat_kexec_segment in;
85135- struct kexec_segment out, __user *ksegments;
85136+ struct kexec_segment out;
85137+ struct kexec_segment __user *ksegments;
85138 unsigned long i, result;
85139
85140 /* Don't allow clients that don't understand the native
85141diff --git a/kernel/kmod.c b/kernel/kmod.c
85142index b086006..655e2aa 100644
85143--- a/kernel/kmod.c
85144+++ b/kernel/kmod.c
85145@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
85146 kfree(info->argv);
85147 }
85148
85149-static int call_modprobe(char *module_name, int wait)
85150+static int call_modprobe(char *module_name, char *module_param, int wait)
85151 {
85152 struct subprocess_info *info;
85153 static char *envp[] = {
85154@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
85155 NULL
85156 };
85157
85158- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
85159+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
85160 if (!argv)
85161 goto out;
85162
85163@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
85164 argv[1] = "-q";
85165 argv[2] = "--";
85166 argv[3] = module_name; /* check free_modprobe_argv() */
85167- argv[4] = NULL;
85168+ argv[4] = module_param;
85169+ argv[5] = NULL;
85170
85171 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
85172 NULL, free_modprobe_argv, NULL);
85173@@ -129,9 +130,8 @@ out:
85174 * If module auto-loading support is disabled then this function
85175 * becomes a no-operation.
85176 */
85177-int __request_module(bool wait, const char *fmt, ...)
85178+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
85179 {
85180- va_list args;
85181 char module_name[MODULE_NAME_LEN];
85182 unsigned int max_modprobes;
85183 int ret;
85184@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
85185 if (!modprobe_path[0])
85186 return 0;
85187
85188- va_start(args, fmt);
85189- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
85190- va_end(args);
85191+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
85192 if (ret >= MODULE_NAME_LEN)
85193 return -ENAMETOOLONG;
85194
85195@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
85196 if (ret)
85197 return ret;
85198
85199+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85200+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
85201+ /* hack to workaround consolekit/udisks stupidity */
85202+ read_lock(&tasklist_lock);
85203+ if (!strcmp(current->comm, "mount") &&
85204+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
85205+ read_unlock(&tasklist_lock);
85206+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
85207+ return -EPERM;
85208+ }
85209+ read_unlock(&tasklist_lock);
85210+ }
85211+#endif
85212+
85213 /* If modprobe needs a service that is in a module, we get a recursive
85214 * loop. Limit the number of running kmod threads to max_threads/2 or
85215 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
85216@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
85217
85218 trace_module_request(module_name, wait, _RET_IP_);
85219
85220- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
85221+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
85222
85223 atomic_dec(&kmod_concurrent);
85224 return ret;
85225 }
85226+
85227+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
85228+{
85229+ va_list args;
85230+ int ret;
85231+
85232+ va_start(args, fmt);
85233+ ret = ____request_module(wait, module_param, fmt, args);
85234+ va_end(args);
85235+
85236+ return ret;
85237+}
85238+
85239+int __request_module(bool wait, const char *fmt, ...)
85240+{
85241+ va_list args;
85242+ int ret;
85243+
85244+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85245+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
85246+ char module_param[MODULE_NAME_LEN];
85247+
85248+ memset(module_param, 0, sizeof(module_param));
85249+
85250+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
85251+
85252+ va_start(args, fmt);
85253+ ret = ____request_module(wait, module_param, fmt, args);
85254+ va_end(args);
85255+
85256+ return ret;
85257+ }
85258+#endif
85259+
85260+ va_start(args, fmt);
85261+ ret = ____request_module(wait, NULL, fmt, args);
85262+ va_end(args);
85263+
85264+ return ret;
85265+}
85266+
85267 EXPORT_SYMBOL(__request_module);
85268 #endif /* CONFIG_MODULES */
85269
85270@@ -218,6 +271,19 @@ static int ____call_usermodehelper(void *data)
85271 */
85272 set_user_nice(current, 0);
85273
85274+#ifdef CONFIG_GRKERNSEC
85275+ /* this is race-free as far as userland is concerned as we copied
85276+ out the path to be used prior to this point and are now operating
85277+ on that copy
85278+ */
85279+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
85280+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7)) || strstr(sub_info->path, "..")) {
85281+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
85282+ retval = -EPERM;
85283+ goto fail;
85284+ }
85285+#endif
85286+
85287 retval = -ENOMEM;
85288 new = prepare_kernel_cred(current);
85289 if (!new)
85290@@ -240,8 +306,8 @@ static int ____call_usermodehelper(void *data)
85291 commit_creds(new);
85292
85293 retval = do_execve(sub_info->path,
85294- (const char __user *const __user *)sub_info->argv,
85295- (const char __user *const __user *)sub_info->envp);
85296+ (const char __user *const __force_user *)sub_info->argv,
85297+ (const char __user *const __force_user *)sub_info->envp);
85298 if (!retval)
85299 return 0;
85300
85301@@ -260,6 +326,10 @@ static int call_helper(void *data)
85302
85303 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
85304 {
85305+#ifdef CONFIG_GRKERNSEC
85306+ kfree(info->path);
85307+ info->path = info->origpath;
85308+#endif
85309 if (info->cleanup)
85310 (*info->cleanup)(info);
85311 kfree(info);
85312@@ -303,7 +373,7 @@ static int wait_for_helper(void *data)
85313 *
85314 * Thus the __user pointer cast is valid here.
85315 */
85316- sys_wait4(pid, (int __user *)&ret, 0, NULL);
85317+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
85318
85319 /*
85320 * If ret is 0, either ____call_usermodehelper failed and the
85321@@ -542,7 +612,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
85322 goto out;
85323
85324 INIT_WORK(&sub_info->work, __call_usermodehelper);
85325+#ifdef CONFIG_GRKERNSEC
85326+ sub_info->origpath = path;
85327+ sub_info->path = kstrdup(path, gfp_mask);
85328+#else
85329 sub_info->path = path;
85330+#endif
85331 sub_info->argv = argv;
85332 sub_info->envp = envp;
85333
85334@@ -650,7 +725,7 @@ EXPORT_SYMBOL(call_usermodehelper);
85335 static int proc_cap_handler(struct ctl_table *table, int write,
85336 void __user *buffer, size_t *lenp, loff_t *ppos)
85337 {
85338- struct ctl_table t;
85339+ ctl_table_no_const t;
85340 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
85341 kernel_cap_t new_cap;
85342 int err, i;
85343diff --git a/kernel/kprobes.c b/kernel/kprobes.c
85344index ceeadfc..11c18b6 100644
85345--- a/kernel/kprobes.c
85346+++ b/kernel/kprobes.c
85347@@ -31,6 +31,9 @@
85348 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
85349 * <prasanna@in.ibm.com> added function-return probes.
85350 */
85351+#ifdef CONFIG_GRKERNSEC_HIDESYM
85352+#define __INCLUDED_BY_HIDESYM 1
85353+#endif
85354 #include <linux/kprobes.h>
85355 #include <linux/hash.h>
85356 #include <linux/init.h>
85357@@ -135,12 +138,12 @@ enum kprobe_slot_state {
85358
85359 static void *alloc_insn_page(void)
85360 {
85361- return module_alloc(PAGE_SIZE);
85362+ return module_alloc_exec(PAGE_SIZE);
85363 }
85364
85365 static void free_insn_page(void *page)
85366 {
85367- module_free(NULL, page);
85368+ module_free_exec(NULL, page);
85369 }
85370
85371 struct kprobe_insn_cache kprobe_insn_slots = {
85372@@ -2151,11 +2154,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
85373 kprobe_type = "k";
85374
85375 if (sym)
85376- seq_printf(pi, "%p %s %s+0x%x %s ",
85377+ seq_printf(pi, "%pK %s %s+0x%x %s ",
85378 p->addr, kprobe_type, sym, offset,
85379 (modname ? modname : " "));
85380 else
85381- seq_printf(pi, "%p %s %p ",
85382+ seq_printf(pi, "%pK %s %pK ",
85383 p->addr, kprobe_type, p->addr);
85384
85385 if (!pp)
85386diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
85387index 9659d38..bffd520 100644
85388--- a/kernel/ksysfs.c
85389+++ b/kernel/ksysfs.c
85390@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
85391 {
85392 if (count+1 > UEVENT_HELPER_PATH_LEN)
85393 return -ENOENT;
85394+ if (!capable(CAP_SYS_ADMIN))
85395+ return -EPERM;
85396 memcpy(uevent_helper, buf, count);
85397 uevent_helper[count] = '\0';
85398 if (count && uevent_helper[count-1] == '\n')
85399@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
85400 return count;
85401 }
85402
85403-static struct bin_attribute notes_attr = {
85404+static bin_attribute_no_const notes_attr __read_only = {
85405 .attr = {
85406 .name = "notes",
85407 .mode = S_IRUGO,
85408diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
85409index 576ba75..7c256e4 100644
85410--- a/kernel/locking/lockdep.c
85411+++ b/kernel/locking/lockdep.c
85412@@ -596,6 +596,10 @@ static int static_obj(void *obj)
85413 end = (unsigned long) &_end,
85414 addr = (unsigned long) obj;
85415
85416+#ifdef CONFIG_PAX_KERNEXEC
85417+ start = ktla_ktva(start);
85418+#endif
85419+
85420 /*
85421 * static variable?
85422 */
85423@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
85424 if (!static_obj(lock->key)) {
85425 debug_locks_off();
85426 printk("INFO: trying to register non-static key.\n");
85427+ printk("lock:%pS key:%pS.\n", lock, lock->key);
85428 printk("the code is fine but needs lockdep annotation.\n");
85429 printk("turning off the locking correctness validator.\n");
85430 dump_stack();
85431@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
85432 if (!class)
85433 return 0;
85434 }
85435- atomic_inc((atomic_t *)&class->ops);
85436+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
85437 if (very_verbose(class)) {
85438 printk("\nacquire class [%p] %s", class->key, class->name);
85439 if (class->name_version > 1)
85440diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
85441index ef43ac4..2720dfa 100644
85442--- a/kernel/locking/lockdep_proc.c
85443+++ b/kernel/locking/lockdep_proc.c
85444@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
85445 return 0;
85446 }
85447
85448- seq_printf(m, "%p", class->key);
85449+ seq_printf(m, "%pK", class->key);
85450 #ifdef CONFIG_DEBUG_LOCKDEP
85451 seq_printf(m, " OPS:%8ld", class->ops);
85452 #endif
85453@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
85454
85455 list_for_each_entry(entry, &class->locks_after, entry) {
85456 if (entry->distance == 1) {
85457- seq_printf(m, " -> [%p] ", entry->class->key);
85458+ seq_printf(m, " -> [%pK] ", entry->class->key);
85459 print_name(m, entry->class);
85460 seq_puts(m, "\n");
85461 }
85462@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
85463 if (!class->key)
85464 continue;
85465
85466- seq_printf(m, "[%p] ", class->key);
85467+ seq_printf(m, "[%pK] ", class->key);
85468 print_name(m, class);
85469 seq_puts(m, "\n");
85470 }
85471@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
85472 if (!i)
85473 seq_line(m, '-', 40-namelen, namelen);
85474
85475- snprintf(ip, sizeof(ip), "[<%p>]",
85476+ snprintf(ip, sizeof(ip), "[<%pK>]",
85477 (void *)class->contention_point[i]);
85478 seq_printf(m, "%40s %14lu %29s %pS\n",
85479 name, stats->contention_point[i],
85480@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
85481 if (!i)
85482 seq_line(m, '-', 40-namelen, namelen);
85483
85484- snprintf(ip, sizeof(ip), "[<%p>]",
85485+ snprintf(ip, sizeof(ip), "[<%pK>]",
85486 (void *)class->contending_point[i]);
85487 seq_printf(m, "%40s %14lu %29s %pS\n",
85488 name, stats->contending_point[i],
85489diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
85490index 7e3443f..b2a1e6b 100644
85491--- a/kernel/locking/mutex-debug.c
85492+++ b/kernel/locking/mutex-debug.c
85493@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
85494 }
85495
85496 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
85497- struct thread_info *ti)
85498+ struct task_struct *task)
85499 {
85500 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
85501
85502 /* Mark the current thread as blocked on the lock: */
85503- ti->task->blocked_on = waiter;
85504+ task->blocked_on = waiter;
85505 }
85506
85507 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
85508- struct thread_info *ti)
85509+ struct task_struct *task)
85510 {
85511 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
85512- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
85513- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
85514- ti->task->blocked_on = NULL;
85515+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
85516+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
85517+ task->blocked_on = NULL;
85518
85519 list_del_init(&waiter->list);
85520 waiter->task = NULL;
85521diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
85522index 0799fd3..d06ae3b 100644
85523--- a/kernel/locking/mutex-debug.h
85524+++ b/kernel/locking/mutex-debug.h
85525@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
85526 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
85527 extern void debug_mutex_add_waiter(struct mutex *lock,
85528 struct mutex_waiter *waiter,
85529- struct thread_info *ti);
85530+ struct task_struct *task);
85531 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
85532- struct thread_info *ti);
85533+ struct task_struct *task);
85534 extern void debug_mutex_unlock(struct mutex *lock);
85535 extern void debug_mutex_init(struct mutex *lock, const char *name,
85536 struct lock_class_key *key);
85537diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
85538index 4dd6e4c..df52693 100644
85539--- a/kernel/locking/mutex.c
85540+++ b/kernel/locking/mutex.c
85541@@ -135,7 +135,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
85542 node->locked = 1;
85543 return;
85544 }
85545- ACCESS_ONCE(prev->next) = node;
85546+ ACCESS_ONCE_RW(prev->next) = node;
85547 smp_wmb();
85548 /* Wait until the lock holder passes the lock down */
85549 while (!ACCESS_ONCE(node->locked))
85550@@ -156,7 +156,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
85551 while (!(next = ACCESS_ONCE(node->next)))
85552 arch_mutex_cpu_relax();
85553 }
85554- ACCESS_ONCE(next->locked) = 1;
85555+ ACCESS_ONCE_RW(next->locked) = 1;
85556 smp_wmb();
85557 }
85558
85559@@ -520,7 +520,7 @@ slowpath:
85560 goto skip_wait;
85561
85562 debug_mutex_lock_common(lock, &waiter);
85563- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
85564+ debug_mutex_add_waiter(lock, &waiter, task);
85565
85566 /* add waiting tasks to the end of the waitqueue (FIFO): */
85567 list_add_tail(&waiter.list, &lock->wait_list);
85568@@ -564,7 +564,7 @@ slowpath:
85569 schedule_preempt_disabled();
85570 spin_lock_mutex(&lock->wait_lock, flags);
85571 }
85572- mutex_remove_waiter(lock, &waiter, current_thread_info());
85573+ mutex_remove_waiter(lock, &waiter, task);
85574 /* set it to 0 if there are no waiters left: */
85575 if (likely(list_empty(&lock->wait_list)))
85576 atomic_set(&lock->count, 0);
85577@@ -601,7 +601,7 @@ skip_wait:
85578 return 0;
85579
85580 err:
85581- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
85582+ mutex_remove_waiter(lock, &waiter, task);
85583 spin_unlock_mutex(&lock->wait_lock, flags);
85584 debug_mutex_free_waiter(&waiter);
85585 mutex_release(&lock->dep_map, 1, ip);
85586diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
85587index 1d96dd0..994ff19 100644
85588--- a/kernel/locking/rtmutex-tester.c
85589+++ b/kernel/locking/rtmutex-tester.c
85590@@ -22,7 +22,7 @@
85591 #define MAX_RT_TEST_MUTEXES 8
85592
85593 static spinlock_t rttest_lock;
85594-static atomic_t rttest_event;
85595+static atomic_unchecked_t rttest_event;
85596
85597 struct test_thread_data {
85598 int opcode;
85599@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85600
85601 case RTTEST_LOCKCONT:
85602 td->mutexes[td->opdata] = 1;
85603- td->event = atomic_add_return(1, &rttest_event);
85604+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85605 return 0;
85606
85607 case RTTEST_RESET:
85608@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85609 return 0;
85610
85611 case RTTEST_RESETEVENT:
85612- atomic_set(&rttest_event, 0);
85613+ atomic_set_unchecked(&rttest_event, 0);
85614 return 0;
85615
85616 default:
85617@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85618 return ret;
85619
85620 td->mutexes[id] = 1;
85621- td->event = atomic_add_return(1, &rttest_event);
85622+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85623 rt_mutex_lock(&mutexes[id]);
85624- td->event = atomic_add_return(1, &rttest_event);
85625+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85626 td->mutexes[id] = 4;
85627 return 0;
85628
85629@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85630 return ret;
85631
85632 td->mutexes[id] = 1;
85633- td->event = atomic_add_return(1, &rttest_event);
85634+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85635 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
85636- td->event = atomic_add_return(1, &rttest_event);
85637+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85638 td->mutexes[id] = ret ? 0 : 4;
85639 return ret ? -EINTR : 0;
85640
85641@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85642 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
85643 return ret;
85644
85645- td->event = atomic_add_return(1, &rttest_event);
85646+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85647 rt_mutex_unlock(&mutexes[id]);
85648- td->event = atomic_add_return(1, &rttest_event);
85649+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85650 td->mutexes[id] = 0;
85651 return 0;
85652
85653@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85654 break;
85655
85656 td->mutexes[dat] = 2;
85657- td->event = atomic_add_return(1, &rttest_event);
85658+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85659 break;
85660
85661 default:
85662@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85663 return;
85664
85665 td->mutexes[dat] = 3;
85666- td->event = atomic_add_return(1, &rttest_event);
85667+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85668 break;
85669
85670 case RTTEST_LOCKNOWAIT:
85671@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85672 return;
85673
85674 td->mutexes[dat] = 1;
85675- td->event = atomic_add_return(1, &rttest_event);
85676+ td->event = atomic_add_return_unchecked(1, &rttest_event);
85677 return;
85678
85679 default:
85680diff --git a/kernel/module.c b/kernel/module.c
85681index f5a3b1e..97ebb15 100644
85682--- a/kernel/module.c
85683+++ b/kernel/module.c
85684@@ -61,6 +61,7 @@
85685 #include <linux/pfn.h>
85686 #include <linux/bsearch.h>
85687 #include <linux/fips.h>
85688+#include <linux/grsecurity.h>
85689 #include <uapi/linux/module.h>
85690 #include "module-internal.h"
85691
85692@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
85693
85694 /* Bounds of module allocation, for speeding __module_address.
85695 * Protected by module_mutex. */
85696-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
85697+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
85698+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
85699
85700 int register_module_notifier(struct notifier_block * nb)
85701 {
85702@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
85703 return true;
85704
85705 list_for_each_entry_rcu(mod, &modules, list) {
85706- struct symsearch arr[] = {
85707+ struct symsearch modarr[] = {
85708 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
85709 NOT_GPL_ONLY, false },
85710 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
85711@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
85712 if (mod->state == MODULE_STATE_UNFORMED)
85713 continue;
85714
85715- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
85716+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
85717 return true;
85718 }
85719 return false;
85720@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
85721 if (!pcpusec->sh_size)
85722 return 0;
85723
85724- if (align > PAGE_SIZE) {
85725+ if (align-1 >= PAGE_SIZE) {
85726 pr_warn("%s: per-cpu alignment %li > %li\n",
85727 mod->name, align, PAGE_SIZE);
85728 align = PAGE_SIZE;
85729@@ -1064,7 +1066,7 @@ struct module_attribute module_uevent =
85730 static ssize_t show_coresize(struct module_attribute *mattr,
85731 struct module_kobject *mk, char *buffer)
85732 {
85733- return sprintf(buffer, "%u\n", mk->mod->core_size);
85734+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
85735 }
85736
85737 static struct module_attribute modinfo_coresize =
85738@@ -1073,7 +1075,7 @@ static struct module_attribute modinfo_coresize =
85739 static ssize_t show_initsize(struct module_attribute *mattr,
85740 struct module_kobject *mk, char *buffer)
85741 {
85742- return sprintf(buffer, "%u\n", mk->mod->init_size);
85743+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
85744 }
85745
85746 static struct module_attribute modinfo_initsize =
85747@@ -1165,12 +1167,29 @@ static int check_version(Elf_Shdr *sechdrs,
85748 goto bad_version;
85749 }
85750
85751+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
85752+ /*
85753+ * avoid potentially printing jibberish on attempted load
85754+ * of a module randomized with a different seed
85755+ */
85756+ pr_warn("no symbol version for %s\n", symname);
85757+#else
85758 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
85759+#endif
85760 return 0;
85761
85762 bad_version:
85763+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
85764+ /*
85765+ * avoid potentially printing jibberish on attempted load
85766+ * of a module randomized with a different seed
85767+ */
85768+ printk("attempted module disagrees about version of symbol %s\n",
85769+ symname);
85770+#else
85771 printk("%s: disagrees about version of symbol %s\n",
85772 mod->name, symname);
85773+#endif
85774 return 0;
85775 }
85776
85777@@ -1286,7 +1305,7 @@ resolve_symbol_wait(struct module *mod,
85778 */
85779 #ifdef CONFIG_SYSFS
85780
85781-#ifdef CONFIG_KALLSYMS
85782+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
85783 static inline bool sect_empty(const Elf_Shdr *sect)
85784 {
85785 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
85786@@ -1426,7 +1445,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
85787 {
85788 unsigned int notes, loaded, i;
85789 struct module_notes_attrs *notes_attrs;
85790- struct bin_attribute *nattr;
85791+ bin_attribute_no_const *nattr;
85792
85793 /* failed to create section attributes, so can't create notes */
85794 if (!mod->sect_attrs)
85795@@ -1538,7 +1557,7 @@ static void del_usage_links(struct module *mod)
85796 static int module_add_modinfo_attrs(struct module *mod)
85797 {
85798 struct module_attribute *attr;
85799- struct module_attribute *temp_attr;
85800+ module_attribute_no_const *temp_attr;
85801 int error = 0;
85802 int i;
85803
85804@@ -1759,21 +1778,21 @@ static void set_section_ro_nx(void *base,
85805
85806 static void unset_module_core_ro_nx(struct module *mod)
85807 {
85808- set_page_attributes(mod->module_core + mod->core_text_size,
85809- mod->module_core + mod->core_size,
85810+ set_page_attributes(mod->module_core_rw,
85811+ mod->module_core_rw + mod->core_size_rw,
85812 set_memory_x);
85813- set_page_attributes(mod->module_core,
85814- mod->module_core + mod->core_ro_size,
85815+ set_page_attributes(mod->module_core_rx,
85816+ mod->module_core_rx + mod->core_size_rx,
85817 set_memory_rw);
85818 }
85819
85820 static void unset_module_init_ro_nx(struct module *mod)
85821 {
85822- set_page_attributes(mod->module_init + mod->init_text_size,
85823- mod->module_init + mod->init_size,
85824+ set_page_attributes(mod->module_init_rw,
85825+ mod->module_init_rw + mod->init_size_rw,
85826 set_memory_x);
85827- set_page_attributes(mod->module_init,
85828- mod->module_init + mod->init_ro_size,
85829+ set_page_attributes(mod->module_init_rx,
85830+ mod->module_init_rx + mod->init_size_rx,
85831 set_memory_rw);
85832 }
85833
85834@@ -1786,14 +1805,14 @@ void set_all_modules_text_rw(void)
85835 list_for_each_entry_rcu(mod, &modules, list) {
85836 if (mod->state == MODULE_STATE_UNFORMED)
85837 continue;
85838- if ((mod->module_core) && (mod->core_text_size)) {
85839- set_page_attributes(mod->module_core,
85840- mod->module_core + mod->core_text_size,
85841+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
85842+ set_page_attributes(mod->module_core_rx,
85843+ mod->module_core_rx + mod->core_size_rx,
85844 set_memory_rw);
85845 }
85846- if ((mod->module_init) && (mod->init_text_size)) {
85847- set_page_attributes(mod->module_init,
85848- mod->module_init + mod->init_text_size,
85849+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
85850+ set_page_attributes(mod->module_init_rx,
85851+ mod->module_init_rx + mod->init_size_rx,
85852 set_memory_rw);
85853 }
85854 }
85855@@ -1809,14 +1828,14 @@ void set_all_modules_text_ro(void)
85856 list_for_each_entry_rcu(mod, &modules, list) {
85857 if (mod->state == MODULE_STATE_UNFORMED)
85858 continue;
85859- if ((mod->module_core) && (mod->core_text_size)) {
85860- set_page_attributes(mod->module_core,
85861- mod->module_core + mod->core_text_size,
85862+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
85863+ set_page_attributes(mod->module_core_rx,
85864+ mod->module_core_rx + mod->core_size_rx,
85865 set_memory_ro);
85866 }
85867- if ((mod->module_init) && (mod->init_text_size)) {
85868- set_page_attributes(mod->module_init,
85869- mod->module_init + mod->init_text_size,
85870+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
85871+ set_page_attributes(mod->module_init_rx,
85872+ mod->module_init_rx + mod->init_size_rx,
85873 set_memory_ro);
85874 }
85875 }
85876@@ -1867,16 +1886,19 @@ static void free_module(struct module *mod)
85877
85878 /* This may be NULL, but that's OK */
85879 unset_module_init_ro_nx(mod);
85880- module_free(mod, mod->module_init);
85881+ module_free(mod, mod->module_init_rw);
85882+ module_free_exec(mod, mod->module_init_rx);
85883 kfree(mod->args);
85884 percpu_modfree(mod);
85885
85886 /* Free lock-classes: */
85887- lockdep_free_key_range(mod->module_core, mod->core_size);
85888+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
85889+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
85890
85891 /* Finally, free the core (containing the module structure) */
85892 unset_module_core_ro_nx(mod);
85893- module_free(mod, mod->module_core);
85894+ module_free_exec(mod, mod->module_core_rx);
85895+ module_free(mod, mod->module_core_rw);
85896
85897 #ifdef CONFIG_MPU
85898 update_protections(current->mm);
85899@@ -1945,9 +1967,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
85900 int ret = 0;
85901 const struct kernel_symbol *ksym;
85902
85903+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85904+ int is_fs_load = 0;
85905+ int register_filesystem_found = 0;
85906+ char *p;
85907+
85908+ p = strstr(mod->args, "grsec_modharden_fs");
85909+ if (p) {
85910+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
85911+ /* copy \0 as well */
85912+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
85913+ is_fs_load = 1;
85914+ }
85915+#endif
85916+
85917 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
85918 const char *name = info->strtab + sym[i].st_name;
85919
85920+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85921+ /* it's a real shame this will never get ripped and copied
85922+ upstream! ;(
85923+ */
85924+ if (is_fs_load && !strcmp(name, "register_filesystem"))
85925+ register_filesystem_found = 1;
85926+#endif
85927+
85928 switch (sym[i].st_shndx) {
85929 case SHN_COMMON:
85930 /* We compiled with -fno-common. These are not
85931@@ -1968,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
85932 ksym = resolve_symbol_wait(mod, info, name);
85933 /* Ok if resolved. */
85934 if (ksym && !IS_ERR(ksym)) {
85935+ pax_open_kernel();
85936 sym[i].st_value = ksym->value;
85937+ pax_close_kernel();
85938 break;
85939 }
85940
85941@@ -1987,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
85942 secbase = (unsigned long)mod_percpu(mod);
85943 else
85944 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
85945+ pax_open_kernel();
85946 sym[i].st_value += secbase;
85947+ pax_close_kernel();
85948 break;
85949 }
85950 }
85951
85952+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85953+ if (is_fs_load && !register_filesystem_found) {
85954+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
85955+ ret = -EPERM;
85956+ }
85957+#endif
85958+
85959 return ret;
85960 }
85961
85962@@ -2075,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
85963 || s->sh_entsize != ~0UL
85964 || strstarts(sname, ".init"))
85965 continue;
85966- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
85967+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
85968+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
85969+ else
85970+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
85971 pr_debug("\t%s\n", sname);
85972 }
85973- switch (m) {
85974- case 0: /* executable */
85975- mod->core_size = debug_align(mod->core_size);
85976- mod->core_text_size = mod->core_size;
85977- break;
85978- case 1: /* RO: text and ro-data */
85979- mod->core_size = debug_align(mod->core_size);
85980- mod->core_ro_size = mod->core_size;
85981- break;
85982- case 3: /* whole core */
85983- mod->core_size = debug_align(mod->core_size);
85984- break;
85985- }
85986 }
85987
85988 pr_debug("Init section allocation order:\n");
85989@@ -2104,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
85990 || s->sh_entsize != ~0UL
85991 || !strstarts(sname, ".init"))
85992 continue;
85993- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
85994- | INIT_OFFSET_MASK);
85995+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
85996+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
85997+ else
85998+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
85999+ s->sh_entsize |= INIT_OFFSET_MASK;
86000 pr_debug("\t%s\n", sname);
86001 }
86002- switch (m) {
86003- case 0: /* executable */
86004- mod->init_size = debug_align(mod->init_size);
86005- mod->init_text_size = mod->init_size;
86006- break;
86007- case 1: /* RO: text and ro-data */
86008- mod->init_size = debug_align(mod->init_size);
86009- mod->init_ro_size = mod->init_size;
86010- break;
86011- case 3: /* whole init */
86012- mod->init_size = debug_align(mod->init_size);
86013- break;
86014- }
86015 }
86016 }
86017
86018@@ -2293,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
86019
86020 /* Put symbol section at end of init part of module. */
86021 symsect->sh_flags |= SHF_ALLOC;
86022- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
86023+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
86024 info->index.sym) | INIT_OFFSET_MASK;
86025 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
86026
86027@@ -2310,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
86028 }
86029
86030 /* Append room for core symbols at end of core part. */
86031- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
86032- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
86033- mod->core_size += strtab_size;
86034+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
86035+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
86036+ mod->core_size_rx += strtab_size;
86037
86038 /* Put string table section at end of init part of module. */
86039 strsect->sh_flags |= SHF_ALLOC;
86040- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
86041+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
86042 info->index.str) | INIT_OFFSET_MASK;
86043 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
86044 }
86045@@ -2334,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
86046 /* Make sure we get permanent strtab: don't use info->strtab. */
86047 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
86048
86049+ pax_open_kernel();
86050+
86051 /* Set types up while we still have access to sections. */
86052 for (i = 0; i < mod->num_symtab; i++)
86053 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
86054
86055- mod->core_symtab = dst = mod->module_core + info->symoffs;
86056- mod->core_strtab = s = mod->module_core + info->stroffs;
86057+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
86058+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
86059 src = mod->symtab;
86060 for (ndst = i = 0; i < mod->num_symtab; i++) {
86061 if (i == 0 ||
86062@@ -2351,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
86063 }
86064 }
86065 mod->core_num_syms = ndst;
86066+
86067+ pax_close_kernel();
86068 }
86069 #else
86070 static inline void layout_symtab(struct module *mod, struct load_info *info)
86071@@ -2384,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
86072 return vmalloc_exec(size);
86073 }
86074
86075-static void *module_alloc_update_bounds(unsigned long size)
86076+static void *module_alloc_update_bounds_rw(unsigned long size)
86077 {
86078 void *ret = module_alloc(size);
86079
86080 if (ret) {
86081 mutex_lock(&module_mutex);
86082 /* Update module bounds. */
86083- if ((unsigned long)ret < module_addr_min)
86084- module_addr_min = (unsigned long)ret;
86085- if ((unsigned long)ret + size > module_addr_max)
86086- module_addr_max = (unsigned long)ret + size;
86087+ if ((unsigned long)ret < module_addr_min_rw)
86088+ module_addr_min_rw = (unsigned long)ret;
86089+ if ((unsigned long)ret + size > module_addr_max_rw)
86090+ module_addr_max_rw = (unsigned long)ret + size;
86091+ mutex_unlock(&module_mutex);
86092+ }
86093+ return ret;
86094+}
86095+
86096+static void *module_alloc_update_bounds_rx(unsigned long size)
86097+{
86098+ void *ret = module_alloc_exec(size);
86099+
86100+ if (ret) {
86101+ mutex_lock(&module_mutex);
86102+ /* Update module bounds. */
86103+ if ((unsigned long)ret < module_addr_min_rx)
86104+ module_addr_min_rx = (unsigned long)ret;
86105+ if ((unsigned long)ret + size > module_addr_max_rx)
86106+ module_addr_max_rx = (unsigned long)ret + size;
86107 mutex_unlock(&module_mutex);
86108 }
86109 return ret;
86110@@ -2651,7 +2706,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
86111 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
86112
86113 if (info->index.sym == 0) {
86114+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
86115+ /*
86116+ * avoid potentially printing jibberish on attempted load
86117+ * of a module randomized with a different seed
86118+ */
86119+ pr_warn("module has no symbols (stripped?)\n");
86120+#else
86121 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
86122+#endif
86123 return ERR_PTR(-ENOEXEC);
86124 }
86125
86126@@ -2667,8 +2730,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
86127 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
86128 {
86129 const char *modmagic = get_modinfo(info, "vermagic");
86130+ const char *license = get_modinfo(info, "license");
86131 int err;
86132
86133+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
86134+ if (!license || !license_is_gpl_compatible(license))
86135+ return -ENOEXEC;
86136+#endif
86137+
86138 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
86139 modmagic = NULL;
86140
86141@@ -2693,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
86142 }
86143
86144 /* Set up license info based on the info section */
86145- set_license(mod, get_modinfo(info, "license"));
86146+ set_license(mod, license);
86147
86148 return 0;
86149 }
86150@@ -2787,7 +2856,7 @@ static int move_module(struct module *mod, struct load_info *info)
86151 void *ptr;
86152
86153 /* Do the allocs. */
86154- ptr = module_alloc_update_bounds(mod->core_size);
86155+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
86156 /*
86157 * The pointer to this block is stored in the module structure
86158 * which is inside the block. Just mark it as not being a
86159@@ -2797,11 +2866,11 @@ static int move_module(struct module *mod, struct load_info *info)
86160 if (!ptr)
86161 return -ENOMEM;
86162
86163- memset(ptr, 0, mod->core_size);
86164- mod->module_core = ptr;
86165+ memset(ptr, 0, mod->core_size_rw);
86166+ mod->module_core_rw = ptr;
86167
86168- if (mod->init_size) {
86169- ptr = module_alloc_update_bounds(mod->init_size);
86170+ if (mod->init_size_rw) {
86171+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
86172 /*
86173 * The pointer to this block is stored in the module structure
86174 * which is inside the block. This block doesn't need to be
86175@@ -2810,13 +2879,45 @@ static int move_module(struct module *mod, struct load_info *info)
86176 */
86177 kmemleak_ignore(ptr);
86178 if (!ptr) {
86179- module_free(mod, mod->module_core);
86180+ module_free(mod, mod->module_core_rw);
86181 return -ENOMEM;
86182 }
86183- memset(ptr, 0, mod->init_size);
86184- mod->module_init = ptr;
86185+ memset(ptr, 0, mod->init_size_rw);
86186+ mod->module_init_rw = ptr;
86187 } else
86188- mod->module_init = NULL;
86189+ mod->module_init_rw = NULL;
86190+
86191+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
86192+ kmemleak_not_leak(ptr);
86193+ if (!ptr) {
86194+ if (mod->module_init_rw)
86195+ module_free(mod, mod->module_init_rw);
86196+ module_free(mod, mod->module_core_rw);
86197+ return -ENOMEM;
86198+ }
86199+
86200+ pax_open_kernel();
86201+ memset(ptr, 0, mod->core_size_rx);
86202+ pax_close_kernel();
86203+ mod->module_core_rx = ptr;
86204+
86205+ if (mod->init_size_rx) {
86206+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
86207+ kmemleak_ignore(ptr);
86208+ if (!ptr && mod->init_size_rx) {
86209+ module_free_exec(mod, mod->module_core_rx);
86210+ if (mod->module_init_rw)
86211+ module_free(mod, mod->module_init_rw);
86212+ module_free(mod, mod->module_core_rw);
86213+ return -ENOMEM;
86214+ }
86215+
86216+ pax_open_kernel();
86217+ memset(ptr, 0, mod->init_size_rx);
86218+ pax_close_kernel();
86219+ mod->module_init_rx = ptr;
86220+ } else
86221+ mod->module_init_rx = NULL;
86222
86223 /* Transfer each section which specifies SHF_ALLOC */
86224 pr_debug("final section addresses:\n");
86225@@ -2827,16 +2928,45 @@ static int move_module(struct module *mod, struct load_info *info)
86226 if (!(shdr->sh_flags & SHF_ALLOC))
86227 continue;
86228
86229- if (shdr->sh_entsize & INIT_OFFSET_MASK)
86230- dest = mod->module_init
86231- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
86232- else
86233- dest = mod->module_core + shdr->sh_entsize;
86234+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
86235+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
86236+ dest = mod->module_init_rw
86237+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
86238+ else
86239+ dest = mod->module_init_rx
86240+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
86241+ } else {
86242+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
86243+ dest = mod->module_core_rw + shdr->sh_entsize;
86244+ else
86245+ dest = mod->module_core_rx + shdr->sh_entsize;
86246+ }
86247+
86248+ if (shdr->sh_type != SHT_NOBITS) {
86249+
86250+#ifdef CONFIG_PAX_KERNEXEC
86251+#ifdef CONFIG_X86_64
86252+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
86253+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
86254+#endif
86255+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
86256+ pax_open_kernel();
86257+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
86258+ pax_close_kernel();
86259+ } else
86260+#endif
86261
86262- if (shdr->sh_type != SHT_NOBITS)
86263 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
86264+ }
86265 /* Update sh_addr to point to copy in image. */
86266- shdr->sh_addr = (unsigned long)dest;
86267+
86268+#ifdef CONFIG_PAX_KERNEXEC
86269+ if (shdr->sh_flags & SHF_EXECINSTR)
86270+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
86271+ else
86272+#endif
86273+
86274+ shdr->sh_addr = (unsigned long)dest;
86275 pr_debug("\t0x%lx %s\n",
86276 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
86277 }
86278@@ -2893,12 +3023,12 @@ static void flush_module_icache(const struct module *mod)
86279 * Do it before processing of module parameters, so the module
86280 * can provide parameter accessor functions of its own.
86281 */
86282- if (mod->module_init)
86283- flush_icache_range((unsigned long)mod->module_init,
86284- (unsigned long)mod->module_init
86285- + mod->init_size);
86286- flush_icache_range((unsigned long)mod->module_core,
86287- (unsigned long)mod->module_core + mod->core_size);
86288+ if (mod->module_init_rx)
86289+ flush_icache_range((unsigned long)mod->module_init_rx,
86290+ (unsigned long)mod->module_init_rx
86291+ + mod->init_size_rx);
86292+ flush_icache_range((unsigned long)mod->module_core_rx,
86293+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
86294
86295 set_fs(old_fs);
86296 }
86297@@ -2955,8 +3085,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
86298 static void module_deallocate(struct module *mod, struct load_info *info)
86299 {
86300 percpu_modfree(mod);
86301- module_free(mod, mod->module_init);
86302- module_free(mod, mod->module_core);
86303+ module_free_exec(mod, mod->module_init_rx);
86304+ module_free_exec(mod, mod->module_core_rx);
86305+ module_free(mod, mod->module_init_rw);
86306+ module_free(mod, mod->module_core_rw);
86307 }
86308
86309 int __weak module_finalize(const Elf_Ehdr *hdr,
86310@@ -2969,7 +3101,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
86311 static int post_relocation(struct module *mod, const struct load_info *info)
86312 {
86313 /* Sort exception table now relocations are done. */
86314+ pax_open_kernel();
86315 sort_extable(mod->extable, mod->extable + mod->num_exentries);
86316+ pax_close_kernel();
86317
86318 /* Copy relocated percpu area over. */
86319 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
86320@@ -3023,16 +3157,16 @@ static int do_init_module(struct module *mod)
86321 MODULE_STATE_COMING, mod);
86322
86323 /* Set RO and NX regions for core */
86324- set_section_ro_nx(mod->module_core,
86325- mod->core_text_size,
86326- mod->core_ro_size,
86327- mod->core_size);
86328+ set_section_ro_nx(mod->module_core_rx,
86329+ mod->core_size_rx,
86330+ mod->core_size_rx,
86331+ mod->core_size_rx);
86332
86333 /* Set RO and NX regions for init */
86334- set_section_ro_nx(mod->module_init,
86335- mod->init_text_size,
86336- mod->init_ro_size,
86337- mod->init_size);
86338+ set_section_ro_nx(mod->module_init_rx,
86339+ mod->init_size_rx,
86340+ mod->init_size_rx,
86341+ mod->init_size_rx);
86342
86343 do_mod_ctors(mod);
86344 /* Start the module */
86345@@ -3093,11 +3227,12 @@ static int do_init_module(struct module *mod)
86346 mod->strtab = mod->core_strtab;
86347 #endif
86348 unset_module_init_ro_nx(mod);
86349- module_free(mod, mod->module_init);
86350- mod->module_init = NULL;
86351- mod->init_size = 0;
86352- mod->init_ro_size = 0;
86353- mod->init_text_size = 0;
86354+ module_free(mod, mod->module_init_rw);
86355+ module_free_exec(mod, mod->module_init_rx);
86356+ mod->module_init_rw = NULL;
86357+ mod->module_init_rx = NULL;
86358+ mod->init_size_rw = 0;
86359+ mod->init_size_rx = 0;
86360 mutex_unlock(&module_mutex);
86361 wake_up_all(&module_wq);
86362
86363@@ -3240,9 +3375,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
86364 if (err)
86365 goto free_unload;
86366
86367+ /* Now copy in args */
86368+ mod->args = strndup_user(uargs, ~0UL >> 1);
86369+ if (IS_ERR(mod->args)) {
86370+ err = PTR_ERR(mod->args);
86371+ goto free_unload;
86372+ }
86373+
86374 /* Set up MODINFO_ATTR fields */
86375 setup_modinfo(mod, info);
86376
86377+#ifdef CONFIG_GRKERNSEC_MODHARDEN
86378+ {
86379+ char *p, *p2;
86380+
86381+ if (strstr(mod->args, "grsec_modharden_netdev")) {
86382+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
86383+ err = -EPERM;
86384+ goto free_modinfo;
86385+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
86386+ p += sizeof("grsec_modharden_normal") - 1;
86387+ p2 = strstr(p, "_");
86388+ if (p2) {
86389+ *p2 = '\0';
86390+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
86391+ *p2 = '_';
86392+ }
86393+ err = -EPERM;
86394+ goto free_modinfo;
86395+ }
86396+ }
86397+#endif
86398+
86399 /* Fix up syms, so that st_value is a pointer to location. */
86400 err = simplify_symbols(mod, info);
86401 if (err < 0)
86402@@ -3258,13 +3422,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
86403
86404 flush_module_icache(mod);
86405
86406- /* Now copy in args */
86407- mod->args = strndup_user(uargs, ~0UL >> 1);
86408- if (IS_ERR(mod->args)) {
86409- err = PTR_ERR(mod->args);
86410- goto free_arch_cleanup;
86411- }
86412-
86413 dynamic_debug_setup(info->debug, info->num_debug);
86414
86415 /* Finally it's fully formed, ready to start executing. */
86416@@ -3299,11 +3456,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
86417 ddebug_cleanup:
86418 dynamic_debug_remove(info->debug);
86419 synchronize_sched();
86420- kfree(mod->args);
86421- free_arch_cleanup:
86422 module_arch_cleanup(mod);
86423 free_modinfo:
86424 free_modinfo(mod);
86425+ kfree(mod->args);
86426 free_unload:
86427 module_unload_free(mod);
86428 unlink_mod:
86429@@ -3386,10 +3542,16 @@ static const char *get_ksymbol(struct module *mod,
86430 unsigned long nextval;
86431
86432 /* At worse, next value is at end of module */
86433- if (within_module_init(addr, mod))
86434- nextval = (unsigned long)mod->module_init+mod->init_text_size;
86435+ if (within_module_init_rx(addr, mod))
86436+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
86437+ else if (within_module_init_rw(addr, mod))
86438+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
86439+ else if (within_module_core_rx(addr, mod))
86440+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
86441+ else if (within_module_core_rw(addr, mod))
86442+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
86443 else
86444- nextval = (unsigned long)mod->module_core+mod->core_text_size;
86445+ return NULL;
86446
86447 /* Scan for closest preceding symbol, and next symbol. (ELF
86448 starts real symbols at 1). */
86449@@ -3640,7 +3802,7 @@ static int m_show(struct seq_file *m, void *p)
86450 return 0;
86451
86452 seq_printf(m, "%s %u",
86453- mod->name, mod->init_size + mod->core_size);
86454+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
86455 print_unload_info(m, mod);
86456
86457 /* Informative for users. */
86458@@ -3649,7 +3811,7 @@ static int m_show(struct seq_file *m, void *p)
86459 mod->state == MODULE_STATE_COMING ? "Loading":
86460 "Live");
86461 /* Used by oprofile and other similar tools. */
86462- seq_printf(m, " 0x%pK", mod->module_core);
86463+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
86464
86465 /* Taints info */
86466 if (mod->taints)
86467@@ -3685,7 +3847,17 @@ static const struct file_operations proc_modules_operations = {
86468
86469 static int __init proc_modules_init(void)
86470 {
86471+#ifndef CONFIG_GRKERNSEC_HIDESYM
86472+#ifdef CONFIG_GRKERNSEC_PROC_USER
86473+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
86474+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86475+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
86476+#else
86477 proc_create("modules", 0, NULL, &proc_modules_operations);
86478+#endif
86479+#else
86480+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
86481+#endif
86482 return 0;
86483 }
86484 module_init(proc_modules_init);
86485@@ -3746,14 +3918,14 @@ struct module *__module_address(unsigned long addr)
86486 {
86487 struct module *mod;
86488
86489- if (addr < module_addr_min || addr > module_addr_max)
86490+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
86491+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
86492 return NULL;
86493
86494 list_for_each_entry_rcu(mod, &modules, list) {
86495 if (mod->state == MODULE_STATE_UNFORMED)
86496 continue;
86497- if (within_module_core(addr, mod)
86498- || within_module_init(addr, mod))
86499+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
86500 return mod;
86501 }
86502 return NULL;
86503@@ -3788,11 +3960,20 @@ bool is_module_text_address(unsigned long addr)
86504 */
86505 struct module *__module_text_address(unsigned long addr)
86506 {
86507- struct module *mod = __module_address(addr);
86508+ struct module *mod;
86509+
86510+#ifdef CONFIG_X86_32
86511+ addr = ktla_ktva(addr);
86512+#endif
86513+
86514+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
86515+ return NULL;
86516+
86517+ mod = __module_address(addr);
86518+
86519 if (mod) {
86520 /* Make sure it's within the text section. */
86521- if (!within(addr, mod->module_init, mod->init_text_size)
86522- && !within(addr, mod->module_core, mod->core_text_size))
86523+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
86524 mod = NULL;
86525 }
86526 return mod;
86527diff --git a/kernel/notifier.c b/kernel/notifier.c
86528index 2d5cc4c..d9ea600 100644
86529--- a/kernel/notifier.c
86530+++ b/kernel/notifier.c
86531@@ -5,6 +5,7 @@
86532 #include <linux/rcupdate.h>
86533 #include <linux/vmalloc.h>
86534 #include <linux/reboot.h>
86535+#include <linux/mm.h>
86536
86537 /*
86538 * Notifier list for kernel code which wants to be called
86539@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
86540 while ((*nl) != NULL) {
86541 if (n->priority > (*nl)->priority)
86542 break;
86543- nl = &((*nl)->next);
86544+ nl = (struct notifier_block **)&((*nl)->next);
86545 }
86546- n->next = *nl;
86547+ pax_open_kernel();
86548+ *(const void **)&n->next = *nl;
86549 rcu_assign_pointer(*nl, n);
86550+ pax_close_kernel();
86551 return 0;
86552 }
86553
86554@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
86555 return 0;
86556 if (n->priority > (*nl)->priority)
86557 break;
86558- nl = &((*nl)->next);
86559+ nl = (struct notifier_block **)&((*nl)->next);
86560 }
86561- n->next = *nl;
86562+ pax_open_kernel();
86563+ *(const void **)&n->next = *nl;
86564 rcu_assign_pointer(*nl, n);
86565+ pax_close_kernel();
86566 return 0;
86567 }
86568
86569@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
86570 {
86571 while ((*nl) != NULL) {
86572 if ((*nl) == n) {
86573+ pax_open_kernel();
86574 rcu_assign_pointer(*nl, n->next);
86575+ pax_close_kernel();
86576 return 0;
86577 }
86578- nl = &((*nl)->next);
86579+ nl = (struct notifier_block **)&((*nl)->next);
86580 }
86581 return -ENOENT;
86582 }
86583diff --git a/kernel/padata.c b/kernel/padata.c
86584index 2abd25d..02c4faa 100644
86585--- a/kernel/padata.c
86586+++ b/kernel/padata.c
86587@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
86588 * seq_nr mod. number of cpus in use.
86589 */
86590
86591- seq_nr = atomic_inc_return(&pd->seq_nr);
86592+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
86593 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
86594
86595 return padata_index_to_cpu(pd, cpu_index);
86596@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
86597 padata_init_pqueues(pd);
86598 padata_init_squeues(pd);
86599 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
86600- atomic_set(&pd->seq_nr, -1);
86601+ atomic_set_unchecked(&pd->seq_nr, -1);
86602 atomic_set(&pd->reorder_objects, 0);
86603 atomic_set(&pd->refcnt, 0);
86604 pd->pinst = pinst;
86605diff --git a/kernel/panic.c b/kernel/panic.c
86606index c00b4ce..a846117 100644
86607--- a/kernel/panic.c
86608+++ b/kernel/panic.c
86609@@ -407,7 +407,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
86610 disable_trace_on_warning();
86611
86612 pr_warn("------------[ cut here ]------------\n");
86613- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
86614+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
86615 raw_smp_processor_id(), current->pid, file, line, caller);
86616
86617 if (args)
86618@@ -461,7 +461,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
86619 */
86620 void __stack_chk_fail(void)
86621 {
86622- panic("stack-protector: Kernel stack is corrupted in: %p\n",
86623+ dump_stack();
86624+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
86625 __builtin_return_address(0));
86626 }
86627 EXPORT_SYMBOL(__stack_chk_fail);
86628diff --git a/kernel/pid.c b/kernel/pid.c
86629index 9b9a266..c20ef80 100644
86630--- a/kernel/pid.c
86631+++ b/kernel/pid.c
86632@@ -33,6 +33,7 @@
86633 #include <linux/rculist.h>
86634 #include <linux/bootmem.h>
86635 #include <linux/hash.h>
86636+#include <linux/security.h>
86637 #include <linux/pid_namespace.h>
86638 #include <linux/init_task.h>
86639 #include <linux/syscalls.h>
86640@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
86641
86642 int pid_max = PID_MAX_DEFAULT;
86643
86644-#define RESERVED_PIDS 300
86645+#define RESERVED_PIDS 500
86646
86647 int pid_max_min = RESERVED_PIDS + 1;
86648 int pid_max_max = PID_MAX_LIMIT;
86649@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
86650 */
86651 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
86652 {
86653+ struct task_struct *task;
86654+
86655 rcu_lockdep_assert(rcu_read_lock_held(),
86656 "find_task_by_pid_ns() needs rcu_read_lock()"
86657 " protection");
86658- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
86659+
86660+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
86661+
86662+ if (gr_pid_is_chrooted(task))
86663+ return NULL;
86664+
86665+ return task;
86666 }
86667
86668 struct task_struct *find_task_by_vpid(pid_t vnr)
86669@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
86670 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
86671 }
86672
86673+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
86674+{
86675+ rcu_lockdep_assert(rcu_read_lock_held(),
86676+ "find_task_by_pid_ns() needs rcu_read_lock()"
86677+ " protection");
86678+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
86679+}
86680+
86681 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
86682 {
86683 struct pid *pid;
86684diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
86685index 06c62de..b08cc6c 100644
86686--- a/kernel/pid_namespace.c
86687+++ b/kernel/pid_namespace.c
86688@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
86689 void __user *buffer, size_t *lenp, loff_t *ppos)
86690 {
86691 struct pid_namespace *pid_ns = task_active_pid_ns(current);
86692- struct ctl_table tmp = *table;
86693+ ctl_table_no_const tmp = *table;
86694
86695 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
86696 return -EPERM;
86697diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
86698index c7f31aa..2b44977 100644
86699--- a/kernel/posix-cpu-timers.c
86700+++ b/kernel/posix-cpu-timers.c
86701@@ -1521,14 +1521,14 @@ struct k_clock clock_posix_cpu = {
86702
86703 static __init int init_posix_cpu_timers(void)
86704 {
86705- struct k_clock process = {
86706+ static struct k_clock process = {
86707 .clock_getres = process_cpu_clock_getres,
86708 .clock_get = process_cpu_clock_get,
86709 .timer_create = process_cpu_timer_create,
86710 .nsleep = process_cpu_nsleep,
86711 .nsleep_restart = process_cpu_nsleep_restart,
86712 };
86713- struct k_clock thread = {
86714+ static struct k_clock thread = {
86715 .clock_getres = thread_cpu_clock_getres,
86716 .clock_get = thread_cpu_clock_get,
86717 .timer_create = thread_cpu_timer_create,
86718diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
86719index 424c2d4..679242f 100644
86720--- a/kernel/posix-timers.c
86721+++ b/kernel/posix-timers.c
86722@@ -43,6 +43,7 @@
86723 #include <linux/hash.h>
86724 #include <linux/posix-clock.h>
86725 #include <linux/posix-timers.h>
86726+#include <linux/grsecurity.h>
86727 #include <linux/syscalls.h>
86728 #include <linux/wait.h>
86729 #include <linux/workqueue.h>
86730@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
86731 * which we beg off on and pass to do_sys_settimeofday().
86732 */
86733
86734-static struct k_clock posix_clocks[MAX_CLOCKS];
86735+static struct k_clock *posix_clocks[MAX_CLOCKS];
86736
86737 /*
86738 * These ones are defined below.
86739@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
86740 */
86741 static __init int init_posix_timers(void)
86742 {
86743- struct k_clock clock_realtime = {
86744+ static struct k_clock clock_realtime = {
86745 .clock_getres = hrtimer_get_res,
86746 .clock_get = posix_clock_realtime_get,
86747 .clock_set = posix_clock_realtime_set,
86748@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
86749 .timer_get = common_timer_get,
86750 .timer_del = common_timer_del,
86751 };
86752- struct k_clock clock_monotonic = {
86753+ static struct k_clock clock_monotonic = {
86754 .clock_getres = hrtimer_get_res,
86755 .clock_get = posix_ktime_get_ts,
86756 .nsleep = common_nsleep,
86757@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
86758 .timer_get = common_timer_get,
86759 .timer_del = common_timer_del,
86760 };
86761- struct k_clock clock_monotonic_raw = {
86762+ static struct k_clock clock_monotonic_raw = {
86763 .clock_getres = hrtimer_get_res,
86764 .clock_get = posix_get_monotonic_raw,
86765 };
86766- struct k_clock clock_realtime_coarse = {
86767+ static struct k_clock clock_realtime_coarse = {
86768 .clock_getres = posix_get_coarse_res,
86769 .clock_get = posix_get_realtime_coarse,
86770 };
86771- struct k_clock clock_monotonic_coarse = {
86772+ static struct k_clock clock_monotonic_coarse = {
86773 .clock_getres = posix_get_coarse_res,
86774 .clock_get = posix_get_monotonic_coarse,
86775 };
86776- struct k_clock clock_tai = {
86777+ static struct k_clock clock_tai = {
86778 .clock_getres = hrtimer_get_res,
86779 .clock_get = posix_get_tai,
86780 .nsleep = common_nsleep,
86781@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
86782 .timer_get = common_timer_get,
86783 .timer_del = common_timer_del,
86784 };
86785- struct k_clock clock_boottime = {
86786+ static struct k_clock clock_boottime = {
86787 .clock_getres = hrtimer_get_res,
86788 .clock_get = posix_get_boottime,
86789 .nsleep = common_nsleep,
86790@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
86791 return;
86792 }
86793
86794- posix_clocks[clock_id] = *new_clock;
86795+ posix_clocks[clock_id] = new_clock;
86796 }
86797 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
86798
86799@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
86800 return (id & CLOCKFD_MASK) == CLOCKFD ?
86801 &clock_posix_dynamic : &clock_posix_cpu;
86802
86803- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
86804+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
86805 return NULL;
86806- return &posix_clocks[id];
86807+ return posix_clocks[id];
86808 }
86809
86810 static int common_timer_create(struct k_itimer *new_timer)
86811@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
86812 struct k_clock *kc = clockid_to_kclock(which_clock);
86813 struct k_itimer *new_timer;
86814 int error, new_timer_id;
86815- sigevent_t event;
86816+ sigevent_t event = { };
86817 int it_id_set = IT_ID_NOT_SET;
86818
86819 if (!kc)
86820@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
86821 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
86822 return -EFAULT;
86823
86824+ /* only the CLOCK_REALTIME clock can be set, all other clocks
86825+ have their clock_set fptr set to a nosettime dummy function
86826+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
86827+ call common_clock_set, which calls do_sys_settimeofday, which
86828+ we hook
86829+ */
86830+
86831 return kc->clock_set(which_clock, &new_tp);
86832 }
86833
86834diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
86835index 2fac9cc..56fef29 100644
86836--- a/kernel/power/Kconfig
86837+++ b/kernel/power/Kconfig
86838@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
86839 config HIBERNATION
86840 bool "Hibernation (aka 'suspend to disk')"
86841 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
86842+ depends on !GRKERNSEC_KMEM
86843+ depends on !PAX_MEMORY_SANITIZE
86844 select HIBERNATE_CALLBACKS
86845 select LZO_COMPRESS
86846 select LZO_DECOMPRESS
86847diff --git a/kernel/power/process.c b/kernel/power/process.c
86848index 06ec886..9dba35e 100644
86849--- a/kernel/power/process.c
86850+++ b/kernel/power/process.c
86851@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
86852 unsigned int elapsed_msecs;
86853 bool wakeup = false;
86854 int sleep_usecs = USEC_PER_MSEC;
86855+ bool timedout = false;
86856
86857 do_gettimeofday(&start);
86858
86859@@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user_only)
86860
86861 while (true) {
86862 todo = 0;
86863+ if (time_after(jiffies, end_time))
86864+ timedout = true;
86865 read_lock(&tasklist_lock);
86866 do_each_thread(g, p) {
86867 if (p == current || !freeze_task(p))
86868 continue;
86869
86870- if (!freezer_should_skip(p))
86871+ if (!freezer_should_skip(p)) {
86872 todo++;
86873+ if (timedout) {
86874+ printk(KERN_ERR "Task refusing to freeze:\n");
86875+ sched_show_task(p);
86876+ }
86877+ }
86878 } while_each_thread(g, p);
86879 read_unlock(&tasklist_lock);
86880
86881@@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user_only)
86882 todo += wq_busy;
86883 }
86884
86885- if (!todo || time_after(jiffies, end_time))
86886+ if (!todo || timedout)
86887 break;
86888
86889 if (pm_wakeup_pending()) {
86890diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
86891index be7c86b..c741464 100644
86892--- a/kernel/printk/printk.c
86893+++ b/kernel/printk/printk.c
86894@@ -385,6 +385,11 @@ static int check_syslog_permissions(int type, bool from_file)
86895 if (from_file && type != SYSLOG_ACTION_OPEN)
86896 return 0;
86897
86898+#ifdef CONFIG_GRKERNSEC_DMESG
86899+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
86900+ return -EPERM;
86901+#endif
86902+
86903 if (syslog_action_restricted(type)) {
86904 if (capable(CAP_SYSLOG))
86905 return 0;
86906diff --git a/kernel/profile.c b/kernel/profile.c
86907index 6631e1e..310c266 100644
86908--- a/kernel/profile.c
86909+++ b/kernel/profile.c
86910@@ -37,7 +37,7 @@ struct profile_hit {
86911 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
86912 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
86913
86914-static atomic_t *prof_buffer;
86915+static atomic_unchecked_t *prof_buffer;
86916 static unsigned long prof_len, prof_shift;
86917
86918 int prof_on __read_mostly;
86919@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
86920 hits[i].pc = 0;
86921 continue;
86922 }
86923- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
86924+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
86925 hits[i].hits = hits[i].pc = 0;
86926 }
86927 }
86928@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
86929 * Add the current hit(s) and flush the write-queue out
86930 * to the global buffer:
86931 */
86932- atomic_add(nr_hits, &prof_buffer[pc]);
86933+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
86934 for (i = 0; i < NR_PROFILE_HIT; ++i) {
86935- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
86936+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
86937 hits[i].pc = hits[i].hits = 0;
86938 }
86939 out:
86940@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
86941 {
86942 unsigned long pc;
86943 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
86944- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
86945+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
86946 }
86947 #endif /* !CONFIG_SMP */
86948
86949@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
86950 return -EFAULT;
86951 buf++; p++; count--; read++;
86952 }
86953- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
86954+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
86955 if (copy_to_user(buf, (void *)pnt, count))
86956 return -EFAULT;
86957 read += count;
86958@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
86959 }
86960 #endif
86961 profile_discard_flip_buffers();
86962- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
86963+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
86964 return count;
86965 }
86966
86967diff --git a/kernel/ptrace.c b/kernel/ptrace.c
86968index 1f4bcb3..99cf7ab 100644
86969--- a/kernel/ptrace.c
86970+++ b/kernel/ptrace.c
86971@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
86972 if (seize)
86973 flags |= PT_SEIZED;
86974 rcu_read_lock();
86975- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
86976+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
86977 flags |= PT_PTRACE_CAP;
86978 rcu_read_unlock();
86979 task->ptrace = flags;
86980@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
86981 break;
86982 return -EIO;
86983 }
86984- if (copy_to_user(dst, buf, retval))
86985+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
86986 return -EFAULT;
86987 copied += retval;
86988 src += retval;
86989@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
86990 bool seized = child->ptrace & PT_SEIZED;
86991 int ret = -EIO;
86992 siginfo_t siginfo, *si;
86993- void __user *datavp = (void __user *) data;
86994+ void __user *datavp = (__force void __user *) data;
86995 unsigned long __user *datalp = datavp;
86996 unsigned long flags;
86997
86998@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
86999 goto out;
87000 }
87001
87002+ if (gr_handle_ptrace(child, request)) {
87003+ ret = -EPERM;
87004+ goto out_put_task_struct;
87005+ }
87006+
87007 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
87008 ret = ptrace_attach(child, request, addr, data);
87009 /*
87010 * Some architectures need to do book-keeping after
87011 * a ptrace attach.
87012 */
87013- if (!ret)
87014+ if (!ret) {
87015 arch_ptrace_attach(child);
87016+ gr_audit_ptrace(child);
87017+ }
87018 goto out_put_task_struct;
87019 }
87020
87021@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
87022 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
87023 if (copied != sizeof(tmp))
87024 return -EIO;
87025- return put_user(tmp, (unsigned long __user *)data);
87026+ return put_user(tmp, (__force unsigned long __user *)data);
87027 }
87028
87029 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
87030@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
87031 }
87032
87033 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
87034- compat_long_t addr, compat_long_t data)
87035+ compat_ulong_t addr, compat_ulong_t data)
87036 {
87037 struct task_struct *child;
87038 long ret;
87039@@ -1197,14 +1204,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
87040 goto out;
87041 }
87042
87043+ if (gr_handle_ptrace(child, request)) {
87044+ ret = -EPERM;
87045+ goto out_put_task_struct;
87046+ }
87047+
87048 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
87049 ret = ptrace_attach(child, request, addr, data);
87050 /*
87051 * Some architectures need to do book-keeping after
87052 * a ptrace attach.
87053 */
87054- if (!ret)
87055+ if (!ret) {
87056 arch_ptrace_attach(child);
87057+ gr_audit_ptrace(child);
87058+ }
87059 goto out_put_task_struct;
87060 }
87061
87062diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
87063index 01d5ccb..cdcbee6 100644
87064--- a/kernel/rcu/srcu.c
87065+++ b/kernel/rcu/srcu.c
87066@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
87067
87068 idx = ACCESS_ONCE(sp->completed) & 0x1;
87069 preempt_disable();
87070- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
87071+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
87072 smp_mb(); /* B */ /* Avoid leaking the critical section. */
87073- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
87074+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
87075 preempt_enable();
87076 return idx;
87077 }
87078diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
87079index 1254f31..16258dc 100644
87080--- a/kernel/rcu/tiny.c
87081+++ b/kernel/rcu/tiny.c
87082@@ -46,7 +46,7 @@
87083 /* Forward declarations for tiny_plugin.h. */
87084 struct rcu_ctrlblk;
87085 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
87086-static void rcu_process_callbacks(struct softirq_action *unused);
87087+static void rcu_process_callbacks(void);
87088 static void __call_rcu(struct rcu_head *head,
87089 void (*func)(struct rcu_head *rcu),
87090 struct rcu_ctrlblk *rcp);
87091@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
87092 false));
87093 }
87094
87095-static void rcu_process_callbacks(struct softirq_action *unused)
87096+static __latent_entropy void rcu_process_callbacks(void)
87097 {
87098 __rcu_process_callbacks(&rcu_sched_ctrlblk);
87099 __rcu_process_callbacks(&rcu_bh_ctrlblk);
87100diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
87101index 3929cd4..421624d 100644
87102--- a/kernel/rcu/torture.c
87103+++ b/kernel/rcu/torture.c
87104@@ -176,12 +176,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
87105 { 0 };
87106 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
87107 { 0 };
87108-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
87109-static atomic_t n_rcu_torture_alloc;
87110-static atomic_t n_rcu_torture_alloc_fail;
87111-static atomic_t n_rcu_torture_free;
87112-static atomic_t n_rcu_torture_mberror;
87113-static atomic_t n_rcu_torture_error;
87114+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
87115+static atomic_unchecked_t n_rcu_torture_alloc;
87116+static atomic_unchecked_t n_rcu_torture_alloc_fail;
87117+static atomic_unchecked_t n_rcu_torture_free;
87118+static atomic_unchecked_t n_rcu_torture_mberror;
87119+static atomic_unchecked_t n_rcu_torture_error;
87120 static long n_rcu_torture_barrier_error;
87121 static long n_rcu_torture_boost_ktrerror;
87122 static long n_rcu_torture_boost_rterror;
87123@@ -299,11 +299,11 @@ rcu_torture_alloc(void)
87124
87125 spin_lock_bh(&rcu_torture_lock);
87126 if (list_empty(&rcu_torture_freelist)) {
87127- atomic_inc(&n_rcu_torture_alloc_fail);
87128+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
87129 spin_unlock_bh(&rcu_torture_lock);
87130 return NULL;
87131 }
87132- atomic_inc(&n_rcu_torture_alloc);
87133+ atomic_inc_unchecked(&n_rcu_torture_alloc);
87134 p = rcu_torture_freelist.next;
87135 list_del_init(p);
87136 spin_unlock_bh(&rcu_torture_lock);
87137@@ -316,7 +316,7 @@ rcu_torture_alloc(void)
87138 static void
87139 rcu_torture_free(struct rcu_torture *p)
87140 {
87141- atomic_inc(&n_rcu_torture_free);
87142+ atomic_inc_unchecked(&n_rcu_torture_free);
87143 spin_lock_bh(&rcu_torture_lock);
87144 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
87145 spin_unlock_bh(&rcu_torture_lock);
87146@@ -437,7 +437,7 @@ rcu_torture_cb(struct rcu_head *p)
87147 i = rp->rtort_pipe_count;
87148 if (i > RCU_TORTURE_PIPE_LEN)
87149 i = RCU_TORTURE_PIPE_LEN;
87150- atomic_inc(&rcu_torture_wcount[i]);
87151+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
87152 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
87153 rp->rtort_mbtest = 0;
87154 rcu_torture_free(rp);
87155@@ -827,7 +827,7 @@ rcu_torture_writer(void *arg)
87156 i = old_rp->rtort_pipe_count;
87157 if (i > RCU_TORTURE_PIPE_LEN)
87158 i = RCU_TORTURE_PIPE_LEN;
87159- atomic_inc(&rcu_torture_wcount[i]);
87160+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
87161 old_rp->rtort_pipe_count++;
87162 if (gp_normal == gp_exp)
87163 exp = !!(rcu_random(&rand) & 0x80);
87164@@ -845,7 +845,7 @@ rcu_torture_writer(void *arg)
87165 i = rp->rtort_pipe_count;
87166 if (i > RCU_TORTURE_PIPE_LEN)
87167 i = RCU_TORTURE_PIPE_LEN;
87168- atomic_inc(&rcu_torture_wcount[i]);
87169+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
87170 if (++rp->rtort_pipe_count >=
87171 RCU_TORTURE_PIPE_LEN) {
87172 rp->rtort_mbtest = 0;
87173@@ -944,7 +944,7 @@ static void rcu_torture_timer(unsigned long unused)
87174 return;
87175 }
87176 if (p->rtort_mbtest == 0)
87177- atomic_inc(&n_rcu_torture_mberror);
87178+ atomic_inc_unchecked(&n_rcu_torture_mberror);
87179 spin_lock(&rand_lock);
87180 cur_ops->read_delay(&rand);
87181 n_rcu_torture_timers++;
87182@@ -1014,7 +1014,7 @@ rcu_torture_reader(void *arg)
87183 continue;
87184 }
87185 if (p->rtort_mbtest == 0)
87186- atomic_inc(&n_rcu_torture_mberror);
87187+ atomic_inc_unchecked(&n_rcu_torture_mberror);
87188 cur_ops->read_delay(&rand);
87189 preempt_disable();
87190 pipe_count = p->rtort_pipe_count;
87191@@ -1077,11 +1077,11 @@ rcu_torture_printk(char *page)
87192 rcu_torture_current,
87193 rcu_torture_current_version,
87194 list_empty(&rcu_torture_freelist),
87195- atomic_read(&n_rcu_torture_alloc),
87196- atomic_read(&n_rcu_torture_alloc_fail),
87197- atomic_read(&n_rcu_torture_free));
87198+ atomic_read_unchecked(&n_rcu_torture_alloc),
87199+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
87200+ atomic_read_unchecked(&n_rcu_torture_free));
87201 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
87202- atomic_read(&n_rcu_torture_mberror),
87203+ atomic_read_unchecked(&n_rcu_torture_mberror),
87204 n_rcu_torture_boost_ktrerror,
87205 n_rcu_torture_boost_rterror);
87206 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
87207@@ -1100,14 +1100,14 @@ rcu_torture_printk(char *page)
87208 n_barrier_attempts,
87209 n_rcu_torture_barrier_error);
87210 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
87211- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
87212+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
87213 n_rcu_torture_barrier_error != 0 ||
87214 n_rcu_torture_boost_ktrerror != 0 ||
87215 n_rcu_torture_boost_rterror != 0 ||
87216 n_rcu_torture_boost_failure != 0 ||
87217 i > 1) {
87218 cnt += sprintf(&page[cnt], "!!! ");
87219- atomic_inc(&n_rcu_torture_error);
87220+ atomic_inc_unchecked(&n_rcu_torture_error);
87221 WARN_ON_ONCE(1);
87222 }
87223 cnt += sprintf(&page[cnt], "Reader Pipe: ");
87224@@ -1121,7 +1121,7 @@ rcu_torture_printk(char *page)
87225 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
87226 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
87227 cnt += sprintf(&page[cnt], " %d",
87228- atomic_read(&rcu_torture_wcount[i]));
87229+ atomic_read_unchecked(&rcu_torture_wcount[i]));
87230 }
87231 cnt += sprintf(&page[cnt], "\n");
87232 if (cur_ops->stats)
87233@@ -1836,7 +1836,7 @@ rcu_torture_cleanup(void)
87234
87235 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
87236
87237- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
87238+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
87239 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
87240 else if (n_online_successes != n_online_attempts ||
87241 n_offline_successes != n_offline_attempts)
87242@@ -1958,18 +1958,18 @@ rcu_torture_init(void)
87243
87244 rcu_torture_current = NULL;
87245 rcu_torture_current_version = 0;
87246- atomic_set(&n_rcu_torture_alloc, 0);
87247- atomic_set(&n_rcu_torture_alloc_fail, 0);
87248- atomic_set(&n_rcu_torture_free, 0);
87249- atomic_set(&n_rcu_torture_mberror, 0);
87250- atomic_set(&n_rcu_torture_error, 0);
87251+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
87252+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
87253+ atomic_set_unchecked(&n_rcu_torture_free, 0);
87254+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
87255+ atomic_set_unchecked(&n_rcu_torture_error, 0);
87256 n_rcu_torture_barrier_error = 0;
87257 n_rcu_torture_boost_ktrerror = 0;
87258 n_rcu_torture_boost_rterror = 0;
87259 n_rcu_torture_boost_failure = 0;
87260 n_rcu_torture_boosts = 0;
87261 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
87262- atomic_set(&rcu_torture_wcount[i], 0);
87263+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
87264 for_each_possible_cpu(cpu) {
87265 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
87266 per_cpu(rcu_torture_count, cpu)[i] = 0;
87267diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
87268index dd08198..5ccccbe 100644
87269--- a/kernel/rcu/tree.c
87270+++ b/kernel/rcu/tree.c
87271@@ -383,9 +383,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
87272 rcu_prepare_for_idle(smp_processor_id());
87273 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
87274 smp_mb__before_atomic_inc(); /* See above. */
87275- atomic_inc(&rdtp->dynticks);
87276+ atomic_inc_unchecked(&rdtp->dynticks);
87277 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
87278- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
87279+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
87280
87281 /*
87282 * It is illegal to enter an extended quiescent state while
87283@@ -502,10 +502,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
87284 int user)
87285 {
87286 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
87287- atomic_inc(&rdtp->dynticks);
87288+ atomic_inc_unchecked(&rdtp->dynticks);
87289 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
87290 smp_mb__after_atomic_inc(); /* See above. */
87291- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
87292+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
87293 rcu_cleanup_after_idle(smp_processor_id());
87294 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
87295 if (!user && !is_idle_task(current)) {
87296@@ -625,14 +625,14 @@ void rcu_nmi_enter(void)
87297 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
87298
87299 if (rdtp->dynticks_nmi_nesting == 0 &&
87300- (atomic_read(&rdtp->dynticks) & 0x1))
87301+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
87302 return;
87303 rdtp->dynticks_nmi_nesting++;
87304 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
87305- atomic_inc(&rdtp->dynticks);
87306+ atomic_inc_unchecked(&rdtp->dynticks);
87307 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
87308 smp_mb__after_atomic_inc(); /* See above. */
87309- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
87310+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
87311 }
87312
87313 /**
87314@@ -651,9 +651,9 @@ void rcu_nmi_exit(void)
87315 return;
87316 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
87317 smp_mb__before_atomic_inc(); /* See above. */
87318- atomic_inc(&rdtp->dynticks);
87319+ atomic_inc_unchecked(&rdtp->dynticks);
87320 smp_mb__after_atomic_inc(); /* Force delay to next write. */
87321- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
87322+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
87323 }
87324
87325 /**
87326@@ -666,7 +666,7 @@ void rcu_nmi_exit(void)
87327 */
87328 bool notrace __rcu_is_watching(void)
87329 {
87330- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
87331+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
87332 }
87333
87334 /**
87335@@ -749,7 +749,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
87336 static int dyntick_save_progress_counter(struct rcu_data *rdp,
87337 bool *isidle, unsigned long *maxj)
87338 {
87339- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
87340+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
87341 rcu_sysidle_check_cpu(rdp, isidle, maxj);
87342 return (rdp->dynticks_snap & 0x1) == 0;
87343 }
87344@@ -766,7 +766,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
87345 unsigned int curr;
87346 unsigned int snap;
87347
87348- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
87349+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
87350 snap = (unsigned int)rdp->dynticks_snap;
87351
87352 /*
87353@@ -1412,9 +1412,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
87354 rdp = this_cpu_ptr(rsp->rda);
87355 rcu_preempt_check_blocked_tasks(rnp);
87356 rnp->qsmask = rnp->qsmaskinit;
87357- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
87358+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
87359 WARN_ON_ONCE(rnp->completed != rsp->completed);
87360- ACCESS_ONCE(rnp->completed) = rsp->completed;
87361+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
87362 if (rnp == rdp->mynode)
87363 __note_gp_changes(rsp, rnp, rdp);
87364 rcu_preempt_boost_start_gp(rnp);
87365@@ -1505,7 +1505,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
87366 */
87367 rcu_for_each_node_breadth_first(rsp, rnp) {
87368 raw_spin_lock_irq(&rnp->lock);
87369- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
87370+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
87371 rdp = this_cpu_ptr(rsp->rda);
87372 if (rnp == rdp->mynode)
87373 __note_gp_changes(rsp, rnp, rdp);
87374@@ -1865,7 +1865,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
87375 rsp->qlen += rdp->qlen;
87376 rdp->n_cbs_orphaned += rdp->qlen;
87377 rdp->qlen_lazy = 0;
87378- ACCESS_ONCE(rdp->qlen) = 0;
87379+ ACCESS_ONCE_RW(rdp->qlen) = 0;
87380 }
87381
87382 /*
87383@@ -2111,7 +2111,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
87384 }
87385 smp_mb(); /* List handling before counting for rcu_barrier(). */
87386 rdp->qlen_lazy -= count_lazy;
87387- ACCESS_ONCE(rdp->qlen) -= count;
87388+ ACCESS_ONCE_RW(rdp->qlen) -= count;
87389 rdp->n_cbs_invoked += count;
87390
87391 /* Reinstate batch limit if we have worked down the excess. */
87392@@ -2308,7 +2308,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
87393 /*
87394 * Do RCU core processing for the current CPU.
87395 */
87396-static void rcu_process_callbacks(struct softirq_action *unused)
87397+static void rcu_process_callbacks(void)
87398 {
87399 struct rcu_state *rsp;
87400
87401@@ -2415,7 +2415,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
87402 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
87403 if (debug_rcu_head_queue(head)) {
87404 /* Probable double call_rcu(), so leak the callback. */
87405- ACCESS_ONCE(head->func) = rcu_leak_callback;
87406+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
87407 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
87408 return;
87409 }
87410@@ -2443,7 +2443,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
87411 local_irq_restore(flags);
87412 return;
87413 }
87414- ACCESS_ONCE(rdp->qlen)++;
87415+ ACCESS_ONCE_RW(rdp->qlen)++;
87416 if (lazy)
87417 rdp->qlen_lazy++;
87418 else
87419@@ -2652,11 +2652,11 @@ void synchronize_sched_expedited(void)
87420 * counter wrap on a 32-bit system. Quite a few more CPUs would of
87421 * course be required on a 64-bit system.
87422 */
87423- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
87424+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
87425 (ulong)atomic_long_read(&rsp->expedited_done) +
87426 ULONG_MAX / 8)) {
87427 synchronize_sched();
87428- atomic_long_inc(&rsp->expedited_wrap);
87429+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
87430 return;
87431 }
87432
87433@@ -2664,7 +2664,7 @@ void synchronize_sched_expedited(void)
87434 * Take a ticket. Note that atomic_inc_return() implies a
87435 * full memory barrier.
87436 */
87437- snap = atomic_long_inc_return(&rsp->expedited_start);
87438+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
87439 firstsnap = snap;
87440 get_online_cpus();
87441 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
87442@@ -2677,14 +2677,14 @@ void synchronize_sched_expedited(void)
87443 synchronize_sched_expedited_cpu_stop,
87444 NULL) == -EAGAIN) {
87445 put_online_cpus();
87446- atomic_long_inc(&rsp->expedited_tryfail);
87447+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
87448
87449 /* Check to see if someone else did our work for us. */
87450 s = atomic_long_read(&rsp->expedited_done);
87451 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
87452 /* ensure test happens before caller kfree */
87453 smp_mb__before_atomic_inc(); /* ^^^ */
87454- atomic_long_inc(&rsp->expedited_workdone1);
87455+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
87456 return;
87457 }
87458
87459@@ -2693,7 +2693,7 @@ void synchronize_sched_expedited(void)
87460 udelay(trycount * num_online_cpus());
87461 } else {
87462 wait_rcu_gp(call_rcu_sched);
87463- atomic_long_inc(&rsp->expedited_normal);
87464+ atomic_long_inc_unchecked(&rsp->expedited_normal);
87465 return;
87466 }
87467
87468@@ -2702,7 +2702,7 @@ void synchronize_sched_expedited(void)
87469 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
87470 /* ensure test happens before caller kfree */
87471 smp_mb__before_atomic_inc(); /* ^^^ */
87472- atomic_long_inc(&rsp->expedited_workdone2);
87473+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
87474 return;
87475 }
87476
87477@@ -2714,10 +2714,10 @@ void synchronize_sched_expedited(void)
87478 * period works for us.
87479 */
87480 get_online_cpus();
87481- snap = atomic_long_read(&rsp->expedited_start);
87482+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
87483 smp_mb(); /* ensure read is before try_stop_cpus(). */
87484 }
87485- atomic_long_inc(&rsp->expedited_stoppedcpus);
87486+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
87487
87488 /*
87489 * Everyone up to our most recent fetch is covered by our grace
87490@@ -2726,16 +2726,16 @@ void synchronize_sched_expedited(void)
87491 * than we did already did their update.
87492 */
87493 do {
87494- atomic_long_inc(&rsp->expedited_done_tries);
87495+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
87496 s = atomic_long_read(&rsp->expedited_done);
87497 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
87498 /* ensure test happens before caller kfree */
87499 smp_mb__before_atomic_inc(); /* ^^^ */
87500- atomic_long_inc(&rsp->expedited_done_lost);
87501+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
87502 break;
87503 }
87504 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
87505- atomic_long_inc(&rsp->expedited_done_exit);
87506+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
87507
87508 put_online_cpus();
87509 }
87510@@ -2931,7 +2931,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
87511 * ACCESS_ONCE() to prevent the compiler from speculating
87512 * the increment to precede the early-exit check.
87513 */
87514- ACCESS_ONCE(rsp->n_barrier_done)++;
87515+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
87516 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
87517 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
87518 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
87519@@ -2981,7 +2981,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
87520
87521 /* Increment ->n_barrier_done to prevent duplicate work. */
87522 smp_mb(); /* Keep increment after above mechanism. */
87523- ACCESS_ONCE(rsp->n_barrier_done)++;
87524+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
87525 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
87526 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
87527 smp_mb(); /* Keep increment before caller's subsequent code. */
87528@@ -3026,10 +3026,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
87529 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
87530 init_callback_list(rdp);
87531 rdp->qlen_lazy = 0;
87532- ACCESS_ONCE(rdp->qlen) = 0;
87533+ ACCESS_ONCE_RW(rdp->qlen) = 0;
87534 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
87535 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
87536- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
87537+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
87538 rdp->cpu = cpu;
87539 rdp->rsp = rsp;
87540 rcu_boot_init_nocb_percpu_data(rdp);
87541@@ -3063,8 +3063,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
87542 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
87543 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
87544 rcu_sysidle_init_percpu_data(rdp->dynticks);
87545- atomic_set(&rdp->dynticks->dynticks,
87546- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
87547+ atomic_set_unchecked(&rdp->dynticks->dynticks,
87548+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
87549 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
87550
87551 /* Add CPU to rcu_node bitmasks. */
87552diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
87553index 52be957..365ded3 100644
87554--- a/kernel/rcu/tree.h
87555+++ b/kernel/rcu/tree.h
87556@@ -87,11 +87,11 @@ struct rcu_dynticks {
87557 long long dynticks_nesting; /* Track irq/process nesting level. */
87558 /* Process level is worth LLONG_MAX/2. */
87559 int dynticks_nmi_nesting; /* Track NMI nesting level. */
87560- atomic_t dynticks; /* Even value for idle, else odd. */
87561+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
87562 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
87563 long long dynticks_idle_nesting;
87564 /* irq/process nesting level from idle. */
87565- atomic_t dynticks_idle; /* Even value for idle, else odd. */
87566+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
87567 /* "Idle" excludes userspace execution. */
87568 unsigned long dynticks_idle_jiffies;
87569 /* End of last non-NMI non-idle period. */
87570@@ -429,17 +429,17 @@ struct rcu_state {
87571 /* _rcu_barrier(). */
87572 /* End of fields guarded by barrier_mutex. */
87573
87574- atomic_long_t expedited_start; /* Starting ticket. */
87575- atomic_long_t expedited_done; /* Done ticket. */
87576- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
87577- atomic_long_t expedited_tryfail; /* # acquisition failures. */
87578- atomic_long_t expedited_workdone1; /* # done by others #1. */
87579- atomic_long_t expedited_workdone2; /* # done by others #2. */
87580- atomic_long_t expedited_normal; /* # fallbacks to normal. */
87581- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
87582- atomic_long_t expedited_done_tries; /* # tries to update _done. */
87583- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
87584- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
87585+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
87586+ atomic_long_t expedited_done; /* Done ticket. */
87587+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
87588+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
87589+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
87590+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
87591+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
87592+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
87593+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
87594+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
87595+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
87596
87597 unsigned long jiffies_force_qs; /* Time at which to invoke */
87598 /* force_quiescent_state(). */
87599diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
87600index 08a7652..3598c7e 100644
87601--- a/kernel/rcu/tree_plugin.h
87602+++ b/kernel/rcu/tree_plugin.h
87603@@ -749,7 +749,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
87604 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
87605 {
87606 return !rcu_preempted_readers_exp(rnp) &&
87607- ACCESS_ONCE(rnp->expmask) == 0;
87608+ ACCESS_ONCE_RW(rnp->expmask) == 0;
87609 }
87610
87611 /*
87612@@ -905,7 +905,7 @@ void synchronize_rcu_expedited(void)
87613
87614 /* Clean up and exit. */
87615 smp_mb(); /* ensure expedited GP seen before counter increment. */
87616- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
87617+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
87618 unlock_mb_ret:
87619 mutex_unlock(&sync_rcu_preempt_exp_mutex);
87620 mb_ret:
87621@@ -1479,7 +1479,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
87622 free_cpumask_var(cm);
87623 }
87624
87625-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
87626+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
87627 .store = &rcu_cpu_kthread_task,
87628 .thread_should_run = rcu_cpu_kthread_should_run,
87629 .thread_fn = rcu_cpu_kthread,
87630@@ -1946,7 +1946,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
87631 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
87632 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
87633 cpu, ticks_value, ticks_title,
87634- atomic_read(&rdtp->dynticks) & 0xfff,
87635+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
87636 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
87637 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
87638 fast_no_hz);
87639@@ -2109,7 +2109,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
87640
87641 /* Enqueue the callback on the nocb list and update counts. */
87642 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
87643- ACCESS_ONCE(*old_rhpp) = rhp;
87644+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
87645 atomic_long_add(rhcount, &rdp->nocb_q_count);
87646 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
87647
87648@@ -2272,12 +2272,12 @@ static int rcu_nocb_kthread(void *arg)
87649 * Extract queued callbacks, update counts, and wait
87650 * for a grace period to elapse.
87651 */
87652- ACCESS_ONCE(rdp->nocb_head) = NULL;
87653+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
87654 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
87655 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
87656 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
87657- ACCESS_ONCE(rdp->nocb_p_count) += c;
87658- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
87659+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
87660+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
87661 rcu_nocb_wait_gp(rdp);
87662
87663 /* Each pass through the following loop invokes a callback. */
87664@@ -2303,8 +2303,8 @@ static int rcu_nocb_kthread(void *arg)
87665 list = next;
87666 }
87667 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
87668- ACCESS_ONCE(rdp->nocb_p_count) -= c;
87669- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
87670+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
87671+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
87672 rdp->n_nocbs_invoked += c;
87673 }
87674 return 0;
87675@@ -2331,7 +2331,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
87676 t = kthread_run(rcu_nocb_kthread, rdp,
87677 "rcuo%c/%d", rsp->abbr, cpu);
87678 BUG_ON(IS_ERR(t));
87679- ACCESS_ONCE(rdp->nocb_kthread) = t;
87680+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
87681 }
87682 }
87683
87684@@ -2457,11 +2457,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
87685
87686 /* Record start of fully idle period. */
87687 j = jiffies;
87688- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
87689+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
87690 smp_mb__before_atomic_inc();
87691- atomic_inc(&rdtp->dynticks_idle);
87692+ atomic_inc_unchecked(&rdtp->dynticks_idle);
87693 smp_mb__after_atomic_inc();
87694- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
87695+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
87696 }
87697
87698 /*
87699@@ -2526,9 +2526,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
87700
87701 /* Record end of idle period. */
87702 smp_mb__before_atomic_inc();
87703- atomic_inc(&rdtp->dynticks_idle);
87704+ atomic_inc_unchecked(&rdtp->dynticks_idle);
87705 smp_mb__after_atomic_inc();
87706- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
87707+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
87708
87709 /*
87710 * If we are the timekeeping CPU, we are permitted to be non-idle
87711@@ -2569,7 +2569,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
87712 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
87713
87714 /* Pick up current idle and NMI-nesting counter and check. */
87715- cur = atomic_read(&rdtp->dynticks_idle);
87716+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
87717 if (cur & 0x1) {
87718 *isidle = false; /* We are not idle! */
87719 return;
87720@@ -2632,7 +2632,7 @@ static void rcu_sysidle(unsigned long j)
87721 case RCU_SYSIDLE_NOT:
87722
87723 /* First time all are idle, so note a short idle period. */
87724- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
87725+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
87726 break;
87727
87728 case RCU_SYSIDLE_SHORT:
87729@@ -2669,7 +2669,7 @@ static void rcu_sysidle(unsigned long j)
87730 static void rcu_sysidle_cancel(void)
87731 {
87732 smp_mb();
87733- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
87734+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
87735 }
87736
87737 /*
87738@@ -2717,7 +2717,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
87739 smp_mb(); /* grace period precedes setting inuse. */
87740
87741 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
87742- ACCESS_ONCE(rshp->inuse) = 0;
87743+ ACCESS_ONCE_RW(rshp->inuse) = 0;
87744 }
87745
87746 /*
87747diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
87748index 3596797..f78391c 100644
87749--- a/kernel/rcu/tree_trace.c
87750+++ b/kernel/rcu/tree_trace.c
87751@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
87752 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
87753 rdp->passed_quiesce, rdp->qs_pending);
87754 seq_printf(m, " dt=%d/%llx/%d df=%lu",
87755- atomic_read(&rdp->dynticks->dynticks),
87756+ atomic_read_unchecked(&rdp->dynticks->dynticks),
87757 rdp->dynticks->dynticks_nesting,
87758 rdp->dynticks->dynticks_nmi_nesting,
87759 rdp->dynticks_fqs);
87760@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
87761 struct rcu_state *rsp = (struct rcu_state *)m->private;
87762
87763 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
87764- atomic_long_read(&rsp->expedited_start),
87765+ atomic_long_read_unchecked(&rsp->expedited_start),
87766 atomic_long_read(&rsp->expedited_done),
87767- atomic_long_read(&rsp->expedited_wrap),
87768- atomic_long_read(&rsp->expedited_tryfail),
87769- atomic_long_read(&rsp->expedited_workdone1),
87770- atomic_long_read(&rsp->expedited_workdone2),
87771- atomic_long_read(&rsp->expedited_normal),
87772- atomic_long_read(&rsp->expedited_stoppedcpus),
87773- atomic_long_read(&rsp->expedited_done_tries),
87774- atomic_long_read(&rsp->expedited_done_lost),
87775- atomic_long_read(&rsp->expedited_done_exit));
87776+ atomic_long_read_unchecked(&rsp->expedited_wrap),
87777+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
87778+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
87779+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
87780+ atomic_long_read_unchecked(&rsp->expedited_normal),
87781+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
87782+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
87783+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
87784+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
87785 return 0;
87786 }
87787
87788diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
87789index 6cb3dff..dc5710f 100644
87790--- a/kernel/rcu/update.c
87791+++ b/kernel/rcu/update.c
87792@@ -318,10 +318,10 @@ int rcu_jiffies_till_stall_check(void)
87793 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
87794 */
87795 if (till_stall_check < 3) {
87796- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
87797+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
87798 till_stall_check = 3;
87799 } else if (till_stall_check > 300) {
87800- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
87801+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
87802 till_stall_check = 300;
87803 }
87804 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
87805diff --git a/kernel/resource.c b/kernel/resource.c
87806index 3f285dc..5755f62 100644
87807--- a/kernel/resource.c
87808+++ b/kernel/resource.c
87809@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
87810
87811 static int __init ioresources_init(void)
87812 {
87813+#ifdef CONFIG_GRKERNSEC_PROC_ADD
87814+#ifdef CONFIG_GRKERNSEC_PROC_USER
87815+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
87816+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
87817+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
87818+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
87819+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
87820+#endif
87821+#else
87822 proc_create("ioports", 0, NULL, &proc_ioports_operations);
87823 proc_create("iomem", 0, NULL, &proc_iomem_operations);
87824+#endif
87825 return 0;
87826 }
87827 __initcall(ioresources_init);
87828diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
87829index 4a07353..66b5291 100644
87830--- a/kernel/sched/auto_group.c
87831+++ b/kernel/sched/auto_group.c
87832@@ -11,7 +11,7 @@
87833
87834 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
87835 static struct autogroup autogroup_default;
87836-static atomic_t autogroup_seq_nr;
87837+static atomic_unchecked_t autogroup_seq_nr;
87838
87839 void __init autogroup_init(struct task_struct *init_task)
87840 {
87841@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
87842
87843 kref_init(&ag->kref);
87844 init_rwsem(&ag->lock);
87845- ag->id = atomic_inc_return(&autogroup_seq_nr);
87846+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
87847 ag->tg = tg;
87848 #ifdef CONFIG_RT_GROUP_SCHED
87849 /*
87850diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
87851index a63f4dc..349bbb0 100644
87852--- a/kernel/sched/completion.c
87853+++ b/kernel/sched/completion.c
87854@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
87855 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
87856 * or number of jiffies left till timeout) if completed.
87857 */
87858-long __sched
87859+long __sched __intentional_overflow(-1)
87860 wait_for_completion_interruptible_timeout(struct completion *x,
87861 unsigned long timeout)
87862 {
87863@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
87864 *
87865 * Return: -ERESTARTSYS if interrupted, 0 if completed.
87866 */
87867-int __sched wait_for_completion_killable(struct completion *x)
87868+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
87869 {
87870 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
87871 if (t == -ERESTARTSYS)
87872@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
87873 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
87874 * or number of jiffies left till timeout) if completed.
87875 */
87876-long __sched
87877+long __sched __intentional_overflow(-1)
87878 wait_for_completion_killable_timeout(struct completion *x,
87879 unsigned long timeout)
87880 {
87881diff --git a/kernel/sched/core.c b/kernel/sched/core.c
87882index a88f4a4..9d57ac9 100644
87883--- a/kernel/sched/core.c
87884+++ b/kernel/sched/core.c
87885@@ -2871,6 +2871,8 @@ int can_nice(const struct task_struct *p, const int nice)
87886 /* convert nice value [19,-20] to rlimit style value [1,40] */
87887 int nice_rlim = 20 - nice;
87888
87889+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
87890+
87891 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
87892 capable(CAP_SYS_NICE));
87893 }
87894@@ -2904,7 +2906,8 @@ SYSCALL_DEFINE1(nice, int, increment)
87895 if (nice > 19)
87896 nice = 19;
87897
87898- if (increment < 0 && !can_nice(current, nice))
87899+ if (increment < 0 && (!can_nice(current, nice) ||
87900+ gr_handle_chroot_nice()))
87901 return -EPERM;
87902
87903 retval = security_task_setnice(current, nice);
87904@@ -3066,6 +3069,7 @@ recheck:
87905 unsigned long rlim_rtprio =
87906 task_rlimit(p, RLIMIT_RTPRIO);
87907
87908+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
87909 /* can't set/change the rt policy */
87910 if (policy != p->policy && !rlim_rtprio)
87911 return -EPERM;
87912@@ -4232,7 +4236,7 @@ static void migrate_tasks(unsigned int dead_cpu)
87913
87914 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
87915
87916-static struct ctl_table sd_ctl_dir[] = {
87917+static ctl_table_no_const sd_ctl_dir[] __read_only = {
87918 {
87919 .procname = "sched_domain",
87920 .mode = 0555,
87921@@ -4249,17 +4253,17 @@ static struct ctl_table sd_ctl_root[] = {
87922 {}
87923 };
87924
87925-static struct ctl_table *sd_alloc_ctl_entry(int n)
87926+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
87927 {
87928- struct ctl_table *entry =
87929+ ctl_table_no_const *entry =
87930 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
87931
87932 return entry;
87933 }
87934
87935-static void sd_free_ctl_entry(struct ctl_table **tablep)
87936+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
87937 {
87938- struct ctl_table *entry;
87939+ ctl_table_no_const *entry;
87940
87941 /*
87942 * In the intermediate directories, both the child directory and
87943@@ -4267,22 +4271,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
87944 * will always be set. In the lowest directory the names are
87945 * static strings and all have proc handlers.
87946 */
87947- for (entry = *tablep; entry->mode; entry++) {
87948- if (entry->child)
87949- sd_free_ctl_entry(&entry->child);
87950+ for (entry = tablep; entry->mode; entry++) {
87951+ if (entry->child) {
87952+ sd_free_ctl_entry(entry->child);
87953+ pax_open_kernel();
87954+ entry->child = NULL;
87955+ pax_close_kernel();
87956+ }
87957 if (entry->proc_handler == NULL)
87958 kfree(entry->procname);
87959 }
87960
87961- kfree(*tablep);
87962- *tablep = NULL;
87963+ kfree(tablep);
87964 }
87965
87966 static int min_load_idx = 0;
87967 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
87968
87969 static void
87970-set_table_entry(struct ctl_table *entry,
87971+set_table_entry(ctl_table_no_const *entry,
87972 const char *procname, void *data, int maxlen,
87973 umode_t mode, proc_handler *proc_handler,
87974 bool load_idx)
87975@@ -4302,7 +4309,7 @@ set_table_entry(struct ctl_table *entry,
87976 static struct ctl_table *
87977 sd_alloc_ctl_domain_table(struct sched_domain *sd)
87978 {
87979- struct ctl_table *table = sd_alloc_ctl_entry(13);
87980+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
87981
87982 if (table == NULL)
87983 return NULL;
87984@@ -4337,9 +4344,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
87985 return table;
87986 }
87987
87988-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
87989+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
87990 {
87991- struct ctl_table *entry, *table;
87992+ ctl_table_no_const *entry, *table;
87993 struct sched_domain *sd;
87994 int domain_num = 0, i;
87995 char buf[32];
87996@@ -4366,11 +4373,13 @@ static struct ctl_table_header *sd_sysctl_header;
87997 static void register_sched_domain_sysctl(void)
87998 {
87999 int i, cpu_num = num_possible_cpus();
88000- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
88001+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
88002 char buf[32];
88003
88004 WARN_ON(sd_ctl_dir[0].child);
88005+ pax_open_kernel();
88006 sd_ctl_dir[0].child = entry;
88007+ pax_close_kernel();
88008
88009 if (entry == NULL)
88010 return;
88011@@ -4393,8 +4402,12 @@ static void unregister_sched_domain_sysctl(void)
88012 if (sd_sysctl_header)
88013 unregister_sysctl_table(sd_sysctl_header);
88014 sd_sysctl_header = NULL;
88015- if (sd_ctl_dir[0].child)
88016- sd_free_ctl_entry(&sd_ctl_dir[0].child);
88017+ if (sd_ctl_dir[0].child) {
88018+ sd_free_ctl_entry(sd_ctl_dir[0].child);
88019+ pax_open_kernel();
88020+ sd_ctl_dir[0].child = NULL;
88021+ pax_close_kernel();
88022+ }
88023 }
88024 #else
88025 static void register_sched_domain_sysctl(void)
88026diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
88027index e64b079..a46bd34 100644
88028--- a/kernel/sched/fair.c
88029+++ b/kernel/sched/fair.c
88030@@ -1652,7 +1652,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
88031
88032 static void reset_ptenuma_scan(struct task_struct *p)
88033 {
88034- ACCESS_ONCE(p->mm->numa_scan_seq)++;
88035+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
88036 p->mm->numa_scan_offset = 0;
88037 }
88038
88039@@ -6863,7 +6863,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
88040 * run_rebalance_domains is triggered when needed from the scheduler tick.
88041 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
88042 */
88043-static void run_rebalance_domains(struct softirq_action *h)
88044+static __latent_entropy void run_rebalance_domains(void)
88045 {
88046 int this_cpu = smp_processor_id();
88047 struct rq *this_rq = cpu_rq(this_cpu);
88048diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
88049index 88c85b2..a1dec86 100644
88050--- a/kernel/sched/sched.h
88051+++ b/kernel/sched/sched.h
88052@@ -1035,7 +1035,7 @@ struct sched_class {
88053 #ifdef CONFIG_FAIR_GROUP_SCHED
88054 void (*task_move_group) (struct task_struct *p, int on_rq);
88055 #endif
88056-};
88057+} __do_const;
88058
88059 #define sched_class_highest (&stop_sched_class)
88060 #define for_each_class(class) \
88061diff --git a/kernel/signal.c b/kernel/signal.c
88062index 940b30e..7fd6041 100644
88063--- a/kernel/signal.c
88064+++ b/kernel/signal.c
88065@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
88066
88067 int print_fatal_signals __read_mostly;
88068
88069-static void __user *sig_handler(struct task_struct *t, int sig)
88070+static __sighandler_t sig_handler(struct task_struct *t, int sig)
88071 {
88072 return t->sighand->action[sig - 1].sa.sa_handler;
88073 }
88074
88075-static int sig_handler_ignored(void __user *handler, int sig)
88076+static int sig_handler_ignored(__sighandler_t handler, int sig)
88077 {
88078 /* Is it explicitly or implicitly ignored? */
88079 return handler == SIG_IGN ||
88080@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
88081
88082 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
88083 {
88084- void __user *handler;
88085+ __sighandler_t handler;
88086
88087 handler = sig_handler(t, sig);
88088
88089@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
88090 atomic_inc(&user->sigpending);
88091 rcu_read_unlock();
88092
88093+ if (!override_rlimit)
88094+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
88095+
88096 if (override_rlimit ||
88097 atomic_read(&user->sigpending) <=
88098 task_rlimit(t, RLIMIT_SIGPENDING)) {
88099@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
88100
88101 int unhandled_signal(struct task_struct *tsk, int sig)
88102 {
88103- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
88104+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
88105 if (is_global_init(tsk))
88106 return 1;
88107 if (handler != SIG_IGN && handler != SIG_DFL)
88108@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
88109 }
88110 }
88111
88112+ /* allow glibc communication via tgkill to other threads in our
88113+ thread group */
88114+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
88115+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
88116+ && gr_handle_signal(t, sig))
88117+ return -EPERM;
88118+
88119 return security_task_kill(t, info, sig, 0);
88120 }
88121
88122@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
88123 return send_signal(sig, info, p, 1);
88124 }
88125
88126-static int
88127+int
88128 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
88129 {
88130 return send_signal(sig, info, t, 0);
88131@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
88132 unsigned long int flags;
88133 int ret, blocked, ignored;
88134 struct k_sigaction *action;
88135+ int is_unhandled = 0;
88136
88137 spin_lock_irqsave(&t->sighand->siglock, flags);
88138 action = &t->sighand->action[sig-1];
88139@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
88140 }
88141 if (action->sa.sa_handler == SIG_DFL)
88142 t->signal->flags &= ~SIGNAL_UNKILLABLE;
88143+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
88144+ is_unhandled = 1;
88145 ret = specific_send_sig_info(sig, info, t);
88146 spin_unlock_irqrestore(&t->sighand->siglock, flags);
88147
88148+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
88149+ normal operation */
88150+ if (is_unhandled) {
88151+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
88152+ gr_handle_crash(t, sig);
88153+ }
88154+
88155 return ret;
88156 }
88157
88158@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
88159 ret = check_kill_permission(sig, info, p);
88160 rcu_read_unlock();
88161
88162- if (!ret && sig)
88163+ if (!ret && sig) {
88164 ret = do_send_sig_info(sig, info, p, true);
88165+ if (!ret)
88166+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
88167+ }
88168
88169 return ret;
88170 }
88171@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
88172 int error = -ESRCH;
88173
88174 rcu_read_lock();
88175- p = find_task_by_vpid(pid);
88176+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
88177+ /* allow glibc communication via tgkill to other threads in our
88178+ thread group */
88179+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
88180+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
88181+ p = find_task_by_vpid_unrestricted(pid);
88182+ else
88183+#endif
88184+ p = find_task_by_vpid(pid);
88185 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
88186 error = check_kill_permission(sig, info, p);
88187 /*
88188@@ -3240,8 +3271,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
88189 }
88190 seg = get_fs();
88191 set_fs(KERNEL_DS);
88192- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
88193- (stack_t __force __user *) &uoss,
88194+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
88195+ (stack_t __force_user *) &uoss,
88196 compat_user_stack_pointer());
88197 set_fs(seg);
88198 if (ret >= 0 && uoss_ptr) {
88199diff --git a/kernel/smpboot.c b/kernel/smpboot.c
88200index eb89e18..a4e6792 100644
88201--- a/kernel/smpboot.c
88202+++ b/kernel/smpboot.c
88203@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
88204 }
88205 smpboot_unpark_thread(plug_thread, cpu);
88206 }
88207- list_add(&plug_thread->list, &hotplug_threads);
88208+ pax_list_add(&plug_thread->list, &hotplug_threads);
88209 out:
88210 mutex_unlock(&smpboot_threads_lock);
88211 return ret;
88212@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
88213 {
88214 get_online_cpus();
88215 mutex_lock(&smpboot_threads_lock);
88216- list_del(&plug_thread->list);
88217+ pax_list_del(&plug_thread->list);
88218 smpboot_destroy_threads(plug_thread);
88219 mutex_unlock(&smpboot_threads_lock);
88220 put_online_cpus();
88221diff --git a/kernel/softirq.c b/kernel/softirq.c
88222index 11025cc..bc0e4dc 100644
88223--- a/kernel/softirq.c
88224+++ b/kernel/softirq.c
88225@@ -50,11 +50,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
88226 EXPORT_SYMBOL(irq_stat);
88227 #endif
88228
88229-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
88230+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
88231
88232 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
88233
88234-char *softirq_to_name[NR_SOFTIRQS] = {
88235+const char * const softirq_to_name[NR_SOFTIRQS] = {
88236 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
88237 "TASKLET", "SCHED", "HRTIMER", "RCU"
88238 };
88239@@ -250,7 +250,7 @@ restart:
88240 kstat_incr_softirqs_this_cpu(vec_nr);
88241
88242 trace_softirq_entry(vec_nr);
88243- h->action(h);
88244+ h->action();
88245 trace_softirq_exit(vec_nr);
88246 if (unlikely(prev_count != preempt_count())) {
88247 printk(KERN_ERR "huh, entered softirq %u %s %p"
88248@@ -419,7 +419,7 @@ void __raise_softirq_irqoff(unsigned int nr)
88249 or_softirq_pending(1UL << nr);
88250 }
88251
88252-void open_softirq(int nr, void (*action)(struct softirq_action *))
88253+void __init open_softirq(int nr, void (*action)(void))
88254 {
88255 softirq_vec[nr].action = action;
88256 }
88257@@ -475,7 +475,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
88258
88259 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
88260
88261-static void tasklet_action(struct softirq_action *a)
88262+static __latent_entropy void tasklet_action(void)
88263 {
88264 struct tasklet_struct *list;
88265
88266@@ -510,7 +510,7 @@ static void tasklet_action(struct softirq_action *a)
88267 }
88268 }
88269
88270-static void tasklet_hi_action(struct softirq_action *a)
88271+static __latent_entropy void tasklet_hi_action(void)
88272 {
88273 struct tasklet_struct *list;
88274
88275@@ -740,7 +740,7 @@ static struct notifier_block cpu_nfb = {
88276 .notifier_call = cpu_callback
88277 };
88278
88279-static struct smp_hotplug_thread softirq_threads = {
88280+static struct smp_hotplug_thread softirq_threads __read_only = {
88281 .store = &ksoftirqd,
88282 .thread_should_run = ksoftirqd_should_run,
88283 .thread_fn = run_ksoftirqd,
88284diff --git a/kernel/sys.c b/kernel/sys.c
88285index c723113..46bf922 100644
88286--- a/kernel/sys.c
88287+++ b/kernel/sys.c
88288@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
88289 error = -EACCES;
88290 goto out;
88291 }
88292+
88293+ if (gr_handle_chroot_setpriority(p, niceval)) {
88294+ error = -EACCES;
88295+ goto out;
88296+ }
88297+
88298 no_nice = security_task_setnice(p, niceval);
88299 if (no_nice) {
88300 error = no_nice;
88301@@ -351,6 +357,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
88302 goto error;
88303 }
88304
88305+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
88306+ goto error;
88307+
88308 if (rgid != (gid_t) -1 ||
88309 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
88310 new->sgid = new->egid;
88311@@ -386,6 +395,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
88312 old = current_cred();
88313
88314 retval = -EPERM;
88315+
88316+ if (gr_check_group_change(kgid, kgid, kgid))
88317+ goto error;
88318+
88319 if (ns_capable(old->user_ns, CAP_SETGID))
88320 new->gid = new->egid = new->sgid = new->fsgid = kgid;
88321 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
88322@@ -403,7 +416,7 @@ error:
88323 /*
88324 * change the user struct in a credentials set to match the new UID
88325 */
88326-static int set_user(struct cred *new)
88327+int set_user(struct cred *new)
88328 {
88329 struct user_struct *new_user;
88330
88331@@ -483,6 +496,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
88332 goto error;
88333 }
88334
88335+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
88336+ goto error;
88337+
88338 if (!uid_eq(new->uid, old->uid)) {
88339 retval = set_user(new);
88340 if (retval < 0)
88341@@ -533,6 +549,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
88342 old = current_cred();
88343
88344 retval = -EPERM;
88345+
88346+ if (gr_check_crash_uid(kuid))
88347+ goto error;
88348+ if (gr_check_user_change(kuid, kuid, kuid))
88349+ goto error;
88350+
88351 if (ns_capable(old->user_ns, CAP_SETUID)) {
88352 new->suid = new->uid = kuid;
88353 if (!uid_eq(kuid, old->uid)) {
88354@@ -602,6 +624,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
88355 goto error;
88356 }
88357
88358+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
88359+ goto error;
88360+
88361 if (ruid != (uid_t) -1) {
88362 new->uid = kruid;
88363 if (!uid_eq(kruid, old->uid)) {
88364@@ -684,6 +709,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
88365 goto error;
88366 }
88367
88368+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
88369+ goto error;
88370+
88371 if (rgid != (gid_t) -1)
88372 new->gid = krgid;
88373 if (egid != (gid_t) -1)
88374@@ -745,12 +773,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
88375 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
88376 ns_capable(old->user_ns, CAP_SETUID)) {
88377 if (!uid_eq(kuid, old->fsuid)) {
88378+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
88379+ goto error;
88380+
88381 new->fsuid = kuid;
88382 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
88383 goto change_okay;
88384 }
88385 }
88386
88387+error:
88388 abort_creds(new);
88389 return old_fsuid;
88390
88391@@ -783,12 +815,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
88392 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
88393 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
88394 ns_capable(old->user_ns, CAP_SETGID)) {
88395+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
88396+ goto error;
88397+
88398 if (!gid_eq(kgid, old->fsgid)) {
88399 new->fsgid = kgid;
88400 goto change_okay;
88401 }
88402 }
88403
88404+error:
88405 abort_creds(new);
88406 return old_fsgid;
88407
88408@@ -1168,19 +1204,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
88409 return -EFAULT;
88410
88411 down_read(&uts_sem);
88412- error = __copy_to_user(&name->sysname, &utsname()->sysname,
88413+ error = __copy_to_user(name->sysname, &utsname()->sysname,
88414 __OLD_UTS_LEN);
88415 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
88416- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
88417+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
88418 __OLD_UTS_LEN);
88419 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
88420- error |= __copy_to_user(&name->release, &utsname()->release,
88421+ error |= __copy_to_user(name->release, &utsname()->release,
88422 __OLD_UTS_LEN);
88423 error |= __put_user(0, name->release + __OLD_UTS_LEN);
88424- error |= __copy_to_user(&name->version, &utsname()->version,
88425+ error |= __copy_to_user(name->version, &utsname()->version,
88426 __OLD_UTS_LEN);
88427 error |= __put_user(0, name->version + __OLD_UTS_LEN);
88428- error |= __copy_to_user(&name->machine, &utsname()->machine,
88429+ error |= __copy_to_user(name->machine, &utsname()->machine,
88430 __OLD_UTS_LEN);
88431 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
88432 up_read(&uts_sem);
88433@@ -1382,6 +1418,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
88434 */
88435 new_rlim->rlim_cur = 1;
88436 }
88437+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
88438+ is changed to a lower value. Since tasks can be created by the same
88439+ user in between this limit change and an execve by this task, force
88440+ a recheck only for this task by setting PF_NPROC_EXCEEDED
88441+ */
88442+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
88443+ tsk->flags |= PF_NPROC_EXCEEDED;
88444 }
88445 if (!retval) {
88446 if (old_rlim)
88447diff --git a/kernel/sysctl.c b/kernel/sysctl.c
88448index 34a6047..5665aa7 100644
88449--- a/kernel/sysctl.c
88450+++ b/kernel/sysctl.c
88451@@ -93,7 +93,6 @@
88452
88453
88454 #if defined(CONFIG_SYSCTL)
88455-
88456 /* External variables not in a header file. */
88457 extern int sysctl_overcommit_memory;
88458 extern int sysctl_overcommit_ratio;
88459@@ -119,17 +118,18 @@ extern int blk_iopoll_enabled;
88460
88461 /* Constants used for minimum and maximum */
88462 #ifdef CONFIG_LOCKUP_DETECTOR
88463-static int sixty = 60;
88464+static int sixty __read_only = 60;
88465 #endif
88466
88467-static int zero;
88468-static int __maybe_unused one = 1;
88469-static int __maybe_unused two = 2;
88470-static int __maybe_unused three = 3;
88471-static unsigned long one_ul = 1;
88472-static int one_hundred = 100;
88473+static int neg_one __read_only = -1;
88474+static int zero __read_only = 0;
88475+static int __maybe_unused one __read_only = 1;
88476+static int __maybe_unused two __read_only = 2;
88477+static int __maybe_unused three __read_only = 3;
88478+static unsigned long one_ul __read_only = 1;
88479+static int one_hundred __read_only = 100;
88480 #ifdef CONFIG_PRINTK
88481-static int ten_thousand = 10000;
88482+static int ten_thousand __read_only = 10000;
88483 #endif
88484
88485 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
88486@@ -176,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
88487 void __user *buffer, size_t *lenp, loff_t *ppos);
88488 #endif
88489
88490-#ifdef CONFIG_PRINTK
88491 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
88492 void __user *buffer, size_t *lenp, loff_t *ppos);
88493-#endif
88494
88495 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
88496 void __user *buffer, size_t *lenp, loff_t *ppos);
88497@@ -210,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
88498
88499 #endif
88500
88501+extern struct ctl_table grsecurity_table[];
88502+
88503 static struct ctl_table kern_table[];
88504 static struct ctl_table vm_table[];
88505 static struct ctl_table fs_table[];
88506@@ -224,6 +224,20 @@ extern struct ctl_table epoll_table[];
88507 int sysctl_legacy_va_layout;
88508 #endif
88509
88510+#ifdef CONFIG_PAX_SOFTMODE
88511+static ctl_table pax_table[] = {
88512+ {
88513+ .procname = "softmode",
88514+ .data = &pax_softmode,
88515+ .maxlen = sizeof(unsigned int),
88516+ .mode = 0600,
88517+ .proc_handler = &proc_dointvec,
88518+ },
88519+
88520+ { }
88521+};
88522+#endif
88523+
88524 /* The default sysctl tables: */
88525
88526 static struct ctl_table sysctl_base_table[] = {
88527@@ -272,6 +286,22 @@ static int max_extfrag_threshold = 1000;
88528 #endif
88529
88530 static struct ctl_table kern_table[] = {
88531+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
88532+ {
88533+ .procname = "grsecurity",
88534+ .mode = 0500,
88535+ .child = grsecurity_table,
88536+ },
88537+#endif
88538+
88539+#ifdef CONFIG_PAX_SOFTMODE
88540+ {
88541+ .procname = "pax",
88542+ .mode = 0500,
88543+ .child = pax_table,
88544+ },
88545+#endif
88546+
88547 {
88548 .procname = "sched_child_runs_first",
88549 .data = &sysctl_sched_child_runs_first,
88550@@ -620,7 +650,7 @@ static struct ctl_table kern_table[] = {
88551 .data = &modprobe_path,
88552 .maxlen = KMOD_PATH_LEN,
88553 .mode = 0644,
88554- .proc_handler = proc_dostring,
88555+ .proc_handler = proc_dostring_modpriv,
88556 },
88557 {
88558 .procname = "modules_disabled",
88559@@ -787,16 +817,20 @@ static struct ctl_table kern_table[] = {
88560 .extra1 = &zero,
88561 .extra2 = &one,
88562 },
88563+#endif
88564 {
88565 .procname = "kptr_restrict",
88566 .data = &kptr_restrict,
88567 .maxlen = sizeof(int),
88568 .mode = 0644,
88569 .proc_handler = proc_dointvec_minmax_sysadmin,
88570+#ifdef CONFIG_GRKERNSEC_HIDESYM
88571+ .extra1 = &two,
88572+#else
88573 .extra1 = &zero,
88574+#endif
88575 .extra2 = &two,
88576 },
88577-#endif
88578 {
88579 .procname = "ngroups_max",
88580 .data = &ngroups_max,
88581@@ -1039,10 +1073,17 @@ static struct ctl_table kern_table[] = {
88582 */
88583 {
88584 .procname = "perf_event_paranoid",
88585- .data = &sysctl_perf_event_paranoid,
88586- .maxlen = sizeof(sysctl_perf_event_paranoid),
88587+ .data = &sysctl_perf_event_legitimately_concerned,
88588+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
88589 .mode = 0644,
88590- .proc_handler = proc_dointvec,
88591+ /* go ahead, be a hero */
88592+ .proc_handler = proc_dointvec_minmax_sysadmin,
88593+ .extra1 = &neg_one,
88594+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88595+ .extra2 = &three,
88596+#else
88597+ .extra2 = &two,
88598+#endif
88599 },
88600 {
88601 .procname = "perf_event_mlock_kb",
88602@@ -1306,6 +1347,13 @@ static struct ctl_table vm_table[] = {
88603 .proc_handler = proc_dointvec_minmax,
88604 .extra1 = &zero,
88605 },
88606+ {
88607+ .procname = "heap_stack_gap",
88608+ .data = &sysctl_heap_stack_gap,
88609+ .maxlen = sizeof(sysctl_heap_stack_gap),
88610+ .mode = 0644,
88611+ .proc_handler = proc_doulongvec_minmax,
88612+ },
88613 #else
88614 {
88615 .procname = "nr_trim_pages",
88616@@ -1770,6 +1818,16 @@ int proc_dostring(struct ctl_table *table, int write,
88617 buffer, lenp, ppos);
88618 }
88619
88620+int proc_dostring_modpriv(struct ctl_table *table, int write,
88621+ void __user *buffer, size_t *lenp, loff_t *ppos)
88622+{
88623+ if (write && !capable(CAP_SYS_MODULE))
88624+ return -EPERM;
88625+
88626+ return _proc_do_string(table->data, table->maxlen, write,
88627+ buffer, lenp, ppos);
88628+}
88629+
88630 static size_t proc_skip_spaces(char **buf)
88631 {
88632 size_t ret;
88633@@ -1875,6 +1933,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
88634 len = strlen(tmp);
88635 if (len > *size)
88636 len = *size;
88637+ if (len > sizeof(tmp))
88638+ len = sizeof(tmp);
88639 if (copy_to_user(*buf, tmp, len))
88640 return -EFAULT;
88641 *size -= len;
88642@@ -2039,7 +2099,7 @@ int proc_dointvec(struct ctl_table *table, int write,
88643 static int proc_taint(struct ctl_table *table, int write,
88644 void __user *buffer, size_t *lenp, loff_t *ppos)
88645 {
88646- struct ctl_table t;
88647+ ctl_table_no_const t;
88648 unsigned long tmptaint = get_taint();
88649 int err;
88650
88651@@ -2067,7 +2127,6 @@ static int proc_taint(struct ctl_table *table, int write,
88652 return err;
88653 }
88654
88655-#ifdef CONFIG_PRINTK
88656 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
88657 void __user *buffer, size_t *lenp, loff_t *ppos)
88658 {
88659@@ -2076,7 +2135,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
88660
88661 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
88662 }
88663-#endif
88664
88665 struct do_proc_dointvec_minmax_conv_param {
88666 int *min;
88667@@ -2623,6 +2681,12 @@ int proc_dostring(struct ctl_table *table, int write,
88668 return -ENOSYS;
88669 }
88670
88671+int proc_dostring_modpriv(struct ctl_table *table, int write,
88672+ void __user *buffer, size_t *lenp, loff_t *ppos)
88673+{
88674+ return -ENOSYS;
88675+}
88676+
88677 int proc_dointvec(struct ctl_table *table, int write,
88678 void __user *buffer, size_t *lenp, loff_t *ppos)
88679 {
88680@@ -2679,5 +2743,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
88681 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
88682 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
88683 EXPORT_SYMBOL(proc_dostring);
88684+EXPORT_SYMBOL(proc_dostring_modpriv);
88685 EXPORT_SYMBOL(proc_doulongvec_minmax);
88686 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
88687diff --git a/kernel/taskstats.c b/kernel/taskstats.c
88688index 13d2f7c..c93d0b0 100644
88689--- a/kernel/taskstats.c
88690+++ b/kernel/taskstats.c
88691@@ -28,9 +28,12 @@
88692 #include <linux/fs.h>
88693 #include <linux/file.h>
88694 #include <linux/pid_namespace.h>
88695+#include <linux/grsecurity.h>
88696 #include <net/genetlink.h>
88697 #include <linux/atomic.h>
88698
88699+extern int gr_is_taskstats_denied(int pid);
88700+
88701 /*
88702 * Maximum length of a cpumask that can be specified in
88703 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
88704@@ -576,6 +579,9 @@ err:
88705
88706 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
88707 {
88708+ if (gr_is_taskstats_denied(current->pid))
88709+ return -EACCES;
88710+
88711 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
88712 return cmd_attr_register_cpumask(info);
88713 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
88714diff --git a/kernel/time.c b/kernel/time.c
88715index 7c7964c..2a0d412 100644
88716--- a/kernel/time.c
88717+++ b/kernel/time.c
88718@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
88719 return error;
88720
88721 if (tz) {
88722+ /* we log in do_settimeofday called below, so don't log twice
88723+ */
88724+ if (!tv)
88725+ gr_log_timechange();
88726+
88727 sys_tz = *tz;
88728 update_vsyscall_tz();
88729 if (firsttime) {
88730diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
88731index 88c9c65..7497ebc 100644
88732--- a/kernel/time/alarmtimer.c
88733+++ b/kernel/time/alarmtimer.c
88734@@ -795,7 +795,7 @@ static int __init alarmtimer_init(void)
88735 struct platform_device *pdev;
88736 int error = 0;
88737 int i;
88738- struct k_clock alarm_clock = {
88739+ static struct k_clock alarm_clock = {
88740 .clock_getres = alarm_clock_getres,
88741 .clock_get = alarm_clock_get,
88742 .timer_create = alarm_timer_create,
88743diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
88744index 87b4f00..b7f77a7 100644
88745--- a/kernel/time/timekeeping.c
88746+++ b/kernel/time/timekeeping.c
88747@@ -15,6 +15,7 @@
88748 #include <linux/init.h>
88749 #include <linux/mm.h>
88750 #include <linux/sched.h>
88751+#include <linux/grsecurity.h>
88752 #include <linux/syscore_ops.h>
88753 #include <linux/clocksource.h>
88754 #include <linux/jiffies.h>
88755@@ -500,6 +501,8 @@ int do_settimeofday(const struct timespec *tv)
88756 if (!timespec_valid_strict(tv))
88757 return -EINVAL;
88758
88759+ gr_log_timechange();
88760+
88761 raw_spin_lock_irqsave(&timekeeper_lock, flags);
88762 write_seqcount_begin(&timekeeper_seq);
88763
88764diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
88765index 61ed862..3b52c65 100644
88766--- a/kernel/time/timer_list.c
88767+++ b/kernel/time/timer_list.c
88768@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
88769
88770 static void print_name_offset(struct seq_file *m, void *sym)
88771 {
88772+#ifdef CONFIG_GRKERNSEC_HIDESYM
88773+ SEQ_printf(m, "<%p>", NULL);
88774+#else
88775 char symname[KSYM_NAME_LEN];
88776
88777 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
88778 SEQ_printf(m, "<%pK>", sym);
88779 else
88780 SEQ_printf(m, "%s", symname);
88781+#endif
88782 }
88783
88784 static void
88785@@ -119,7 +123,11 @@ next_one:
88786 static void
88787 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
88788 {
88789+#ifdef CONFIG_GRKERNSEC_HIDESYM
88790+ SEQ_printf(m, " .base: %p\n", NULL);
88791+#else
88792 SEQ_printf(m, " .base: %pK\n", base);
88793+#endif
88794 SEQ_printf(m, " .index: %d\n",
88795 base->index);
88796 SEQ_printf(m, " .resolution: %Lu nsecs\n",
88797@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
88798 {
88799 struct proc_dir_entry *pe;
88800
88801+#ifdef CONFIG_GRKERNSEC_PROC_ADD
88802+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
88803+#else
88804 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
88805+#endif
88806 if (!pe)
88807 return -ENOMEM;
88808 return 0;
88809diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
88810index 1fb08f2..ca4bb1e 100644
88811--- a/kernel/time/timer_stats.c
88812+++ b/kernel/time/timer_stats.c
88813@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
88814 static unsigned long nr_entries;
88815 static struct entry entries[MAX_ENTRIES];
88816
88817-static atomic_t overflow_count;
88818+static atomic_unchecked_t overflow_count;
88819
88820 /*
88821 * The entries are in a hash-table, for fast lookup:
88822@@ -140,7 +140,7 @@ static void reset_entries(void)
88823 nr_entries = 0;
88824 memset(entries, 0, sizeof(entries));
88825 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
88826- atomic_set(&overflow_count, 0);
88827+ atomic_set_unchecked(&overflow_count, 0);
88828 }
88829
88830 static struct entry *alloc_entry(void)
88831@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
88832 if (likely(entry))
88833 entry->count++;
88834 else
88835- atomic_inc(&overflow_count);
88836+ atomic_inc_unchecked(&overflow_count);
88837
88838 out_unlock:
88839 raw_spin_unlock_irqrestore(lock, flags);
88840@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
88841
88842 static void print_name_offset(struct seq_file *m, unsigned long addr)
88843 {
88844+#ifdef CONFIG_GRKERNSEC_HIDESYM
88845+ seq_printf(m, "<%p>", NULL);
88846+#else
88847 char symname[KSYM_NAME_LEN];
88848
88849 if (lookup_symbol_name(addr, symname) < 0)
88850- seq_printf(m, "<%p>", (void *)addr);
88851+ seq_printf(m, "<%pK>", (void *)addr);
88852 else
88853 seq_printf(m, "%s", symname);
88854+#endif
88855 }
88856
88857 static int tstats_show(struct seq_file *m, void *v)
88858@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
88859
88860 seq_puts(m, "Timer Stats Version: v0.3\n");
88861 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
88862- if (atomic_read(&overflow_count))
88863- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
88864+ if (atomic_read_unchecked(&overflow_count))
88865+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
88866 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
88867
88868 for (i = 0; i < nr_entries; i++) {
88869@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
88870 {
88871 struct proc_dir_entry *pe;
88872
88873+#ifdef CONFIG_GRKERNSEC_PROC_ADD
88874+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
88875+#else
88876 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
88877+#endif
88878 if (!pe)
88879 return -ENOMEM;
88880 return 0;
88881diff --git a/kernel/timer.c b/kernel/timer.c
88882index accfd24..e00f0c0 100644
88883--- a/kernel/timer.c
88884+++ b/kernel/timer.c
88885@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
88886 /*
88887 * This function runs timers and the timer-tq in bottom half context.
88888 */
88889-static void run_timer_softirq(struct softirq_action *h)
88890+static __latent_entropy void run_timer_softirq(void)
88891 {
88892 struct tvec_base *base = __this_cpu_read(tvec_bases);
88893
88894@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
88895 *
88896 * In all cases the return value is guaranteed to be non-negative.
88897 */
88898-signed long __sched schedule_timeout(signed long timeout)
88899+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
88900 {
88901 struct timer_list timer;
88902 unsigned long expire;
88903diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
88904index f785aef..59f1b18 100644
88905--- a/kernel/trace/blktrace.c
88906+++ b/kernel/trace/blktrace.c
88907@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
88908 struct blk_trace *bt = filp->private_data;
88909 char buf[16];
88910
88911- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
88912+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
88913
88914 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
88915 }
88916@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
88917 return 1;
88918
88919 bt = buf->chan->private_data;
88920- atomic_inc(&bt->dropped);
88921+ atomic_inc_unchecked(&bt->dropped);
88922 return 0;
88923 }
88924
88925@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
88926
88927 bt->dir = dir;
88928 bt->dev = dev;
88929- atomic_set(&bt->dropped, 0);
88930+ atomic_set_unchecked(&bt->dropped, 0);
88931 INIT_LIST_HEAD(&bt->running_list);
88932
88933 ret = -EIO;
88934diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
88935index 72a0f81..0bbfd090 100644
88936--- a/kernel/trace/ftrace.c
88937+++ b/kernel/trace/ftrace.c
88938@@ -1944,12 +1944,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
88939 if (unlikely(ftrace_disabled))
88940 return 0;
88941
88942+ ret = ftrace_arch_code_modify_prepare();
88943+ FTRACE_WARN_ON(ret);
88944+ if (ret)
88945+ return 0;
88946+
88947 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
88948+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
88949 if (ret) {
88950 ftrace_bug(ret, ip);
88951- return 0;
88952 }
88953- return 1;
88954+ return ret ? 0 : 1;
88955 }
88956
88957 /*
88958@@ -4119,8 +4124,10 @@ static int ftrace_process_locs(struct module *mod,
88959 if (!count)
88960 return 0;
88961
88962+ pax_open_kernel();
88963 sort(start, count, sizeof(*start),
88964 ftrace_cmp_ips, ftrace_swap_ips);
88965+ pax_close_kernel();
88966
88967 start_pg = ftrace_allocate_pages(count);
88968 if (!start_pg)
88969@@ -4851,8 +4858,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
88970 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
88971
88972 static int ftrace_graph_active;
88973-static struct notifier_block ftrace_suspend_notifier;
88974-
88975 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
88976 {
88977 return 0;
88978@@ -5003,6 +5008,10 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
88979 FTRACE_OPS_FL_RECURSION_SAFE,
88980 };
88981
88982+static struct notifier_block ftrace_suspend_notifier = {
88983+ .notifier_call = ftrace_suspend_notifier_call
88984+};
88985+
88986 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
88987 trace_func_graph_ent_t entryfunc)
88988 {
88989@@ -5016,7 +5025,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
88990 goto out;
88991 }
88992
88993- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
88994 register_pm_notifier(&ftrace_suspend_notifier);
88995
88996 ftrace_graph_active++;
88997diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
88998index cc2f66f..05edd54 100644
88999--- a/kernel/trace/ring_buffer.c
89000+++ b/kernel/trace/ring_buffer.c
89001@@ -352,9 +352,9 @@ struct buffer_data_page {
89002 */
89003 struct buffer_page {
89004 struct list_head list; /* list of buffer pages */
89005- local_t write; /* index for next write */
89006+ local_unchecked_t write; /* index for next write */
89007 unsigned read; /* index for next read */
89008- local_t entries; /* entries on this page */
89009+ local_unchecked_t entries; /* entries on this page */
89010 unsigned long real_end; /* real end of data */
89011 struct buffer_data_page *page; /* Actual data page */
89012 };
89013@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
89014 unsigned long last_overrun;
89015 local_t entries_bytes;
89016 local_t entries;
89017- local_t overrun;
89018- local_t commit_overrun;
89019+ local_unchecked_t overrun;
89020+ local_unchecked_t commit_overrun;
89021 local_t dropped_events;
89022 local_t committing;
89023 local_t commits;
89024@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
89025 *
89026 * We add a counter to the write field to denote this.
89027 */
89028- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
89029- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
89030+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
89031+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
89032
89033 /*
89034 * Just make sure we have seen our old_write and synchronize
89035@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
89036 * cmpxchg to only update if an interrupt did not already
89037 * do it for us. If the cmpxchg fails, we don't care.
89038 */
89039- (void)local_cmpxchg(&next_page->write, old_write, val);
89040- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
89041+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
89042+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
89043
89044 /*
89045 * No need to worry about races with clearing out the commit.
89046@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
89047
89048 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
89049 {
89050- return local_read(&bpage->entries) & RB_WRITE_MASK;
89051+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
89052 }
89053
89054 static inline unsigned long rb_page_write(struct buffer_page *bpage)
89055 {
89056- return local_read(&bpage->write) & RB_WRITE_MASK;
89057+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
89058 }
89059
89060 static int
89061@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
89062 * bytes consumed in ring buffer from here.
89063 * Increment overrun to account for the lost events.
89064 */
89065- local_add(page_entries, &cpu_buffer->overrun);
89066+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
89067 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
89068 }
89069
89070@@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
89071 * it is our responsibility to update
89072 * the counters.
89073 */
89074- local_add(entries, &cpu_buffer->overrun);
89075+ local_add_unchecked(entries, &cpu_buffer->overrun);
89076 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
89077
89078 /*
89079@@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
89080 if (tail == BUF_PAGE_SIZE)
89081 tail_page->real_end = 0;
89082
89083- local_sub(length, &tail_page->write);
89084+ local_sub_unchecked(length, &tail_page->write);
89085 return;
89086 }
89087
89088@@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
89089 rb_event_set_padding(event);
89090
89091 /* Set the write back to the previous setting */
89092- local_sub(length, &tail_page->write);
89093+ local_sub_unchecked(length, &tail_page->write);
89094 return;
89095 }
89096
89097@@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
89098
89099 /* Set write to end of buffer */
89100 length = (tail + length) - BUF_PAGE_SIZE;
89101- local_sub(length, &tail_page->write);
89102+ local_sub_unchecked(length, &tail_page->write);
89103 }
89104
89105 /*
89106@@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
89107 * about it.
89108 */
89109 if (unlikely(next_page == commit_page)) {
89110- local_inc(&cpu_buffer->commit_overrun);
89111+ local_inc_unchecked(&cpu_buffer->commit_overrun);
89112 goto out_reset;
89113 }
89114
89115@@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
89116 cpu_buffer->tail_page) &&
89117 (cpu_buffer->commit_page ==
89118 cpu_buffer->reader_page))) {
89119- local_inc(&cpu_buffer->commit_overrun);
89120+ local_inc_unchecked(&cpu_buffer->commit_overrun);
89121 goto out_reset;
89122 }
89123 }
89124@@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
89125 length += RB_LEN_TIME_EXTEND;
89126
89127 tail_page = cpu_buffer->tail_page;
89128- write = local_add_return(length, &tail_page->write);
89129+ write = local_add_return_unchecked(length, &tail_page->write);
89130
89131 /* set write to only the index of the write */
89132 write &= RB_WRITE_MASK;
89133@@ -2408,7 +2408,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
89134 kmemcheck_annotate_bitfield(event, bitfield);
89135 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
89136
89137- local_inc(&tail_page->entries);
89138+ local_inc_unchecked(&tail_page->entries);
89139
89140 /*
89141 * If this is the first commit on the page, then update
89142@@ -2441,7 +2441,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
89143
89144 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
89145 unsigned long write_mask =
89146- local_read(&bpage->write) & ~RB_WRITE_MASK;
89147+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
89148 unsigned long event_length = rb_event_length(event);
89149 /*
89150 * This is on the tail page. It is possible that
89151@@ -2451,7 +2451,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
89152 */
89153 old_index += write_mask;
89154 new_index += write_mask;
89155- index = local_cmpxchg(&bpage->write, old_index, new_index);
89156+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
89157 if (index == old_index) {
89158 /* update counters */
89159 local_sub(event_length, &cpu_buffer->entries_bytes);
89160@@ -2843,7 +2843,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
89161
89162 /* Do the likely case first */
89163 if (likely(bpage->page == (void *)addr)) {
89164- local_dec(&bpage->entries);
89165+ local_dec_unchecked(&bpage->entries);
89166 return;
89167 }
89168
89169@@ -2855,7 +2855,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
89170 start = bpage;
89171 do {
89172 if (bpage->page == (void *)addr) {
89173- local_dec(&bpage->entries);
89174+ local_dec_unchecked(&bpage->entries);
89175 return;
89176 }
89177 rb_inc_page(cpu_buffer, &bpage);
89178@@ -3139,7 +3139,7 @@ static inline unsigned long
89179 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
89180 {
89181 return local_read(&cpu_buffer->entries) -
89182- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
89183+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
89184 }
89185
89186 /**
89187@@ -3228,7 +3228,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
89188 return 0;
89189
89190 cpu_buffer = buffer->buffers[cpu];
89191- ret = local_read(&cpu_buffer->overrun);
89192+ ret = local_read_unchecked(&cpu_buffer->overrun);
89193
89194 return ret;
89195 }
89196@@ -3251,7 +3251,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
89197 return 0;
89198
89199 cpu_buffer = buffer->buffers[cpu];
89200- ret = local_read(&cpu_buffer->commit_overrun);
89201+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
89202
89203 return ret;
89204 }
89205@@ -3336,7 +3336,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
89206 /* if you care about this being correct, lock the buffer */
89207 for_each_buffer_cpu(buffer, cpu) {
89208 cpu_buffer = buffer->buffers[cpu];
89209- overruns += local_read(&cpu_buffer->overrun);
89210+ overruns += local_read_unchecked(&cpu_buffer->overrun);
89211 }
89212
89213 return overruns;
89214@@ -3512,8 +3512,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
89215 /*
89216 * Reset the reader page to size zero.
89217 */
89218- local_set(&cpu_buffer->reader_page->write, 0);
89219- local_set(&cpu_buffer->reader_page->entries, 0);
89220+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
89221+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
89222 local_set(&cpu_buffer->reader_page->page->commit, 0);
89223 cpu_buffer->reader_page->real_end = 0;
89224
89225@@ -3547,7 +3547,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
89226 * want to compare with the last_overrun.
89227 */
89228 smp_mb();
89229- overwrite = local_read(&(cpu_buffer->overrun));
89230+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
89231
89232 /*
89233 * Here's the tricky part.
89234@@ -4117,8 +4117,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
89235
89236 cpu_buffer->head_page
89237 = list_entry(cpu_buffer->pages, struct buffer_page, list);
89238- local_set(&cpu_buffer->head_page->write, 0);
89239- local_set(&cpu_buffer->head_page->entries, 0);
89240+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
89241+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
89242 local_set(&cpu_buffer->head_page->page->commit, 0);
89243
89244 cpu_buffer->head_page->read = 0;
89245@@ -4128,14 +4128,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
89246
89247 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
89248 INIT_LIST_HEAD(&cpu_buffer->new_pages);
89249- local_set(&cpu_buffer->reader_page->write, 0);
89250- local_set(&cpu_buffer->reader_page->entries, 0);
89251+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
89252+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
89253 local_set(&cpu_buffer->reader_page->page->commit, 0);
89254 cpu_buffer->reader_page->read = 0;
89255
89256 local_set(&cpu_buffer->entries_bytes, 0);
89257- local_set(&cpu_buffer->overrun, 0);
89258- local_set(&cpu_buffer->commit_overrun, 0);
89259+ local_set_unchecked(&cpu_buffer->overrun, 0);
89260+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
89261 local_set(&cpu_buffer->dropped_events, 0);
89262 local_set(&cpu_buffer->entries, 0);
89263 local_set(&cpu_buffer->committing, 0);
89264@@ -4540,8 +4540,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
89265 rb_init_page(bpage);
89266 bpage = reader->page;
89267 reader->page = *data_page;
89268- local_set(&reader->write, 0);
89269- local_set(&reader->entries, 0);
89270+ local_set_unchecked(&reader->write, 0);
89271+ local_set_unchecked(&reader->entries, 0);
89272 reader->read = 0;
89273 *data_page = bpage;
89274
89275diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
89276index 9d20cd9..221d816 100644
89277--- a/kernel/trace/trace.c
89278+++ b/kernel/trace/trace.c
89279@@ -3346,7 +3346,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
89280 return 0;
89281 }
89282
89283-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
89284+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
89285 {
89286 /* do nothing if flag is already set */
89287 if (!!(trace_flags & mask) == !!enabled)
89288diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
89289index ea189e0..a5b48c4 100644
89290--- a/kernel/trace/trace.h
89291+++ b/kernel/trace/trace.h
89292@@ -1040,7 +1040,7 @@ extern const char *__stop___tracepoint_str[];
89293 void trace_printk_init_buffers(void);
89294 void trace_printk_start_comm(void);
89295 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
89296-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
89297+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
89298
89299 /*
89300 * Normal trace_printk() and friends allocates special buffers
89301diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
89302index 26dc348..8708ca7 100644
89303--- a/kernel/trace/trace_clock.c
89304+++ b/kernel/trace/trace_clock.c
89305@@ -123,7 +123,7 @@ u64 notrace trace_clock_global(void)
89306 return now;
89307 }
89308
89309-static atomic64_t trace_counter;
89310+static atomic64_unchecked_t trace_counter;
89311
89312 /*
89313 * trace_clock_counter(): simply an atomic counter.
89314@@ -132,5 +132,5 @@ static atomic64_t trace_counter;
89315 */
89316 u64 notrace trace_clock_counter(void)
89317 {
89318- return atomic64_add_return(1, &trace_counter);
89319+ return atomic64_inc_return_unchecked(&trace_counter);
89320 }
89321diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
89322index a11800a..3dafde5 100644
89323--- a/kernel/trace/trace_events.c
89324+++ b/kernel/trace/trace_events.c
89325@@ -1681,7 +1681,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
89326 return 0;
89327 }
89328
89329-struct ftrace_module_file_ops;
89330 static void __add_event_to_tracers(struct ftrace_event_call *call);
89331
89332 /* Add an additional event_call dynamically */
89333diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
89334index 0abd9b8..6a663a2 100644
89335--- a/kernel/trace/trace_mmiotrace.c
89336+++ b/kernel/trace/trace_mmiotrace.c
89337@@ -24,7 +24,7 @@ struct header_iter {
89338 static struct trace_array *mmio_trace_array;
89339 static bool overrun_detected;
89340 static unsigned long prev_overruns;
89341-static atomic_t dropped_count;
89342+static atomic_unchecked_t dropped_count;
89343
89344 static void mmio_reset_data(struct trace_array *tr)
89345 {
89346@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
89347
89348 static unsigned long count_overruns(struct trace_iterator *iter)
89349 {
89350- unsigned long cnt = atomic_xchg(&dropped_count, 0);
89351+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
89352 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
89353
89354 if (over > prev_overruns)
89355@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
89356 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
89357 sizeof(*entry), 0, pc);
89358 if (!event) {
89359- atomic_inc(&dropped_count);
89360+ atomic_inc_unchecked(&dropped_count);
89361 return;
89362 }
89363 entry = ring_buffer_event_data(event);
89364@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
89365 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
89366 sizeof(*entry), 0, pc);
89367 if (!event) {
89368- atomic_inc(&dropped_count);
89369+ atomic_inc_unchecked(&dropped_count);
89370 return;
89371 }
89372 entry = ring_buffer_event_data(event);
89373diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
89374index ed32284..884d6c3 100644
89375--- a/kernel/trace/trace_output.c
89376+++ b/kernel/trace/trace_output.c
89377@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
89378
89379 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
89380 if (!IS_ERR(p)) {
89381- p = mangle_path(s->buffer + s->len, p, "\n");
89382+ p = mangle_path(s->buffer + s->len, p, "\n\\");
89383 if (p) {
89384 s->len = p - s->buffer;
89385 return 1;
89386@@ -908,14 +908,16 @@ int register_ftrace_event(struct trace_event *event)
89387 goto out;
89388 }
89389
89390+ pax_open_kernel();
89391 if (event->funcs->trace == NULL)
89392- event->funcs->trace = trace_nop_print;
89393+ *(void **)&event->funcs->trace = trace_nop_print;
89394 if (event->funcs->raw == NULL)
89395- event->funcs->raw = trace_nop_print;
89396+ *(void **)&event->funcs->raw = trace_nop_print;
89397 if (event->funcs->hex == NULL)
89398- event->funcs->hex = trace_nop_print;
89399+ *(void **)&event->funcs->hex = trace_nop_print;
89400 if (event->funcs->binary == NULL)
89401- event->funcs->binary = trace_nop_print;
89402+ *(void **)&event->funcs->binary = trace_nop_print;
89403+ pax_close_kernel();
89404
89405 key = event->type & (EVENT_HASHSIZE - 1);
89406
89407diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
89408index b20428c..4845a10 100644
89409--- a/kernel/trace/trace_stack.c
89410+++ b/kernel/trace/trace_stack.c
89411@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
89412 return;
89413
89414 /* we do not handle interrupt stacks yet */
89415- if (!object_is_on_stack(stack))
89416+ if (!object_starts_on_stack(stack))
89417 return;
89418
89419 local_irq_save(flags);
89420diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
89421index 240fb62..583473e 100644
89422--- a/kernel/user_namespace.c
89423+++ b/kernel/user_namespace.c
89424@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
89425 !kgid_has_mapping(parent_ns, group))
89426 return -EPERM;
89427
89428+#ifdef CONFIG_GRKERNSEC
89429+ /*
89430+ * This doesn't really inspire confidence:
89431+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
89432+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
89433+ * Increases kernel attack surface in areas developers
89434+ * previously cared little about ("low importance due
89435+ * to requiring "root" capability")
89436+ * To be removed when this code receives *proper* review
89437+ */
89438+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
89439+ !capable(CAP_SETGID))
89440+ return -EPERM;
89441+#endif
89442+
89443 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
89444 if (!ns)
89445 return -ENOMEM;
89446@@ -866,7 +881,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
89447 if (atomic_read(&current->mm->mm_users) > 1)
89448 return -EINVAL;
89449
89450- if (current->fs->users != 1)
89451+ if (atomic_read(&current->fs->users) != 1)
89452 return -EINVAL;
89453
89454 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
89455diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
89456index 4f69f9a..7c6f8f8 100644
89457--- a/kernel/utsname_sysctl.c
89458+++ b/kernel/utsname_sysctl.c
89459@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
89460 static int proc_do_uts_string(ctl_table *table, int write,
89461 void __user *buffer, size_t *lenp, loff_t *ppos)
89462 {
89463- struct ctl_table uts_table;
89464+ ctl_table_no_const uts_table;
89465 int r;
89466 memcpy(&uts_table, table, sizeof(uts_table));
89467 uts_table.data = get_uts(table, write);
89468diff --git a/kernel/watchdog.c b/kernel/watchdog.c
89469index 4431610..4265616 100644
89470--- a/kernel/watchdog.c
89471+++ b/kernel/watchdog.c
89472@@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
89473 static void watchdog_nmi_disable(unsigned int cpu) { return; }
89474 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
89475
89476-static struct smp_hotplug_thread watchdog_threads = {
89477+static struct smp_hotplug_thread watchdog_threads __read_only = {
89478 .store = &softlockup_watchdog,
89479 .thread_should_run = watchdog_should_run,
89480 .thread_fn = watchdog,
89481diff --git a/kernel/workqueue.c b/kernel/workqueue.c
89482index b010eac..e4bda78 100644
89483--- a/kernel/workqueue.c
89484+++ b/kernel/workqueue.c
89485@@ -4671,7 +4671,7 @@ static void rebind_workers(struct worker_pool *pool)
89486 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
89487 worker_flags |= WORKER_REBOUND;
89488 worker_flags &= ~WORKER_UNBOUND;
89489- ACCESS_ONCE(worker->flags) = worker_flags;
89490+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
89491 }
89492
89493 spin_unlock_irq(&pool->lock);
89494diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
89495index db25707..8b16430 100644
89496--- a/lib/Kconfig.debug
89497+++ b/lib/Kconfig.debug
89498@@ -845,7 +845,7 @@ config DEBUG_MUTEXES
89499
89500 config DEBUG_WW_MUTEX_SLOWPATH
89501 bool "Wait/wound mutex debugging: Slowpath testing"
89502- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
89503+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
89504 select DEBUG_LOCK_ALLOC
89505 select DEBUG_SPINLOCK
89506 select DEBUG_MUTEXES
89507@@ -858,7 +858,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
89508
89509 config DEBUG_LOCK_ALLOC
89510 bool "Lock debugging: detect incorrect freeing of live locks"
89511- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
89512+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
89513 select DEBUG_SPINLOCK
89514 select DEBUG_MUTEXES
89515 select LOCKDEP
89516@@ -872,7 +872,7 @@ config DEBUG_LOCK_ALLOC
89517
89518 config PROVE_LOCKING
89519 bool "Lock debugging: prove locking correctness"
89520- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
89521+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
89522 select LOCKDEP
89523 select DEBUG_SPINLOCK
89524 select DEBUG_MUTEXES
89525@@ -923,7 +923,7 @@ config LOCKDEP
89526
89527 config LOCK_STAT
89528 bool "Lock usage statistics"
89529- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
89530+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
89531 select LOCKDEP
89532 select DEBUG_SPINLOCK
89533 select DEBUG_MUTEXES
89534@@ -1385,6 +1385,7 @@ config LATENCYTOP
89535 depends on DEBUG_KERNEL
89536 depends on STACKTRACE_SUPPORT
89537 depends on PROC_FS
89538+ depends on !GRKERNSEC_HIDESYM
89539 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
89540 select KALLSYMS
89541 select KALLSYMS_ALL
89542@@ -1401,7 +1402,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
89543 config DEBUG_STRICT_USER_COPY_CHECKS
89544 bool "Strict user copy size checks"
89545 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
89546- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
89547+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
89548 help
89549 Enabling this option turns a certain set of sanity checks for user
89550 copy operations into compile time failures.
89551@@ -1520,7 +1521,7 @@ endmenu # runtime tests
89552
89553 config PROVIDE_OHCI1394_DMA_INIT
89554 bool "Remote debugging over FireWire early on boot"
89555- depends on PCI && X86
89556+ depends on PCI && X86 && !GRKERNSEC
89557 help
89558 If you want to debug problems which hang or crash the kernel early
89559 on boot and the crashing machine has a FireWire port, you can use
89560@@ -1549,7 +1550,7 @@ config PROVIDE_OHCI1394_DMA_INIT
89561
89562 config FIREWIRE_OHCI_REMOTE_DMA
89563 bool "Remote debugging over FireWire with firewire-ohci"
89564- depends on FIREWIRE_OHCI
89565+ depends on FIREWIRE_OHCI && !GRKERNSEC
89566 help
89567 This option lets you use the FireWire bus for remote debugging
89568 with help of the firewire-ohci driver. It enables unfiltered
89569diff --git a/lib/Makefile b/lib/Makefile
89570index a459c31..3320e82 100644
89571--- a/lib/Makefile
89572+++ b/lib/Makefile
89573@@ -49,7 +49,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
89574 obj-$(CONFIG_BTREE) += btree.o
89575 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
89576 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
89577-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
89578+obj-y += list_debug.o
89579 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
89580
89581 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
89582diff --git a/lib/bitmap.c b/lib/bitmap.c
89583index 06f7e4f..f3cf2b0 100644
89584--- a/lib/bitmap.c
89585+++ b/lib/bitmap.c
89586@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
89587 {
89588 int c, old_c, totaldigits, ndigits, nchunks, nbits;
89589 u32 chunk;
89590- const char __user __force *ubuf = (const char __user __force *)buf;
89591+ const char __user *ubuf = (const char __force_user *)buf;
89592
89593 bitmap_zero(maskp, nmaskbits);
89594
89595@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
89596 {
89597 if (!access_ok(VERIFY_READ, ubuf, ulen))
89598 return -EFAULT;
89599- return __bitmap_parse((const char __force *)ubuf,
89600+ return __bitmap_parse((const char __force_kernel *)ubuf,
89601 ulen, 1, maskp, nmaskbits);
89602
89603 }
89604@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
89605 {
89606 unsigned a, b;
89607 int c, old_c, totaldigits;
89608- const char __user __force *ubuf = (const char __user __force *)buf;
89609+ const char __user *ubuf = (const char __force_user *)buf;
89610 int exp_digit, in_range;
89611
89612 totaldigits = c = 0;
89613@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
89614 {
89615 if (!access_ok(VERIFY_READ, ubuf, ulen))
89616 return -EFAULT;
89617- return __bitmap_parselist((const char __force *)ubuf,
89618+ return __bitmap_parselist((const char __force_kernel *)ubuf,
89619 ulen, 1, maskp, nmaskbits);
89620 }
89621 EXPORT_SYMBOL(bitmap_parselist_user);
89622diff --git a/lib/bug.c b/lib/bug.c
89623index 1686034..a9c00c8 100644
89624--- a/lib/bug.c
89625+++ b/lib/bug.c
89626@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
89627 return BUG_TRAP_TYPE_NONE;
89628
89629 bug = find_bug(bugaddr);
89630+ if (!bug)
89631+ return BUG_TRAP_TYPE_NONE;
89632
89633 file = NULL;
89634 line = 0;
89635diff --git a/lib/debugobjects.c b/lib/debugobjects.c
89636index e0731c3..ad66444 100644
89637--- a/lib/debugobjects.c
89638+++ b/lib/debugobjects.c
89639@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
89640 if (limit > 4)
89641 return;
89642
89643- is_on_stack = object_is_on_stack(addr);
89644+ is_on_stack = object_starts_on_stack(addr);
89645 if (is_on_stack == onstack)
89646 return;
89647
89648diff --git a/lib/devres.c b/lib/devres.c
89649index 8235331..5881053 100644
89650--- a/lib/devres.c
89651+++ b/lib/devres.c
89652@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
89653 void devm_iounmap(struct device *dev, void __iomem *addr)
89654 {
89655 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
89656- (void *)addr));
89657+ (void __force *)addr));
89658 iounmap(addr);
89659 }
89660 EXPORT_SYMBOL(devm_iounmap);
89661@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
89662 {
89663 ioport_unmap(addr);
89664 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
89665- devm_ioport_map_match, (void *)addr));
89666+ devm_ioport_map_match, (void __force *)addr));
89667 }
89668 EXPORT_SYMBOL(devm_ioport_unmap);
89669 #endif /* CONFIG_HAS_IOPORT */
89670diff --git a/lib/div64.c b/lib/div64.c
89671index 4382ad7..08aa558 100644
89672--- a/lib/div64.c
89673+++ b/lib/div64.c
89674@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
89675 EXPORT_SYMBOL(__div64_32);
89676
89677 #ifndef div_s64_rem
89678-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
89679+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
89680 {
89681 u64 quotient;
89682
89683@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
89684 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
89685 */
89686 #ifndef div64_u64
89687-u64 div64_u64(u64 dividend, u64 divisor)
89688+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
89689 {
89690 u32 high = divisor >> 32;
89691 u64 quot;
89692diff --git a/lib/dma-debug.c b/lib/dma-debug.c
89693index d87a17a..ac0d79a 100644
89694--- a/lib/dma-debug.c
89695+++ b/lib/dma-debug.c
89696@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
89697
89698 void dma_debug_add_bus(struct bus_type *bus)
89699 {
89700- struct notifier_block *nb;
89701+ notifier_block_no_const *nb;
89702
89703 if (global_disable)
89704 return;
89705@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
89706
89707 static void check_for_stack(struct device *dev, void *addr)
89708 {
89709- if (object_is_on_stack(addr))
89710+ if (object_starts_on_stack(addr))
89711 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
89712 "stack [addr=%p]\n", addr);
89713 }
89714diff --git a/lib/inflate.c b/lib/inflate.c
89715index 013a761..c28f3fc 100644
89716--- a/lib/inflate.c
89717+++ b/lib/inflate.c
89718@@ -269,7 +269,7 @@ static void free(void *where)
89719 malloc_ptr = free_mem_ptr;
89720 }
89721 #else
89722-#define malloc(a) kmalloc(a, GFP_KERNEL)
89723+#define malloc(a) kmalloc((a), GFP_KERNEL)
89724 #define free(a) kfree(a)
89725 #endif
89726
89727diff --git a/lib/ioremap.c b/lib/ioremap.c
89728index 0c9216c..863bd89 100644
89729--- a/lib/ioremap.c
89730+++ b/lib/ioremap.c
89731@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
89732 unsigned long next;
89733
89734 phys_addr -= addr;
89735- pmd = pmd_alloc(&init_mm, pud, addr);
89736+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
89737 if (!pmd)
89738 return -ENOMEM;
89739 do {
89740@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
89741 unsigned long next;
89742
89743 phys_addr -= addr;
89744- pud = pud_alloc(&init_mm, pgd, addr);
89745+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
89746 if (!pud)
89747 return -ENOMEM;
89748 do {
89749diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
89750index bd2bea9..6b3c95e 100644
89751--- a/lib/is_single_threaded.c
89752+++ b/lib/is_single_threaded.c
89753@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
89754 struct task_struct *p, *t;
89755 bool ret;
89756
89757+ if (!mm)
89758+ return true;
89759+
89760 if (atomic_read(&task->signal->live) != 1)
89761 return false;
89762
89763diff --git a/lib/kobject.c b/lib/kobject.c
89764index 5b4b888..c2950f7 100644
89765--- a/lib/kobject.c
89766+++ b/lib/kobject.c
89767@@ -957,9 +957,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
89768
89769
89770 static DEFINE_SPINLOCK(kobj_ns_type_lock);
89771-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
89772+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
89773
89774-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
89775+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
89776 {
89777 enum kobj_ns_type type = ops->type;
89778 int error;
89779diff --git a/lib/list_debug.c b/lib/list_debug.c
89780index c24c2f7..f0296f4 100644
89781--- a/lib/list_debug.c
89782+++ b/lib/list_debug.c
89783@@ -11,7 +11,9 @@
89784 #include <linux/bug.h>
89785 #include <linux/kernel.h>
89786 #include <linux/rculist.h>
89787+#include <linux/mm.h>
89788
89789+#ifdef CONFIG_DEBUG_LIST
89790 /*
89791 * Insert a new entry between two known consecutive entries.
89792 *
89793@@ -19,21 +21,40 @@
89794 * the prev/next entries already!
89795 */
89796
89797+static bool __list_add_debug(struct list_head *new,
89798+ struct list_head *prev,
89799+ struct list_head *next)
89800+{
89801+ if (unlikely(next->prev != prev)) {
89802+ printk(KERN_ERR "list_add corruption. next->prev should be "
89803+ "prev (%p), but was %p. (next=%p).\n",
89804+ prev, next->prev, next);
89805+ BUG();
89806+ return false;
89807+ }
89808+ if (unlikely(prev->next != next)) {
89809+ printk(KERN_ERR "list_add corruption. prev->next should be "
89810+ "next (%p), but was %p. (prev=%p).\n",
89811+ next, prev->next, prev);
89812+ BUG();
89813+ return false;
89814+ }
89815+ if (unlikely(new == prev || new == next)) {
89816+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
89817+ new, prev, next);
89818+ BUG();
89819+ return false;
89820+ }
89821+ return true;
89822+}
89823+
89824 void __list_add(struct list_head *new,
89825- struct list_head *prev,
89826- struct list_head *next)
89827+ struct list_head *prev,
89828+ struct list_head *next)
89829 {
89830- WARN(next->prev != prev,
89831- "list_add corruption. next->prev should be "
89832- "prev (%p), but was %p. (next=%p).\n",
89833- prev, next->prev, next);
89834- WARN(prev->next != next,
89835- "list_add corruption. prev->next should be "
89836- "next (%p), but was %p. (prev=%p).\n",
89837- next, prev->next, prev);
89838- WARN(new == prev || new == next,
89839- "list_add double add: new=%p, prev=%p, next=%p.\n",
89840- new, prev, next);
89841+ if (!__list_add_debug(new, prev, next))
89842+ return;
89843+
89844 next->prev = new;
89845 new->next = next;
89846 new->prev = prev;
89847@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
89848 }
89849 EXPORT_SYMBOL(__list_add);
89850
89851-void __list_del_entry(struct list_head *entry)
89852+static bool __list_del_entry_debug(struct list_head *entry)
89853 {
89854 struct list_head *prev, *next;
89855
89856 prev = entry->prev;
89857 next = entry->next;
89858
89859- if (WARN(next == LIST_POISON1,
89860- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
89861- entry, LIST_POISON1) ||
89862- WARN(prev == LIST_POISON2,
89863- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
89864- entry, LIST_POISON2) ||
89865- WARN(prev->next != entry,
89866- "list_del corruption. prev->next should be %p, "
89867- "but was %p\n", entry, prev->next) ||
89868- WARN(next->prev != entry,
89869- "list_del corruption. next->prev should be %p, "
89870- "but was %p\n", entry, next->prev))
89871+ if (unlikely(next == LIST_POISON1)) {
89872+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
89873+ entry, LIST_POISON1);
89874+ BUG();
89875+ return false;
89876+ }
89877+ if (unlikely(prev == LIST_POISON2)) {
89878+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
89879+ entry, LIST_POISON2);
89880+ BUG();
89881+ return false;
89882+ }
89883+ if (unlikely(entry->prev->next != entry)) {
89884+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
89885+ "but was %p\n", entry, prev->next);
89886+ BUG();
89887+ return false;
89888+ }
89889+ if (unlikely(entry->next->prev != entry)) {
89890+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
89891+ "but was %p\n", entry, next->prev);
89892+ BUG();
89893+ return false;
89894+ }
89895+ return true;
89896+}
89897+
89898+void __list_del_entry(struct list_head *entry)
89899+{
89900+ if (!__list_del_entry_debug(entry))
89901 return;
89902
89903- __list_del(prev, next);
89904+ __list_del(entry->prev, entry->next);
89905 }
89906 EXPORT_SYMBOL(__list_del_entry);
89907
89908@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
89909 void __list_add_rcu(struct list_head *new,
89910 struct list_head *prev, struct list_head *next)
89911 {
89912- WARN(next->prev != prev,
89913- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
89914- prev, next->prev, next);
89915- WARN(prev->next != next,
89916- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
89917- next, prev->next, prev);
89918+ if (!__list_add_debug(new, prev, next))
89919+ return;
89920+
89921 new->next = next;
89922 new->prev = prev;
89923 rcu_assign_pointer(list_next_rcu(prev), new);
89924 next->prev = new;
89925 }
89926 EXPORT_SYMBOL(__list_add_rcu);
89927+#endif
89928+
89929+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
89930+{
89931+#ifdef CONFIG_DEBUG_LIST
89932+ if (!__list_add_debug(new, prev, next))
89933+ return;
89934+#endif
89935+
89936+ pax_open_kernel();
89937+ next->prev = new;
89938+ new->next = next;
89939+ new->prev = prev;
89940+ prev->next = new;
89941+ pax_close_kernel();
89942+}
89943+EXPORT_SYMBOL(__pax_list_add);
89944+
89945+void pax_list_del(struct list_head *entry)
89946+{
89947+#ifdef CONFIG_DEBUG_LIST
89948+ if (!__list_del_entry_debug(entry))
89949+ return;
89950+#endif
89951+
89952+ pax_open_kernel();
89953+ __list_del(entry->prev, entry->next);
89954+ entry->next = LIST_POISON1;
89955+ entry->prev = LIST_POISON2;
89956+ pax_close_kernel();
89957+}
89958+EXPORT_SYMBOL(pax_list_del);
89959+
89960+void pax_list_del_init(struct list_head *entry)
89961+{
89962+ pax_open_kernel();
89963+ __list_del(entry->prev, entry->next);
89964+ INIT_LIST_HEAD(entry);
89965+ pax_close_kernel();
89966+}
89967+EXPORT_SYMBOL(pax_list_del_init);
89968+
89969+void __pax_list_add_rcu(struct list_head *new,
89970+ struct list_head *prev, struct list_head *next)
89971+{
89972+#ifdef CONFIG_DEBUG_LIST
89973+ if (!__list_add_debug(new, prev, next))
89974+ return;
89975+#endif
89976+
89977+ pax_open_kernel();
89978+ new->next = next;
89979+ new->prev = prev;
89980+ rcu_assign_pointer(list_next_rcu(prev), new);
89981+ next->prev = new;
89982+ pax_close_kernel();
89983+}
89984+EXPORT_SYMBOL(__pax_list_add_rcu);
89985+
89986+void pax_list_del_rcu(struct list_head *entry)
89987+{
89988+#ifdef CONFIG_DEBUG_LIST
89989+ if (!__list_del_entry_debug(entry))
89990+ return;
89991+#endif
89992+
89993+ pax_open_kernel();
89994+ __list_del(entry->prev, entry->next);
89995+ entry->next = LIST_POISON1;
89996+ entry->prev = LIST_POISON2;
89997+ pax_close_kernel();
89998+}
89999+EXPORT_SYMBOL(pax_list_del_rcu);
90000diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
90001index 1a53d49..ace934c 100644
90002--- a/lib/percpu-refcount.c
90003+++ b/lib/percpu-refcount.c
90004@@ -29,7 +29,7 @@
90005 * can't hit 0 before we've added up all the percpu refs.
90006 */
90007
90008-#define PCPU_COUNT_BIAS (1U << 31)
90009+#define PCPU_COUNT_BIAS (1U << 30)
90010
90011 /**
90012 * percpu_ref_init - initialize a percpu refcount
90013diff --git a/lib/radix-tree.c b/lib/radix-tree.c
90014index 7811ed3..f80ca19 100644
90015--- a/lib/radix-tree.c
90016+++ b/lib/radix-tree.c
90017@@ -93,7 +93,7 @@ struct radix_tree_preload {
90018 int nr;
90019 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
90020 };
90021-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
90022+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
90023
90024 static inline void *ptr_to_indirect(void *ptr)
90025 {
90026diff --git a/lib/rbtree.c b/lib/rbtree.c
90027index 65f4eff..2cfa167 100644
90028--- a/lib/rbtree.c
90029+++ b/lib/rbtree.c
90030@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
90031 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
90032
90033 static const struct rb_augment_callbacks dummy_callbacks = {
90034- dummy_propagate, dummy_copy, dummy_rotate
90035+ .propagate = dummy_propagate,
90036+ .copy = dummy_copy,
90037+ .rotate = dummy_rotate
90038 };
90039
90040 void rb_insert_color(struct rb_node *node, struct rb_root *root)
90041diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
90042index bb2b201..46abaf9 100644
90043--- a/lib/strncpy_from_user.c
90044+++ b/lib/strncpy_from_user.c
90045@@ -21,7 +21,7 @@
90046 */
90047 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
90048 {
90049- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90050+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90051 long res = 0;
90052
90053 /*
90054diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
90055index a28df52..3d55877 100644
90056--- a/lib/strnlen_user.c
90057+++ b/lib/strnlen_user.c
90058@@ -26,7 +26,7 @@
90059 */
90060 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
90061 {
90062- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90063+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90064 long align, res = 0;
90065 unsigned long c;
90066
90067diff --git a/lib/swiotlb.c b/lib/swiotlb.c
90068index e4399fa..5e8b214 100644
90069--- a/lib/swiotlb.c
90070+++ b/lib/swiotlb.c
90071@@ -668,7 +668,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
90072
90073 void
90074 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
90075- dma_addr_t dev_addr)
90076+ dma_addr_t dev_addr, struct dma_attrs *attrs)
90077 {
90078 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
90079
90080diff --git a/lib/usercopy.c b/lib/usercopy.c
90081index 4f5b1dd..7cab418 100644
90082--- a/lib/usercopy.c
90083+++ b/lib/usercopy.c
90084@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
90085 WARN(1, "Buffer overflow detected!\n");
90086 }
90087 EXPORT_SYMBOL(copy_from_user_overflow);
90088+
90089+void copy_to_user_overflow(void)
90090+{
90091+ WARN(1, "Buffer overflow detected!\n");
90092+}
90093+EXPORT_SYMBOL(copy_to_user_overflow);
90094diff --git a/lib/vsprintf.c b/lib/vsprintf.c
90095index 10909c5..653e1b8 100644
90096--- a/lib/vsprintf.c
90097+++ b/lib/vsprintf.c
90098@@ -16,6 +16,9 @@
90099 * - scnprintf and vscnprintf
90100 */
90101
90102+#ifdef CONFIG_GRKERNSEC_HIDESYM
90103+#define __INCLUDED_BY_HIDESYM 1
90104+#endif
90105 #include <stdarg.h>
90106 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
90107 #include <linux/types.h>
90108@@ -1155,7 +1158,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
90109 return number(buf, end, *(const netdev_features_t *)addr, spec);
90110 }
90111
90112+#ifdef CONFIG_GRKERNSEC_HIDESYM
90113+int kptr_restrict __read_mostly = 2;
90114+#else
90115 int kptr_restrict __read_mostly;
90116+#endif
90117
90118 /*
90119 * Show a '%p' thing. A kernel extension is that the '%p' is followed
90120@@ -1168,6 +1175,7 @@ int kptr_restrict __read_mostly;
90121 * - 'f' For simple symbolic function names without offset
90122 * - 'S' For symbolic direct pointers with offset
90123 * - 's' For symbolic direct pointers without offset
90124+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
90125 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
90126 * - 'B' For backtraced symbolic direct pointers with offset
90127 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
90128@@ -1234,12 +1242,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90129
90130 if (!ptr && *fmt != 'K') {
90131 /*
90132- * Print (null) with the same width as a pointer so it makes
90133+ * Print (nil) with the same width as a pointer so it makes
90134 * tabular output look nice.
90135 */
90136 if (spec.field_width == -1)
90137 spec.field_width = default_width;
90138- return string(buf, end, "(null)", spec);
90139+ return string(buf, end, "(nil)", spec);
90140 }
90141
90142 switch (*fmt) {
90143@@ -1249,6 +1257,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90144 /* Fallthrough */
90145 case 'S':
90146 case 's':
90147+#ifdef CONFIG_GRKERNSEC_HIDESYM
90148+ break;
90149+#else
90150+ return symbol_string(buf, end, ptr, spec, fmt);
90151+#endif
90152+ case 'A':
90153 case 'B':
90154 return symbol_string(buf, end, ptr, spec, fmt);
90155 case 'R':
90156@@ -1304,6 +1318,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90157 va_end(va);
90158 return buf;
90159 }
90160+ case 'P':
90161+ break;
90162 case 'K':
90163 /*
90164 * %pK cannot be used in IRQ context because its test
90165@@ -1365,6 +1381,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90166 ((const struct file *)ptr)->f_path.dentry,
90167 spec, fmt);
90168 }
90169+
90170+#ifdef CONFIG_GRKERNSEC_HIDESYM
90171+ /* 'P' = approved pointers to copy to userland,
90172+ as in the /proc/kallsyms case, as we make it display nothing
90173+ for non-root users, and the real contents for root users
90174+ Also ignore 'K' pointers, since we force their NULLing for non-root users
90175+ above
90176+ */
90177+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
90178+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
90179+ dump_stack();
90180+ ptr = NULL;
90181+ }
90182+#endif
90183+
90184 spec.flags |= SMALL;
90185 if (spec.field_width == -1) {
90186 spec.field_width = default_width;
90187@@ -2086,11 +2117,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
90188 typeof(type) value; \
90189 if (sizeof(type) == 8) { \
90190 args = PTR_ALIGN(args, sizeof(u32)); \
90191- *(u32 *)&value = *(u32 *)args; \
90192- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
90193+ *(u32 *)&value = *(const u32 *)args; \
90194+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
90195 } else { \
90196 args = PTR_ALIGN(args, sizeof(type)); \
90197- value = *(typeof(type) *)args; \
90198+ value = *(const typeof(type) *)args; \
90199 } \
90200 args += sizeof(type); \
90201 value; \
90202@@ -2153,7 +2184,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
90203 case FORMAT_TYPE_STR: {
90204 const char *str_arg = args;
90205 args += strlen(str_arg) + 1;
90206- str = string(str, end, (char *)str_arg, spec);
90207+ str = string(str, end, str_arg, spec);
90208 break;
90209 }
90210
90211diff --git a/localversion-grsec b/localversion-grsec
90212new file mode 100644
90213index 0000000..7cd6065
90214--- /dev/null
90215+++ b/localversion-grsec
90216@@ -0,0 +1 @@
90217+-grsec
90218diff --git a/mm/Kconfig b/mm/Kconfig
90219index 723bbe0..ea624b1 100644
90220--- a/mm/Kconfig
90221+++ b/mm/Kconfig
90222@@ -326,10 +326,11 @@ config KSM
90223 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
90224
90225 config DEFAULT_MMAP_MIN_ADDR
90226- int "Low address space to protect from user allocation"
90227+ int "Low address space to protect from user allocation"
90228 depends on MMU
90229- default 4096
90230- help
90231+ default 32768 if ALPHA || ARM || PARISC || SPARC32
90232+ default 65536
90233+ help
90234 This is the portion of low virtual memory which should be protected
90235 from userspace allocation. Keeping a user from writing to low pages
90236 can help reduce the impact of kernel NULL pointer bugs.
90237@@ -360,7 +361,7 @@ config MEMORY_FAILURE
90238
90239 config HWPOISON_INJECT
90240 tristate "HWPoison pages injector"
90241- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
90242+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
90243 select PROC_PAGE_MONITOR
90244
90245 config NOMMU_INITIAL_TRIM_EXCESS
90246diff --git a/mm/backing-dev.c b/mm/backing-dev.c
90247index ce682f7..1fb54f9 100644
90248--- a/mm/backing-dev.c
90249+++ b/mm/backing-dev.c
90250@@ -12,7 +12,7 @@
90251 #include <linux/device.h>
90252 #include <trace/events/writeback.h>
90253
90254-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
90255+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
90256
90257 struct backing_dev_info default_backing_dev_info = {
90258 .name = "default",
90259@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
90260 return err;
90261
90262 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
90263- atomic_long_inc_return(&bdi_seq));
90264+ atomic_long_inc_return_unchecked(&bdi_seq));
90265 if (err) {
90266 bdi_destroy(bdi);
90267 return err;
90268diff --git a/mm/filemap.c b/mm/filemap.c
90269index b7749a9..50d1123 100644
90270--- a/mm/filemap.c
90271+++ b/mm/filemap.c
90272@@ -1768,7 +1768,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
90273 struct address_space *mapping = file->f_mapping;
90274
90275 if (!mapping->a_ops->readpage)
90276- return -ENOEXEC;
90277+ return -ENODEV;
90278 file_accessed(file);
90279 vma->vm_ops = &generic_file_vm_ops;
90280 return 0;
90281@@ -1950,7 +1950,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
90282
90283 while (bytes) {
90284 char __user *buf = iov->iov_base + base;
90285- int copy = min(bytes, iov->iov_len - base);
90286+ size_t copy = min(bytes, iov->iov_len - base);
90287
90288 base = 0;
90289 left = __copy_from_user_inatomic(vaddr, buf, copy);
90290@@ -1979,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
90291 BUG_ON(!in_atomic());
90292 kaddr = kmap_atomic(page);
90293 if (likely(i->nr_segs == 1)) {
90294- int left;
90295+ size_t left;
90296 char __user *buf = i->iov->iov_base + i->iov_offset;
90297 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
90298 copied = bytes - left;
90299@@ -2007,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct page *page,
90300
90301 kaddr = kmap(page);
90302 if (likely(i->nr_segs == 1)) {
90303- int left;
90304+ size_t left;
90305 char __user *buf = i->iov->iov_base + i->iov_offset;
90306 left = __copy_from_user(kaddr + offset, buf, bytes);
90307 copied = bytes - left;
90308@@ -2037,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
90309 * zero-length segments (without overruning the iovec).
90310 */
90311 while (bytes || unlikely(i->count && !iov->iov_len)) {
90312- int copy;
90313+ size_t copy;
90314
90315 copy = min(bytes, iov->iov_len - base);
90316 BUG_ON(!i->count || i->count < copy);
90317@@ -2108,6 +2108,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
90318 *pos = i_size_read(inode);
90319
90320 if (limit != RLIM_INFINITY) {
90321+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
90322 if (*pos >= limit) {
90323 send_sig(SIGXFSZ, current, 0);
90324 return -EFBIG;
90325diff --git a/mm/fremap.c b/mm/fremap.c
90326index bbc4d66..117b798 100644
90327--- a/mm/fremap.c
90328+++ b/mm/fremap.c
90329@@ -163,6 +163,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
90330 retry:
90331 vma = find_vma(mm, start);
90332
90333+#ifdef CONFIG_PAX_SEGMEXEC
90334+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
90335+ goto out;
90336+#endif
90337+
90338 /*
90339 * Make sure the vma is shared, that it supports prefaulting,
90340 * and that the remapped range is valid and fully within
90341diff --git a/mm/highmem.c b/mm/highmem.c
90342index b32b70c..e512eb0 100644
90343--- a/mm/highmem.c
90344+++ b/mm/highmem.c
90345@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
90346 * So no dangers, even with speculative execution.
90347 */
90348 page = pte_page(pkmap_page_table[i]);
90349+ pax_open_kernel();
90350 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
90351-
90352+ pax_close_kernel();
90353 set_page_address(page, NULL);
90354 need_flush = 1;
90355 }
90356@@ -198,9 +199,11 @@ start:
90357 }
90358 }
90359 vaddr = PKMAP_ADDR(last_pkmap_nr);
90360+
90361+ pax_open_kernel();
90362 set_pte_at(&init_mm, vaddr,
90363 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
90364-
90365+ pax_close_kernel();
90366 pkmap_count[last_pkmap_nr] = 1;
90367 set_page_address(page, (void *)vaddr);
90368
90369diff --git a/mm/hugetlb.c b/mm/hugetlb.c
90370index dee6cf4..52b94f7 100644
90371--- a/mm/hugetlb.c
90372+++ b/mm/hugetlb.c
90373@@ -2077,15 +2077,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
90374 struct hstate *h = &default_hstate;
90375 unsigned long tmp;
90376 int ret;
90377+ ctl_table_no_const hugetlb_table;
90378
90379 tmp = h->max_huge_pages;
90380
90381 if (write && h->order >= MAX_ORDER)
90382 return -EINVAL;
90383
90384- table->data = &tmp;
90385- table->maxlen = sizeof(unsigned long);
90386- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
90387+ hugetlb_table = *table;
90388+ hugetlb_table.data = &tmp;
90389+ hugetlb_table.maxlen = sizeof(unsigned long);
90390+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
90391 if (ret)
90392 goto out;
90393
90394@@ -2130,15 +2132,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
90395 struct hstate *h = &default_hstate;
90396 unsigned long tmp;
90397 int ret;
90398+ ctl_table_no_const hugetlb_table;
90399
90400 tmp = h->nr_overcommit_huge_pages;
90401
90402 if (write && h->order >= MAX_ORDER)
90403 return -EINVAL;
90404
90405- table->data = &tmp;
90406- table->maxlen = sizeof(unsigned long);
90407- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
90408+ hugetlb_table = *table;
90409+ hugetlb_table.data = &tmp;
90410+ hugetlb_table.maxlen = sizeof(unsigned long);
90411+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
90412 if (ret)
90413 goto out;
90414
90415@@ -2596,6 +2600,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
90416 return 1;
90417 }
90418
90419+#ifdef CONFIG_PAX_SEGMEXEC
90420+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
90421+{
90422+ struct mm_struct *mm = vma->vm_mm;
90423+ struct vm_area_struct *vma_m;
90424+ unsigned long address_m;
90425+ pte_t *ptep_m;
90426+
90427+ vma_m = pax_find_mirror_vma(vma);
90428+ if (!vma_m)
90429+ return;
90430+
90431+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
90432+ address_m = address + SEGMEXEC_TASK_SIZE;
90433+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
90434+ get_page(page_m);
90435+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
90436+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
90437+}
90438+#endif
90439+
90440 /*
90441 * Hugetlb_cow() should be called with page lock of the original hugepage held.
90442 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
90443@@ -2712,6 +2737,11 @@ retry_avoidcopy:
90444 make_huge_pte(vma, new_page, 1));
90445 page_remove_rmap(old_page);
90446 hugepage_add_new_anon_rmap(new_page, vma, address);
90447+
90448+#ifdef CONFIG_PAX_SEGMEXEC
90449+ pax_mirror_huge_pte(vma, address, new_page);
90450+#endif
90451+
90452 /* Make the old page be freed below */
90453 new_page = old_page;
90454 }
90455@@ -2876,6 +2906,10 @@ retry:
90456 && (vma->vm_flags & VM_SHARED)));
90457 set_huge_pte_at(mm, address, ptep, new_pte);
90458
90459+#ifdef CONFIG_PAX_SEGMEXEC
90460+ pax_mirror_huge_pte(vma, address, page);
90461+#endif
90462+
90463 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
90464 /* Optimization, do the COW without a second fault */
90465 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
90466@@ -2906,6 +2940,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
90467 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
90468 struct hstate *h = hstate_vma(vma);
90469
90470+#ifdef CONFIG_PAX_SEGMEXEC
90471+ struct vm_area_struct *vma_m;
90472+#endif
90473+
90474 address &= huge_page_mask(h);
90475
90476 ptep = huge_pte_offset(mm, address);
90477@@ -2919,6 +2957,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
90478 VM_FAULT_SET_HINDEX(hstate_index(h));
90479 }
90480
90481+#ifdef CONFIG_PAX_SEGMEXEC
90482+ vma_m = pax_find_mirror_vma(vma);
90483+ if (vma_m) {
90484+ unsigned long address_m;
90485+
90486+ if (vma->vm_start > vma_m->vm_start) {
90487+ address_m = address;
90488+ address -= SEGMEXEC_TASK_SIZE;
90489+ vma = vma_m;
90490+ h = hstate_vma(vma);
90491+ } else
90492+ address_m = address + SEGMEXEC_TASK_SIZE;
90493+
90494+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
90495+ return VM_FAULT_OOM;
90496+ address_m &= HPAGE_MASK;
90497+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
90498+ }
90499+#endif
90500+
90501 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
90502 if (!ptep)
90503 return VM_FAULT_OOM;
90504diff --git a/mm/internal.h b/mm/internal.h
90505index 684f7aa..9eb9edc 100644
90506--- a/mm/internal.h
90507+++ b/mm/internal.h
90508@@ -97,6 +97,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
90509 * in mm/page_alloc.c
90510 */
90511 extern void __free_pages_bootmem(struct page *page, unsigned int order);
90512+extern void free_compound_page(struct page *page);
90513 extern void prep_compound_page(struct page *page, unsigned long order);
90514 #ifdef CONFIG_MEMORY_FAILURE
90515 extern bool is_free_buddy_page(struct page *page);
90516@@ -352,7 +353,7 @@ extern u32 hwpoison_filter_enable;
90517
90518 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
90519 unsigned long, unsigned long,
90520- unsigned long, unsigned long);
90521+ unsigned long, unsigned long) __intentional_overflow(-1);
90522
90523 extern void set_pageblock_order(void);
90524 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
90525diff --git a/mm/kmemleak.c b/mm/kmemleak.c
90526index 31f01c5..7015178 100644
90527--- a/mm/kmemleak.c
90528+++ b/mm/kmemleak.c
90529@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
90530
90531 for (i = 0; i < object->trace_len; i++) {
90532 void *ptr = (void *)object->trace[i];
90533- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
90534+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
90535 }
90536 }
90537
90538@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
90539 return -ENOMEM;
90540 }
90541
90542- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
90543+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
90544 &kmemleak_fops);
90545 if (!dentry)
90546 pr_warning("Failed to create the debugfs kmemleak file\n");
90547diff --git a/mm/maccess.c b/mm/maccess.c
90548index d53adf9..03a24bf 100644
90549--- a/mm/maccess.c
90550+++ b/mm/maccess.c
90551@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
90552 set_fs(KERNEL_DS);
90553 pagefault_disable();
90554 ret = __copy_from_user_inatomic(dst,
90555- (__force const void __user *)src, size);
90556+ (const void __force_user *)src, size);
90557 pagefault_enable();
90558 set_fs(old_fs);
90559
90560@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
90561
90562 set_fs(KERNEL_DS);
90563 pagefault_disable();
90564- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
90565+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
90566 pagefault_enable();
90567 set_fs(old_fs);
90568
90569diff --git a/mm/madvise.c b/mm/madvise.c
90570index 539eeb9..e24a987 100644
90571--- a/mm/madvise.c
90572+++ b/mm/madvise.c
90573@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
90574 pgoff_t pgoff;
90575 unsigned long new_flags = vma->vm_flags;
90576
90577+#ifdef CONFIG_PAX_SEGMEXEC
90578+ struct vm_area_struct *vma_m;
90579+#endif
90580+
90581 switch (behavior) {
90582 case MADV_NORMAL:
90583 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
90584@@ -126,6 +130,13 @@ success:
90585 /*
90586 * vm_flags is protected by the mmap_sem held in write mode.
90587 */
90588+
90589+#ifdef CONFIG_PAX_SEGMEXEC
90590+ vma_m = pax_find_mirror_vma(vma);
90591+ if (vma_m)
90592+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
90593+#endif
90594+
90595 vma->vm_flags = new_flags;
90596
90597 out:
90598@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
90599 struct vm_area_struct **prev,
90600 unsigned long start, unsigned long end)
90601 {
90602+
90603+#ifdef CONFIG_PAX_SEGMEXEC
90604+ struct vm_area_struct *vma_m;
90605+#endif
90606+
90607 *prev = vma;
90608 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
90609 return -EINVAL;
90610@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
90611 zap_page_range(vma, start, end - start, &details);
90612 } else
90613 zap_page_range(vma, start, end - start, NULL);
90614+
90615+#ifdef CONFIG_PAX_SEGMEXEC
90616+ vma_m = pax_find_mirror_vma(vma);
90617+ if (vma_m) {
90618+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
90619+ struct zap_details details = {
90620+ .nonlinear_vma = vma_m,
90621+ .last_index = ULONG_MAX,
90622+ };
90623+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
90624+ } else
90625+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
90626+ }
90627+#endif
90628+
90629 return 0;
90630 }
90631
90632@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
90633 if (end < start)
90634 return error;
90635
90636+#ifdef CONFIG_PAX_SEGMEXEC
90637+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
90638+ if (end > SEGMEXEC_TASK_SIZE)
90639+ return error;
90640+ } else
90641+#endif
90642+
90643+ if (end > TASK_SIZE)
90644+ return error;
90645+
90646 error = 0;
90647 if (end == start)
90648 return error;
90649diff --git a/mm/memory-failure.c b/mm/memory-failure.c
90650index fabe550..f31b51c 100644
90651--- a/mm/memory-failure.c
90652+++ b/mm/memory-failure.c
90653@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
90654
90655 int sysctl_memory_failure_recovery __read_mostly = 1;
90656
90657-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
90658+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
90659
90660 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
90661
90662@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
90663 pfn, t->comm, t->pid);
90664 si.si_signo = SIGBUS;
90665 si.si_errno = 0;
90666- si.si_addr = (void *)addr;
90667+ si.si_addr = (void __user *)addr;
90668 #ifdef __ARCH_SI_TRAPNO
90669 si.si_trapno = trapno;
90670 #endif
90671@@ -762,7 +762,7 @@ static struct page_state {
90672 unsigned long res;
90673 char *msg;
90674 int (*action)(struct page *p, unsigned long pfn);
90675-} error_states[] = {
90676+} __do_const error_states[] = {
90677 { reserved, reserved, "reserved kernel", me_kernel },
90678 /*
90679 * free pages are specially detected outside this table:
90680@@ -1063,7 +1063,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
90681 nr_pages = 1 << compound_order(hpage);
90682 else /* normal page or thp */
90683 nr_pages = 1;
90684- atomic_long_add(nr_pages, &num_poisoned_pages);
90685+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
90686
90687 /*
90688 * We need/can do nothing about count=0 pages.
90689@@ -1093,7 +1093,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
90690 if (!PageHWPoison(hpage)
90691 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
90692 || (p != hpage && TestSetPageHWPoison(hpage))) {
90693- atomic_long_sub(nr_pages, &num_poisoned_pages);
90694+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
90695 return 0;
90696 }
90697 set_page_hwpoison_huge_page(hpage);
90698@@ -1162,7 +1162,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
90699 }
90700 if (hwpoison_filter(p)) {
90701 if (TestClearPageHWPoison(p))
90702- atomic_long_sub(nr_pages, &num_poisoned_pages);
90703+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
90704 unlock_page(hpage);
90705 put_page(hpage);
90706 return 0;
90707@@ -1380,7 +1380,7 @@ int unpoison_memory(unsigned long pfn)
90708 return 0;
90709 }
90710 if (TestClearPageHWPoison(p))
90711- atomic_long_dec(&num_poisoned_pages);
90712+ atomic_long_dec_unchecked(&num_poisoned_pages);
90713 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
90714 return 0;
90715 }
90716@@ -1394,7 +1394,7 @@ int unpoison_memory(unsigned long pfn)
90717 */
90718 if (TestClearPageHWPoison(page)) {
90719 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
90720- atomic_long_sub(nr_pages, &num_poisoned_pages);
90721+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
90722 freeit = 1;
90723 if (PageHuge(page))
90724 clear_page_hwpoison_huge_page(page);
90725@@ -1519,11 +1519,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
90726 if (PageHuge(page)) {
90727 set_page_hwpoison_huge_page(hpage);
90728 dequeue_hwpoisoned_huge_page(hpage);
90729- atomic_long_add(1 << compound_order(hpage),
90730+ atomic_long_add_unchecked(1 << compound_order(hpage),
90731 &num_poisoned_pages);
90732 } else {
90733 SetPageHWPoison(page);
90734- atomic_long_inc(&num_poisoned_pages);
90735+ atomic_long_inc_unchecked(&num_poisoned_pages);
90736 }
90737 }
90738 return ret;
90739@@ -1562,7 +1562,7 @@ static int __soft_offline_page(struct page *page, int flags)
90740 put_page(page);
90741 pr_info("soft_offline: %#lx: invalidated\n", pfn);
90742 SetPageHWPoison(page);
90743- atomic_long_inc(&num_poisoned_pages);
90744+ atomic_long_inc_unchecked(&num_poisoned_pages);
90745 return 0;
90746 }
90747
90748@@ -1607,7 +1607,7 @@ static int __soft_offline_page(struct page *page, int flags)
90749 if (!is_free_buddy_page(page))
90750 pr_info("soft offline: %#lx: page leaked\n",
90751 pfn);
90752- atomic_long_inc(&num_poisoned_pages);
90753+ atomic_long_inc_unchecked(&num_poisoned_pages);
90754 }
90755 } else {
90756 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
90757@@ -1681,11 +1681,11 @@ int soft_offline_page(struct page *page, int flags)
90758 if (PageHuge(page)) {
90759 set_page_hwpoison_huge_page(hpage);
90760 dequeue_hwpoisoned_huge_page(hpage);
90761- atomic_long_add(1 << compound_order(hpage),
90762+ atomic_long_add_unchecked(1 << compound_order(hpage),
90763 &num_poisoned_pages);
90764 } else {
90765 SetPageHWPoison(page);
90766- atomic_long_inc(&num_poisoned_pages);
90767+ atomic_long_inc_unchecked(&num_poisoned_pages);
90768 }
90769 }
90770 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
90771diff --git a/mm/memory.c b/mm/memory.c
90772index 6768ce9..4c41d69 100644
90773--- a/mm/memory.c
90774+++ b/mm/memory.c
90775@@ -402,6 +402,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
90776 free_pte_range(tlb, pmd, addr);
90777 } while (pmd++, addr = next, addr != end);
90778
90779+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
90780 start &= PUD_MASK;
90781 if (start < floor)
90782 return;
90783@@ -416,6 +417,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
90784 pmd = pmd_offset(pud, start);
90785 pud_clear(pud);
90786 pmd_free_tlb(tlb, pmd, start);
90787+#endif
90788+
90789 }
90790
90791 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
90792@@ -435,6 +438,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
90793 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
90794 } while (pud++, addr = next, addr != end);
90795
90796+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
90797 start &= PGDIR_MASK;
90798 if (start < floor)
90799 return;
90800@@ -449,6 +453,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
90801 pud = pud_offset(pgd, start);
90802 pgd_clear(pgd);
90803 pud_free_tlb(tlb, pud, start);
90804+#endif
90805+
90806 }
90807
90808 /*
90809@@ -1635,12 +1641,6 @@ no_page_table:
90810 return page;
90811 }
90812
90813-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
90814-{
90815- return stack_guard_page_start(vma, addr) ||
90816- stack_guard_page_end(vma, addr+PAGE_SIZE);
90817-}
90818-
90819 /**
90820 * __get_user_pages() - pin user pages in memory
90821 * @tsk: task_struct of target task
90822@@ -1727,10 +1727,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
90823
90824 i = 0;
90825
90826- do {
90827+ while (nr_pages) {
90828 struct vm_area_struct *vma;
90829
90830- vma = find_extend_vma(mm, start);
90831+ vma = find_vma(mm, start);
90832 if (!vma && in_gate_area(mm, start)) {
90833 unsigned long pg = start & PAGE_MASK;
90834 pgd_t *pgd;
90835@@ -1779,7 +1779,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
90836 goto next_page;
90837 }
90838
90839- if (!vma ||
90840+ if (!vma || start < vma->vm_start ||
90841 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
90842 !(vm_flags & vma->vm_flags))
90843 return i ? : -EFAULT;
90844@@ -1808,11 +1808,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
90845 int ret;
90846 unsigned int fault_flags = 0;
90847
90848- /* For mlock, just skip the stack guard page. */
90849- if (foll_flags & FOLL_MLOCK) {
90850- if (stack_guard_page(vma, start))
90851- goto next_page;
90852- }
90853 if (foll_flags & FOLL_WRITE)
90854 fault_flags |= FAULT_FLAG_WRITE;
90855 if (nonblocking)
90856@@ -1892,7 +1887,7 @@ next_page:
90857 start += page_increm * PAGE_SIZE;
90858 nr_pages -= page_increm;
90859 } while (nr_pages && start < vma->vm_end);
90860- } while (nr_pages);
90861+ }
90862 return i;
90863 }
90864 EXPORT_SYMBOL(__get_user_pages);
90865@@ -2099,6 +2094,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
90866 page_add_file_rmap(page);
90867 set_pte_at(mm, addr, pte, mk_pte(page, prot));
90868
90869+#ifdef CONFIG_PAX_SEGMEXEC
90870+ pax_mirror_file_pte(vma, addr, page, ptl);
90871+#endif
90872+
90873 retval = 0;
90874 pte_unmap_unlock(pte, ptl);
90875 return retval;
90876@@ -2143,9 +2142,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
90877 if (!page_count(page))
90878 return -EINVAL;
90879 if (!(vma->vm_flags & VM_MIXEDMAP)) {
90880+
90881+#ifdef CONFIG_PAX_SEGMEXEC
90882+ struct vm_area_struct *vma_m;
90883+#endif
90884+
90885 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
90886 BUG_ON(vma->vm_flags & VM_PFNMAP);
90887 vma->vm_flags |= VM_MIXEDMAP;
90888+
90889+#ifdef CONFIG_PAX_SEGMEXEC
90890+ vma_m = pax_find_mirror_vma(vma);
90891+ if (vma_m)
90892+ vma_m->vm_flags |= VM_MIXEDMAP;
90893+#endif
90894+
90895 }
90896 return insert_page(vma, addr, page, vma->vm_page_prot);
90897 }
90898@@ -2228,6 +2239,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
90899 unsigned long pfn)
90900 {
90901 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
90902+ BUG_ON(vma->vm_mirror);
90903
90904 if (addr < vma->vm_start || addr >= vma->vm_end)
90905 return -EFAULT;
90906@@ -2475,7 +2487,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
90907
90908 BUG_ON(pud_huge(*pud));
90909
90910- pmd = pmd_alloc(mm, pud, addr);
90911+ pmd = (mm == &init_mm) ?
90912+ pmd_alloc_kernel(mm, pud, addr) :
90913+ pmd_alloc(mm, pud, addr);
90914 if (!pmd)
90915 return -ENOMEM;
90916 do {
90917@@ -2495,7 +2509,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
90918 unsigned long next;
90919 int err;
90920
90921- pud = pud_alloc(mm, pgd, addr);
90922+ pud = (mm == &init_mm) ?
90923+ pud_alloc_kernel(mm, pgd, addr) :
90924+ pud_alloc(mm, pgd, addr);
90925 if (!pud)
90926 return -ENOMEM;
90927 do {
90928@@ -2583,6 +2599,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
90929 copy_user_highpage(dst, src, va, vma);
90930 }
90931
90932+#ifdef CONFIG_PAX_SEGMEXEC
90933+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
90934+{
90935+ struct mm_struct *mm = vma->vm_mm;
90936+ spinlock_t *ptl;
90937+ pte_t *pte, entry;
90938+
90939+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
90940+ entry = *pte;
90941+ if (!pte_present(entry)) {
90942+ if (!pte_none(entry)) {
90943+ BUG_ON(pte_file(entry));
90944+ free_swap_and_cache(pte_to_swp_entry(entry));
90945+ pte_clear_not_present_full(mm, address, pte, 0);
90946+ }
90947+ } else {
90948+ struct page *page;
90949+
90950+ flush_cache_page(vma, address, pte_pfn(entry));
90951+ entry = ptep_clear_flush(vma, address, pte);
90952+ BUG_ON(pte_dirty(entry));
90953+ page = vm_normal_page(vma, address, entry);
90954+ if (page) {
90955+ update_hiwater_rss(mm);
90956+ if (PageAnon(page))
90957+ dec_mm_counter_fast(mm, MM_ANONPAGES);
90958+ else
90959+ dec_mm_counter_fast(mm, MM_FILEPAGES);
90960+ page_remove_rmap(page);
90961+ page_cache_release(page);
90962+ }
90963+ }
90964+ pte_unmap_unlock(pte, ptl);
90965+}
90966+
90967+/* PaX: if vma is mirrored, synchronize the mirror's PTE
90968+ *
90969+ * the ptl of the lower mapped page is held on entry and is not released on exit
90970+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
90971+ */
90972+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
90973+{
90974+ struct mm_struct *mm = vma->vm_mm;
90975+ unsigned long address_m;
90976+ spinlock_t *ptl_m;
90977+ struct vm_area_struct *vma_m;
90978+ pmd_t *pmd_m;
90979+ pte_t *pte_m, entry_m;
90980+
90981+ BUG_ON(!page_m || !PageAnon(page_m));
90982+
90983+ vma_m = pax_find_mirror_vma(vma);
90984+ if (!vma_m)
90985+ return;
90986+
90987+ BUG_ON(!PageLocked(page_m));
90988+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
90989+ address_m = address + SEGMEXEC_TASK_SIZE;
90990+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
90991+ pte_m = pte_offset_map(pmd_m, address_m);
90992+ ptl_m = pte_lockptr(mm, pmd_m);
90993+ if (ptl != ptl_m) {
90994+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
90995+ if (!pte_none(*pte_m))
90996+ goto out;
90997+ }
90998+
90999+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
91000+ page_cache_get(page_m);
91001+ page_add_anon_rmap(page_m, vma_m, address_m);
91002+ inc_mm_counter_fast(mm, MM_ANONPAGES);
91003+ set_pte_at(mm, address_m, pte_m, entry_m);
91004+ update_mmu_cache(vma_m, address_m, pte_m);
91005+out:
91006+ if (ptl != ptl_m)
91007+ spin_unlock(ptl_m);
91008+ pte_unmap(pte_m);
91009+ unlock_page(page_m);
91010+}
91011+
91012+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
91013+{
91014+ struct mm_struct *mm = vma->vm_mm;
91015+ unsigned long address_m;
91016+ spinlock_t *ptl_m;
91017+ struct vm_area_struct *vma_m;
91018+ pmd_t *pmd_m;
91019+ pte_t *pte_m, entry_m;
91020+
91021+ BUG_ON(!page_m || PageAnon(page_m));
91022+
91023+ vma_m = pax_find_mirror_vma(vma);
91024+ if (!vma_m)
91025+ return;
91026+
91027+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
91028+ address_m = address + SEGMEXEC_TASK_SIZE;
91029+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
91030+ pte_m = pte_offset_map(pmd_m, address_m);
91031+ ptl_m = pte_lockptr(mm, pmd_m);
91032+ if (ptl != ptl_m) {
91033+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
91034+ if (!pte_none(*pte_m))
91035+ goto out;
91036+ }
91037+
91038+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
91039+ page_cache_get(page_m);
91040+ page_add_file_rmap(page_m);
91041+ inc_mm_counter_fast(mm, MM_FILEPAGES);
91042+ set_pte_at(mm, address_m, pte_m, entry_m);
91043+ update_mmu_cache(vma_m, address_m, pte_m);
91044+out:
91045+ if (ptl != ptl_m)
91046+ spin_unlock(ptl_m);
91047+ pte_unmap(pte_m);
91048+}
91049+
91050+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
91051+{
91052+ struct mm_struct *mm = vma->vm_mm;
91053+ unsigned long address_m;
91054+ spinlock_t *ptl_m;
91055+ struct vm_area_struct *vma_m;
91056+ pmd_t *pmd_m;
91057+ pte_t *pte_m, entry_m;
91058+
91059+ vma_m = pax_find_mirror_vma(vma);
91060+ if (!vma_m)
91061+ return;
91062+
91063+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
91064+ address_m = address + SEGMEXEC_TASK_SIZE;
91065+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
91066+ pte_m = pte_offset_map(pmd_m, address_m);
91067+ ptl_m = pte_lockptr(mm, pmd_m);
91068+ if (ptl != ptl_m) {
91069+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
91070+ if (!pte_none(*pte_m))
91071+ goto out;
91072+ }
91073+
91074+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
91075+ set_pte_at(mm, address_m, pte_m, entry_m);
91076+out:
91077+ if (ptl != ptl_m)
91078+ spin_unlock(ptl_m);
91079+ pte_unmap(pte_m);
91080+}
91081+
91082+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
91083+{
91084+ struct page *page_m;
91085+ pte_t entry;
91086+
91087+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
91088+ goto out;
91089+
91090+ entry = *pte;
91091+ page_m = vm_normal_page(vma, address, entry);
91092+ if (!page_m)
91093+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
91094+ else if (PageAnon(page_m)) {
91095+ if (pax_find_mirror_vma(vma)) {
91096+ pte_unmap_unlock(pte, ptl);
91097+ lock_page(page_m);
91098+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
91099+ if (pte_same(entry, *pte))
91100+ pax_mirror_anon_pte(vma, address, page_m, ptl);
91101+ else
91102+ unlock_page(page_m);
91103+ }
91104+ } else
91105+ pax_mirror_file_pte(vma, address, page_m, ptl);
91106+
91107+out:
91108+ pte_unmap_unlock(pte, ptl);
91109+}
91110+#endif
91111+
91112 /*
91113 * This routine handles present pages, when users try to write
91114 * to a shared page. It is done by copying the page to a new address
91115@@ -2807,6 +3003,12 @@ gotten:
91116 */
91117 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
91118 if (likely(pte_same(*page_table, orig_pte))) {
91119+
91120+#ifdef CONFIG_PAX_SEGMEXEC
91121+ if (pax_find_mirror_vma(vma))
91122+ BUG_ON(!trylock_page(new_page));
91123+#endif
91124+
91125 if (old_page) {
91126 if (!PageAnon(old_page)) {
91127 dec_mm_counter_fast(mm, MM_FILEPAGES);
91128@@ -2858,6 +3060,10 @@ gotten:
91129 page_remove_rmap(old_page);
91130 }
91131
91132+#ifdef CONFIG_PAX_SEGMEXEC
91133+ pax_mirror_anon_pte(vma, address, new_page, ptl);
91134+#endif
91135+
91136 /* Free the old page.. */
91137 new_page = old_page;
91138 ret |= VM_FAULT_WRITE;
91139@@ -3135,6 +3341,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
91140 swap_free(entry);
91141 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
91142 try_to_free_swap(page);
91143+
91144+#ifdef CONFIG_PAX_SEGMEXEC
91145+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
91146+#endif
91147+
91148 unlock_page(page);
91149 if (page != swapcache) {
91150 /*
91151@@ -3158,6 +3369,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
91152
91153 /* No need to invalidate - it was non-present before */
91154 update_mmu_cache(vma, address, page_table);
91155+
91156+#ifdef CONFIG_PAX_SEGMEXEC
91157+ pax_mirror_anon_pte(vma, address, page, ptl);
91158+#endif
91159+
91160 unlock:
91161 pte_unmap_unlock(page_table, ptl);
91162 out:
91163@@ -3177,40 +3393,6 @@ out_release:
91164 }
91165
91166 /*
91167- * This is like a special single-page "expand_{down|up}wards()",
91168- * except we must first make sure that 'address{-|+}PAGE_SIZE'
91169- * doesn't hit another vma.
91170- */
91171-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
91172-{
91173- address &= PAGE_MASK;
91174- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
91175- struct vm_area_struct *prev = vma->vm_prev;
91176-
91177- /*
91178- * Is there a mapping abutting this one below?
91179- *
91180- * That's only ok if it's the same stack mapping
91181- * that has gotten split..
91182- */
91183- if (prev && prev->vm_end == address)
91184- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
91185-
91186- expand_downwards(vma, address - PAGE_SIZE);
91187- }
91188- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
91189- struct vm_area_struct *next = vma->vm_next;
91190-
91191- /* As VM_GROWSDOWN but s/below/above/ */
91192- if (next && next->vm_start == address + PAGE_SIZE)
91193- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
91194-
91195- expand_upwards(vma, address + PAGE_SIZE);
91196- }
91197- return 0;
91198-}
91199-
91200-/*
91201 * We enter with non-exclusive mmap_sem (to exclude vma changes,
91202 * but allow concurrent faults), and pte mapped but not yet locked.
91203 * We return with mmap_sem still held, but pte unmapped and unlocked.
91204@@ -3219,27 +3401,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
91205 unsigned long address, pte_t *page_table, pmd_t *pmd,
91206 unsigned int flags)
91207 {
91208- struct page *page;
91209+ struct page *page = NULL;
91210 spinlock_t *ptl;
91211 pte_t entry;
91212
91213- pte_unmap(page_table);
91214-
91215- /* Check if we need to add a guard page to the stack */
91216- if (check_stack_guard_page(vma, address) < 0)
91217- return VM_FAULT_SIGBUS;
91218-
91219- /* Use the zero-page for reads */
91220 if (!(flags & FAULT_FLAG_WRITE)) {
91221 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
91222 vma->vm_page_prot));
91223- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
91224+ ptl = pte_lockptr(mm, pmd);
91225+ spin_lock(ptl);
91226 if (!pte_none(*page_table))
91227 goto unlock;
91228 goto setpte;
91229 }
91230
91231 /* Allocate our own private page. */
91232+ pte_unmap(page_table);
91233+
91234 if (unlikely(anon_vma_prepare(vma)))
91235 goto oom;
91236 page = alloc_zeroed_user_highpage_movable(vma, address);
91237@@ -3263,6 +3441,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
91238 if (!pte_none(*page_table))
91239 goto release;
91240
91241+#ifdef CONFIG_PAX_SEGMEXEC
91242+ if (pax_find_mirror_vma(vma))
91243+ BUG_ON(!trylock_page(page));
91244+#endif
91245+
91246 inc_mm_counter_fast(mm, MM_ANONPAGES);
91247 page_add_new_anon_rmap(page, vma, address);
91248 setpte:
91249@@ -3270,6 +3453,12 @@ setpte:
91250
91251 /* No need to invalidate - it was non-present before */
91252 update_mmu_cache(vma, address, page_table);
91253+
91254+#ifdef CONFIG_PAX_SEGMEXEC
91255+ if (page)
91256+ pax_mirror_anon_pte(vma, address, page, ptl);
91257+#endif
91258+
91259 unlock:
91260 pte_unmap_unlock(page_table, ptl);
91261 return 0;
91262@@ -3413,6 +3602,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91263 */
91264 /* Only go through if we didn't race with anybody else... */
91265 if (likely(pte_same(*page_table, orig_pte))) {
91266+
91267+#ifdef CONFIG_PAX_SEGMEXEC
91268+ if (anon && pax_find_mirror_vma(vma))
91269+ BUG_ON(!trylock_page(page));
91270+#endif
91271+
91272 flush_icache_page(vma, page);
91273 entry = mk_pte(page, vma->vm_page_prot);
91274 if (flags & FAULT_FLAG_WRITE)
91275@@ -3434,6 +3629,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91276
91277 /* no need to invalidate: a not-present page won't be cached */
91278 update_mmu_cache(vma, address, page_table);
91279+
91280+#ifdef CONFIG_PAX_SEGMEXEC
91281+ if (anon)
91282+ pax_mirror_anon_pte(vma, address, page, ptl);
91283+ else
91284+ pax_mirror_file_pte(vma, address, page, ptl);
91285+#endif
91286+
91287 } else {
91288 if (cow_page)
91289 mem_cgroup_uncharge_page(cow_page);
91290@@ -3681,6 +3884,12 @@ static int handle_pte_fault(struct mm_struct *mm,
91291 if (flags & FAULT_FLAG_WRITE)
91292 flush_tlb_fix_spurious_fault(vma, address);
91293 }
91294+
91295+#ifdef CONFIG_PAX_SEGMEXEC
91296+ pax_mirror_pte(vma, address, pte, pmd, ptl);
91297+ return 0;
91298+#endif
91299+
91300 unlock:
91301 pte_unmap_unlock(pte, ptl);
91302 return 0;
91303@@ -3697,9 +3906,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91304 pmd_t *pmd;
91305 pte_t *pte;
91306
91307+#ifdef CONFIG_PAX_SEGMEXEC
91308+ struct vm_area_struct *vma_m;
91309+#endif
91310+
91311 if (unlikely(is_vm_hugetlb_page(vma)))
91312 return hugetlb_fault(mm, vma, address, flags);
91313
91314+#ifdef CONFIG_PAX_SEGMEXEC
91315+ vma_m = pax_find_mirror_vma(vma);
91316+ if (vma_m) {
91317+ unsigned long address_m;
91318+ pgd_t *pgd_m;
91319+ pud_t *pud_m;
91320+ pmd_t *pmd_m;
91321+
91322+ if (vma->vm_start > vma_m->vm_start) {
91323+ address_m = address;
91324+ address -= SEGMEXEC_TASK_SIZE;
91325+ vma = vma_m;
91326+ } else
91327+ address_m = address + SEGMEXEC_TASK_SIZE;
91328+
91329+ pgd_m = pgd_offset(mm, address_m);
91330+ pud_m = pud_alloc(mm, pgd_m, address_m);
91331+ if (!pud_m)
91332+ return VM_FAULT_OOM;
91333+ pmd_m = pmd_alloc(mm, pud_m, address_m);
91334+ if (!pmd_m)
91335+ return VM_FAULT_OOM;
91336+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
91337+ return VM_FAULT_OOM;
91338+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
91339+ }
91340+#endif
91341+
91342 retry:
91343 pgd = pgd_offset(mm, address);
91344 pud = pud_alloc(mm, pgd, address);
91345@@ -3838,6 +4079,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
91346 spin_unlock(&mm->page_table_lock);
91347 return 0;
91348 }
91349+
91350+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
91351+{
91352+ pud_t *new = pud_alloc_one(mm, address);
91353+ if (!new)
91354+ return -ENOMEM;
91355+
91356+ smp_wmb(); /* See comment in __pte_alloc */
91357+
91358+ spin_lock(&mm->page_table_lock);
91359+ if (pgd_present(*pgd)) /* Another has populated it */
91360+ pud_free(mm, new);
91361+ else
91362+ pgd_populate_kernel(mm, pgd, new);
91363+ spin_unlock(&mm->page_table_lock);
91364+ return 0;
91365+}
91366 #endif /* __PAGETABLE_PUD_FOLDED */
91367
91368 #ifndef __PAGETABLE_PMD_FOLDED
91369@@ -3868,6 +4126,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
91370 spin_unlock(&mm->page_table_lock);
91371 return 0;
91372 }
91373+
91374+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
91375+{
91376+ pmd_t *new = pmd_alloc_one(mm, address);
91377+ if (!new)
91378+ return -ENOMEM;
91379+
91380+ smp_wmb(); /* See comment in __pte_alloc */
91381+
91382+ spin_lock(&mm->page_table_lock);
91383+#ifndef __ARCH_HAS_4LEVEL_HACK
91384+ if (pud_present(*pud)) /* Another has populated it */
91385+ pmd_free(mm, new);
91386+ else
91387+ pud_populate_kernel(mm, pud, new);
91388+#else
91389+ if (pgd_present(*pud)) /* Another has populated it */
91390+ pmd_free(mm, new);
91391+ else
91392+ pgd_populate_kernel(mm, pud, new);
91393+#endif /* __ARCH_HAS_4LEVEL_HACK */
91394+ spin_unlock(&mm->page_table_lock);
91395+ return 0;
91396+}
91397 #endif /* __PAGETABLE_PMD_FOLDED */
91398
91399 #if !defined(__HAVE_ARCH_GATE_AREA)
91400@@ -3881,7 +4163,7 @@ static int __init gate_vma_init(void)
91401 gate_vma.vm_start = FIXADDR_USER_START;
91402 gate_vma.vm_end = FIXADDR_USER_END;
91403 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
91404- gate_vma.vm_page_prot = __P101;
91405+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
91406
91407 return 0;
91408 }
91409@@ -4015,8 +4297,8 @@ out:
91410 return ret;
91411 }
91412
91413-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91414- void *buf, int len, int write)
91415+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91416+ void *buf, size_t len, int write)
91417 {
91418 resource_size_t phys_addr;
91419 unsigned long prot = 0;
91420@@ -4042,8 +4324,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
91421 * Access another process' address space as given in mm. If non-NULL, use the
91422 * given task for page fault accounting.
91423 */
91424-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91425- unsigned long addr, void *buf, int len, int write)
91426+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91427+ unsigned long addr, void *buf, size_t len, int write)
91428 {
91429 struct vm_area_struct *vma;
91430 void *old_buf = buf;
91431@@ -4051,7 +4333,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91432 down_read(&mm->mmap_sem);
91433 /* ignore errors, just check how much was successfully transferred */
91434 while (len) {
91435- int bytes, ret, offset;
91436+ ssize_t bytes, ret, offset;
91437 void *maddr;
91438 struct page *page = NULL;
91439
91440@@ -4110,8 +4392,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91441 *
91442 * The caller must hold a reference on @mm.
91443 */
91444-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91445- void *buf, int len, int write)
91446+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
91447+ void *buf, size_t len, int write)
91448 {
91449 return __access_remote_vm(NULL, mm, addr, buf, len, write);
91450 }
91451@@ -4121,11 +4403,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91452 * Source/target buffer must be kernel space,
91453 * Do not walk the page table directly, use get_user_pages
91454 */
91455-int access_process_vm(struct task_struct *tsk, unsigned long addr,
91456- void *buf, int len, int write)
91457+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
91458+ void *buf, size_t len, int write)
91459 {
91460 struct mm_struct *mm;
91461- int ret;
91462+ ssize_t ret;
91463
91464 mm = get_task_mm(tsk);
91465 if (!mm)
91466diff --git a/mm/mempolicy.c b/mm/mempolicy.c
91467index e1bd997..055f496 100644
91468--- a/mm/mempolicy.c
91469+++ b/mm/mempolicy.c
91470@@ -747,6 +747,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
91471 unsigned long vmstart;
91472 unsigned long vmend;
91473
91474+#ifdef CONFIG_PAX_SEGMEXEC
91475+ struct vm_area_struct *vma_m;
91476+#endif
91477+
91478 vma = find_vma(mm, start);
91479 if (!vma || vma->vm_start > start)
91480 return -EFAULT;
91481@@ -790,6 +794,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
91482 err = vma_replace_policy(vma, new_pol);
91483 if (err)
91484 goto out;
91485+
91486+#ifdef CONFIG_PAX_SEGMEXEC
91487+ vma_m = pax_find_mirror_vma(vma);
91488+ if (vma_m) {
91489+ err = vma_replace_policy(vma_m, new_pol);
91490+ if (err)
91491+ goto out;
91492+ }
91493+#endif
91494+
91495 }
91496
91497 out:
91498@@ -1255,6 +1269,17 @@ static long do_mbind(unsigned long start, unsigned long len,
91499
91500 if (end < start)
91501 return -EINVAL;
91502+
91503+#ifdef CONFIG_PAX_SEGMEXEC
91504+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
91505+ if (end > SEGMEXEC_TASK_SIZE)
91506+ return -EINVAL;
91507+ } else
91508+#endif
91509+
91510+ if (end > TASK_SIZE)
91511+ return -EINVAL;
91512+
91513 if (end == start)
91514 return 0;
91515
91516@@ -1483,8 +1508,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
91517 */
91518 tcred = __task_cred(task);
91519 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
91520- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
91521- !capable(CAP_SYS_NICE)) {
91522+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
91523 rcu_read_unlock();
91524 err = -EPERM;
91525 goto out_put;
91526@@ -1515,6 +1539,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
91527 goto out;
91528 }
91529
91530+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
91531+ if (mm != current->mm &&
91532+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
91533+ mmput(mm);
91534+ err = -EPERM;
91535+ goto out;
91536+ }
91537+#endif
91538+
91539 err = do_migrate_pages(mm, old, new,
91540 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
91541
91542diff --git a/mm/migrate.c b/mm/migrate.c
91543index 9194375..75c81e2 100644
91544--- a/mm/migrate.c
91545+++ b/mm/migrate.c
91546@@ -1464,8 +1464,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
91547 */
91548 tcred = __task_cred(task);
91549 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
91550- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
91551- !capable(CAP_SYS_NICE)) {
91552+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
91553 rcu_read_unlock();
91554 err = -EPERM;
91555 goto out;
91556diff --git a/mm/mlock.c b/mm/mlock.c
91557index 192e6ee..b044449 100644
91558--- a/mm/mlock.c
91559+++ b/mm/mlock.c
91560@@ -14,6 +14,7 @@
91561 #include <linux/pagevec.h>
91562 #include <linux/mempolicy.h>
91563 #include <linux/syscalls.h>
91564+#include <linux/security.h>
91565 #include <linux/sched.h>
91566 #include <linux/export.h>
91567 #include <linux/rmap.h>
91568@@ -588,7 +589,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
91569 {
91570 unsigned long nstart, end, tmp;
91571 struct vm_area_struct * vma, * prev;
91572- int error;
91573+ int error = 0;
91574
91575 VM_BUG_ON(start & ~PAGE_MASK);
91576 VM_BUG_ON(len != PAGE_ALIGN(len));
91577@@ -597,6 +598,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
91578 return -EINVAL;
91579 if (end == start)
91580 return 0;
91581+ if (end > TASK_SIZE)
91582+ return -EINVAL;
91583+
91584 vma = find_vma(current->mm, start);
91585 if (!vma || vma->vm_start > start)
91586 return -ENOMEM;
91587@@ -608,6 +612,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
91588 for (nstart = start ; ; ) {
91589 vm_flags_t newflags;
91590
91591+#ifdef CONFIG_PAX_SEGMEXEC
91592+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
91593+ break;
91594+#endif
91595+
91596 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
91597
91598 newflags = vma->vm_flags & ~VM_LOCKED;
91599@@ -720,6 +729,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
91600 lock_limit >>= PAGE_SHIFT;
91601
91602 /* check against resource limits */
91603+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
91604 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
91605 error = do_mlock(start, len, 1);
91606 up_write(&current->mm->mmap_sem);
91607@@ -754,6 +764,11 @@ static int do_mlockall(int flags)
91608 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
91609 vm_flags_t newflags;
91610
91611+#ifdef CONFIG_PAX_SEGMEXEC
91612+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
91613+ break;
91614+#endif
91615+
91616 newflags = vma->vm_flags & ~VM_LOCKED;
91617 if (flags & MCL_CURRENT)
91618 newflags |= VM_LOCKED;
91619@@ -787,6 +802,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
91620 lock_limit >>= PAGE_SHIFT;
91621
91622 ret = -ENOMEM;
91623+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
91624 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
91625 capable(CAP_IPC_LOCK))
91626 ret = do_mlockall(flags);
91627diff --git a/mm/mmap.c b/mm/mmap.c
91628index 834b2d7..650d1b9 100644
91629--- a/mm/mmap.c
91630+++ b/mm/mmap.c
91631@@ -36,6 +36,7 @@
91632 #include <linux/sched/sysctl.h>
91633 #include <linux/notifier.h>
91634 #include <linux/memory.h>
91635+#include <linux/random.h>
91636
91637 #include <asm/uaccess.h>
91638 #include <asm/cacheflush.h>
91639@@ -52,6 +53,16 @@
91640 #define arch_rebalance_pgtables(addr, len) (addr)
91641 #endif
91642
91643+static inline void verify_mm_writelocked(struct mm_struct *mm)
91644+{
91645+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
91646+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
91647+ up_read(&mm->mmap_sem);
91648+ BUG();
91649+ }
91650+#endif
91651+}
91652+
91653 static void unmap_region(struct mm_struct *mm,
91654 struct vm_area_struct *vma, struct vm_area_struct *prev,
91655 unsigned long start, unsigned long end);
91656@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
91657 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
91658 *
91659 */
91660-pgprot_t protection_map[16] = {
91661+pgprot_t protection_map[16] __read_only = {
91662 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
91663 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
91664 };
91665
91666-pgprot_t vm_get_page_prot(unsigned long vm_flags)
91667+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
91668 {
91669- return __pgprot(pgprot_val(protection_map[vm_flags &
91670+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
91671 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
91672 pgprot_val(arch_vm_get_page_prot(vm_flags)));
91673+
91674+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
91675+ if (!(__supported_pte_mask & _PAGE_NX) &&
91676+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
91677+ (vm_flags & (VM_READ | VM_WRITE)))
91678+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
91679+#endif
91680+
91681+ return prot;
91682 }
91683 EXPORT_SYMBOL(vm_get_page_prot);
91684
91685@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
91686 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
91687 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
91688 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
91689+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
91690 /*
91691 * Make sure vm_committed_as in one cacheline and not cacheline shared with
91692 * other variables. It can be updated by several CPUs frequently.
91693@@ -245,6 +266,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
91694 struct vm_area_struct *next = vma->vm_next;
91695
91696 might_sleep();
91697+ BUG_ON(vma->vm_mirror);
91698 if (vma->vm_ops && vma->vm_ops->close)
91699 vma->vm_ops->close(vma);
91700 if (vma->vm_file)
91701@@ -289,6 +311,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
91702 * not page aligned -Ram Gupta
91703 */
91704 rlim = rlimit(RLIMIT_DATA);
91705+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
91706 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
91707 (mm->end_data - mm->start_data) > rlim)
91708 goto out;
91709@@ -893,7 +916,15 @@ again: remove_next = 1 + (end > next->vm_end);
91710 static inline int is_mergeable_vma(struct vm_area_struct *vma,
91711 struct file *file, unsigned long vm_flags)
91712 {
91713- if (vma->vm_flags ^ vm_flags)
91714+ /*
91715+ * VM_SOFTDIRTY should not prevent from VMA merging, if we
91716+ * match the flags but dirty bit -- the caller should mark
91717+ * merged VMA as dirty. If dirty bit won't be excluded from
91718+ * comparison, we increase pressue on the memory system forcing
91719+ * the kernel to generate new VMAs when old one could be
91720+ * extended instead.
91721+ */
91722+ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
91723 return 0;
91724 if (vma->vm_file != file)
91725 return 0;
91726@@ -931,6 +962,12 @@ static int
91727 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
91728 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
91729 {
91730+
91731+#ifdef CONFIG_PAX_SEGMEXEC
91732+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
91733+ return 0;
91734+#endif
91735+
91736 if (is_mergeable_vma(vma, file, vm_flags) &&
91737 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
91738 if (vma->vm_pgoff == vm_pgoff)
91739@@ -950,6 +987,12 @@ static int
91740 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
91741 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
91742 {
91743+
91744+#ifdef CONFIG_PAX_SEGMEXEC
91745+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
91746+ return 0;
91747+#endif
91748+
91749 if (is_mergeable_vma(vma, file, vm_flags) &&
91750 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
91751 pgoff_t vm_pglen;
91752@@ -992,13 +1035,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
91753 struct vm_area_struct *vma_merge(struct mm_struct *mm,
91754 struct vm_area_struct *prev, unsigned long addr,
91755 unsigned long end, unsigned long vm_flags,
91756- struct anon_vma *anon_vma, struct file *file,
91757+ struct anon_vma *anon_vma, struct file *file,
91758 pgoff_t pgoff, struct mempolicy *policy)
91759 {
91760 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
91761 struct vm_area_struct *area, *next;
91762 int err;
91763
91764+#ifdef CONFIG_PAX_SEGMEXEC
91765+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
91766+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
91767+
91768+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
91769+#endif
91770+
91771 /*
91772 * We later require that vma->vm_flags == vm_flags,
91773 * so this tests vma->vm_flags & VM_SPECIAL, too.
91774@@ -1014,6 +1064,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
91775 if (next && next->vm_end == end) /* cases 6, 7, 8 */
91776 next = next->vm_next;
91777
91778+#ifdef CONFIG_PAX_SEGMEXEC
91779+ if (prev)
91780+ prev_m = pax_find_mirror_vma(prev);
91781+ if (area)
91782+ area_m = pax_find_mirror_vma(area);
91783+ if (next)
91784+ next_m = pax_find_mirror_vma(next);
91785+#endif
91786+
91787 /*
91788 * Can it merge with the predecessor?
91789 */
91790@@ -1033,9 +1092,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
91791 /* cases 1, 6 */
91792 err = vma_adjust(prev, prev->vm_start,
91793 next->vm_end, prev->vm_pgoff, NULL);
91794- } else /* cases 2, 5, 7 */
91795+
91796+#ifdef CONFIG_PAX_SEGMEXEC
91797+ if (!err && prev_m)
91798+ err = vma_adjust(prev_m, prev_m->vm_start,
91799+ next_m->vm_end, prev_m->vm_pgoff, NULL);
91800+#endif
91801+
91802+ } else { /* cases 2, 5, 7 */
91803 err = vma_adjust(prev, prev->vm_start,
91804 end, prev->vm_pgoff, NULL);
91805+
91806+#ifdef CONFIG_PAX_SEGMEXEC
91807+ if (!err && prev_m)
91808+ err = vma_adjust(prev_m, prev_m->vm_start,
91809+ end_m, prev_m->vm_pgoff, NULL);
91810+#endif
91811+
91812+ }
91813 if (err)
91814 return NULL;
91815 khugepaged_enter_vma_merge(prev);
91816@@ -1049,12 +1123,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
91817 mpol_equal(policy, vma_policy(next)) &&
91818 can_vma_merge_before(next, vm_flags,
91819 anon_vma, file, pgoff+pglen)) {
91820- if (prev && addr < prev->vm_end) /* case 4 */
91821+ if (prev && addr < prev->vm_end) { /* case 4 */
91822 err = vma_adjust(prev, prev->vm_start,
91823 addr, prev->vm_pgoff, NULL);
91824- else /* cases 3, 8 */
91825+
91826+#ifdef CONFIG_PAX_SEGMEXEC
91827+ if (!err && prev_m)
91828+ err = vma_adjust(prev_m, prev_m->vm_start,
91829+ addr_m, prev_m->vm_pgoff, NULL);
91830+#endif
91831+
91832+ } else { /* cases 3, 8 */
91833 err = vma_adjust(area, addr, next->vm_end,
91834 next->vm_pgoff - pglen, NULL);
91835+
91836+#ifdef CONFIG_PAX_SEGMEXEC
91837+ if (!err && area_m)
91838+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
91839+ next_m->vm_pgoff - pglen, NULL);
91840+#endif
91841+
91842+ }
91843 if (err)
91844 return NULL;
91845 khugepaged_enter_vma_merge(area);
91846@@ -1082,7 +1171,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
91847 return a->vm_end == b->vm_start &&
91848 mpol_equal(vma_policy(a), vma_policy(b)) &&
91849 a->vm_file == b->vm_file &&
91850- !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
91851+ !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
91852 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
91853 }
91854
91855@@ -1163,8 +1252,10 @@ none:
91856 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
91857 struct file *file, long pages)
91858 {
91859- const unsigned long stack_flags
91860- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
91861+
91862+#ifdef CONFIG_PAX_RANDMMAP
91863+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
91864+#endif
91865
91866 mm->total_vm += pages;
91867
91868@@ -1172,7 +1263,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
91869 mm->shared_vm += pages;
91870 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
91871 mm->exec_vm += pages;
91872- } else if (flags & stack_flags)
91873+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
91874 mm->stack_vm += pages;
91875 }
91876 #endif /* CONFIG_PROC_FS */
91877@@ -1210,7 +1301,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91878 * (the exception is when the underlying filesystem is noexec
91879 * mounted, in which case we dont add PROT_EXEC.)
91880 */
91881- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
91882+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
91883 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
91884 prot |= PROT_EXEC;
91885
91886@@ -1236,7 +1327,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91887 /* Obtain the address to map to. we verify (or select) it and ensure
91888 * that it represents a valid section of the address space.
91889 */
91890- addr = get_unmapped_area(file, addr, len, pgoff, flags);
91891+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
91892 if (addr & ~PAGE_MASK)
91893 return addr;
91894
91895@@ -1247,6 +1338,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91896 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
91897 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
91898
91899+#ifdef CONFIG_PAX_MPROTECT
91900+ if (mm->pax_flags & MF_PAX_MPROTECT) {
91901+
91902+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
91903+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
91904+ mm->binfmt->handle_mmap)
91905+ mm->binfmt->handle_mmap(file);
91906+#endif
91907+
91908+#ifndef CONFIG_PAX_MPROTECT_COMPAT
91909+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
91910+ gr_log_rwxmmap(file);
91911+
91912+#ifdef CONFIG_PAX_EMUPLT
91913+ vm_flags &= ~VM_EXEC;
91914+#else
91915+ return -EPERM;
91916+#endif
91917+
91918+ }
91919+
91920+ if (!(vm_flags & VM_EXEC))
91921+ vm_flags &= ~VM_MAYEXEC;
91922+#else
91923+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
91924+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
91925+#endif
91926+ else
91927+ vm_flags &= ~VM_MAYWRITE;
91928+ }
91929+#endif
91930+
91931+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
91932+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
91933+ vm_flags &= ~VM_PAGEEXEC;
91934+#endif
91935+
91936 if (flags & MAP_LOCKED)
91937 if (!can_do_mlock())
91938 return -EPERM;
91939@@ -1258,6 +1386,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91940 locked += mm->locked_vm;
91941 lock_limit = rlimit(RLIMIT_MEMLOCK);
91942 lock_limit >>= PAGE_SHIFT;
91943+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
91944 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
91945 return -EAGAIN;
91946 }
91947@@ -1342,6 +1471,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91948 vm_flags |= VM_NORESERVE;
91949 }
91950
91951+ if (!gr_acl_handle_mmap(file, prot))
91952+ return -EACCES;
91953+
91954 addr = mmap_region(file, addr, len, vm_flags, pgoff);
91955 if (!IS_ERR_VALUE(addr) &&
91956 ((vm_flags & VM_LOCKED) ||
91957@@ -1435,7 +1567,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
91958 vm_flags_t vm_flags = vma->vm_flags;
91959
91960 /* If it was private or non-writable, the write bit is already clear */
91961- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
91962+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
91963 return 0;
91964
91965 /* The backer wishes to know when pages are first written to? */
91966@@ -1481,7 +1613,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
91967 struct rb_node **rb_link, *rb_parent;
91968 unsigned long charged = 0;
91969
91970+#ifdef CONFIG_PAX_SEGMEXEC
91971+ struct vm_area_struct *vma_m = NULL;
91972+#endif
91973+
91974+ /*
91975+ * mm->mmap_sem is required to protect against another thread
91976+ * changing the mappings in case we sleep.
91977+ */
91978+ verify_mm_writelocked(mm);
91979+
91980 /* Check against address space limit. */
91981+
91982+#ifdef CONFIG_PAX_RANDMMAP
91983+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
91984+#endif
91985+
91986 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
91987 unsigned long nr_pages;
91988
91989@@ -1500,11 +1647,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
91990
91991 /* Clear old maps */
91992 error = -ENOMEM;
91993-munmap_back:
91994 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
91995 if (do_munmap(mm, addr, len))
91996 return -ENOMEM;
91997- goto munmap_back;
91998+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
91999 }
92000
92001 /*
92002@@ -1535,6 +1681,16 @@ munmap_back:
92003 goto unacct_error;
92004 }
92005
92006+#ifdef CONFIG_PAX_SEGMEXEC
92007+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
92008+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
92009+ if (!vma_m) {
92010+ error = -ENOMEM;
92011+ goto free_vma;
92012+ }
92013+ }
92014+#endif
92015+
92016 vma->vm_mm = mm;
92017 vma->vm_start = addr;
92018 vma->vm_end = addr + len;
92019@@ -1554,6 +1710,13 @@ munmap_back:
92020 if (error)
92021 goto unmap_and_free_vma;
92022
92023+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
92024+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
92025+ vma->vm_flags |= VM_PAGEEXEC;
92026+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
92027+ }
92028+#endif
92029+
92030 /* Can addr have changed??
92031 *
92032 * Answer: Yes, several device drivers can do it in their
92033@@ -1587,6 +1750,12 @@ munmap_back:
92034 }
92035
92036 vma_link(mm, vma, prev, rb_link, rb_parent);
92037+
92038+#ifdef CONFIG_PAX_SEGMEXEC
92039+ if (vma_m)
92040+ BUG_ON(pax_mirror_vma(vma_m, vma));
92041+#endif
92042+
92043 /* Once vma denies write, undo our temporary denial count */
92044 if (vm_flags & VM_DENYWRITE)
92045 allow_write_access(file);
92046@@ -1595,6 +1764,7 @@ out:
92047 perf_event_mmap(vma);
92048
92049 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
92050+ track_exec_limit(mm, addr, addr + len, vm_flags);
92051 if (vm_flags & VM_LOCKED) {
92052 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
92053 vma == get_gate_vma(current->mm)))
92054@@ -1627,6 +1797,12 @@ unmap_and_free_vma:
92055 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
92056 charged = 0;
92057 free_vma:
92058+
92059+#ifdef CONFIG_PAX_SEGMEXEC
92060+ if (vma_m)
92061+ kmem_cache_free(vm_area_cachep, vma_m);
92062+#endif
92063+
92064 kmem_cache_free(vm_area_cachep, vma);
92065 unacct_error:
92066 if (charged)
92067@@ -1634,7 +1810,63 @@ unacct_error:
92068 return error;
92069 }
92070
92071-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
92072+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
92073+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
92074+{
92075+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
92076+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
92077+
92078+ return 0;
92079+}
92080+#endif
92081+
92082+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
92083+{
92084+ if (!vma) {
92085+#ifdef CONFIG_STACK_GROWSUP
92086+ if (addr > sysctl_heap_stack_gap)
92087+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
92088+ else
92089+ vma = find_vma(current->mm, 0);
92090+ if (vma && (vma->vm_flags & VM_GROWSUP))
92091+ return false;
92092+#endif
92093+ return true;
92094+ }
92095+
92096+ if (addr + len > vma->vm_start)
92097+ return false;
92098+
92099+ if (vma->vm_flags & VM_GROWSDOWN)
92100+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
92101+#ifdef CONFIG_STACK_GROWSUP
92102+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
92103+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
92104+#endif
92105+ else if (offset)
92106+ return offset <= vma->vm_start - addr - len;
92107+
92108+ return true;
92109+}
92110+
92111+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
92112+{
92113+ if (vma->vm_start < len)
92114+ return -ENOMEM;
92115+
92116+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
92117+ if (offset <= vma->vm_start - len)
92118+ return vma->vm_start - len - offset;
92119+ else
92120+ return -ENOMEM;
92121+ }
92122+
92123+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
92124+ return vma->vm_start - len - sysctl_heap_stack_gap;
92125+ return -ENOMEM;
92126+}
92127+
92128+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
92129 {
92130 /*
92131 * We implement the search by looking for an rbtree node that
92132@@ -1682,11 +1914,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
92133 }
92134 }
92135
92136- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
92137+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
92138 check_current:
92139 /* Check if current node has a suitable gap */
92140 if (gap_start > high_limit)
92141 return -ENOMEM;
92142+
92143+ if (gap_end - gap_start > info->threadstack_offset)
92144+ gap_start += info->threadstack_offset;
92145+ else
92146+ gap_start = gap_end;
92147+
92148+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
92149+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92150+ gap_start += sysctl_heap_stack_gap;
92151+ else
92152+ gap_start = gap_end;
92153+ }
92154+ if (vma->vm_flags & VM_GROWSDOWN) {
92155+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92156+ gap_end -= sysctl_heap_stack_gap;
92157+ else
92158+ gap_end = gap_start;
92159+ }
92160 if (gap_end >= low_limit && gap_end - gap_start >= length)
92161 goto found;
92162
92163@@ -1736,7 +1986,7 @@ found:
92164 return gap_start;
92165 }
92166
92167-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
92168+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
92169 {
92170 struct mm_struct *mm = current->mm;
92171 struct vm_area_struct *vma;
92172@@ -1790,6 +2040,24 @@ check_current:
92173 gap_end = vma->vm_start;
92174 if (gap_end < low_limit)
92175 return -ENOMEM;
92176+
92177+ if (gap_end - gap_start > info->threadstack_offset)
92178+ gap_end -= info->threadstack_offset;
92179+ else
92180+ gap_end = gap_start;
92181+
92182+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
92183+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92184+ gap_start += sysctl_heap_stack_gap;
92185+ else
92186+ gap_start = gap_end;
92187+ }
92188+ if (vma->vm_flags & VM_GROWSDOWN) {
92189+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92190+ gap_end -= sysctl_heap_stack_gap;
92191+ else
92192+ gap_end = gap_start;
92193+ }
92194 if (gap_start <= high_limit && gap_end - gap_start >= length)
92195 goto found;
92196
92197@@ -1853,6 +2121,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92198 struct mm_struct *mm = current->mm;
92199 struct vm_area_struct *vma;
92200 struct vm_unmapped_area_info info;
92201+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
92202
92203 if (len > TASK_SIZE - mmap_min_addr)
92204 return -ENOMEM;
92205@@ -1860,11 +2129,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92206 if (flags & MAP_FIXED)
92207 return addr;
92208
92209+#ifdef CONFIG_PAX_RANDMMAP
92210+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
92211+#endif
92212+
92213 if (addr) {
92214 addr = PAGE_ALIGN(addr);
92215 vma = find_vma(mm, addr);
92216 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
92217- (!vma || addr + len <= vma->vm_start))
92218+ check_heap_stack_gap(vma, addr, len, offset))
92219 return addr;
92220 }
92221
92222@@ -1873,6 +2146,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92223 info.low_limit = mm->mmap_base;
92224 info.high_limit = TASK_SIZE;
92225 info.align_mask = 0;
92226+ info.threadstack_offset = offset;
92227 return vm_unmapped_area(&info);
92228 }
92229 #endif
92230@@ -1891,6 +2165,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
92231 struct mm_struct *mm = current->mm;
92232 unsigned long addr = addr0;
92233 struct vm_unmapped_area_info info;
92234+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
92235
92236 /* requested length too big for entire address space */
92237 if (len > TASK_SIZE - mmap_min_addr)
92238@@ -1899,12 +2174,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
92239 if (flags & MAP_FIXED)
92240 return addr;
92241
92242+#ifdef CONFIG_PAX_RANDMMAP
92243+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
92244+#endif
92245+
92246 /* requesting a specific address */
92247 if (addr) {
92248 addr = PAGE_ALIGN(addr);
92249 vma = find_vma(mm, addr);
92250 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
92251- (!vma || addr + len <= vma->vm_start))
92252+ check_heap_stack_gap(vma, addr, len, offset))
92253 return addr;
92254 }
92255
92256@@ -1913,6 +2192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
92257 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
92258 info.high_limit = mm->mmap_base;
92259 info.align_mask = 0;
92260+ info.threadstack_offset = offset;
92261 addr = vm_unmapped_area(&info);
92262
92263 /*
92264@@ -1925,6 +2205,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
92265 VM_BUG_ON(addr != -ENOMEM);
92266 info.flags = 0;
92267 info.low_limit = TASK_UNMAPPED_BASE;
92268+
92269+#ifdef CONFIG_PAX_RANDMMAP
92270+ if (mm->pax_flags & MF_PAX_RANDMMAP)
92271+ info.low_limit += mm->delta_mmap;
92272+#endif
92273+
92274 info.high_limit = TASK_SIZE;
92275 addr = vm_unmapped_area(&info);
92276 }
92277@@ -2026,6 +2312,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
92278 return vma;
92279 }
92280
92281+#ifdef CONFIG_PAX_SEGMEXEC
92282+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
92283+{
92284+ struct vm_area_struct *vma_m;
92285+
92286+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
92287+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
92288+ BUG_ON(vma->vm_mirror);
92289+ return NULL;
92290+ }
92291+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
92292+ vma_m = vma->vm_mirror;
92293+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
92294+ BUG_ON(vma->vm_file != vma_m->vm_file);
92295+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
92296+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
92297+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
92298+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
92299+ return vma_m;
92300+}
92301+#endif
92302+
92303 /*
92304 * Verify that the stack growth is acceptable and
92305 * update accounting. This is shared with both the
92306@@ -2042,6 +2350,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
92307 return -ENOMEM;
92308
92309 /* Stack limit test */
92310+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
92311 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
92312 return -ENOMEM;
92313
92314@@ -2052,6 +2361,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
92315 locked = mm->locked_vm + grow;
92316 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
92317 limit >>= PAGE_SHIFT;
92318+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
92319 if (locked > limit && !capable(CAP_IPC_LOCK))
92320 return -ENOMEM;
92321 }
92322@@ -2081,37 +2391,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
92323 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
92324 * vma is the last one with address > vma->vm_end. Have to extend vma.
92325 */
92326+#ifndef CONFIG_IA64
92327+static
92328+#endif
92329 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
92330 {
92331 int error;
92332+ bool locknext;
92333
92334 if (!(vma->vm_flags & VM_GROWSUP))
92335 return -EFAULT;
92336
92337+ /* Also guard against wrapping around to address 0. */
92338+ if (address < PAGE_ALIGN(address+1))
92339+ address = PAGE_ALIGN(address+1);
92340+ else
92341+ return -ENOMEM;
92342+
92343 /*
92344 * We must make sure the anon_vma is allocated
92345 * so that the anon_vma locking is not a noop.
92346 */
92347 if (unlikely(anon_vma_prepare(vma)))
92348 return -ENOMEM;
92349+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
92350+ if (locknext && anon_vma_prepare(vma->vm_next))
92351+ return -ENOMEM;
92352 vma_lock_anon_vma(vma);
92353+ if (locknext)
92354+ vma_lock_anon_vma(vma->vm_next);
92355
92356 /*
92357 * vma->vm_start/vm_end cannot change under us because the caller
92358 * is required to hold the mmap_sem in read mode. We need the
92359- * anon_vma lock to serialize against concurrent expand_stacks.
92360- * Also guard against wrapping around to address 0.
92361+ * anon_vma locks to serialize against concurrent expand_stacks
92362+ * and expand_upwards.
92363 */
92364- if (address < PAGE_ALIGN(address+4))
92365- address = PAGE_ALIGN(address+4);
92366- else {
92367- vma_unlock_anon_vma(vma);
92368- return -ENOMEM;
92369- }
92370 error = 0;
92371
92372 /* Somebody else might have raced and expanded it already */
92373- if (address > vma->vm_end) {
92374+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
92375+ error = -ENOMEM;
92376+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
92377 unsigned long size, grow;
92378
92379 size = address - vma->vm_start;
92380@@ -2146,6 +2467,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
92381 }
92382 }
92383 }
92384+ if (locknext)
92385+ vma_unlock_anon_vma(vma->vm_next);
92386 vma_unlock_anon_vma(vma);
92387 khugepaged_enter_vma_merge(vma);
92388 validate_mm(vma->vm_mm);
92389@@ -2160,6 +2483,8 @@ int expand_downwards(struct vm_area_struct *vma,
92390 unsigned long address)
92391 {
92392 int error;
92393+ bool lockprev = false;
92394+ struct vm_area_struct *prev;
92395
92396 /*
92397 * We must make sure the anon_vma is allocated
92398@@ -2173,6 +2498,15 @@ int expand_downwards(struct vm_area_struct *vma,
92399 if (error)
92400 return error;
92401
92402+ prev = vma->vm_prev;
92403+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
92404+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
92405+#endif
92406+ if (lockprev && anon_vma_prepare(prev))
92407+ return -ENOMEM;
92408+ if (lockprev)
92409+ vma_lock_anon_vma(prev);
92410+
92411 vma_lock_anon_vma(vma);
92412
92413 /*
92414@@ -2182,9 +2516,17 @@ int expand_downwards(struct vm_area_struct *vma,
92415 */
92416
92417 /* Somebody else might have raced and expanded it already */
92418- if (address < vma->vm_start) {
92419+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
92420+ error = -ENOMEM;
92421+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
92422 unsigned long size, grow;
92423
92424+#ifdef CONFIG_PAX_SEGMEXEC
92425+ struct vm_area_struct *vma_m;
92426+
92427+ vma_m = pax_find_mirror_vma(vma);
92428+#endif
92429+
92430 size = vma->vm_end - address;
92431 grow = (vma->vm_start - address) >> PAGE_SHIFT;
92432
92433@@ -2209,13 +2551,27 @@ int expand_downwards(struct vm_area_struct *vma,
92434 vma->vm_pgoff -= grow;
92435 anon_vma_interval_tree_post_update_vma(vma);
92436 vma_gap_update(vma);
92437+
92438+#ifdef CONFIG_PAX_SEGMEXEC
92439+ if (vma_m) {
92440+ anon_vma_interval_tree_pre_update_vma(vma_m);
92441+ vma_m->vm_start -= grow << PAGE_SHIFT;
92442+ vma_m->vm_pgoff -= grow;
92443+ anon_vma_interval_tree_post_update_vma(vma_m);
92444+ vma_gap_update(vma_m);
92445+ }
92446+#endif
92447+
92448 spin_unlock(&vma->vm_mm->page_table_lock);
92449
92450+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
92451 perf_event_mmap(vma);
92452 }
92453 }
92454 }
92455 vma_unlock_anon_vma(vma);
92456+ if (lockprev)
92457+ vma_unlock_anon_vma(prev);
92458 khugepaged_enter_vma_merge(vma);
92459 validate_mm(vma->vm_mm);
92460 return error;
92461@@ -2313,6 +2669,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
92462 do {
92463 long nrpages = vma_pages(vma);
92464
92465+#ifdef CONFIG_PAX_SEGMEXEC
92466+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
92467+ vma = remove_vma(vma);
92468+ continue;
92469+ }
92470+#endif
92471+
92472 if (vma->vm_flags & VM_ACCOUNT)
92473 nr_accounted += nrpages;
92474 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
92475@@ -2357,6 +2720,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
92476 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
92477 vma->vm_prev = NULL;
92478 do {
92479+
92480+#ifdef CONFIG_PAX_SEGMEXEC
92481+ if (vma->vm_mirror) {
92482+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
92483+ vma->vm_mirror->vm_mirror = NULL;
92484+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
92485+ vma->vm_mirror = NULL;
92486+ }
92487+#endif
92488+
92489 vma_rb_erase(vma, &mm->mm_rb);
92490 mm->map_count--;
92491 tail_vma = vma;
92492@@ -2382,14 +2755,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92493 struct vm_area_struct *new;
92494 int err = -ENOMEM;
92495
92496+#ifdef CONFIG_PAX_SEGMEXEC
92497+ struct vm_area_struct *vma_m, *new_m = NULL;
92498+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
92499+#endif
92500+
92501 if (is_vm_hugetlb_page(vma) && (addr &
92502 ~(huge_page_mask(hstate_vma(vma)))))
92503 return -EINVAL;
92504
92505+#ifdef CONFIG_PAX_SEGMEXEC
92506+ vma_m = pax_find_mirror_vma(vma);
92507+#endif
92508+
92509 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
92510 if (!new)
92511 goto out_err;
92512
92513+#ifdef CONFIG_PAX_SEGMEXEC
92514+ if (vma_m) {
92515+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
92516+ if (!new_m) {
92517+ kmem_cache_free(vm_area_cachep, new);
92518+ goto out_err;
92519+ }
92520+ }
92521+#endif
92522+
92523 /* most fields are the same, copy all, and then fixup */
92524 *new = *vma;
92525
92526@@ -2402,6 +2794,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92527 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
92528 }
92529
92530+#ifdef CONFIG_PAX_SEGMEXEC
92531+ if (vma_m) {
92532+ *new_m = *vma_m;
92533+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
92534+ new_m->vm_mirror = new;
92535+ new->vm_mirror = new_m;
92536+
92537+ if (new_below)
92538+ new_m->vm_end = addr_m;
92539+ else {
92540+ new_m->vm_start = addr_m;
92541+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
92542+ }
92543+ }
92544+#endif
92545+
92546 err = vma_dup_policy(vma, new);
92547 if (err)
92548 goto out_free_vma;
92549@@ -2421,6 +2829,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92550 else
92551 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
92552
92553+#ifdef CONFIG_PAX_SEGMEXEC
92554+ if (!err && vma_m) {
92555+ struct mempolicy *pol = vma_policy(new);
92556+
92557+ if (anon_vma_clone(new_m, vma_m))
92558+ goto out_free_mpol;
92559+
92560+ mpol_get(pol);
92561+ set_vma_policy(new_m, pol);
92562+
92563+ if (new_m->vm_file)
92564+ get_file(new_m->vm_file);
92565+
92566+ if (new_m->vm_ops && new_m->vm_ops->open)
92567+ new_m->vm_ops->open(new_m);
92568+
92569+ if (new_below)
92570+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
92571+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
92572+ else
92573+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
92574+
92575+ if (err) {
92576+ if (new_m->vm_ops && new_m->vm_ops->close)
92577+ new_m->vm_ops->close(new_m);
92578+ if (new_m->vm_file)
92579+ fput(new_m->vm_file);
92580+ mpol_put(pol);
92581+ }
92582+ }
92583+#endif
92584+
92585 /* Success. */
92586 if (!err)
92587 return 0;
92588@@ -2430,10 +2870,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92589 new->vm_ops->close(new);
92590 if (new->vm_file)
92591 fput(new->vm_file);
92592- unlink_anon_vmas(new);
92593 out_free_mpol:
92594 mpol_put(vma_policy(new));
92595 out_free_vma:
92596+
92597+#ifdef CONFIG_PAX_SEGMEXEC
92598+ if (new_m) {
92599+ unlink_anon_vmas(new_m);
92600+ kmem_cache_free(vm_area_cachep, new_m);
92601+ }
92602+#endif
92603+
92604+ unlink_anon_vmas(new);
92605 kmem_cache_free(vm_area_cachep, new);
92606 out_err:
92607 return err;
92608@@ -2446,6 +2894,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
92609 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
92610 unsigned long addr, int new_below)
92611 {
92612+
92613+#ifdef CONFIG_PAX_SEGMEXEC
92614+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
92615+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
92616+ if (mm->map_count >= sysctl_max_map_count-1)
92617+ return -ENOMEM;
92618+ } else
92619+#endif
92620+
92621 if (mm->map_count >= sysctl_max_map_count)
92622 return -ENOMEM;
92623
92624@@ -2457,11 +2914,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
92625 * work. This now handles partial unmappings.
92626 * Jeremy Fitzhardinge <jeremy@goop.org>
92627 */
92628+#ifdef CONFIG_PAX_SEGMEXEC
92629 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
92630 {
92631+ int ret = __do_munmap(mm, start, len);
92632+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
92633+ return ret;
92634+
92635+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
92636+}
92637+
92638+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
92639+#else
92640+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
92641+#endif
92642+{
92643 unsigned long end;
92644 struct vm_area_struct *vma, *prev, *last;
92645
92646+ /*
92647+ * mm->mmap_sem is required to protect against another thread
92648+ * changing the mappings in case we sleep.
92649+ */
92650+ verify_mm_writelocked(mm);
92651+
92652 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
92653 return -EINVAL;
92654
92655@@ -2536,6 +3012,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
92656 /* Fix up all other VM information */
92657 remove_vma_list(mm, vma);
92658
92659+ track_exec_limit(mm, start, end, 0UL);
92660+
92661 return 0;
92662 }
92663
92664@@ -2544,6 +3022,13 @@ int vm_munmap(unsigned long start, size_t len)
92665 int ret;
92666 struct mm_struct *mm = current->mm;
92667
92668+
92669+#ifdef CONFIG_PAX_SEGMEXEC
92670+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
92671+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
92672+ return -EINVAL;
92673+#endif
92674+
92675 down_write(&mm->mmap_sem);
92676 ret = do_munmap(mm, start, len);
92677 up_write(&mm->mmap_sem);
92678@@ -2557,16 +3042,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
92679 return vm_munmap(addr, len);
92680 }
92681
92682-static inline void verify_mm_writelocked(struct mm_struct *mm)
92683-{
92684-#ifdef CONFIG_DEBUG_VM
92685- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
92686- WARN_ON(1);
92687- up_read(&mm->mmap_sem);
92688- }
92689-#endif
92690-}
92691-
92692 /*
92693 * this is really a simplified "do_mmap". it only handles
92694 * anonymous maps. eventually we may be able to do some
92695@@ -2580,6 +3055,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92696 struct rb_node ** rb_link, * rb_parent;
92697 pgoff_t pgoff = addr >> PAGE_SHIFT;
92698 int error;
92699+ unsigned long charged;
92700
92701 len = PAGE_ALIGN(len);
92702 if (!len)
92703@@ -2587,16 +3063,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92704
92705 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
92706
92707+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
92708+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
92709+ flags &= ~VM_EXEC;
92710+
92711+#ifdef CONFIG_PAX_MPROTECT
92712+ if (mm->pax_flags & MF_PAX_MPROTECT)
92713+ flags &= ~VM_MAYEXEC;
92714+#endif
92715+
92716+ }
92717+#endif
92718+
92719 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
92720 if (error & ~PAGE_MASK)
92721 return error;
92722
92723+ charged = len >> PAGE_SHIFT;
92724+
92725 /*
92726 * mlock MCL_FUTURE?
92727 */
92728 if (mm->def_flags & VM_LOCKED) {
92729 unsigned long locked, lock_limit;
92730- locked = len >> PAGE_SHIFT;
92731+ locked = charged;
92732 locked += mm->locked_vm;
92733 lock_limit = rlimit(RLIMIT_MEMLOCK);
92734 lock_limit >>= PAGE_SHIFT;
92735@@ -2613,21 +3103,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92736 /*
92737 * Clear old maps. this also does some error checking for us
92738 */
92739- munmap_back:
92740 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
92741 if (do_munmap(mm, addr, len))
92742 return -ENOMEM;
92743- goto munmap_back;
92744+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
92745 }
92746
92747 /* Check against address space limits *after* clearing old maps... */
92748- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
92749+ if (!may_expand_vm(mm, charged))
92750 return -ENOMEM;
92751
92752 if (mm->map_count > sysctl_max_map_count)
92753 return -ENOMEM;
92754
92755- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
92756+ if (security_vm_enough_memory_mm(mm, charged))
92757 return -ENOMEM;
92758
92759 /* Can we just expand an old private anonymous mapping? */
92760@@ -2641,7 +3130,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92761 */
92762 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
92763 if (!vma) {
92764- vm_unacct_memory(len >> PAGE_SHIFT);
92765+ vm_unacct_memory(charged);
92766 return -ENOMEM;
92767 }
92768
92769@@ -2655,10 +3144,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
92770 vma_link(mm, vma, prev, rb_link, rb_parent);
92771 out:
92772 perf_event_mmap(vma);
92773- mm->total_vm += len >> PAGE_SHIFT;
92774+ mm->total_vm += charged;
92775 if (flags & VM_LOCKED)
92776- mm->locked_vm += (len >> PAGE_SHIFT);
92777+ mm->locked_vm += charged;
92778 vma->vm_flags |= VM_SOFTDIRTY;
92779+ track_exec_limit(mm, addr, addr + len, flags);
92780 return addr;
92781 }
92782
92783@@ -2720,6 +3210,7 @@ void exit_mmap(struct mm_struct *mm)
92784 while (vma) {
92785 if (vma->vm_flags & VM_ACCOUNT)
92786 nr_accounted += vma_pages(vma);
92787+ vma->vm_mirror = NULL;
92788 vma = remove_vma(vma);
92789 }
92790 vm_unacct_memory(nr_accounted);
92791@@ -2737,6 +3228,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
92792 struct vm_area_struct *prev;
92793 struct rb_node **rb_link, *rb_parent;
92794
92795+#ifdef CONFIG_PAX_SEGMEXEC
92796+ struct vm_area_struct *vma_m = NULL;
92797+#endif
92798+
92799+ if (security_mmap_addr(vma->vm_start))
92800+ return -EPERM;
92801+
92802 /*
92803 * The vm_pgoff of a purely anonymous vma should be irrelevant
92804 * until its first write fault, when page's anon_vma and index
92805@@ -2760,7 +3258,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
92806 security_vm_enough_memory_mm(mm, vma_pages(vma)))
92807 return -ENOMEM;
92808
92809+#ifdef CONFIG_PAX_SEGMEXEC
92810+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
92811+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
92812+ if (!vma_m)
92813+ return -ENOMEM;
92814+ }
92815+#endif
92816+
92817 vma_link(mm, vma, prev, rb_link, rb_parent);
92818+
92819+#ifdef CONFIG_PAX_SEGMEXEC
92820+ if (vma_m)
92821+ BUG_ON(pax_mirror_vma(vma_m, vma));
92822+#endif
92823+
92824 return 0;
92825 }
92826
92827@@ -2779,6 +3291,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
92828 struct rb_node **rb_link, *rb_parent;
92829 bool faulted_in_anon_vma = true;
92830
92831+ BUG_ON(vma->vm_mirror);
92832+
92833 /*
92834 * If anonymous vma has not yet been faulted, update new pgoff
92835 * to match new location, to increase its chance of merging.
92836@@ -2843,6 +3357,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
92837 return NULL;
92838 }
92839
92840+#ifdef CONFIG_PAX_SEGMEXEC
92841+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
92842+{
92843+ struct vm_area_struct *prev_m;
92844+ struct rb_node **rb_link_m, *rb_parent_m;
92845+ struct mempolicy *pol_m;
92846+
92847+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
92848+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
92849+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
92850+ *vma_m = *vma;
92851+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
92852+ if (anon_vma_clone(vma_m, vma))
92853+ return -ENOMEM;
92854+ pol_m = vma_policy(vma_m);
92855+ mpol_get(pol_m);
92856+ set_vma_policy(vma_m, pol_m);
92857+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
92858+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
92859+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
92860+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
92861+ if (vma_m->vm_file)
92862+ get_file(vma_m->vm_file);
92863+ if (vma_m->vm_ops && vma_m->vm_ops->open)
92864+ vma_m->vm_ops->open(vma_m);
92865+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
92866+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
92867+ vma_m->vm_mirror = vma;
92868+ vma->vm_mirror = vma_m;
92869+ return 0;
92870+}
92871+#endif
92872+
92873 /*
92874 * Return true if the calling process may expand its vm space by the passed
92875 * number of pages
92876@@ -2854,6 +3401,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
92877
92878 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
92879
92880+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
92881 if (cur + npages > lim)
92882 return 0;
92883 return 1;
92884@@ -2924,6 +3472,22 @@ int install_special_mapping(struct mm_struct *mm,
92885 vma->vm_start = addr;
92886 vma->vm_end = addr + len;
92887
92888+#ifdef CONFIG_PAX_MPROTECT
92889+ if (mm->pax_flags & MF_PAX_MPROTECT) {
92890+#ifndef CONFIG_PAX_MPROTECT_COMPAT
92891+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
92892+ return -EPERM;
92893+ if (!(vm_flags & VM_EXEC))
92894+ vm_flags &= ~VM_MAYEXEC;
92895+#else
92896+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
92897+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
92898+#endif
92899+ else
92900+ vm_flags &= ~VM_MAYWRITE;
92901+ }
92902+#endif
92903+
92904 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
92905 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
92906
92907diff --git a/mm/mprotect.c b/mm/mprotect.c
92908index bb53a65..249c052 100644
92909--- a/mm/mprotect.c
92910+++ b/mm/mprotect.c
92911@@ -23,10 +23,18 @@
92912 #include <linux/mmu_notifier.h>
92913 #include <linux/migrate.h>
92914 #include <linux/perf_event.h>
92915+#include <linux/sched/sysctl.h>
92916+
92917+#ifdef CONFIG_PAX_MPROTECT
92918+#include <linux/elf.h>
92919+#include <linux/binfmts.h>
92920+#endif
92921+
92922 #include <asm/uaccess.h>
92923 #include <asm/pgtable.h>
92924 #include <asm/cacheflush.h>
92925 #include <asm/tlbflush.h>
92926+#include <asm/mmu_context.h>
92927
92928 #ifndef pgprot_modify
92929 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
92930@@ -222,6 +230,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
92931 return pages;
92932 }
92933
92934+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
92935+/* called while holding the mmap semaphor for writing except stack expansion */
92936+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
92937+{
92938+ unsigned long oldlimit, newlimit = 0UL;
92939+
92940+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
92941+ return;
92942+
92943+ spin_lock(&mm->page_table_lock);
92944+ oldlimit = mm->context.user_cs_limit;
92945+ if ((prot & VM_EXEC) && oldlimit < end)
92946+ /* USER_CS limit moved up */
92947+ newlimit = end;
92948+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
92949+ /* USER_CS limit moved down */
92950+ newlimit = start;
92951+
92952+ if (newlimit) {
92953+ mm->context.user_cs_limit = newlimit;
92954+
92955+#ifdef CONFIG_SMP
92956+ wmb();
92957+ cpus_clear(mm->context.cpu_user_cs_mask);
92958+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
92959+#endif
92960+
92961+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
92962+ }
92963+ spin_unlock(&mm->page_table_lock);
92964+ if (newlimit == end) {
92965+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
92966+
92967+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
92968+ if (is_vm_hugetlb_page(vma))
92969+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
92970+ else
92971+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
92972+ }
92973+}
92974+#endif
92975+
92976 int
92977 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
92978 unsigned long start, unsigned long end, unsigned long newflags)
92979@@ -234,11 +284,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
92980 int error;
92981 int dirty_accountable = 0;
92982
92983+#ifdef CONFIG_PAX_SEGMEXEC
92984+ struct vm_area_struct *vma_m = NULL;
92985+ unsigned long start_m, end_m;
92986+
92987+ start_m = start + SEGMEXEC_TASK_SIZE;
92988+ end_m = end + SEGMEXEC_TASK_SIZE;
92989+#endif
92990+
92991 if (newflags == oldflags) {
92992 *pprev = vma;
92993 return 0;
92994 }
92995
92996+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
92997+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
92998+
92999+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
93000+ return -ENOMEM;
93001+
93002+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
93003+ return -ENOMEM;
93004+ }
93005+
93006 /*
93007 * If we make a private mapping writable we increase our commit;
93008 * but (without finer accounting) cannot reduce our commit if we
93009@@ -255,6 +323,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
93010 }
93011 }
93012
93013+#ifdef CONFIG_PAX_SEGMEXEC
93014+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
93015+ if (start != vma->vm_start) {
93016+ error = split_vma(mm, vma, start, 1);
93017+ if (error)
93018+ goto fail;
93019+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
93020+ *pprev = (*pprev)->vm_next;
93021+ }
93022+
93023+ if (end != vma->vm_end) {
93024+ error = split_vma(mm, vma, end, 0);
93025+ if (error)
93026+ goto fail;
93027+ }
93028+
93029+ if (pax_find_mirror_vma(vma)) {
93030+ error = __do_munmap(mm, start_m, end_m - start_m);
93031+ if (error)
93032+ goto fail;
93033+ } else {
93034+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
93035+ if (!vma_m) {
93036+ error = -ENOMEM;
93037+ goto fail;
93038+ }
93039+ vma->vm_flags = newflags;
93040+ error = pax_mirror_vma(vma_m, vma);
93041+ if (error) {
93042+ vma->vm_flags = oldflags;
93043+ goto fail;
93044+ }
93045+ }
93046+ }
93047+#endif
93048+
93049 /*
93050 * First try to merge with previous and/or next vma.
93051 */
93052@@ -285,9 +389,21 @@ success:
93053 * vm_flags and vm_page_prot are protected by the mmap_sem
93054 * held in write mode.
93055 */
93056+
93057+#ifdef CONFIG_PAX_SEGMEXEC
93058+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
93059+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
93060+#endif
93061+
93062 vma->vm_flags = newflags;
93063+
93064+#ifdef CONFIG_PAX_MPROTECT
93065+ if (mm->binfmt && mm->binfmt->handle_mprotect)
93066+ mm->binfmt->handle_mprotect(vma, newflags);
93067+#endif
93068+
93069 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
93070- vm_get_page_prot(newflags));
93071+ vm_get_page_prot(vma->vm_flags));
93072
93073 if (vma_wants_writenotify(vma)) {
93074 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
93075@@ -326,6 +442,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93076 end = start + len;
93077 if (end <= start)
93078 return -ENOMEM;
93079+
93080+#ifdef CONFIG_PAX_SEGMEXEC
93081+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
93082+ if (end > SEGMEXEC_TASK_SIZE)
93083+ return -EINVAL;
93084+ } else
93085+#endif
93086+
93087+ if (end > TASK_SIZE)
93088+ return -EINVAL;
93089+
93090 if (!arch_validate_prot(prot))
93091 return -EINVAL;
93092
93093@@ -333,7 +460,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93094 /*
93095 * Does the application expect PROT_READ to imply PROT_EXEC:
93096 */
93097- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
93098+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
93099 prot |= PROT_EXEC;
93100
93101 vm_flags = calc_vm_prot_bits(prot);
93102@@ -365,6 +492,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93103 if (start > vma->vm_start)
93104 prev = vma;
93105
93106+#ifdef CONFIG_PAX_MPROTECT
93107+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
93108+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
93109+#endif
93110+
93111 for (nstart = start ; ; ) {
93112 unsigned long newflags;
93113
93114@@ -375,6 +507,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93115
93116 /* newflags >> 4 shift VM_MAY% in place of VM_% */
93117 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
93118+ if (prot & (PROT_WRITE | PROT_EXEC))
93119+ gr_log_rwxmprotect(vma);
93120+
93121+ error = -EACCES;
93122+ goto out;
93123+ }
93124+
93125+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
93126 error = -EACCES;
93127 goto out;
93128 }
93129@@ -389,6 +529,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93130 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
93131 if (error)
93132 goto out;
93133+
93134+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
93135+
93136 nstart = tmp;
93137
93138 if (nstart < prev->vm_end)
93139diff --git a/mm/mremap.c b/mm/mremap.c
93140index 0843feb..4f5b2e6 100644
93141--- a/mm/mremap.c
93142+++ b/mm/mremap.c
93143@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
93144 continue;
93145 pte = ptep_get_and_clear(mm, old_addr, old_pte);
93146 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
93147+
93148+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
93149+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
93150+ pte = pte_exprotect(pte);
93151+#endif
93152+
93153 pte = move_soft_dirty_pte(pte);
93154 set_pte_at(mm, new_addr, new_pte, pte);
93155 }
93156@@ -337,6 +343,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
93157 if (is_vm_hugetlb_page(vma))
93158 goto Einval;
93159
93160+#ifdef CONFIG_PAX_SEGMEXEC
93161+ if (pax_find_mirror_vma(vma))
93162+ goto Einval;
93163+#endif
93164+
93165 /* We can't remap across vm area boundaries */
93166 if (old_len > vma->vm_end - addr)
93167 goto Efault;
93168@@ -392,20 +403,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
93169 unsigned long ret = -EINVAL;
93170 unsigned long charged = 0;
93171 unsigned long map_flags;
93172+ unsigned long pax_task_size = TASK_SIZE;
93173
93174 if (new_addr & ~PAGE_MASK)
93175 goto out;
93176
93177- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
93178+#ifdef CONFIG_PAX_SEGMEXEC
93179+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
93180+ pax_task_size = SEGMEXEC_TASK_SIZE;
93181+#endif
93182+
93183+ pax_task_size -= PAGE_SIZE;
93184+
93185+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
93186 goto out;
93187
93188 /* Check if the location we're moving into overlaps the
93189 * old location at all, and fail if it does.
93190 */
93191- if ((new_addr <= addr) && (new_addr+new_len) > addr)
93192- goto out;
93193-
93194- if ((addr <= new_addr) && (addr+old_len) > new_addr)
93195+ if (addr + old_len > new_addr && new_addr + new_len > addr)
93196 goto out;
93197
93198 ret = do_munmap(mm, new_addr, new_len);
93199@@ -474,6 +490,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93200 unsigned long ret = -EINVAL;
93201 unsigned long charged = 0;
93202 bool locked = false;
93203+ unsigned long pax_task_size = TASK_SIZE;
93204
93205 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
93206 return ret;
93207@@ -495,6 +512,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93208 if (!new_len)
93209 return ret;
93210
93211+#ifdef CONFIG_PAX_SEGMEXEC
93212+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
93213+ pax_task_size = SEGMEXEC_TASK_SIZE;
93214+#endif
93215+
93216+ pax_task_size -= PAGE_SIZE;
93217+
93218+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
93219+ old_len > pax_task_size || addr > pax_task_size-old_len)
93220+ return ret;
93221+
93222 down_write(&current->mm->mmap_sem);
93223
93224 if (flags & MREMAP_FIXED) {
93225@@ -545,6 +573,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93226 new_addr = addr;
93227 }
93228 ret = addr;
93229+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
93230 goto out;
93231 }
93232 }
93233@@ -568,7 +597,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93234 goto out;
93235 }
93236
93237+ map_flags = vma->vm_flags;
93238 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
93239+ if (!(ret & ~PAGE_MASK)) {
93240+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
93241+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
93242+ }
93243 }
93244 out:
93245 if (ret & ~PAGE_MASK)
93246diff --git a/mm/nommu.c b/mm/nommu.c
93247index fec093a..8162f74 100644
93248--- a/mm/nommu.c
93249+++ b/mm/nommu.c
93250@@ -64,7 +64,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
93251 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
93252 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
93253 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
93254-int heap_stack_gap = 0;
93255
93256 atomic_long_t mmap_pages_allocated;
93257
93258@@ -844,15 +843,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
93259 EXPORT_SYMBOL(find_vma);
93260
93261 /*
93262- * find a VMA
93263- * - we don't extend stack VMAs under NOMMU conditions
93264- */
93265-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
93266-{
93267- return find_vma(mm, addr);
93268-}
93269-
93270-/*
93271 * expand a stack to a given address
93272 * - not supported under NOMMU conditions
93273 */
93274@@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
93275
93276 /* most fields are the same, copy all, and then fixup */
93277 *new = *vma;
93278+ INIT_LIST_HEAD(&new->anon_vma_chain);
93279 *region = *vma->vm_region;
93280 new->vm_region = region;
93281
93282@@ -1992,8 +1983,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
93283 }
93284 EXPORT_SYMBOL(generic_file_remap_pages);
93285
93286-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93287- unsigned long addr, void *buf, int len, int write)
93288+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93289+ unsigned long addr, void *buf, size_t len, int write)
93290 {
93291 struct vm_area_struct *vma;
93292
93293@@ -2034,8 +2025,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
93294 *
93295 * The caller must hold a reference on @mm.
93296 */
93297-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
93298- void *buf, int len, int write)
93299+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
93300+ void *buf, size_t len, int write)
93301 {
93302 return __access_remote_vm(NULL, mm, addr, buf, len, write);
93303 }
93304@@ -2044,7 +2035,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
93305 * Access another process' address space.
93306 * - source/target buffer must be kernel space
93307 */
93308-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
93309+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
93310 {
93311 struct mm_struct *mm;
93312
93313diff --git a/mm/page-writeback.c b/mm/page-writeback.c
93314index 6380758..4064aec 100644
93315--- a/mm/page-writeback.c
93316+++ b/mm/page-writeback.c
93317@@ -690,7 +690,7 @@ static inline long long pos_ratio_polynom(unsigned long setpoint,
93318 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
93319 * - the bdi dirty thresh drops quickly due to change of JBOD workload
93320 */
93321-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
93322+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
93323 unsigned long thresh,
93324 unsigned long bg_thresh,
93325 unsigned long dirty,
93326diff --git a/mm/page_alloc.c b/mm/page_alloc.c
93327index 5248fe0..0f693aa 100644
93328--- a/mm/page_alloc.c
93329+++ b/mm/page_alloc.c
93330@@ -61,6 +61,7 @@
93331 #include <linux/page-debug-flags.h>
93332 #include <linux/hugetlb.h>
93333 #include <linux/sched/rt.h>
93334+#include <linux/random.h>
93335
93336 #include <asm/sections.h>
93337 #include <asm/tlbflush.h>
93338@@ -354,7 +355,7 @@ out:
93339 * This usage means that zero-order pages may not be compound.
93340 */
93341
93342-static void free_compound_page(struct page *page)
93343+void free_compound_page(struct page *page)
93344 {
93345 __free_pages_ok(page, compound_order(page));
93346 }
93347@@ -712,6 +713,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
93348 int i;
93349 int bad = 0;
93350
93351+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93352+ unsigned long index = 1UL << order;
93353+#endif
93354+
93355 trace_mm_page_free(page, order);
93356 kmemcheck_free_shadow(page, order);
93357
93358@@ -728,6 +733,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
93359 debug_check_no_obj_freed(page_address(page),
93360 PAGE_SIZE << order);
93361 }
93362+
93363+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93364+ for (; index; --index)
93365+ sanitize_highpage(page + index - 1);
93366+#endif
93367+
93368 arch_free_page(page, order);
93369 kernel_map_pages(page, 1 << order, 0);
93370
93371@@ -750,6 +761,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
93372 local_irq_restore(flags);
93373 }
93374
93375+#ifdef CONFIG_PAX_LATENT_ENTROPY
93376+bool __meminitdata extra_latent_entropy;
93377+
93378+static int __init setup_pax_extra_latent_entropy(char *str)
93379+{
93380+ extra_latent_entropy = true;
93381+ return 0;
93382+}
93383+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
93384+
93385+volatile u64 latent_entropy __latent_entropy;
93386+EXPORT_SYMBOL(latent_entropy);
93387+#endif
93388+
93389 void __init __free_pages_bootmem(struct page *page, unsigned int order)
93390 {
93391 unsigned int nr_pages = 1 << order;
93392@@ -765,6 +790,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
93393 __ClearPageReserved(p);
93394 set_page_count(p, 0);
93395
93396+#ifdef CONFIG_PAX_LATENT_ENTROPY
93397+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
93398+ u64 hash = 0;
93399+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
93400+ const u64 *data = lowmem_page_address(page);
93401+
93402+ for (index = 0; index < end; index++)
93403+ hash ^= hash + data[index];
93404+ latent_entropy ^= hash;
93405+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
93406+ }
93407+#endif
93408+
93409 page_zone(page)->managed_pages += nr_pages;
93410 set_page_refcounted(page);
93411 __free_pages(page, order);
93412@@ -870,8 +908,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
93413 arch_alloc_page(page, order);
93414 kernel_map_pages(page, 1 << order, 1);
93415
93416+#ifndef CONFIG_PAX_MEMORY_SANITIZE
93417 if (gfp_flags & __GFP_ZERO)
93418 prep_zero_page(page, order, gfp_flags);
93419+#endif
93420
93421 if (order && (gfp_flags & __GFP_COMP))
93422 prep_compound_page(page, order);
93423diff --git a/mm/page_io.c b/mm/page_io.c
93424index 8c79a47..a689e0d 100644
93425--- a/mm/page_io.c
93426+++ b/mm/page_io.c
93427@@ -260,7 +260,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
93428 struct file *swap_file = sis->swap_file;
93429 struct address_space *mapping = swap_file->f_mapping;
93430 struct iovec iov = {
93431- .iov_base = kmap(page),
93432+ .iov_base = (void __force_user *)kmap(page),
93433 .iov_len = PAGE_SIZE,
93434 };
93435
93436diff --git a/mm/percpu.c b/mm/percpu.c
93437index 0d10def..6dc822d 100644
93438--- a/mm/percpu.c
93439+++ b/mm/percpu.c
93440@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
93441 static unsigned int pcpu_high_unit_cpu __read_mostly;
93442
93443 /* the address of the first chunk which starts with the kernel static area */
93444-void *pcpu_base_addr __read_mostly;
93445+void *pcpu_base_addr __read_only;
93446 EXPORT_SYMBOL_GPL(pcpu_base_addr);
93447
93448 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
93449diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
93450index fd26d04..0cea1b0 100644
93451--- a/mm/process_vm_access.c
93452+++ b/mm/process_vm_access.c
93453@@ -13,6 +13,7 @@
93454 #include <linux/uio.h>
93455 #include <linux/sched.h>
93456 #include <linux/highmem.h>
93457+#include <linux/security.h>
93458 #include <linux/ptrace.h>
93459 #include <linux/slab.h>
93460 #include <linux/syscalls.h>
93461@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
93462 size_t iov_l_curr_offset = 0;
93463 ssize_t iov_len;
93464
93465+ return -ENOSYS; // PaX: until properly audited
93466+
93467 /*
93468 * Work out how many pages of struct pages we're going to need
93469 * when eventually calling get_user_pages
93470 */
93471 for (i = 0; i < riovcnt; i++) {
93472 iov_len = rvec[i].iov_len;
93473- if (iov_len > 0) {
93474- nr_pages_iov = ((unsigned long)rvec[i].iov_base
93475- + iov_len)
93476- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
93477- / PAGE_SIZE + 1;
93478- nr_pages = max(nr_pages, nr_pages_iov);
93479- }
93480+ if (iov_len <= 0)
93481+ continue;
93482+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
93483+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
93484+ nr_pages = max(nr_pages, nr_pages_iov);
93485 }
93486
93487 if (nr_pages == 0)
93488@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
93489 goto free_proc_pages;
93490 }
93491
93492+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
93493+ rc = -EPERM;
93494+ goto put_task_struct;
93495+ }
93496+
93497 mm = mm_access(task, PTRACE_MODE_ATTACH);
93498 if (!mm || IS_ERR(mm)) {
93499 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
93500diff --git a/mm/rmap.c b/mm/rmap.c
93501index 068522d..f539f21 100644
93502--- a/mm/rmap.c
93503+++ b/mm/rmap.c
93504@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
93505 struct anon_vma *anon_vma = vma->anon_vma;
93506 struct anon_vma_chain *avc;
93507
93508+#ifdef CONFIG_PAX_SEGMEXEC
93509+ struct anon_vma_chain *avc_m = NULL;
93510+#endif
93511+
93512 might_sleep();
93513 if (unlikely(!anon_vma)) {
93514 struct mm_struct *mm = vma->vm_mm;
93515@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
93516 if (!avc)
93517 goto out_enomem;
93518
93519+#ifdef CONFIG_PAX_SEGMEXEC
93520+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
93521+ if (!avc_m)
93522+ goto out_enomem_free_avc;
93523+#endif
93524+
93525 anon_vma = find_mergeable_anon_vma(vma);
93526 allocated = NULL;
93527 if (!anon_vma) {
93528@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
93529 /* page_table_lock to protect against threads */
93530 spin_lock(&mm->page_table_lock);
93531 if (likely(!vma->anon_vma)) {
93532+
93533+#ifdef CONFIG_PAX_SEGMEXEC
93534+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
93535+
93536+ if (vma_m) {
93537+ BUG_ON(vma_m->anon_vma);
93538+ vma_m->anon_vma = anon_vma;
93539+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
93540+ avc_m = NULL;
93541+ }
93542+#endif
93543+
93544 vma->anon_vma = anon_vma;
93545 anon_vma_chain_link(vma, avc, anon_vma);
93546 allocated = NULL;
93547@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
93548
93549 if (unlikely(allocated))
93550 put_anon_vma(allocated);
93551+
93552+#ifdef CONFIG_PAX_SEGMEXEC
93553+ if (unlikely(avc_m))
93554+ anon_vma_chain_free(avc_m);
93555+#endif
93556+
93557 if (unlikely(avc))
93558 anon_vma_chain_free(avc);
93559 }
93560 return 0;
93561
93562 out_enomem_free_avc:
93563+
93564+#ifdef CONFIG_PAX_SEGMEXEC
93565+ if (avc_m)
93566+ anon_vma_chain_free(avc_m);
93567+#endif
93568+
93569 anon_vma_chain_free(avc);
93570 out_enomem:
93571 return -ENOMEM;
93572@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
93573 * Attach the anon_vmas from src to dst.
93574 * Returns 0 on success, -ENOMEM on failure.
93575 */
93576-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
93577+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
93578 {
93579 struct anon_vma_chain *avc, *pavc;
93580 struct anon_vma *root = NULL;
93581@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
93582 * the corresponding VMA in the parent process is attached to.
93583 * Returns 0 on success, non-zero on failure.
93584 */
93585-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
93586+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
93587 {
93588 struct anon_vma_chain *avc;
93589 struct anon_vma *anon_vma;
93590@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
93591 void __init anon_vma_init(void)
93592 {
93593 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
93594- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
93595- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
93596+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
93597+ anon_vma_ctor);
93598+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
93599+ SLAB_PANIC|SLAB_NO_SANITIZE);
93600 }
93601
93602 /*
93603diff --git a/mm/shmem.c b/mm/shmem.c
93604index 902a148..58f9d59 100644
93605--- a/mm/shmem.c
93606+++ b/mm/shmem.c
93607@@ -33,7 +33,7 @@
93608 #include <linux/swap.h>
93609 #include <linux/aio.h>
93610
93611-static struct vfsmount *shm_mnt;
93612+struct vfsmount *shm_mnt;
93613
93614 #ifdef CONFIG_SHMEM
93615 /*
93616@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
93617 #define BOGO_DIRENT_SIZE 20
93618
93619 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
93620-#define SHORT_SYMLINK_LEN 128
93621+#define SHORT_SYMLINK_LEN 64
93622
93623 /*
93624 * shmem_fallocate and shmem_writepage communicate via inode->i_private
93625@@ -2232,6 +2232,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
93626 static int shmem_xattr_validate(const char *name)
93627 {
93628 struct { const char *prefix; size_t len; } arr[] = {
93629+
93630+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
93631+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
93632+#endif
93633+
93634 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
93635 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
93636 };
93637@@ -2287,6 +2292,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
93638 if (err)
93639 return err;
93640
93641+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
93642+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
93643+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
93644+ return -EOPNOTSUPP;
93645+ if (size > 8)
93646+ return -EINVAL;
93647+ }
93648+#endif
93649+
93650 return simple_xattr_set(&info->xattrs, name, value, size, flags);
93651 }
93652
93653@@ -2599,8 +2613,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
93654 int err = -ENOMEM;
93655
93656 /* Round up to L1_CACHE_BYTES to resist false sharing */
93657- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
93658- L1_CACHE_BYTES), GFP_KERNEL);
93659+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
93660 if (!sbinfo)
93661 return -ENOMEM;
93662
93663diff --git a/mm/slab.c b/mm/slab.c
93664index eb043bf..d82f5a8 100644
93665--- a/mm/slab.c
93666+++ b/mm/slab.c
93667@@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
93668 if ((x)->max_freeable < i) \
93669 (x)->max_freeable = i; \
93670 } while (0)
93671-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
93672-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
93673-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
93674-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
93675+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
93676+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
93677+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
93678+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
93679+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
93680+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
93681 #else
93682 #define STATS_INC_ACTIVE(x) do { } while (0)
93683 #define STATS_DEC_ACTIVE(x) do { } while (0)
93684@@ -320,6 +322,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
93685 #define STATS_INC_ALLOCMISS(x) do { } while (0)
93686 #define STATS_INC_FREEHIT(x) do { } while (0)
93687 #define STATS_INC_FREEMISS(x) do { } while (0)
93688+#define STATS_INC_SANITIZED(x) do { } while (0)
93689+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
93690 #endif
93691
93692 #if DEBUG
93693@@ -403,7 +407,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
93694 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
93695 */
93696 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
93697- const struct page *page, void *obj)
93698+ const struct page *page, const void *obj)
93699 {
93700 u32 offset = (obj - page->s_mem);
93701 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
93702@@ -1489,12 +1493,12 @@ void __init kmem_cache_init(void)
93703 */
93704
93705 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
93706- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
93707+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
93708
93709 if (INDEX_AC != INDEX_NODE)
93710 kmalloc_caches[INDEX_NODE] =
93711 create_kmalloc_cache("kmalloc-node",
93712- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
93713+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
93714
93715 slab_early_init = 0;
93716
93717@@ -3428,6 +3432,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
93718 struct array_cache *ac = cpu_cache_get(cachep);
93719
93720 check_irq_off();
93721+
93722+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93723+ if (pax_sanitize_slab) {
93724+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
93725+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
93726+
93727+ if (cachep->ctor)
93728+ cachep->ctor(objp);
93729+
93730+ STATS_INC_SANITIZED(cachep);
93731+ } else
93732+ STATS_INC_NOT_SANITIZED(cachep);
93733+ }
93734+#endif
93735+
93736 kmemleak_free_recursive(objp, cachep->flags);
93737 objp = cache_free_debugcheck(cachep, objp, caller);
93738
93739@@ -3656,6 +3675,7 @@ void kfree(const void *objp)
93740
93741 if (unlikely(ZERO_OR_NULL_PTR(objp)))
93742 return;
93743+ VM_BUG_ON(!virt_addr_valid(objp));
93744 local_irq_save(flags);
93745 kfree_debugcheck(objp);
93746 c = virt_to_cache(objp);
93747@@ -4097,14 +4117,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
93748 }
93749 /* cpu stats */
93750 {
93751- unsigned long allochit = atomic_read(&cachep->allochit);
93752- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
93753- unsigned long freehit = atomic_read(&cachep->freehit);
93754- unsigned long freemiss = atomic_read(&cachep->freemiss);
93755+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
93756+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
93757+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
93758+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
93759
93760 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
93761 allochit, allocmiss, freehit, freemiss);
93762 }
93763+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93764+ {
93765+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
93766+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
93767+
93768+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
93769+ }
93770+#endif
93771 #endif
93772 }
93773
93774@@ -4334,13 +4362,69 @@ static const struct file_operations proc_slabstats_operations = {
93775 static int __init slab_proc_init(void)
93776 {
93777 #ifdef CONFIG_DEBUG_SLAB_LEAK
93778- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
93779+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
93780 #endif
93781 return 0;
93782 }
93783 module_init(slab_proc_init);
93784 #endif
93785
93786+bool is_usercopy_object(const void *ptr)
93787+{
93788+ struct page *page;
93789+ struct kmem_cache *cachep;
93790+
93791+ if (ZERO_OR_NULL_PTR(ptr))
93792+ return false;
93793+
93794+ if (!slab_is_available())
93795+ return false;
93796+
93797+ if (!virt_addr_valid(ptr))
93798+ return false;
93799+
93800+ page = virt_to_head_page(ptr);
93801+
93802+ if (!PageSlab(page))
93803+ return false;
93804+
93805+ cachep = page->slab_cache;
93806+ return cachep->flags & SLAB_USERCOPY;
93807+}
93808+
93809+#ifdef CONFIG_PAX_USERCOPY
93810+const char *check_heap_object(const void *ptr, unsigned long n)
93811+{
93812+ struct page *page;
93813+ struct kmem_cache *cachep;
93814+ unsigned int objnr;
93815+ unsigned long offset;
93816+
93817+ if (ZERO_OR_NULL_PTR(ptr))
93818+ return "<null>";
93819+
93820+ if (!virt_addr_valid(ptr))
93821+ return NULL;
93822+
93823+ page = virt_to_head_page(ptr);
93824+
93825+ if (!PageSlab(page))
93826+ return NULL;
93827+
93828+ cachep = page->slab_cache;
93829+ if (!(cachep->flags & SLAB_USERCOPY))
93830+ return cachep->name;
93831+
93832+ objnr = obj_to_index(cachep, page, ptr);
93833+ BUG_ON(objnr >= cachep->num);
93834+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
93835+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
93836+ return NULL;
93837+
93838+ return cachep->name;
93839+}
93840+#endif
93841+
93842 /**
93843 * ksize - get the actual amount of memory allocated for a given object
93844 * @objp: Pointer to the object
93845diff --git a/mm/slab.h b/mm/slab.h
93846index 0859c42..2f7b737 100644
93847--- a/mm/slab.h
93848+++ b/mm/slab.h
93849@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
93850 /* The slab cache that manages slab cache information */
93851 extern struct kmem_cache *kmem_cache;
93852
93853+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93854+#ifdef CONFIG_X86_64
93855+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
93856+#else
93857+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
93858+#endif
93859+extern bool pax_sanitize_slab;
93860+#endif
93861+
93862 unsigned long calculate_alignment(unsigned long flags,
93863 unsigned long align, unsigned long size);
93864
93865@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
93866
93867 /* Legal flag mask for kmem_cache_create(), for various configurations */
93868 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
93869- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
93870+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
93871+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
93872
93873 #if defined(CONFIG_DEBUG_SLAB)
93874 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
93875@@ -233,6 +243,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
93876 return s;
93877
93878 page = virt_to_head_page(x);
93879+
93880+ BUG_ON(!PageSlab(page));
93881+
93882 cachep = page->slab_cache;
93883 if (slab_equal_or_root(cachep, s))
93884 return cachep;
93885diff --git a/mm/slab_common.c b/mm/slab_common.c
93886index 0b7bb39..334c328 100644
93887--- a/mm/slab_common.c
93888+++ b/mm/slab_common.c
93889@@ -23,11 +23,22 @@
93890
93891 #include "slab.h"
93892
93893-enum slab_state slab_state;
93894+enum slab_state slab_state __read_only;
93895 LIST_HEAD(slab_caches);
93896 DEFINE_MUTEX(slab_mutex);
93897 struct kmem_cache *kmem_cache;
93898
93899+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93900+bool pax_sanitize_slab __read_only = true;
93901+static int __init pax_sanitize_slab_setup(char *str)
93902+{
93903+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
93904+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
93905+ return 1;
93906+}
93907+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
93908+#endif
93909+
93910 #ifdef CONFIG_DEBUG_VM
93911 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
93912 size_t size)
93913@@ -212,7 +223,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
93914
93915 err = __kmem_cache_create(s, flags);
93916 if (!err) {
93917- s->refcount = 1;
93918+ atomic_set(&s->refcount, 1);
93919 list_add(&s->list, &slab_caches);
93920 memcg_cache_list_add(memcg, s);
93921 } else {
93922@@ -258,8 +269,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
93923
93924 get_online_cpus();
93925 mutex_lock(&slab_mutex);
93926- s->refcount--;
93927- if (!s->refcount) {
93928+ if (atomic_dec_and_test(&s->refcount)) {
93929 list_del(&s->list);
93930
93931 if (!__kmem_cache_shutdown(s)) {
93932@@ -305,7 +315,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
93933 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
93934 name, size, err);
93935
93936- s->refcount = -1; /* Exempt from merging for now */
93937+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
93938 }
93939
93940 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
93941@@ -318,7 +328,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
93942
93943 create_boot_cache(s, name, size, flags);
93944 list_add(&s->list, &slab_caches);
93945- s->refcount = 1;
93946+ atomic_set(&s->refcount, 1);
93947 return s;
93948 }
93949
93950@@ -330,6 +340,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
93951 EXPORT_SYMBOL(kmalloc_dma_caches);
93952 #endif
93953
93954+#ifdef CONFIG_PAX_USERCOPY_SLABS
93955+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
93956+EXPORT_SYMBOL(kmalloc_usercopy_caches);
93957+#endif
93958+
93959 /*
93960 * Conversion table for small slabs sizes / 8 to the index in the
93961 * kmalloc array. This is necessary for slabs < 192 since we have non power
93962@@ -394,6 +409,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
93963 return kmalloc_dma_caches[index];
93964
93965 #endif
93966+
93967+#ifdef CONFIG_PAX_USERCOPY_SLABS
93968+ if (unlikely((flags & GFP_USERCOPY)))
93969+ return kmalloc_usercopy_caches[index];
93970+
93971+#endif
93972+
93973 return kmalloc_caches[index];
93974 }
93975
93976@@ -450,7 +472,7 @@ void __init create_kmalloc_caches(unsigned long flags)
93977 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
93978 if (!kmalloc_caches[i]) {
93979 kmalloc_caches[i] = create_kmalloc_cache(NULL,
93980- 1 << i, flags);
93981+ 1 << i, SLAB_USERCOPY | flags);
93982 }
93983
93984 /*
93985@@ -459,10 +481,10 @@ void __init create_kmalloc_caches(unsigned long flags)
93986 * earlier power of two caches
93987 */
93988 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
93989- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
93990+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
93991
93992 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
93993- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
93994+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
93995 }
93996
93997 /* Kmalloc array is now usable */
93998@@ -495,6 +517,23 @@ void __init create_kmalloc_caches(unsigned long flags)
93999 }
94000 }
94001 #endif
94002+
94003+#ifdef CONFIG_PAX_USERCOPY_SLABS
94004+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
94005+ struct kmem_cache *s = kmalloc_caches[i];
94006+
94007+ if (s) {
94008+ int size = kmalloc_size(i);
94009+ char *n = kasprintf(GFP_NOWAIT,
94010+ "usercopy-kmalloc-%d", size);
94011+
94012+ BUG_ON(!n);
94013+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
94014+ size, SLAB_USERCOPY | flags);
94015+ }
94016+ }
94017+#endif
94018+
94019 }
94020 #endif /* !CONFIG_SLOB */
94021
94022@@ -535,6 +574,9 @@ void print_slabinfo_header(struct seq_file *m)
94023 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
94024 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
94025 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
94026+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94027+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
94028+#endif
94029 #endif
94030 seq_putc(m, '\n');
94031 }
94032diff --git a/mm/slob.c b/mm/slob.c
94033index 4bf8809..98a6914 100644
94034--- a/mm/slob.c
94035+++ b/mm/slob.c
94036@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
94037 /*
94038 * Return the size of a slob block.
94039 */
94040-static slobidx_t slob_units(slob_t *s)
94041+static slobidx_t slob_units(const slob_t *s)
94042 {
94043 if (s->units > 0)
94044 return s->units;
94045@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
94046 /*
94047 * Return the next free slob block pointer after this one.
94048 */
94049-static slob_t *slob_next(slob_t *s)
94050+static slob_t *slob_next(const slob_t *s)
94051 {
94052 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
94053 slobidx_t next;
94054@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
94055 /*
94056 * Returns true if s is the last free block in its page.
94057 */
94058-static int slob_last(slob_t *s)
94059+static int slob_last(const slob_t *s)
94060 {
94061 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
94062 }
94063
94064-static void *slob_new_pages(gfp_t gfp, int order, int node)
94065+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
94066 {
94067- void *page;
94068+ struct page *page;
94069
94070 #ifdef CONFIG_NUMA
94071 if (node != NUMA_NO_NODE)
94072@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
94073 if (!page)
94074 return NULL;
94075
94076- return page_address(page);
94077+ __SetPageSlab(page);
94078+ return page;
94079 }
94080
94081-static void slob_free_pages(void *b, int order)
94082+static void slob_free_pages(struct page *sp, int order)
94083 {
94084 if (current->reclaim_state)
94085 current->reclaim_state->reclaimed_slab += 1 << order;
94086- free_pages((unsigned long)b, order);
94087+ __ClearPageSlab(sp);
94088+ page_mapcount_reset(sp);
94089+ sp->private = 0;
94090+ __free_pages(sp, order);
94091 }
94092
94093 /*
94094@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
94095
94096 /* Not enough space: must allocate a new page */
94097 if (!b) {
94098- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
94099- if (!b)
94100+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
94101+ if (!sp)
94102 return NULL;
94103- sp = virt_to_page(b);
94104- __SetPageSlab(sp);
94105+ b = page_address(sp);
94106
94107 spin_lock_irqsave(&slob_lock, flags);
94108 sp->units = SLOB_UNITS(PAGE_SIZE);
94109 sp->freelist = b;
94110+ sp->private = 0;
94111 INIT_LIST_HEAD(&sp->list);
94112 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
94113 set_slob_page_free(sp, slob_list);
94114@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
94115 if (slob_page_free(sp))
94116 clear_slob_page_free(sp);
94117 spin_unlock_irqrestore(&slob_lock, flags);
94118- __ClearPageSlab(sp);
94119- page_mapcount_reset(sp);
94120- slob_free_pages(b, 0);
94121+ slob_free_pages(sp, 0);
94122 return;
94123 }
94124
94125+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94126+ if (pax_sanitize_slab)
94127+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
94128+#endif
94129+
94130 if (!slob_page_free(sp)) {
94131 /* This slob page is about to become partially free. Easy! */
94132 sp->units = units;
94133@@ -424,11 +431,10 @@ out:
94134 */
94135
94136 static __always_inline void *
94137-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
94138+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
94139 {
94140- unsigned int *m;
94141- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94142- void *ret;
94143+ slob_t *m;
94144+ void *ret = NULL;
94145
94146 gfp &= gfp_allowed_mask;
94147
94148@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
94149
94150 if (!m)
94151 return NULL;
94152- *m = size;
94153+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
94154+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
94155+ m[0].units = size;
94156+ m[1].units = align;
94157 ret = (void *)m + align;
94158
94159 trace_kmalloc_node(caller, ret,
94160 size, size + align, gfp, node);
94161 } else {
94162 unsigned int order = get_order(size);
94163+ struct page *page;
94164
94165 if (likely(order))
94166 gfp |= __GFP_COMP;
94167- ret = slob_new_pages(gfp, order, node);
94168+ page = slob_new_pages(gfp, order, node);
94169+ if (page) {
94170+ ret = page_address(page);
94171+ page->private = size;
94172+ }
94173
94174 trace_kmalloc_node(caller, ret,
94175 size, PAGE_SIZE << order, gfp, node);
94176 }
94177
94178- kmemleak_alloc(ret, size, 1, gfp);
94179+ return ret;
94180+}
94181+
94182+static __always_inline void *
94183+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
94184+{
94185+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94186+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
94187+
94188+ if (!ZERO_OR_NULL_PTR(ret))
94189+ kmemleak_alloc(ret, size, 1, gfp);
94190 return ret;
94191 }
94192
94193@@ -493,34 +517,112 @@ void kfree(const void *block)
94194 return;
94195 kmemleak_free(block);
94196
94197+ VM_BUG_ON(!virt_addr_valid(block));
94198 sp = virt_to_page(block);
94199- if (PageSlab(sp)) {
94200+ VM_BUG_ON(!PageSlab(sp));
94201+ if (!sp->private) {
94202 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94203- unsigned int *m = (unsigned int *)(block - align);
94204- slob_free(m, *m + align);
94205- } else
94206+ slob_t *m = (slob_t *)(block - align);
94207+ slob_free(m, m[0].units + align);
94208+ } else {
94209+ __ClearPageSlab(sp);
94210+ page_mapcount_reset(sp);
94211+ sp->private = 0;
94212 __free_pages(sp, compound_order(sp));
94213+ }
94214 }
94215 EXPORT_SYMBOL(kfree);
94216
94217+bool is_usercopy_object(const void *ptr)
94218+{
94219+ if (!slab_is_available())
94220+ return false;
94221+
94222+ // PAX: TODO
94223+
94224+ return false;
94225+}
94226+
94227+#ifdef CONFIG_PAX_USERCOPY
94228+const char *check_heap_object(const void *ptr, unsigned long n)
94229+{
94230+ struct page *page;
94231+ const slob_t *free;
94232+ const void *base;
94233+ unsigned long flags;
94234+
94235+ if (ZERO_OR_NULL_PTR(ptr))
94236+ return "<null>";
94237+
94238+ if (!virt_addr_valid(ptr))
94239+ return NULL;
94240+
94241+ page = virt_to_head_page(ptr);
94242+ if (!PageSlab(page))
94243+ return NULL;
94244+
94245+ if (page->private) {
94246+ base = page;
94247+ if (base <= ptr && n <= page->private - (ptr - base))
94248+ return NULL;
94249+ return "<slob>";
94250+ }
94251+
94252+ /* some tricky double walking to find the chunk */
94253+ spin_lock_irqsave(&slob_lock, flags);
94254+ base = (void *)((unsigned long)ptr & PAGE_MASK);
94255+ free = page->freelist;
94256+
94257+ while (!slob_last(free) && (void *)free <= ptr) {
94258+ base = free + slob_units(free);
94259+ free = slob_next(free);
94260+ }
94261+
94262+ while (base < (void *)free) {
94263+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
94264+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
94265+ int offset;
94266+
94267+ if (ptr < base + align)
94268+ break;
94269+
94270+ offset = ptr - base - align;
94271+ if (offset >= m) {
94272+ base += size;
94273+ continue;
94274+ }
94275+
94276+ if (n > m - offset)
94277+ break;
94278+
94279+ spin_unlock_irqrestore(&slob_lock, flags);
94280+ return NULL;
94281+ }
94282+
94283+ spin_unlock_irqrestore(&slob_lock, flags);
94284+ return "<slob>";
94285+}
94286+#endif
94287+
94288 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
94289 size_t ksize(const void *block)
94290 {
94291 struct page *sp;
94292 int align;
94293- unsigned int *m;
94294+ slob_t *m;
94295
94296 BUG_ON(!block);
94297 if (unlikely(block == ZERO_SIZE_PTR))
94298 return 0;
94299
94300 sp = virt_to_page(block);
94301- if (unlikely(!PageSlab(sp)))
94302- return PAGE_SIZE << compound_order(sp);
94303+ VM_BUG_ON(!PageSlab(sp));
94304+ if (sp->private)
94305+ return sp->private;
94306
94307 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94308- m = (unsigned int *)(block - align);
94309- return SLOB_UNITS(*m) * SLOB_UNIT;
94310+ m = (slob_t *)(block - align);
94311+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
94312 }
94313 EXPORT_SYMBOL(ksize);
94314
94315@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
94316
94317 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
94318 {
94319- void *b;
94320+ void *b = NULL;
94321
94322 flags &= gfp_allowed_mask;
94323
94324 lockdep_trace_alloc(flags);
94325
94326+#ifdef CONFIG_PAX_USERCOPY_SLABS
94327+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
94328+#else
94329 if (c->size < PAGE_SIZE) {
94330 b = slob_alloc(c->size, flags, c->align, node);
94331 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
94332 SLOB_UNITS(c->size) * SLOB_UNIT,
94333 flags, node);
94334 } else {
94335- b = slob_new_pages(flags, get_order(c->size), node);
94336+ struct page *sp;
94337+
94338+ sp = slob_new_pages(flags, get_order(c->size), node);
94339+ if (sp) {
94340+ b = page_address(sp);
94341+ sp->private = c->size;
94342+ }
94343 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
94344 PAGE_SIZE << get_order(c->size),
94345 flags, node);
94346 }
94347+#endif
94348
94349 if (b && c->ctor)
94350 c->ctor(b);
94351@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
94352
94353 static void __kmem_cache_free(void *b, int size)
94354 {
94355- if (size < PAGE_SIZE)
94356+ struct page *sp;
94357+
94358+ sp = virt_to_page(b);
94359+ BUG_ON(!PageSlab(sp));
94360+ if (!sp->private)
94361 slob_free(b, size);
94362 else
94363- slob_free_pages(b, get_order(size));
94364+ slob_free_pages(sp, get_order(size));
94365 }
94366
94367 static void kmem_rcu_free(struct rcu_head *head)
94368@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
94369
94370 void kmem_cache_free(struct kmem_cache *c, void *b)
94371 {
94372+ int size = c->size;
94373+
94374+#ifdef CONFIG_PAX_USERCOPY_SLABS
94375+ if (size + c->align < PAGE_SIZE) {
94376+ size += c->align;
94377+ b -= c->align;
94378+ }
94379+#endif
94380+
94381 kmemleak_free_recursive(b, c->flags);
94382 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
94383 struct slob_rcu *slob_rcu;
94384- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
94385- slob_rcu->size = c->size;
94386+ slob_rcu = b + (size - sizeof(struct slob_rcu));
94387+ slob_rcu->size = size;
94388 call_rcu(&slob_rcu->head, kmem_rcu_free);
94389 } else {
94390- __kmem_cache_free(b, c->size);
94391+ __kmem_cache_free(b, size);
94392 }
94393
94394+#ifdef CONFIG_PAX_USERCOPY_SLABS
94395+ trace_kfree(_RET_IP_, b);
94396+#else
94397 trace_kmem_cache_free(_RET_IP_, b);
94398+#endif
94399+
94400 }
94401 EXPORT_SYMBOL(kmem_cache_free);
94402
94403diff --git a/mm/slub.c b/mm/slub.c
94404index 545a170..a086226 100644
94405--- a/mm/slub.c
94406+++ b/mm/slub.c
94407@@ -207,7 +207,7 @@ struct track {
94408
94409 enum track_item { TRACK_ALLOC, TRACK_FREE };
94410
94411-#ifdef CONFIG_SYSFS
94412+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94413 static int sysfs_slab_add(struct kmem_cache *);
94414 static int sysfs_slab_alias(struct kmem_cache *, const char *);
94415 static void sysfs_slab_remove(struct kmem_cache *);
94416@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
94417 if (!t->addr)
94418 return;
94419
94420- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
94421+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
94422 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
94423 #ifdef CONFIG_STACKTRACE
94424 {
94425@@ -2643,6 +2643,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
94426
94427 slab_free_hook(s, x);
94428
94429+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94430+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
94431+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
94432+ if (s->ctor)
94433+ s->ctor(x);
94434+ }
94435+#endif
94436+
94437 redo:
94438 /*
94439 * Determine the currently cpus per cpu slab.
94440@@ -2710,7 +2718,7 @@ static int slub_min_objects;
94441 * Merge control. If this is set then no merging of slab caches will occur.
94442 * (Could be removed. This was introduced to pacify the merge skeptics.)
94443 */
94444-static int slub_nomerge;
94445+static int slub_nomerge = 1;
94446
94447 /*
94448 * Calculate the order of allocation given an slab object size.
94449@@ -2987,6 +2995,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
94450 s->inuse = size;
94451
94452 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
94453+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94454+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
94455+#endif
94456 s->ctor)) {
94457 /*
94458 * Relocate free pointer after the object if it is not
94459@@ -3332,6 +3343,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
94460 EXPORT_SYMBOL(__kmalloc_node);
94461 #endif
94462
94463+bool is_usercopy_object(const void *ptr)
94464+{
94465+ struct page *page;
94466+ struct kmem_cache *s;
94467+
94468+ if (ZERO_OR_NULL_PTR(ptr))
94469+ return false;
94470+
94471+ if (!slab_is_available())
94472+ return false;
94473+
94474+ if (!virt_addr_valid(ptr))
94475+ return false;
94476+
94477+ page = virt_to_head_page(ptr);
94478+
94479+ if (!PageSlab(page))
94480+ return false;
94481+
94482+ s = page->slab_cache;
94483+ return s->flags & SLAB_USERCOPY;
94484+}
94485+
94486+#ifdef CONFIG_PAX_USERCOPY
94487+const char *check_heap_object(const void *ptr, unsigned long n)
94488+{
94489+ struct page *page;
94490+ struct kmem_cache *s;
94491+ unsigned long offset;
94492+
94493+ if (ZERO_OR_NULL_PTR(ptr))
94494+ return "<null>";
94495+
94496+ if (!virt_addr_valid(ptr))
94497+ return NULL;
94498+
94499+ page = virt_to_head_page(ptr);
94500+
94501+ if (!PageSlab(page))
94502+ return NULL;
94503+
94504+ s = page->slab_cache;
94505+ if (!(s->flags & SLAB_USERCOPY))
94506+ return s->name;
94507+
94508+ offset = (ptr - page_address(page)) % s->size;
94509+ if (offset <= s->object_size && n <= s->object_size - offset)
94510+ return NULL;
94511+
94512+ return s->name;
94513+}
94514+#endif
94515+
94516 size_t ksize(const void *object)
94517 {
94518 struct page *page;
94519@@ -3360,6 +3424,7 @@ void kfree(const void *x)
94520 if (unlikely(ZERO_OR_NULL_PTR(x)))
94521 return;
94522
94523+ VM_BUG_ON(!virt_addr_valid(x));
94524 page = virt_to_head_page(x);
94525 if (unlikely(!PageSlab(page))) {
94526 BUG_ON(!PageCompound(page));
94527@@ -3665,7 +3730,7 @@ static int slab_unmergeable(struct kmem_cache *s)
94528 /*
94529 * We may have set a slab to be unmergeable during bootstrap.
94530 */
94531- if (s->refcount < 0)
94532+ if (atomic_read(&s->refcount) < 0)
94533 return 1;
94534
94535 return 0;
94536@@ -3723,7 +3788,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
94537
94538 s = find_mergeable(memcg, size, align, flags, name, ctor);
94539 if (s) {
94540- s->refcount++;
94541+ atomic_inc(&s->refcount);
94542 /*
94543 * Adjust the object sizes so that we clear
94544 * the complete object on kzalloc.
94545@@ -3732,7 +3797,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
94546 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
94547
94548 if (sysfs_slab_alias(s, name)) {
94549- s->refcount--;
94550+ atomic_dec(&s->refcount);
94551 s = NULL;
94552 }
94553 }
94554@@ -3852,7 +3917,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
94555 }
94556 #endif
94557
94558-#ifdef CONFIG_SYSFS
94559+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94560 static int count_inuse(struct page *page)
94561 {
94562 return page->inuse;
94563@@ -4241,12 +4306,12 @@ static void resiliency_test(void)
94564 validate_slab_cache(kmalloc_caches[9]);
94565 }
94566 #else
94567-#ifdef CONFIG_SYSFS
94568+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94569 static void resiliency_test(void) {};
94570 #endif
94571 #endif
94572
94573-#ifdef CONFIG_SYSFS
94574+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94575 enum slab_stat_type {
94576 SL_ALL, /* All slabs */
94577 SL_PARTIAL, /* Only partially allocated slabs */
94578@@ -4486,7 +4551,7 @@ SLAB_ATTR_RO(ctor);
94579
94580 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
94581 {
94582- return sprintf(buf, "%d\n", s->refcount - 1);
94583+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
94584 }
94585 SLAB_ATTR_RO(aliases);
94586
94587@@ -4574,6 +4639,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
94588 SLAB_ATTR_RO(cache_dma);
94589 #endif
94590
94591+#ifdef CONFIG_PAX_USERCOPY_SLABS
94592+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
94593+{
94594+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
94595+}
94596+SLAB_ATTR_RO(usercopy);
94597+#endif
94598+
94599 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
94600 {
94601 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
94602@@ -4908,6 +4981,9 @@ static struct attribute *slab_attrs[] = {
94603 #ifdef CONFIG_ZONE_DMA
94604 &cache_dma_attr.attr,
94605 #endif
94606+#ifdef CONFIG_PAX_USERCOPY_SLABS
94607+ &usercopy_attr.attr,
94608+#endif
94609 #ifdef CONFIG_NUMA
94610 &remote_node_defrag_ratio_attr.attr,
94611 #endif
94612@@ -5140,6 +5216,7 @@ static char *create_unique_id(struct kmem_cache *s)
94613 return name;
94614 }
94615
94616+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94617 static int sysfs_slab_add(struct kmem_cache *s)
94618 {
94619 int err;
94620@@ -5163,7 +5240,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
94621 }
94622
94623 s->kobj.kset = slab_kset;
94624- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
94625+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
94626 if (err) {
94627 kobject_put(&s->kobj);
94628 return err;
94629@@ -5197,6 +5274,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
94630 kobject_del(&s->kobj);
94631 kobject_put(&s->kobj);
94632 }
94633+#endif
94634
94635 /*
94636 * Need to buffer aliases during bootup until sysfs becomes
94637@@ -5210,6 +5288,7 @@ struct saved_alias {
94638
94639 static struct saved_alias *alias_list;
94640
94641+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
94642 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
94643 {
94644 struct saved_alias *al;
94645@@ -5232,6 +5311,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
94646 alias_list = al;
94647 return 0;
94648 }
94649+#endif
94650
94651 static int __init slab_sysfs_init(void)
94652 {
94653diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
94654index 27eeab3..7c3f7f2 100644
94655--- a/mm/sparse-vmemmap.c
94656+++ b/mm/sparse-vmemmap.c
94657@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
94658 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
94659 if (!p)
94660 return NULL;
94661- pud_populate(&init_mm, pud, p);
94662+ pud_populate_kernel(&init_mm, pud, p);
94663 }
94664 return pud;
94665 }
94666@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
94667 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
94668 if (!p)
94669 return NULL;
94670- pgd_populate(&init_mm, pgd, p);
94671+ pgd_populate_kernel(&init_mm, pgd, p);
94672 }
94673 return pgd;
94674 }
94675diff --git a/mm/sparse.c b/mm/sparse.c
94676index 8cc7be0..d0f7d7a 100644
94677--- a/mm/sparse.c
94678+++ b/mm/sparse.c
94679@@ -745,7 +745,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
94680
94681 for (i = 0; i < PAGES_PER_SECTION; i++) {
94682 if (PageHWPoison(&memmap[i])) {
94683- atomic_long_sub(1, &num_poisoned_pages);
94684+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
94685 ClearPageHWPoison(&memmap[i]);
94686 }
94687 }
94688diff --git a/mm/swap.c b/mm/swap.c
94689index 84b26aa..ce39899 100644
94690--- a/mm/swap.c
94691+++ b/mm/swap.c
94692@@ -77,6 +77,8 @@ static void __put_compound_page(struct page *page)
94693
94694 __page_cache_release(page);
94695 dtor = get_compound_page_dtor(page);
94696+ if (!PageHuge(page))
94697+ BUG_ON(dtor != free_compound_page);
94698 (*dtor)(page);
94699 }
94700
94701diff --git a/mm/swapfile.c b/mm/swapfile.c
94702index 612a7c9..66b0f5a 100644
94703--- a/mm/swapfile.c
94704+++ b/mm/swapfile.c
94705@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
94706
94707 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
94708 /* Activity counter to indicate that a swapon or swapoff has occurred */
94709-static atomic_t proc_poll_event = ATOMIC_INIT(0);
94710+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
94711
94712 static inline unsigned char swap_count(unsigned char ent)
94713 {
94714@@ -1949,7 +1949,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
94715 }
94716 filp_close(swap_file, NULL);
94717 err = 0;
94718- atomic_inc(&proc_poll_event);
94719+ atomic_inc_unchecked(&proc_poll_event);
94720 wake_up_interruptible(&proc_poll_wait);
94721
94722 out_dput:
94723@@ -1966,8 +1966,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
94724
94725 poll_wait(file, &proc_poll_wait, wait);
94726
94727- if (seq->poll_event != atomic_read(&proc_poll_event)) {
94728- seq->poll_event = atomic_read(&proc_poll_event);
94729+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
94730+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
94731 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
94732 }
94733
94734@@ -2065,7 +2065,7 @@ static int swaps_open(struct inode *inode, struct file *file)
94735 return ret;
94736
94737 seq = file->private_data;
94738- seq->poll_event = atomic_read(&proc_poll_event);
94739+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
94740 return 0;
94741 }
94742
94743@@ -2524,7 +2524,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
94744 (frontswap_map) ? "FS" : "");
94745
94746 mutex_unlock(&swapon_mutex);
94747- atomic_inc(&proc_poll_event);
94748+ atomic_inc_unchecked(&proc_poll_event);
94749 wake_up_interruptible(&proc_poll_wait);
94750
94751 if (S_ISREG(inode->i_mode))
94752diff --git a/mm/util.c b/mm/util.c
94753index 808f375..e4764b5 100644
94754--- a/mm/util.c
94755+++ b/mm/util.c
94756@@ -297,6 +297,12 @@ done:
94757 void arch_pick_mmap_layout(struct mm_struct *mm)
94758 {
94759 mm->mmap_base = TASK_UNMAPPED_BASE;
94760+
94761+#ifdef CONFIG_PAX_RANDMMAP
94762+ if (mm->pax_flags & MF_PAX_RANDMMAP)
94763+ mm->mmap_base += mm->delta_mmap;
94764+#endif
94765+
94766 mm->get_unmapped_area = arch_get_unmapped_area;
94767 }
94768 #endif
94769diff --git a/mm/vmalloc.c b/mm/vmalloc.c
94770index 0fdf968..d6686e8 100644
94771--- a/mm/vmalloc.c
94772+++ b/mm/vmalloc.c
94773@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
94774
94775 pte = pte_offset_kernel(pmd, addr);
94776 do {
94777- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
94778- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
94779+
94780+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
94781+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
94782+ BUG_ON(!pte_exec(*pte));
94783+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
94784+ continue;
94785+ }
94786+#endif
94787+
94788+ {
94789+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
94790+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
94791+ }
94792 } while (pte++, addr += PAGE_SIZE, addr != end);
94793 }
94794
94795@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
94796 pte = pte_alloc_kernel(pmd, addr);
94797 if (!pte)
94798 return -ENOMEM;
94799+
94800+ pax_open_kernel();
94801 do {
94802 struct page *page = pages[*nr];
94803
94804- if (WARN_ON(!pte_none(*pte)))
94805+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
94806+ if (pgprot_val(prot) & _PAGE_NX)
94807+#endif
94808+
94809+ if (!pte_none(*pte)) {
94810+ pax_close_kernel();
94811+ WARN_ON(1);
94812 return -EBUSY;
94813- if (WARN_ON(!page))
94814+ }
94815+ if (!page) {
94816+ pax_close_kernel();
94817+ WARN_ON(1);
94818 return -ENOMEM;
94819+ }
94820 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
94821 (*nr)++;
94822 } while (pte++, addr += PAGE_SIZE, addr != end);
94823+ pax_close_kernel();
94824 return 0;
94825 }
94826
94827@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
94828 pmd_t *pmd;
94829 unsigned long next;
94830
94831- pmd = pmd_alloc(&init_mm, pud, addr);
94832+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94833 if (!pmd)
94834 return -ENOMEM;
94835 do {
94836@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
94837 pud_t *pud;
94838 unsigned long next;
94839
94840- pud = pud_alloc(&init_mm, pgd, addr);
94841+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94842 if (!pud)
94843 return -ENOMEM;
94844 do {
94845@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
94846 if (addr >= MODULES_VADDR && addr < MODULES_END)
94847 return 1;
94848 #endif
94849+
94850+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
94851+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
94852+ return 1;
94853+#endif
94854+
94855 return is_vmalloc_addr(x);
94856 }
94857
94858@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
94859
94860 if (!pgd_none(*pgd)) {
94861 pud_t *pud = pud_offset(pgd, addr);
94862+#ifdef CONFIG_X86
94863+ if (!pud_large(*pud))
94864+#endif
94865 if (!pud_none(*pud)) {
94866 pmd_t *pmd = pmd_offset(pud, addr);
94867+#ifdef CONFIG_X86
94868+ if (!pmd_large(*pmd))
94869+#endif
94870 if (!pmd_none(*pmd)) {
94871 pte_t *ptep, pte;
94872
94873@@ -1309,6 +1345,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
94874 struct vm_struct *area;
94875
94876 BUG_ON(in_interrupt());
94877+
94878+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
94879+ if (flags & VM_KERNEXEC) {
94880+ if (start != VMALLOC_START || end != VMALLOC_END)
94881+ return NULL;
94882+ start = (unsigned long)MODULES_EXEC_VADDR;
94883+ end = (unsigned long)MODULES_EXEC_END;
94884+ }
94885+#endif
94886+
94887 if (flags & VM_IOREMAP)
94888 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
94889
94890@@ -1534,6 +1580,11 @@ void *vmap(struct page **pages, unsigned int count,
94891 if (count > totalram_pages)
94892 return NULL;
94893
94894+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
94895+ if (!(pgprot_val(prot) & _PAGE_NX))
94896+ flags |= VM_KERNEXEC;
94897+#endif
94898+
94899 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
94900 __builtin_return_address(0));
94901 if (!area)
94902@@ -1634,6 +1685,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
94903 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
94904 goto fail;
94905
94906+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
94907+ if (!(pgprot_val(prot) & _PAGE_NX))
94908+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
94909+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
94910+ else
94911+#endif
94912+
94913 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
94914 start, end, node, gfp_mask, caller);
94915 if (!area)
94916@@ -1810,10 +1868,9 @@ EXPORT_SYMBOL(vzalloc_node);
94917 * For tight control over page level allocator and protection flags
94918 * use __vmalloc() instead.
94919 */
94920-
94921 void *vmalloc_exec(unsigned long size)
94922 {
94923- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
94924+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
94925 NUMA_NO_NODE, __builtin_return_address(0));
94926 }
94927
94928@@ -2120,6 +2177,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
94929 {
94930 struct vm_struct *area;
94931
94932+ BUG_ON(vma->vm_mirror);
94933+
94934 size = PAGE_ALIGN(size);
94935
94936 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
94937@@ -2602,7 +2661,11 @@ static int s_show(struct seq_file *m, void *p)
94938 v->addr, v->addr + v->size, v->size);
94939
94940 if (v->caller)
94941+#ifdef CONFIG_GRKERNSEC_HIDESYM
94942+ seq_printf(m, " %pK", v->caller);
94943+#else
94944 seq_printf(m, " %pS", v->caller);
94945+#endif
94946
94947 if (v->nr_pages)
94948 seq_printf(m, " pages=%d", v->nr_pages);
94949diff --git a/mm/vmstat.c b/mm/vmstat.c
94950index 7249614..2639fc7 100644
94951--- a/mm/vmstat.c
94952+++ b/mm/vmstat.c
94953@@ -20,6 +20,7 @@
94954 #include <linux/writeback.h>
94955 #include <linux/compaction.h>
94956 #include <linux/mm_inline.h>
94957+#include <linux/grsecurity.h>
94958
94959 #include "internal.h"
94960
94961@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
94962 *
94963 * vm_stat contains the global counters
94964 */
94965-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
94966+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
94967 EXPORT_SYMBOL(vm_stat);
94968
94969 #ifdef CONFIG_SMP
94970@@ -423,7 +424,7 @@ static inline void fold_diff(int *diff)
94971
94972 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
94973 if (diff[i])
94974- atomic_long_add(diff[i], &vm_stat[i]);
94975+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
94976 }
94977
94978 /*
94979@@ -455,7 +456,7 @@ static void refresh_cpu_vm_stats(void)
94980 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
94981 if (v) {
94982
94983- atomic_long_add(v, &zone->vm_stat[i]);
94984+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
94985 global_diff[i] += v;
94986 #ifdef CONFIG_NUMA
94987 /* 3 seconds idle till flush */
94988@@ -517,7 +518,7 @@ void cpu_vm_stats_fold(int cpu)
94989
94990 v = p->vm_stat_diff[i];
94991 p->vm_stat_diff[i] = 0;
94992- atomic_long_add(v, &zone->vm_stat[i]);
94993+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
94994 global_diff[i] += v;
94995 }
94996 }
94997@@ -537,8 +538,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
94998 if (pset->vm_stat_diff[i]) {
94999 int v = pset->vm_stat_diff[i];
95000 pset->vm_stat_diff[i] = 0;
95001- atomic_long_add(v, &zone->vm_stat[i]);
95002- atomic_long_add(v, &vm_stat[i]);
95003+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
95004+ atomic_long_add_unchecked(v, &vm_stat[i]);
95005 }
95006 }
95007 #endif
95008@@ -1148,10 +1149,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
95009 stat_items_size += sizeof(struct vm_event_state);
95010 #endif
95011
95012- v = kmalloc(stat_items_size, GFP_KERNEL);
95013+ v = kzalloc(stat_items_size, GFP_KERNEL);
95014 m->private = v;
95015 if (!v)
95016 return ERR_PTR(-ENOMEM);
95017+
95018+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95019+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
95020+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
95021+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
95022+ && !in_group_p(grsec_proc_gid)
95023+#endif
95024+ )
95025+ return (unsigned long *)m->private + *pos;
95026+#endif
95027+#endif
95028+
95029 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
95030 v[i] = global_page_state(i);
95031 v += NR_VM_ZONE_STAT_ITEMS;
95032@@ -1300,10 +1313,16 @@ static int __init setup_vmstat(void)
95033 put_online_cpus();
95034 #endif
95035 #ifdef CONFIG_PROC_FS
95036- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
95037- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
95038- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
95039- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
95040+ {
95041+ mode_t gr_mode = S_IRUGO;
95042+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95043+ gr_mode = S_IRUSR;
95044+#endif
95045+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
95046+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
95047+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
95048+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
95049+ }
95050 #endif
95051 return 0;
95052 }
95053diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
95054index b3d17d1..e8e4cdd 100644
95055--- a/net/8021q/vlan.c
95056+++ b/net/8021q/vlan.c
95057@@ -472,7 +472,7 @@ out:
95058 return NOTIFY_DONE;
95059 }
95060
95061-static struct notifier_block vlan_notifier_block __read_mostly = {
95062+static struct notifier_block vlan_notifier_block = {
95063 .notifier_call = vlan_device_event,
95064 };
95065
95066@@ -547,8 +547,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
95067 err = -EPERM;
95068 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
95069 break;
95070- if ((args.u.name_type >= 0) &&
95071- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
95072+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
95073 struct vlan_net *vn;
95074
95075 vn = net_generic(net, vlan_net_id);
95076diff --git a/net/9p/client.c b/net/9p/client.c
95077index ee8fd6b..0469d50 100644
95078--- a/net/9p/client.c
95079+++ b/net/9p/client.c
95080@@ -588,7 +588,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
95081 len - inline_len);
95082 } else {
95083 err = copy_from_user(ename + inline_len,
95084- uidata, len - inline_len);
95085+ (char __force_user *)uidata, len - inline_len);
95086 if (err) {
95087 err = -EFAULT;
95088 goto out_err;
95089@@ -1563,7 +1563,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
95090 kernel_buf = 1;
95091 indata = data;
95092 } else
95093- indata = (__force char *)udata;
95094+ indata = (__force_kernel char *)udata;
95095 /*
95096 * response header len is 11
95097 * PDU Header(7) + IO Size (4)
95098@@ -1638,7 +1638,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
95099 kernel_buf = 1;
95100 odata = data;
95101 } else
95102- odata = (char *)udata;
95103+ odata = (char __force_kernel *)udata;
95104 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
95105 P9_ZC_HDR_SZ, kernel_buf, "dqd",
95106 fid->fid, offset, rsize);
95107diff --git a/net/9p/mod.c b/net/9p/mod.c
95108index 6ab36ae..6f1841b 100644
95109--- a/net/9p/mod.c
95110+++ b/net/9p/mod.c
95111@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
95112 void v9fs_register_trans(struct p9_trans_module *m)
95113 {
95114 spin_lock(&v9fs_trans_lock);
95115- list_add_tail(&m->list, &v9fs_trans_list);
95116+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
95117 spin_unlock(&v9fs_trans_lock);
95118 }
95119 EXPORT_SYMBOL(v9fs_register_trans);
95120@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
95121 void v9fs_unregister_trans(struct p9_trans_module *m)
95122 {
95123 spin_lock(&v9fs_trans_lock);
95124- list_del_init(&m->list);
95125+ pax_list_del_init((struct list_head *)&m->list);
95126 spin_unlock(&v9fs_trans_lock);
95127 }
95128 EXPORT_SYMBOL(v9fs_unregister_trans);
95129diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
95130index 9321a77..ed2f256 100644
95131--- a/net/9p/trans_fd.c
95132+++ b/net/9p/trans_fd.c
95133@@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
95134 oldfs = get_fs();
95135 set_fs(get_ds());
95136 /* The cast to a user pointer is valid due to the set_fs() */
95137- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
95138+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
95139 set_fs(oldfs);
95140
95141 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
95142diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
95143index 9c5a1aa..3c6c637 100644
95144--- a/net/9p/trans_virtio.c
95145+++ b/net/9p/trans_virtio.c
95146@@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
95147 int count = nr_pages;
95148 while (nr_pages) {
95149 s = rest_of_page(data);
95150- pages[index++] = kmap_to_page(data);
95151+ if (is_vmalloc_addr(data))
95152+ pages[index++] = vmalloc_to_page(data);
95153+ else
95154+ pages[index++] = kmap_to_page(data);
95155 data += s;
95156 nr_pages--;
95157 }
95158diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
95159index 876fbe8..8bbea9f 100644
95160--- a/net/atm/atm_misc.c
95161+++ b/net/atm/atm_misc.c
95162@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
95163 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
95164 return 1;
95165 atm_return(vcc, truesize);
95166- atomic_inc(&vcc->stats->rx_drop);
95167+ atomic_inc_unchecked(&vcc->stats->rx_drop);
95168 return 0;
95169 }
95170 EXPORT_SYMBOL(atm_charge);
95171@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
95172 }
95173 }
95174 atm_return(vcc, guess);
95175- atomic_inc(&vcc->stats->rx_drop);
95176+ atomic_inc_unchecked(&vcc->stats->rx_drop);
95177 return NULL;
95178 }
95179 EXPORT_SYMBOL(atm_alloc_charge);
95180@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
95181
95182 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
95183 {
95184-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
95185+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
95186 __SONET_ITEMS
95187 #undef __HANDLE_ITEM
95188 }
95189@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
95190
95191 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
95192 {
95193-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
95194+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
95195 __SONET_ITEMS
95196 #undef __HANDLE_ITEM
95197 }
95198diff --git a/net/atm/lec.c b/net/atm/lec.c
95199index f23916b..dd4d26b 100644
95200--- a/net/atm/lec.c
95201+++ b/net/atm/lec.c
95202@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
95203 }
95204
95205 static struct lane2_ops lane2_ops = {
95206- lane2_resolve, /* resolve, spec 3.1.3 */
95207- lane2_associate_req, /* associate_req, spec 3.1.4 */
95208- NULL /* associate indicator, spec 3.1.5 */
95209+ .resolve = lane2_resolve,
95210+ .associate_req = lane2_associate_req,
95211+ .associate_indicator = NULL
95212 };
95213
95214 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
95215diff --git a/net/atm/lec.h b/net/atm/lec.h
95216index 4149db1..f2ab682 100644
95217--- a/net/atm/lec.h
95218+++ b/net/atm/lec.h
95219@@ -48,7 +48,7 @@ struct lane2_ops {
95220 const u8 *tlvs, u32 sizeoftlvs);
95221 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
95222 const u8 *tlvs, u32 sizeoftlvs);
95223-};
95224+} __no_const;
95225
95226 /*
95227 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
95228diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
95229index d1b2d9a..d549f7f 100644
95230--- a/net/atm/mpoa_caches.c
95231+++ b/net/atm/mpoa_caches.c
95232@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
95233
95234
95235 static struct in_cache_ops ingress_ops = {
95236- in_cache_add_entry, /* add_entry */
95237- in_cache_get, /* get */
95238- in_cache_get_with_mask, /* get_with_mask */
95239- in_cache_get_by_vcc, /* get_by_vcc */
95240- in_cache_put, /* put */
95241- in_cache_remove_entry, /* remove_entry */
95242- cache_hit, /* cache_hit */
95243- clear_count_and_expired, /* clear_count */
95244- check_resolving_entries, /* check_resolving */
95245- refresh_entries, /* refresh */
95246- in_destroy_cache /* destroy_cache */
95247+ .add_entry = in_cache_add_entry,
95248+ .get = in_cache_get,
95249+ .get_with_mask = in_cache_get_with_mask,
95250+ .get_by_vcc = in_cache_get_by_vcc,
95251+ .put = in_cache_put,
95252+ .remove_entry = in_cache_remove_entry,
95253+ .cache_hit = cache_hit,
95254+ .clear_count = clear_count_and_expired,
95255+ .check_resolving = check_resolving_entries,
95256+ .refresh = refresh_entries,
95257+ .destroy_cache = in_destroy_cache
95258 };
95259
95260 static struct eg_cache_ops egress_ops = {
95261- eg_cache_add_entry, /* add_entry */
95262- eg_cache_get_by_cache_id, /* get_by_cache_id */
95263- eg_cache_get_by_tag, /* get_by_tag */
95264- eg_cache_get_by_vcc, /* get_by_vcc */
95265- eg_cache_get_by_src_ip, /* get_by_src_ip */
95266- eg_cache_put, /* put */
95267- eg_cache_remove_entry, /* remove_entry */
95268- update_eg_cache_entry, /* update */
95269- clear_expired, /* clear_expired */
95270- eg_destroy_cache /* destroy_cache */
95271+ .add_entry = eg_cache_add_entry,
95272+ .get_by_cache_id = eg_cache_get_by_cache_id,
95273+ .get_by_tag = eg_cache_get_by_tag,
95274+ .get_by_vcc = eg_cache_get_by_vcc,
95275+ .get_by_src_ip = eg_cache_get_by_src_ip,
95276+ .put = eg_cache_put,
95277+ .remove_entry = eg_cache_remove_entry,
95278+ .update = update_eg_cache_entry,
95279+ .clear_expired = clear_expired,
95280+ .destroy_cache = eg_destroy_cache
95281 };
95282
95283
95284diff --git a/net/atm/proc.c b/net/atm/proc.c
95285index bbb6461..cf04016 100644
95286--- a/net/atm/proc.c
95287+++ b/net/atm/proc.c
95288@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
95289 const struct k_atm_aal_stats *stats)
95290 {
95291 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
95292- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
95293- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
95294- atomic_read(&stats->rx_drop));
95295+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
95296+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
95297+ atomic_read_unchecked(&stats->rx_drop));
95298 }
95299
95300 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
95301diff --git a/net/atm/resources.c b/net/atm/resources.c
95302index 0447d5d..3cf4728 100644
95303--- a/net/atm/resources.c
95304+++ b/net/atm/resources.c
95305@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
95306 static void copy_aal_stats(struct k_atm_aal_stats *from,
95307 struct atm_aal_stats *to)
95308 {
95309-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
95310+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
95311 __AAL_STAT_ITEMS
95312 #undef __HANDLE_ITEM
95313 }
95314@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
95315 static void subtract_aal_stats(struct k_atm_aal_stats *from,
95316 struct atm_aal_stats *to)
95317 {
95318-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
95319+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
95320 __AAL_STAT_ITEMS
95321 #undef __HANDLE_ITEM
95322 }
95323diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
95324index 919a5ce..cc6b444 100644
95325--- a/net/ax25/sysctl_net_ax25.c
95326+++ b/net/ax25/sysctl_net_ax25.c
95327@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
95328 {
95329 char path[sizeof("net/ax25/") + IFNAMSIZ];
95330 int k;
95331- struct ctl_table *table;
95332+ ctl_table_no_const *table;
95333
95334 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
95335 if (!table)
95336diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
95337index b9c8a6e..ed0f711 100644
95338--- a/net/batman-adv/bat_iv_ogm.c
95339+++ b/net/batman-adv/bat_iv_ogm.c
95340@@ -297,7 +297,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
95341
95342 /* randomize initial seqno to avoid collision */
95343 get_random_bytes(&random_seqno, sizeof(random_seqno));
95344- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
95345+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
95346
95347 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
95348 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
95349@@ -884,9 +884,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
95350 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
95351
95352 /* change sequence number to network order */
95353- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
95354+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
95355 batadv_ogm_packet->seqno = htonl(seqno);
95356- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
95357+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
95358
95359 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
95360 batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
95361@@ -1251,7 +1251,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
95362 return;
95363
95364 /* could be changed by schedule_own_packet() */
95365- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
95366+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
95367
95368 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
95369 has_directlink_flag = 1;
95370diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
95371index 6ddb614..ca7e886 100644
95372--- a/net/batman-adv/fragmentation.c
95373+++ b/net/batman-adv/fragmentation.c
95374@@ -447,7 +447,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
95375 frag_header.packet_type = BATADV_UNICAST_FRAG;
95376 frag_header.version = BATADV_COMPAT_VERSION;
95377 frag_header.ttl = BATADV_TTL;
95378- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
95379+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
95380 frag_header.reserved = 0;
95381 frag_header.no = 0;
95382 frag_header.total_size = htons(skb->len);
95383diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
95384index a8f99d1..11797ef 100644
95385--- a/net/batman-adv/soft-interface.c
95386+++ b/net/batman-adv/soft-interface.c
95387@@ -278,7 +278,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
95388 primary_if->net_dev->dev_addr, ETH_ALEN);
95389
95390 /* set broadcast sequence number */
95391- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
95392+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
95393 bcast_packet->seqno = htonl(seqno);
95394
95395 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
95396@@ -688,7 +688,7 @@ static int batadv_softif_init_late(struct net_device *dev)
95397 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
95398
95399 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
95400- atomic_set(&bat_priv->bcast_seqno, 1);
95401+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
95402 atomic_set(&bat_priv->tt.vn, 0);
95403 atomic_set(&bat_priv->tt.local_changes, 0);
95404 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
95405@@ -700,7 +700,7 @@ static int batadv_softif_init_late(struct net_device *dev)
95406
95407 /* randomize initial seqno to avoid collision */
95408 get_random_bytes(&random_seqno, sizeof(random_seqno));
95409- atomic_set(&bat_priv->frag_seqno, random_seqno);
95410+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
95411
95412 bat_priv->primary_if = NULL;
95413 bat_priv->num_ifaces = 0;
95414diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
95415index 91dd369..9c25750 100644
95416--- a/net/batman-adv/types.h
95417+++ b/net/batman-adv/types.h
95418@@ -56,7 +56,7 @@
95419 struct batadv_hard_iface_bat_iv {
95420 unsigned char *ogm_buff;
95421 int ogm_buff_len;
95422- atomic_t ogm_seqno;
95423+ atomic_unchecked_t ogm_seqno;
95424 };
95425
95426 /**
95427@@ -673,7 +673,7 @@ struct batadv_priv {
95428 atomic_t bonding;
95429 atomic_t fragmentation;
95430 atomic_t packet_size_max;
95431- atomic_t frag_seqno;
95432+ atomic_unchecked_t frag_seqno;
95433 #ifdef CONFIG_BATMAN_ADV_BLA
95434 atomic_t bridge_loop_avoidance;
95435 #endif
95436@@ -687,7 +687,7 @@ struct batadv_priv {
95437 #ifdef CONFIG_BATMAN_ADV_DEBUG
95438 atomic_t log_level;
95439 #endif
95440- atomic_t bcast_seqno;
95441+ atomic_unchecked_t bcast_seqno;
95442 atomic_t bcast_queue_left;
95443 atomic_t batman_queue_left;
95444 char num_ifaces;
95445diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
95446index 7552f9e..074ce29 100644
95447--- a/net/bluetooth/hci_sock.c
95448+++ b/net/bluetooth/hci_sock.c
95449@@ -1052,7 +1052,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
95450 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
95451 }
95452
95453- len = min_t(unsigned int, len, sizeof(uf));
95454+ len = min((size_t)len, sizeof(uf));
95455 if (copy_from_user(&uf, optval, len)) {
95456 err = -EFAULT;
95457 break;
95458diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
95459index 4af3821..f2ba46c 100644
95460--- a/net/bluetooth/l2cap_core.c
95461+++ b/net/bluetooth/l2cap_core.c
95462@@ -3500,8 +3500,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
95463 break;
95464
95465 case L2CAP_CONF_RFC:
95466- if (olen == sizeof(rfc))
95467- memcpy(&rfc, (void *)val, olen);
95468+ if (olen != sizeof(rfc))
95469+ break;
95470+
95471+ memcpy(&rfc, (void *)val, olen);
95472
95473 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
95474 rfc.mode != chan->mode)
95475diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
95476index 7cc24d2..e83f531 100644
95477--- a/net/bluetooth/l2cap_sock.c
95478+++ b/net/bluetooth/l2cap_sock.c
95479@@ -545,7 +545,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
95480 struct sock *sk = sock->sk;
95481 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
95482 struct l2cap_options opts;
95483- int len, err = 0;
95484+ int err = 0;
95485+ size_t len = optlen;
95486 u32 opt;
95487
95488 BT_DBG("sk %p", sk);
95489@@ -567,7 +568,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
95490 opts.max_tx = chan->max_tx;
95491 opts.txwin_size = chan->tx_win;
95492
95493- len = min_t(unsigned int, sizeof(opts), optlen);
95494+ len = min(sizeof(opts), len);
95495 if (copy_from_user((char *) &opts, optval, len)) {
95496 err = -EFAULT;
95497 break;
95498@@ -647,7 +648,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
95499 struct bt_security sec;
95500 struct bt_power pwr;
95501 struct l2cap_conn *conn;
95502- int len, err = 0;
95503+ int err = 0;
95504+ size_t len = optlen;
95505 u32 opt;
95506
95507 BT_DBG("sk %p", sk);
95508@@ -670,7 +672,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
95509
95510 sec.level = BT_SECURITY_LOW;
95511
95512- len = min_t(unsigned int, sizeof(sec), optlen);
95513+ len = min(sizeof(sec), len);
95514 if (copy_from_user((char *) &sec, optval, len)) {
95515 err = -EFAULT;
95516 break;
95517@@ -770,7 +772,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
95518
95519 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
95520
95521- len = min_t(unsigned int, sizeof(pwr), optlen);
95522+ len = min(sizeof(pwr), len);
95523 if (copy_from_user((char *) &pwr, optval, len)) {
95524 err = -EFAULT;
95525 break;
95526diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
95527index 3c2d3e4..884855a 100644
95528--- a/net/bluetooth/rfcomm/sock.c
95529+++ b/net/bluetooth/rfcomm/sock.c
95530@@ -672,7 +672,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
95531 struct sock *sk = sock->sk;
95532 struct bt_security sec;
95533 int err = 0;
95534- size_t len;
95535+ size_t len = optlen;
95536 u32 opt;
95537
95538 BT_DBG("sk %p", sk);
95539@@ -694,7 +694,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
95540
95541 sec.level = BT_SECURITY_LOW;
95542
95543- len = min_t(unsigned int, sizeof(sec), optlen);
95544+ len = min(sizeof(sec), len);
95545 if (copy_from_user((char *) &sec, optval, len)) {
95546 err = -EFAULT;
95547 break;
95548diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
95549index 84fcf9f..e389b27 100644
95550--- a/net/bluetooth/rfcomm/tty.c
95551+++ b/net/bluetooth/rfcomm/tty.c
95552@@ -684,7 +684,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
95553 BT_DBG("tty %p id %d", tty, tty->index);
95554
95555 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
95556- dev->channel, dev->port.count);
95557+ dev->channel, atomic_read(&dev->port.count));
95558
95559 err = tty_port_open(&dev->port, tty, filp);
95560 if (err)
95561@@ -707,7 +707,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
95562 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
95563
95564 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
95565- dev->port.count);
95566+ atomic_read(&dev->port.count));
95567
95568 tty_port_close(&dev->port, tty, filp);
95569 }
95570diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
95571index ac78024..161a80c 100644
95572--- a/net/bridge/netfilter/ebtables.c
95573+++ b/net/bridge/netfilter/ebtables.c
95574@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
95575 tmp.valid_hooks = t->table->valid_hooks;
95576 }
95577 mutex_unlock(&ebt_mutex);
95578- if (copy_to_user(user, &tmp, *len) != 0){
95579+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
95580 BUGPRINT("c2u Didn't work\n");
95581 ret = -EFAULT;
95582 break;
95583@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
95584 goto out;
95585 tmp.valid_hooks = t->valid_hooks;
95586
95587- if (copy_to_user(user, &tmp, *len) != 0) {
95588+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
95589 ret = -EFAULT;
95590 break;
95591 }
95592@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
95593 tmp.entries_size = t->table->entries_size;
95594 tmp.valid_hooks = t->table->valid_hooks;
95595
95596- if (copy_to_user(user, &tmp, *len) != 0) {
95597+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
95598 ret = -EFAULT;
95599 break;
95600 }
95601diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
95602index 0f45522..dab651f 100644
95603--- a/net/caif/cfctrl.c
95604+++ b/net/caif/cfctrl.c
95605@@ -10,6 +10,7 @@
95606 #include <linux/spinlock.h>
95607 #include <linux/slab.h>
95608 #include <linux/pkt_sched.h>
95609+#include <linux/sched.h>
95610 #include <net/caif/caif_layer.h>
95611 #include <net/caif/cfpkt.h>
95612 #include <net/caif/cfctrl.h>
95613@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
95614 memset(&dev_info, 0, sizeof(dev_info));
95615 dev_info.id = 0xff;
95616 cfsrvl_init(&this->serv, 0, &dev_info, false);
95617- atomic_set(&this->req_seq_no, 1);
95618- atomic_set(&this->rsp_seq_no, 1);
95619+ atomic_set_unchecked(&this->req_seq_no, 1);
95620+ atomic_set_unchecked(&this->rsp_seq_no, 1);
95621 this->serv.layer.receive = cfctrl_recv;
95622 sprintf(this->serv.layer.name, "ctrl");
95623 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
95624@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
95625 struct cfctrl_request_info *req)
95626 {
95627 spin_lock_bh(&ctrl->info_list_lock);
95628- atomic_inc(&ctrl->req_seq_no);
95629- req->sequence_no = atomic_read(&ctrl->req_seq_no);
95630+ atomic_inc_unchecked(&ctrl->req_seq_no);
95631+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
95632 list_add_tail(&req->list, &ctrl->list);
95633 spin_unlock_bh(&ctrl->info_list_lock);
95634 }
95635@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
95636 if (p != first)
95637 pr_warn("Requests are not received in order\n");
95638
95639- atomic_set(&ctrl->rsp_seq_no,
95640+ atomic_set_unchecked(&ctrl->rsp_seq_no,
95641 p->sequence_no);
95642 list_del(&p->list);
95643 goto out;
95644diff --git a/net/can/af_can.c b/net/can/af_can.c
95645index d249874..99e197b 100644
95646--- a/net/can/af_can.c
95647+++ b/net/can/af_can.c
95648@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
95649 };
95650
95651 /* notifier block for netdevice event */
95652-static struct notifier_block can_netdev_notifier __read_mostly = {
95653+static struct notifier_block can_netdev_notifier = {
95654 .notifier_call = can_notifier,
95655 };
95656
95657diff --git a/net/can/gw.c b/net/can/gw.c
95658index 3f9b0f3..fc6d4fa 100644
95659--- a/net/can/gw.c
95660+++ b/net/can/gw.c
95661@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
95662 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
95663
95664 static HLIST_HEAD(cgw_list);
95665-static struct notifier_block notifier;
95666
95667 static struct kmem_cache *cgw_cache __read_mostly;
95668
95669@@ -954,6 +953,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
95670 return err;
95671 }
95672
95673+static struct notifier_block notifier = {
95674+ .notifier_call = cgw_notifier
95675+};
95676+
95677 static __init int cgw_module_init(void)
95678 {
95679 /* sanitize given module parameter */
95680@@ -969,7 +972,6 @@ static __init int cgw_module_init(void)
95681 return -ENOMEM;
95682
95683 /* set notifier */
95684- notifier.notifier_call = cgw_notifier;
95685 register_netdevice_notifier(&notifier);
95686
95687 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
95688diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
95689index 4a5df7b..9ad1f1d 100644
95690--- a/net/ceph/messenger.c
95691+++ b/net/ceph/messenger.c
95692@@ -186,7 +186,7 @@ static void con_fault(struct ceph_connection *con);
95693 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
95694
95695 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
95696-static atomic_t addr_str_seq = ATOMIC_INIT(0);
95697+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
95698
95699 static struct page *zero_page; /* used in certain error cases */
95700
95701@@ -197,7 +197,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
95702 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
95703 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
95704
95705- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
95706+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
95707 s = addr_str[i];
95708
95709 switch (ss->ss_family) {
95710diff --git a/net/compat.c b/net/compat.c
95711index f50161f..94fa415 100644
95712--- a/net/compat.c
95713+++ b/net/compat.c
95714@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
95715 return -EFAULT;
95716 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
95717 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
95718- kmsg->msg_name = compat_ptr(tmp1);
95719- kmsg->msg_iov = compat_ptr(tmp2);
95720- kmsg->msg_control = compat_ptr(tmp3);
95721+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
95722+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
95723+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
95724 return 0;
95725 }
95726
95727@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
95728
95729 if (kern_msg->msg_namelen) {
95730 if (mode == VERIFY_READ) {
95731- int err = move_addr_to_kernel(kern_msg->msg_name,
95732+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
95733 kern_msg->msg_namelen,
95734 kern_address);
95735 if (err < 0)
95736@@ -99,7 +99,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
95737 kern_msg->msg_name = NULL;
95738
95739 tot_len = iov_from_user_compat_to_kern(kern_iov,
95740- (struct compat_iovec __user *)kern_msg->msg_iov,
95741+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
95742 kern_msg->msg_iovlen);
95743 if (tot_len >= 0)
95744 kern_msg->msg_iov = kern_iov;
95745@@ -119,20 +119,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
95746
95747 #define CMSG_COMPAT_FIRSTHDR(msg) \
95748 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
95749- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
95750+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
95751 (struct compat_cmsghdr __user *)NULL)
95752
95753 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
95754 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
95755 (ucmlen) <= (unsigned long) \
95756 ((mhdr)->msg_controllen - \
95757- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
95758+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
95759
95760 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
95761 struct compat_cmsghdr __user *cmsg, int cmsg_len)
95762 {
95763 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
95764- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
95765+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
95766 msg->msg_controllen)
95767 return NULL;
95768 return (struct compat_cmsghdr __user *)ptr;
95769@@ -222,7 +222,7 @@ Efault:
95770
95771 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
95772 {
95773- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
95774+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
95775 struct compat_cmsghdr cmhdr;
95776 struct compat_timeval ctv;
95777 struct compat_timespec cts[3];
95778@@ -278,7 +278,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
95779
95780 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
95781 {
95782- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
95783+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
95784 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
95785 int fdnum = scm->fp->count;
95786 struct file **fp = scm->fp->fp;
95787@@ -366,7 +366,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
95788 return -EFAULT;
95789 old_fs = get_fs();
95790 set_fs(KERNEL_DS);
95791- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
95792+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
95793 set_fs(old_fs);
95794
95795 return err;
95796@@ -427,7 +427,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
95797 len = sizeof(ktime);
95798 old_fs = get_fs();
95799 set_fs(KERNEL_DS);
95800- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
95801+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
95802 set_fs(old_fs);
95803
95804 if (!err) {
95805@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
95806 case MCAST_JOIN_GROUP:
95807 case MCAST_LEAVE_GROUP:
95808 {
95809- struct compat_group_req __user *gr32 = (void *)optval;
95810+ struct compat_group_req __user *gr32 = (void __user *)optval;
95811 struct group_req __user *kgr =
95812 compat_alloc_user_space(sizeof(struct group_req));
95813 u32 interface;
95814@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
95815 case MCAST_BLOCK_SOURCE:
95816 case MCAST_UNBLOCK_SOURCE:
95817 {
95818- struct compat_group_source_req __user *gsr32 = (void *)optval;
95819+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
95820 struct group_source_req __user *kgsr = compat_alloc_user_space(
95821 sizeof(struct group_source_req));
95822 u32 interface;
95823@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
95824 }
95825 case MCAST_MSFILTER:
95826 {
95827- struct compat_group_filter __user *gf32 = (void *)optval;
95828+ struct compat_group_filter __user *gf32 = (void __user *)optval;
95829 struct group_filter __user *kgf;
95830 u32 interface, fmode, numsrc;
95831
95832@@ -650,7 +650,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
95833 char __user *optval, int __user *optlen,
95834 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
95835 {
95836- struct compat_group_filter __user *gf32 = (void *)optval;
95837+ struct compat_group_filter __user *gf32 = (void __user *)optval;
95838 struct group_filter __user *kgf;
95839 int __user *koptlen;
95840 u32 interface, fmode, numsrc;
95841@@ -803,7 +803,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
95842
95843 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
95844 return -EINVAL;
95845- if (copy_from_user(a, args, nas[call]))
95846+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
95847 return -EFAULT;
95848 a0 = a[0];
95849 a1 = a[1];
95850diff --git a/net/core/datagram.c b/net/core/datagram.c
95851index a16ed7b..eb44d17 100644
95852--- a/net/core/datagram.c
95853+++ b/net/core/datagram.c
95854@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
95855 }
95856
95857 kfree_skb(skb);
95858- atomic_inc(&sk->sk_drops);
95859+ atomic_inc_unchecked(&sk->sk_drops);
95860 sk_mem_reclaim_partial(sk);
95861
95862 return err;
95863diff --git a/net/core/dev.c b/net/core/dev.c
95864index 0ce469e..dfb53d2 100644
95865--- a/net/core/dev.c
95866+++ b/net/core/dev.c
95867@@ -1684,14 +1684,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
95868 {
95869 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
95870 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
95871- atomic_long_inc(&dev->rx_dropped);
95872+ atomic_long_inc_unchecked(&dev->rx_dropped);
95873 kfree_skb(skb);
95874 return NET_RX_DROP;
95875 }
95876 }
95877
95878 if (unlikely(!is_skb_forwardable(dev, skb))) {
95879- atomic_long_inc(&dev->rx_dropped);
95880+ atomic_long_inc_unchecked(&dev->rx_dropped);
95881 kfree_skb(skb);
95882 return NET_RX_DROP;
95883 }
95884@@ -2434,7 +2434,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
95885
95886 struct dev_gso_cb {
95887 void (*destructor)(struct sk_buff *skb);
95888-};
95889+} __no_const;
95890
95891 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
95892
95893@@ -3222,7 +3222,7 @@ enqueue:
95894
95895 local_irq_restore(flags);
95896
95897- atomic_long_inc(&skb->dev->rx_dropped);
95898+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
95899 kfree_skb(skb);
95900 return NET_RX_DROP;
95901 }
95902@@ -3294,7 +3294,7 @@ int netif_rx_ni(struct sk_buff *skb)
95903 }
95904 EXPORT_SYMBOL(netif_rx_ni);
95905
95906-static void net_tx_action(struct softirq_action *h)
95907+static __latent_entropy void net_tx_action(void)
95908 {
95909 struct softnet_data *sd = &__get_cpu_var(softnet_data);
95910
95911@@ -3628,7 +3628,7 @@ ncls:
95912 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
95913 } else {
95914 drop:
95915- atomic_long_inc(&skb->dev->rx_dropped);
95916+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
95917 kfree_skb(skb);
95918 /* Jamal, now you will not able to escape explaining
95919 * me how you were going to use this. :-)
95920@@ -4288,7 +4288,7 @@ void netif_napi_del(struct napi_struct *napi)
95921 }
95922 EXPORT_SYMBOL(netif_napi_del);
95923
95924-static void net_rx_action(struct softirq_action *h)
95925+static __latent_entropy void net_rx_action(void)
95926 {
95927 struct softnet_data *sd = &__get_cpu_var(softnet_data);
95928 unsigned long time_limit = jiffies + 2;
95929@@ -6177,7 +6177,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
95930 } else {
95931 netdev_stats_to_stats64(storage, &dev->stats);
95932 }
95933- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
95934+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
95935 return storage;
95936 }
95937 EXPORT_SYMBOL(dev_get_stats);
95938diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
95939index 5b7d0e1..cb960fc 100644
95940--- a/net/core/dev_ioctl.c
95941+++ b/net/core/dev_ioctl.c
95942@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
95943 if (no_module && capable(CAP_NET_ADMIN))
95944 no_module = request_module("netdev-%s", name);
95945 if (no_module && capable(CAP_SYS_MODULE)) {
95946+#ifdef CONFIG_GRKERNSEC_MODHARDEN
95947+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
95948+#else
95949 if (!request_module("%s", name))
95950 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
95951 name);
95952+#endif
95953 }
95954 }
95955 EXPORT_SYMBOL(dev_load);
95956diff --git a/net/core/filter.c b/net/core/filter.c
95957index ad30d62..c2757df 100644
95958--- a/net/core/filter.c
95959+++ b/net/core/filter.c
95960@@ -679,7 +679,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
95961 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
95962 if (!fp)
95963 return -ENOMEM;
95964- memcpy(fp->insns, fprog->filter, fsize);
95965+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
95966
95967 atomic_set(&fp->refcnt, 1);
95968 fp->len = fprog->len;
95969diff --git a/net/core/flow.c b/net/core/flow.c
95970index dfa602c..3103d88 100644
95971--- a/net/core/flow.c
95972+++ b/net/core/flow.c
95973@@ -61,7 +61,7 @@ struct flow_cache {
95974 struct timer_list rnd_timer;
95975 };
95976
95977-atomic_t flow_cache_genid = ATOMIC_INIT(0);
95978+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
95979 EXPORT_SYMBOL(flow_cache_genid);
95980 static struct flow_cache flow_cache_global;
95981 static struct kmem_cache *flow_cachep __read_mostly;
95982@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
95983
95984 static int flow_entry_valid(struct flow_cache_entry *fle)
95985 {
95986- if (atomic_read(&flow_cache_genid) != fle->genid)
95987+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
95988 return 0;
95989 if (fle->object && !fle->object->ops->check(fle->object))
95990 return 0;
95991@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
95992 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
95993 fcp->hash_count++;
95994 }
95995- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
95996+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
95997 flo = fle->object;
95998 if (!flo)
95999 goto ret_object;
96000@@ -279,7 +279,7 @@ nocache:
96001 }
96002 flo = resolver(net, key, family, dir, flo, ctx);
96003 if (fle) {
96004- fle->genid = atomic_read(&flow_cache_genid);
96005+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
96006 if (!IS_ERR(flo))
96007 fle->object = flo;
96008 else
96009diff --git a/net/core/iovec.c b/net/core/iovec.c
96010index b618694..192bbba 100644
96011--- a/net/core/iovec.c
96012+++ b/net/core/iovec.c
96013@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
96014 if (m->msg_namelen) {
96015 if (mode == VERIFY_READ) {
96016 void __user *namep;
96017- namep = (void __user __force *) m->msg_name;
96018+ namep = (void __force_user *) m->msg_name;
96019 err = move_addr_to_kernel(namep, m->msg_namelen,
96020 address);
96021 if (err < 0)
96022@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
96023 }
96024
96025 size = m->msg_iovlen * sizeof(struct iovec);
96026- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
96027+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
96028 return -EFAULT;
96029
96030 m->msg_iov = iov;
96031diff --git a/net/core/neighbour.c b/net/core/neighbour.c
96032index 932c6d7..7c7aa10 100644
96033--- a/net/core/neighbour.c
96034+++ b/net/core/neighbour.c
96035@@ -2775,7 +2775,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
96036 void __user *buffer, size_t *lenp, loff_t *ppos)
96037 {
96038 int size, ret;
96039- struct ctl_table tmp = *ctl;
96040+ ctl_table_no_const tmp = *ctl;
96041
96042 tmp.extra1 = &zero;
96043 tmp.extra2 = &unres_qlen_max;
96044diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
96045index 2bf8329..7960607 100644
96046--- a/net/core/net-procfs.c
96047+++ b/net/core/net-procfs.c
96048@@ -283,8 +283,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
96049 else
96050 seq_printf(seq, "%04x", ntohs(pt->type));
96051
96052+#ifdef CONFIG_GRKERNSEC_HIDESYM
96053+ seq_printf(seq, " %-8s %pf\n",
96054+ pt->dev ? pt->dev->name : "", NULL);
96055+#else
96056 seq_printf(seq, " %-8s %pf\n",
96057 pt->dev ? pt->dev->name : "", pt->func);
96058+#endif
96059 }
96060
96061 return 0;
96062diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
96063index f3edf96..3cd8b40 100644
96064--- a/net/core/net-sysfs.c
96065+++ b/net/core/net-sysfs.c
96066@@ -1358,7 +1358,7 @@ void netdev_class_remove_file_ns(struct class_attribute *class_attr,
96067 }
96068 EXPORT_SYMBOL(netdev_class_remove_file_ns);
96069
96070-int netdev_kobject_init(void)
96071+int __init netdev_kobject_init(void)
96072 {
96073 kobj_ns_type_register(&net_ns_type_operations);
96074 return class_register(&net_class);
96075diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
96076index 81d3a9a..a0bd7a8 100644
96077--- a/net/core/net_namespace.c
96078+++ b/net/core/net_namespace.c
96079@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
96080 int error;
96081 LIST_HEAD(net_exit_list);
96082
96083- list_add_tail(&ops->list, list);
96084+ pax_list_add_tail((struct list_head *)&ops->list, list);
96085 if (ops->init || (ops->id && ops->size)) {
96086 for_each_net(net) {
96087 error = ops_init(ops, net);
96088@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
96089
96090 out_undo:
96091 /* If I have an error cleanup all namespaces I initialized */
96092- list_del(&ops->list);
96093+ pax_list_del((struct list_head *)&ops->list);
96094 ops_exit_list(ops, &net_exit_list);
96095 ops_free_list(ops, &net_exit_list);
96096 return error;
96097@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
96098 struct net *net;
96099 LIST_HEAD(net_exit_list);
96100
96101- list_del(&ops->list);
96102+ pax_list_del((struct list_head *)&ops->list);
96103 for_each_net(net)
96104 list_add_tail(&net->exit_list, &net_exit_list);
96105 ops_exit_list(ops, &net_exit_list);
96106@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
96107 mutex_lock(&net_mutex);
96108 error = register_pernet_operations(&pernet_list, ops);
96109 if (!error && (first_device == &pernet_list))
96110- first_device = &ops->list;
96111+ first_device = (struct list_head *)&ops->list;
96112 mutex_unlock(&net_mutex);
96113 return error;
96114 }
96115diff --git a/net/core/netpoll.c b/net/core/netpoll.c
96116index 19fe9c7..b6bb620 100644
96117--- a/net/core/netpoll.c
96118+++ b/net/core/netpoll.c
96119@@ -435,7 +435,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
96120 struct udphdr *udph;
96121 struct iphdr *iph;
96122 struct ethhdr *eth;
96123- static atomic_t ip_ident;
96124+ static atomic_unchecked_t ip_ident;
96125 struct ipv6hdr *ip6h;
96126
96127 udp_len = len + sizeof(*udph);
96128@@ -506,7 +506,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
96129 put_unaligned(0x45, (unsigned char *)iph);
96130 iph->tos = 0;
96131 put_unaligned(htons(ip_len), &(iph->tot_len));
96132- iph->id = htons(atomic_inc_return(&ip_ident));
96133+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
96134 iph->frag_off = 0;
96135 iph->ttl = 64;
96136 iph->protocol = IPPROTO_UDP;
96137diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
96138index cf67144..12bf94c 100644
96139--- a/net/core/rtnetlink.c
96140+++ b/net/core/rtnetlink.c
96141@@ -58,7 +58,7 @@ struct rtnl_link {
96142 rtnl_doit_func doit;
96143 rtnl_dumpit_func dumpit;
96144 rtnl_calcit_func calcit;
96145-};
96146+} __no_const;
96147
96148 static DEFINE_MUTEX(rtnl_mutex);
96149
96150@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
96151 if (rtnl_link_ops_get(ops->kind))
96152 return -EEXIST;
96153
96154- if (!ops->dellink)
96155- ops->dellink = unregister_netdevice_queue;
96156+ if (!ops->dellink) {
96157+ pax_open_kernel();
96158+ *(void **)&ops->dellink = unregister_netdevice_queue;
96159+ pax_close_kernel();
96160+ }
96161
96162- list_add_tail(&ops->list, &link_ops);
96163+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
96164 return 0;
96165 }
96166 EXPORT_SYMBOL_GPL(__rtnl_link_register);
96167@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
96168 for_each_net(net) {
96169 __rtnl_kill_links(net, ops);
96170 }
96171- list_del(&ops->list);
96172+ pax_list_del((struct list_head *)&ops->list);
96173 }
96174 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
96175
96176diff --git a/net/core/scm.c b/net/core/scm.c
96177index b442e7e..6f5b5a2 100644
96178--- a/net/core/scm.c
96179+++ b/net/core/scm.c
96180@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
96181 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
96182 {
96183 struct cmsghdr __user *cm
96184- = (__force struct cmsghdr __user *)msg->msg_control;
96185+ = (struct cmsghdr __force_user *)msg->msg_control;
96186 struct cmsghdr cmhdr;
96187 int cmlen = CMSG_LEN(len);
96188 int err;
96189@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
96190 err = -EFAULT;
96191 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
96192 goto out;
96193- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
96194+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
96195 goto out;
96196 cmlen = CMSG_SPACE(len);
96197 if (msg->msg_controllen < cmlen)
96198@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
96199 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
96200 {
96201 struct cmsghdr __user *cm
96202- = (__force struct cmsghdr __user*)msg->msg_control;
96203+ = (struct cmsghdr __force_user *)msg->msg_control;
96204
96205 int fdmax = 0;
96206 int fdnum = scm->fp->count;
96207@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
96208 if (fdnum < fdmax)
96209 fdmax = fdnum;
96210
96211- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
96212+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
96213 i++, cmfptr++)
96214 {
96215 struct socket *sock;
96216diff --git a/net/core/skbuff.c b/net/core/skbuff.c
96217index 06e72d3..19dfa7b 100644
96218--- a/net/core/skbuff.c
96219+++ b/net/core/skbuff.c
96220@@ -2034,7 +2034,7 @@ EXPORT_SYMBOL(__skb_checksum);
96221 __wsum skb_checksum(const struct sk_buff *skb, int offset,
96222 int len, __wsum csum)
96223 {
96224- const struct skb_checksum_ops ops = {
96225+ static const struct skb_checksum_ops ops = {
96226 .update = csum_partial_ext,
96227 .combine = csum_block_add_ext,
96228 };
96229@@ -3147,13 +3147,15 @@ void __init skb_init(void)
96230 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
96231 sizeof(struct sk_buff),
96232 0,
96233- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
96234+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
96235+ SLAB_NO_SANITIZE,
96236 NULL);
96237 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
96238 (2*sizeof(struct sk_buff)) +
96239 sizeof(atomic_t),
96240 0,
96241- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
96242+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
96243+ SLAB_NO_SANITIZE,
96244 NULL);
96245 }
96246
96247diff --git a/net/core/sock.c b/net/core/sock.c
96248index 5393b4b..997c88b 100644
96249--- a/net/core/sock.c
96250+++ b/net/core/sock.c
96251@@ -393,7 +393,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
96252 struct sk_buff_head *list = &sk->sk_receive_queue;
96253
96254 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
96255- atomic_inc(&sk->sk_drops);
96256+ atomic_inc_unchecked(&sk->sk_drops);
96257 trace_sock_rcvqueue_full(sk, skb);
96258 return -ENOMEM;
96259 }
96260@@ -403,7 +403,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
96261 return err;
96262
96263 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
96264- atomic_inc(&sk->sk_drops);
96265+ atomic_inc_unchecked(&sk->sk_drops);
96266 return -ENOBUFS;
96267 }
96268
96269@@ -423,7 +423,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
96270 skb_dst_force(skb);
96271
96272 spin_lock_irqsave(&list->lock, flags);
96273- skb->dropcount = atomic_read(&sk->sk_drops);
96274+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
96275 __skb_queue_tail(list, skb);
96276 spin_unlock_irqrestore(&list->lock, flags);
96277
96278@@ -443,7 +443,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
96279 skb->dev = NULL;
96280
96281 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
96282- atomic_inc(&sk->sk_drops);
96283+ atomic_inc_unchecked(&sk->sk_drops);
96284 goto discard_and_relse;
96285 }
96286 if (nested)
96287@@ -461,7 +461,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
96288 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
96289 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
96290 bh_unlock_sock(sk);
96291- atomic_inc(&sk->sk_drops);
96292+ atomic_inc_unchecked(&sk->sk_drops);
96293 goto discard_and_relse;
96294 }
96295
96296@@ -950,12 +950,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
96297 struct timeval tm;
96298 } v;
96299
96300- int lv = sizeof(int);
96301- int len;
96302+ unsigned int lv = sizeof(int);
96303+ unsigned int len;
96304
96305 if (get_user(len, optlen))
96306 return -EFAULT;
96307- if (len < 0)
96308+ if (len > INT_MAX)
96309 return -EINVAL;
96310
96311 memset(&v, 0, sizeof(v));
96312@@ -1107,11 +1107,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
96313
96314 case SO_PEERNAME:
96315 {
96316- char address[128];
96317+ char address[_K_SS_MAXSIZE];
96318
96319 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
96320 return -ENOTCONN;
96321- if (lv < len)
96322+ if (lv < len || sizeof address < len)
96323 return -EINVAL;
96324 if (copy_to_user(optval, address, len))
96325 return -EFAULT;
96326@@ -1188,7 +1188,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
96327
96328 if (len > lv)
96329 len = lv;
96330- if (copy_to_user(optval, &v, len))
96331+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
96332 return -EFAULT;
96333 lenout:
96334 if (put_user(len, optlen))
96335@@ -2351,7 +2351,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
96336 */
96337 smp_wmb();
96338 atomic_set(&sk->sk_refcnt, 1);
96339- atomic_set(&sk->sk_drops, 0);
96340+ atomic_set_unchecked(&sk->sk_drops, 0);
96341 }
96342 EXPORT_SYMBOL(sock_init_data);
96343
96344@@ -2476,6 +2476,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
96345 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
96346 int level, int type)
96347 {
96348+ struct sock_extended_err ee;
96349 struct sock_exterr_skb *serr;
96350 struct sk_buff *skb, *skb2;
96351 int copied, err;
96352@@ -2497,7 +2498,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
96353 sock_recv_timestamp(msg, sk, skb);
96354
96355 serr = SKB_EXT_ERR(skb);
96356- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
96357+ ee = serr->ee;
96358+ put_cmsg(msg, level, type, sizeof ee, &ee);
96359
96360 msg->msg_flags |= MSG_ERRQUEUE;
96361 err = copied;
96362diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
96363index a0e9cf6..ef7f9ed 100644
96364--- a/net/core/sock_diag.c
96365+++ b/net/core/sock_diag.c
96366@@ -9,26 +9,33 @@
96367 #include <linux/inet_diag.h>
96368 #include <linux/sock_diag.h>
96369
96370-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
96371+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
96372 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
96373 static DEFINE_MUTEX(sock_diag_table_mutex);
96374
96375 int sock_diag_check_cookie(void *sk, __u32 *cookie)
96376 {
96377+#ifndef CONFIG_GRKERNSEC_HIDESYM
96378 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
96379 cookie[1] != INET_DIAG_NOCOOKIE) &&
96380 ((u32)(unsigned long)sk != cookie[0] ||
96381 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
96382 return -ESTALE;
96383 else
96384+#endif
96385 return 0;
96386 }
96387 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
96388
96389 void sock_diag_save_cookie(void *sk, __u32 *cookie)
96390 {
96391+#ifdef CONFIG_GRKERNSEC_HIDESYM
96392+ cookie[0] = 0;
96393+ cookie[1] = 0;
96394+#else
96395 cookie[0] = (u32)(unsigned long)sk;
96396 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
96397+#endif
96398 }
96399 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
96400
96401@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
96402 mutex_lock(&sock_diag_table_mutex);
96403 if (sock_diag_handlers[hndl->family])
96404 err = -EBUSY;
96405- else
96406+ else {
96407+ pax_open_kernel();
96408 sock_diag_handlers[hndl->family] = hndl;
96409+ pax_close_kernel();
96410+ }
96411 mutex_unlock(&sock_diag_table_mutex);
96412
96413 return err;
96414@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
96415
96416 mutex_lock(&sock_diag_table_mutex);
96417 BUG_ON(sock_diag_handlers[family] != hnld);
96418+ pax_open_kernel();
96419 sock_diag_handlers[family] = NULL;
96420+ pax_close_kernel();
96421 mutex_unlock(&sock_diag_table_mutex);
96422 }
96423 EXPORT_SYMBOL_GPL(sock_diag_unregister);
96424diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
96425index cca4441..5e616de 100644
96426--- a/net/core/sysctl_net_core.c
96427+++ b/net/core/sysctl_net_core.c
96428@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
96429 {
96430 unsigned int orig_size, size;
96431 int ret, i;
96432- struct ctl_table tmp = {
96433+ ctl_table_no_const tmp = {
96434 .data = &size,
96435 .maxlen = sizeof(size),
96436 .mode = table->mode
96437@@ -199,7 +199,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
96438 void __user *buffer, size_t *lenp, loff_t *ppos)
96439 {
96440 char id[IFNAMSIZ];
96441- struct ctl_table tbl = {
96442+ ctl_table_no_const tbl = {
96443 .data = id,
96444 .maxlen = IFNAMSIZ,
96445 };
96446@@ -378,13 +378,12 @@ static struct ctl_table netns_core_table[] = {
96447
96448 static __net_init int sysctl_core_net_init(struct net *net)
96449 {
96450- struct ctl_table *tbl;
96451+ ctl_table_no_const *tbl = NULL;
96452
96453 net->core.sysctl_somaxconn = SOMAXCONN;
96454
96455- tbl = netns_core_table;
96456 if (!net_eq(net, &init_net)) {
96457- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
96458+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
96459 if (tbl == NULL)
96460 goto err_dup;
96461
96462@@ -394,17 +393,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
96463 if (net->user_ns != &init_user_ns) {
96464 tbl[0].procname = NULL;
96465 }
96466- }
96467-
96468- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
96469+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
96470+ } else
96471+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
96472 if (net->core.sysctl_hdr == NULL)
96473 goto err_reg;
96474
96475 return 0;
96476
96477 err_reg:
96478- if (tbl != netns_core_table)
96479- kfree(tbl);
96480+ kfree(tbl);
96481 err_dup:
96482 return -ENOMEM;
96483 }
96484@@ -419,7 +417,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
96485 kfree(tbl);
96486 }
96487
96488-static __net_initdata struct pernet_operations sysctl_core_ops = {
96489+static __net_initconst struct pernet_operations sysctl_core_ops = {
96490 .init = sysctl_core_net_init,
96491 .exit = sysctl_core_net_exit,
96492 };
96493diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
96494index dd4d506..fb2fb87 100644
96495--- a/net/decnet/af_decnet.c
96496+++ b/net/decnet/af_decnet.c
96497@@ -465,6 +465,7 @@ static struct proto dn_proto = {
96498 .sysctl_rmem = sysctl_decnet_rmem,
96499 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
96500 .obj_size = sizeof(struct dn_sock),
96501+ .slab_flags = SLAB_USERCOPY,
96502 };
96503
96504 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
96505diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
96506index dd0dfb2..fdbc764 100644
96507--- a/net/decnet/dn_dev.c
96508+++ b/net/decnet/dn_dev.c
96509@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
96510 .extra1 = &min_t3,
96511 .extra2 = &max_t3
96512 },
96513- {0}
96514+ { }
96515 },
96516 };
96517
96518diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
96519index 5325b54..a0d4d69 100644
96520--- a/net/decnet/sysctl_net_decnet.c
96521+++ b/net/decnet/sysctl_net_decnet.c
96522@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
96523
96524 if (len > *lenp) len = *lenp;
96525
96526- if (copy_to_user(buffer, addr, len))
96527+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
96528 return -EFAULT;
96529
96530 *lenp = len;
96531@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
96532
96533 if (len > *lenp) len = *lenp;
96534
96535- if (copy_to_user(buffer, devname, len))
96536+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
96537 return -EFAULT;
96538
96539 *lenp = len;
96540diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
96541index 1865fdf..581a595 100644
96542--- a/net/ieee802154/dgram.c
96543+++ b/net/ieee802154/dgram.c
96544@@ -315,8 +315,9 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
96545 if (saddr) {
96546 saddr->family = AF_IEEE802154;
96547 saddr->addr = mac_cb(skb)->sa;
96548+ }
96549+ if (addr_len)
96550 *addr_len = sizeof(*saddr);
96551- }
96552
96553 if (flags & MSG_TRUNC)
96554 copied = skb->len;
96555diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
96556index 70011e0..454ca6a 100644
96557--- a/net/ipv4/af_inet.c
96558+++ b/net/ipv4/af_inet.c
96559@@ -1683,13 +1683,9 @@ static int __init inet_init(void)
96560
96561 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
96562
96563- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
96564- if (!sysctl_local_reserved_ports)
96565- goto out;
96566-
96567 rc = proto_register(&tcp_prot, 1);
96568 if (rc)
96569- goto out_free_reserved_ports;
96570+ goto out;
96571
96572 rc = proto_register(&udp_prot, 1);
96573 if (rc)
96574@@ -1796,8 +1792,6 @@ out_unregister_udp_proto:
96575 proto_unregister(&udp_prot);
96576 out_unregister_tcp_proto:
96577 proto_unregister(&tcp_prot);
96578-out_free_reserved_ports:
96579- kfree(sysctl_local_reserved_ports);
96580 goto out;
96581 }
96582
96583diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
96584index a1b5bcb..62ec5c6 100644
96585--- a/net/ipv4/devinet.c
96586+++ b/net/ipv4/devinet.c
96587@@ -1533,7 +1533,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
96588 idx = 0;
96589 head = &net->dev_index_head[h];
96590 rcu_read_lock();
96591- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
96592+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
96593 net->dev_base_seq;
96594 hlist_for_each_entry_rcu(dev, head, index_hlist) {
96595 if (idx < s_idx)
96596@@ -1844,7 +1844,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
96597 idx = 0;
96598 head = &net->dev_index_head[h];
96599 rcu_read_lock();
96600- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
96601+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
96602 net->dev_base_seq;
96603 hlist_for_each_entry_rcu(dev, head, index_hlist) {
96604 if (idx < s_idx)
96605@@ -2069,7 +2069,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
96606 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
96607 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
96608
96609-static struct devinet_sysctl_table {
96610+static const struct devinet_sysctl_table {
96611 struct ctl_table_header *sysctl_header;
96612 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
96613 } devinet_sysctl = {
96614@@ -2191,7 +2191,7 @@ static __net_init int devinet_init_net(struct net *net)
96615 int err;
96616 struct ipv4_devconf *all, *dflt;
96617 #ifdef CONFIG_SYSCTL
96618- struct ctl_table *tbl = ctl_forward_entry;
96619+ ctl_table_no_const *tbl = NULL;
96620 struct ctl_table_header *forw_hdr;
96621 #endif
96622
96623@@ -2209,7 +2209,7 @@ static __net_init int devinet_init_net(struct net *net)
96624 goto err_alloc_dflt;
96625
96626 #ifdef CONFIG_SYSCTL
96627- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
96628+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
96629 if (tbl == NULL)
96630 goto err_alloc_ctl;
96631
96632@@ -2229,7 +2229,10 @@ static __net_init int devinet_init_net(struct net *net)
96633 goto err_reg_dflt;
96634
96635 err = -ENOMEM;
96636- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
96637+ if (!net_eq(net, &init_net))
96638+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
96639+ else
96640+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
96641 if (forw_hdr == NULL)
96642 goto err_reg_ctl;
96643 net->ipv4.forw_hdr = forw_hdr;
96644@@ -2245,8 +2248,7 @@ err_reg_ctl:
96645 err_reg_dflt:
96646 __devinet_sysctl_unregister(all);
96647 err_reg_all:
96648- if (tbl != ctl_forward_entry)
96649- kfree(tbl);
96650+ kfree(tbl);
96651 err_alloc_ctl:
96652 #endif
96653 if (dflt != &ipv4_devconf_dflt)
96654diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
96655index c7539e2..b455e51 100644
96656--- a/net/ipv4/fib_frontend.c
96657+++ b/net/ipv4/fib_frontend.c
96658@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
96659 #ifdef CONFIG_IP_ROUTE_MULTIPATH
96660 fib_sync_up(dev);
96661 #endif
96662- atomic_inc(&net->ipv4.dev_addr_genid);
96663+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
96664 rt_cache_flush(dev_net(dev));
96665 break;
96666 case NETDEV_DOWN:
96667 fib_del_ifaddr(ifa, NULL);
96668- atomic_inc(&net->ipv4.dev_addr_genid);
96669+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
96670 if (ifa->ifa_dev->ifa_list == NULL) {
96671 /* Last address was deleted from this interface.
96672 * Disable IP.
96673@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
96674 #ifdef CONFIG_IP_ROUTE_MULTIPATH
96675 fib_sync_up(dev);
96676 #endif
96677- atomic_inc(&net->ipv4.dev_addr_genid);
96678+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
96679 rt_cache_flush(net);
96680 break;
96681 case NETDEV_DOWN:
96682diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
96683index e63f47a..e5c531d 100644
96684--- a/net/ipv4/fib_semantics.c
96685+++ b/net/ipv4/fib_semantics.c
96686@@ -766,7 +766,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
96687 nh->nh_saddr = inet_select_addr(nh->nh_dev,
96688 nh->nh_gw,
96689 nh->nh_parent->fib_scope);
96690- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
96691+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
96692
96693 return nh->nh_saddr;
96694 }
96695diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
96696index fc0e649..febfa65 100644
96697--- a/net/ipv4/inet_connection_sock.c
96698+++ b/net/ipv4/inet_connection_sock.c
96699@@ -29,7 +29,7 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
96700 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
96701 #endif
96702
96703-unsigned long *sysctl_local_reserved_ports;
96704+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
96705 EXPORT_SYMBOL(sysctl_local_reserved_ports);
96706
96707 void inet_get_local_port_range(struct net *net, int *low, int *high)
96708diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
96709index 8b9cf27..0d8d592 100644
96710--- a/net/ipv4/inet_hashtables.c
96711+++ b/net/ipv4/inet_hashtables.c
96712@@ -18,6 +18,7 @@
96713 #include <linux/sched.h>
96714 #include <linux/slab.h>
96715 #include <linux/wait.h>
96716+#include <linux/security.h>
96717
96718 #include <net/inet_connection_sock.h>
96719 #include <net/inet_hashtables.h>
96720@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
96721 return inet_ehashfn(net, laddr, lport, faddr, fport);
96722 }
96723
96724+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
96725+
96726 /*
96727 * Allocate and initialize a new local port bind bucket.
96728 * The bindhash mutex for snum's hash chain must be held here.
96729@@ -554,6 +557,8 @@ ok:
96730 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
96731 spin_unlock(&head->lock);
96732
96733+ gr_update_task_in_ip_table(current, inet_sk(sk));
96734+
96735 if (tw) {
96736 inet_twsk_deschedule(tw, death_row);
96737 while (twrefcnt) {
96738diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
96739index 33d5537..da337a4 100644
96740--- a/net/ipv4/inetpeer.c
96741+++ b/net/ipv4/inetpeer.c
96742@@ -503,8 +503,8 @@ relookup:
96743 if (p) {
96744 p->daddr = *daddr;
96745 atomic_set(&p->refcnt, 1);
96746- atomic_set(&p->rid, 0);
96747- atomic_set(&p->ip_id_count,
96748+ atomic_set_unchecked(&p->rid, 0);
96749+ atomic_set_unchecked(&p->ip_id_count,
96750 (daddr->family == AF_INET) ?
96751 secure_ip_id(daddr->addr.a4) :
96752 secure_ipv6_id(daddr->addr.a6));
96753diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
96754index 2481993..2d9a7a7 100644
96755--- a/net/ipv4/ip_fragment.c
96756+++ b/net/ipv4/ip_fragment.c
96757@@ -283,7 +283,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
96758 return 0;
96759
96760 start = qp->rid;
96761- end = atomic_inc_return(&peer->rid);
96762+ end = atomic_inc_return_unchecked(&peer->rid);
96763 qp->rid = end;
96764
96765 rc = qp->q.fragments && (end - start) > max;
96766@@ -760,12 +760,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
96767
96768 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
96769 {
96770- struct ctl_table *table;
96771+ ctl_table_no_const *table = NULL;
96772 struct ctl_table_header *hdr;
96773
96774- table = ip4_frags_ns_ctl_table;
96775 if (!net_eq(net, &init_net)) {
96776- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
96777+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
96778 if (table == NULL)
96779 goto err_alloc;
96780
96781@@ -776,9 +775,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
96782 /* Don't export sysctls to unprivileged users */
96783 if (net->user_ns != &init_user_ns)
96784 table[0].procname = NULL;
96785- }
96786+ hdr = register_net_sysctl(net, "net/ipv4", table);
96787+ } else
96788+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
96789
96790- hdr = register_net_sysctl(net, "net/ipv4", table);
96791 if (hdr == NULL)
96792 goto err_reg;
96793
96794@@ -786,8 +786,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
96795 return 0;
96796
96797 err_reg:
96798- if (!net_eq(net, &init_net))
96799- kfree(table);
96800+ kfree(table);
96801 err_alloc:
96802 return -ENOMEM;
96803 }
96804diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
96805index d306360..1c1a1f1 100644
96806--- a/net/ipv4/ip_gre.c
96807+++ b/net/ipv4/ip_gre.c
96808@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
96809 module_param(log_ecn_error, bool, 0644);
96810 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
96811
96812-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
96813+static struct rtnl_link_ops ipgre_link_ops;
96814 static int ipgre_tunnel_init(struct net_device *dev);
96815
96816 static int ipgre_net_id __read_mostly;
96817@@ -732,7 +732,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
96818 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
96819 };
96820
96821-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
96822+static struct rtnl_link_ops ipgre_link_ops = {
96823 .kind = "gre",
96824 .maxtype = IFLA_GRE_MAX,
96825 .policy = ipgre_policy,
96826@@ -746,7 +746,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
96827 .fill_info = ipgre_fill_info,
96828 };
96829
96830-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
96831+static struct rtnl_link_ops ipgre_tap_ops = {
96832 .kind = "gretap",
96833 .maxtype = IFLA_GRE_MAX,
96834 .policy = ipgre_policy,
96835diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
96836index ddf32a6..3fdeea9 100644
96837--- a/net/ipv4/ip_sockglue.c
96838+++ b/net/ipv4/ip_sockglue.c
96839@@ -1172,7 +1172,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
96840 len = min_t(unsigned int, len, opt->optlen);
96841 if (put_user(len, optlen))
96842 return -EFAULT;
96843- if (copy_to_user(optval, opt->__data, len))
96844+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
96845+ copy_to_user(optval, opt->__data, len))
96846 return -EFAULT;
96847 return 0;
96848 }
96849@@ -1303,7 +1304,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
96850 if (sk->sk_type != SOCK_STREAM)
96851 return -ENOPROTOOPT;
96852
96853- msg.msg_control = optval;
96854+ msg.msg_control = (void __force_kernel *)optval;
96855 msg.msg_controllen = len;
96856 msg.msg_flags = flags;
96857
96858diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
96859index 52b802a..b725179 100644
96860--- a/net/ipv4/ip_vti.c
96861+++ b/net/ipv4/ip_vti.c
96862@@ -44,7 +44,7 @@
96863 #include <net/net_namespace.h>
96864 #include <net/netns/generic.h>
96865
96866-static struct rtnl_link_ops vti_link_ops __read_mostly;
96867+static struct rtnl_link_ops vti_link_ops;
96868
96869 static int vti_net_id __read_mostly;
96870 static int vti_tunnel_init(struct net_device *dev);
96871@@ -360,7 +360,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
96872 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
96873 };
96874
96875-static struct rtnl_link_ops vti_link_ops __read_mostly = {
96876+static struct rtnl_link_ops vti_link_ops = {
96877 .kind = "vti",
96878 .maxtype = IFLA_VTI_MAX,
96879 .policy = vti_policy,
96880diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
96881index efa1138..20dbba0 100644
96882--- a/net/ipv4/ipconfig.c
96883+++ b/net/ipv4/ipconfig.c
96884@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
96885
96886 mm_segment_t oldfs = get_fs();
96887 set_fs(get_ds());
96888- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
96889+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
96890 set_fs(oldfs);
96891 return res;
96892 }
96893@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
96894
96895 mm_segment_t oldfs = get_fs();
96896 set_fs(get_ds());
96897- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
96898+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
96899 set_fs(oldfs);
96900 return res;
96901 }
96902@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
96903
96904 mm_segment_t oldfs = get_fs();
96905 set_fs(get_ds());
96906- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
96907+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
96908 set_fs(oldfs);
96909 return res;
96910 }
96911diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
96912index fe3e9f7..4956990 100644
96913--- a/net/ipv4/ipip.c
96914+++ b/net/ipv4/ipip.c
96915@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
96916 static int ipip_net_id __read_mostly;
96917
96918 static int ipip_tunnel_init(struct net_device *dev);
96919-static struct rtnl_link_ops ipip_link_ops __read_mostly;
96920+static struct rtnl_link_ops ipip_link_ops;
96921
96922 static int ipip_err(struct sk_buff *skb, u32 info)
96923 {
96924@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
96925 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
96926 };
96927
96928-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
96929+static struct rtnl_link_ops ipip_link_ops = {
96930 .kind = "ipip",
96931 .maxtype = IFLA_IPTUN_MAX,
96932 .policy = ipip_policy,
96933diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
96934index 59da7cd..e318de1 100644
96935--- a/net/ipv4/netfilter/arp_tables.c
96936+++ b/net/ipv4/netfilter/arp_tables.c
96937@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
96938 #endif
96939
96940 static int get_info(struct net *net, void __user *user,
96941- const int *len, int compat)
96942+ int len, int compat)
96943 {
96944 char name[XT_TABLE_MAXNAMELEN];
96945 struct xt_table *t;
96946 int ret;
96947
96948- if (*len != sizeof(struct arpt_getinfo)) {
96949- duprintf("length %u != %Zu\n", *len,
96950+ if (len != sizeof(struct arpt_getinfo)) {
96951+ duprintf("length %u != %Zu\n", len,
96952 sizeof(struct arpt_getinfo));
96953 return -EINVAL;
96954 }
96955@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
96956 info.size = private->size;
96957 strcpy(info.name, name);
96958
96959- if (copy_to_user(user, &info, *len) != 0)
96960+ if (copy_to_user(user, &info, len) != 0)
96961 ret = -EFAULT;
96962 else
96963 ret = 0;
96964@@ -1688,7 +1688,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
96965
96966 switch (cmd) {
96967 case ARPT_SO_GET_INFO:
96968- ret = get_info(sock_net(sk), user, len, 1);
96969+ ret = get_info(sock_net(sk), user, *len, 1);
96970 break;
96971 case ARPT_SO_GET_ENTRIES:
96972 ret = compat_get_entries(sock_net(sk), user, len);
96973@@ -1733,7 +1733,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
96974
96975 switch (cmd) {
96976 case ARPT_SO_GET_INFO:
96977- ret = get_info(sock_net(sk), user, len, 0);
96978+ ret = get_info(sock_net(sk), user, *len, 0);
96979 break;
96980
96981 case ARPT_SO_GET_ENTRIES:
96982diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
96983index 718dfbd..cef4152 100644
96984--- a/net/ipv4/netfilter/ip_tables.c
96985+++ b/net/ipv4/netfilter/ip_tables.c
96986@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
96987 #endif
96988
96989 static int get_info(struct net *net, void __user *user,
96990- const int *len, int compat)
96991+ int len, int compat)
96992 {
96993 char name[XT_TABLE_MAXNAMELEN];
96994 struct xt_table *t;
96995 int ret;
96996
96997- if (*len != sizeof(struct ipt_getinfo)) {
96998- duprintf("length %u != %zu\n", *len,
96999+ if (len != sizeof(struct ipt_getinfo)) {
97000+ duprintf("length %u != %zu\n", len,
97001 sizeof(struct ipt_getinfo));
97002 return -EINVAL;
97003 }
97004@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
97005 info.size = private->size;
97006 strcpy(info.name, name);
97007
97008- if (copy_to_user(user, &info, *len) != 0)
97009+ if (copy_to_user(user, &info, len) != 0)
97010 ret = -EFAULT;
97011 else
97012 ret = 0;
97013@@ -1971,7 +1971,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
97014
97015 switch (cmd) {
97016 case IPT_SO_GET_INFO:
97017- ret = get_info(sock_net(sk), user, len, 1);
97018+ ret = get_info(sock_net(sk), user, *len, 1);
97019 break;
97020 case IPT_SO_GET_ENTRIES:
97021 ret = compat_get_entries(sock_net(sk), user, len);
97022@@ -2018,7 +2018,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
97023
97024 switch (cmd) {
97025 case IPT_SO_GET_INFO:
97026- ret = get_info(sock_net(sk), user, len, 0);
97027+ ret = get_info(sock_net(sk), user, *len, 0);
97028 break;
97029
97030 case IPT_SO_GET_ENTRIES:
97031diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
97032index 242e7f4..a084e95 100644
97033--- a/net/ipv4/ping.c
97034+++ b/net/ipv4/ping.c
97035@@ -55,7 +55,7 @@
97036
97037
97038 struct ping_table ping_table;
97039-struct pingv6_ops pingv6_ops;
97040+struct pingv6_ops *pingv6_ops;
97041 EXPORT_SYMBOL_GPL(pingv6_ops);
97042
97043 static u16 ping_port_rover;
97044@@ -334,7 +334,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
97045 return -ENODEV;
97046 }
97047 }
97048- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
97049+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
97050 scoped);
97051 rcu_read_unlock();
97052
97053@@ -542,7 +542,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
97054 }
97055 #if IS_ENABLED(CONFIG_IPV6)
97056 } else if (skb->protocol == htons(ETH_P_IPV6)) {
97057- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
97058+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
97059 #endif
97060 }
97061
97062@@ -560,7 +560,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
97063 info, (u8 *)icmph);
97064 #if IS_ENABLED(CONFIG_IPV6)
97065 } else if (family == AF_INET6) {
97066- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
97067+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
97068 info, (u8 *)icmph);
97069 #endif
97070 }
97071@@ -830,6 +830,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97072 {
97073 struct inet_sock *isk = inet_sk(sk);
97074 int family = sk->sk_family;
97075+ struct sockaddr_in *sin;
97076+ struct sockaddr_in6 *sin6;
97077 struct sk_buff *skb;
97078 int copied, err;
97079
97080@@ -839,12 +841,19 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97081 if (flags & MSG_OOB)
97082 goto out;
97083
97084+ if (addr_len) {
97085+ if (family == AF_INET)
97086+ *addr_len = sizeof(*sin);
97087+ else if (family == AF_INET6 && addr_len)
97088+ *addr_len = sizeof(*sin6);
97089+ }
97090+
97091 if (flags & MSG_ERRQUEUE) {
97092 if (family == AF_INET) {
97093 return ip_recv_error(sk, msg, len, addr_len);
97094 #if IS_ENABLED(CONFIG_IPV6)
97095 } else if (family == AF_INET6) {
97096- return pingv6_ops.ipv6_recv_error(sk, msg, len,
97097+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
97098 addr_len);
97099 #endif
97100 }
97101@@ -876,7 +885,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97102 sin->sin_port = 0 /* skb->h.uh->source */;
97103 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
97104 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
97105- *addr_len = sizeof(*sin);
97106 }
97107
97108 if (isk->cmsg_flags)
97109@@ -899,11 +907,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97110 sin6->sin6_scope_id =
97111 ipv6_iface_scope_id(&sin6->sin6_addr,
97112 IP6CB(skb)->iif);
97113- *addr_len = sizeof(*sin6);
97114 }
97115
97116 if (inet6_sk(sk)->rxopt.all)
97117- pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
97118+ pingv6_ops->ip6_datagram_recv_ctl(sk, msg, skb);
97119 #endif
97120 } else {
97121 BUG();
97122@@ -1093,7 +1100,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
97123 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
97124 0, sock_i_ino(sp),
97125 atomic_read(&sp->sk_refcnt), sp,
97126- atomic_read(&sp->sk_drops));
97127+ atomic_read_unchecked(&sp->sk_drops));
97128 }
97129
97130 static int ping_v4_seq_show(struct seq_file *seq, void *v)
97131diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
97132index 23c3e5b..cdb8b36 100644
97133--- a/net/ipv4/raw.c
97134+++ b/net/ipv4/raw.c
97135@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
97136 int raw_rcv(struct sock *sk, struct sk_buff *skb)
97137 {
97138 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
97139- atomic_inc(&sk->sk_drops);
97140+ atomic_inc_unchecked(&sk->sk_drops);
97141 kfree_skb(skb);
97142 return NET_RX_DROP;
97143 }
97144@@ -696,6 +696,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97145 if (flags & MSG_OOB)
97146 goto out;
97147
97148+ if (addr_len)
97149+ *addr_len = sizeof(*sin);
97150+
97151 if (flags & MSG_ERRQUEUE) {
97152 err = ip_recv_error(sk, msg, len, addr_len);
97153 goto out;
97154@@ -723,7 +726,6 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97155 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
97156 sin->sin_port = 0;
97157 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
97158- *addr_len = sizeof(*sin);
97159 }
97160 if (inet->cmsg_flags)
97161 ip_cmsg_recv(msg, skb);
97162@@ -748,16 +750,20 @@ static int raw_init(struct sock *sk)
97163
97164 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
97165 {
97166+ struct icmp_filter filter;
97167+
97168 if (optlen > sizeof(struct icmp_filter))
97169 optlen = sizeof(struct icmp_filter);
97170- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
97171+ if (copy_from_user(&filter, optval, optlen))
97172 return -EFAULT;
97173+ raw_sk(sk)->filter = filter;
97174 return 0;
97175 }
97176
97177 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
97178 {
97179 int len, ret = -EFAULT;
97180+ struct icmp_filter filter;
97181
97182 if (get_user(len, optlen))
97183 goto out;
97184@@ -767,8 +773,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
97185 if (len > sizeof(struct icmp_filter))
97186 len = sizeof(struct icmp_filter);
97187 ret = -EFAULT;
97188- if (put_user(len, optlen) ||
97189- copy_to_user(optval, &raw_sk(sk)->filter, len))
97190+ filter = raw_sk(sk)->filter;
97191+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
97192 goto out;
97193 ret = 0;
97194 out: return ret;
97195@@ -997,7 +1003,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
97196 0, 0L, 0,
97197 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
97198 0, sock_i_ino(sp),
97199- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
97200+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
97201 }
97202
97203 static int raw_seq_show(struct seq_file *seq, void *v)
97204diff --git a/net/ipv4/route.c b/net/ipv4/route.c
97205index f8da282..133a1c7 100644
97206--- a/net/ipv4/route.c
97207+++ b/net/ipv4/route.c
97208@@ -2621,34 +2621,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
97209 .maxlen = sizeof(int),
97210 .mode = 0200,
97211 .proc_handler = ipv4_sysctl_rtcache_flush,
97212+ .extra1 = &init_net,
97213 },
97214 { },
97215 };
97216
97217 static __net_init int sysctl_route_net_init(struct net *net)
97218 {
97219- struct ctl_table *tbl;
97220+ ctl_table_no_const *tbl = NULL;
97221
97222- tbl = ipv4_route_flush_table;
97223 if (!net_eq(net, &init_net)) {
97224- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
97225+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
97226 if (tbl == NULL)
97227 goto err_dup;
97228
97229 /* Don't export sysctls to unprivileged users */
97230 if (net->user_ns != &init_user_ns)
97231 tbl[0].procname = NULL;
97232- }
97233- tbl[0].extra1 = net;
97234+ tbl[0].extra1 = net;
97235+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
97236+ } else
97237+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
97238
97239- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
97240 if (net->ipv4.route_hdr == NULL)
97241 goto err_reg;
97242 return 0;
97243
97244 err_reg:
97245- if (tbl != ipv4_route_flush_table)
97246- kfree(tbl);
97247+ kfree(tbl);
97248 err_dup:
97249 return -ENOMEM;
97250 }
97251@@ -2671,8 +2671,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
97252
97253 static __net_init int rt_genid_init(struct net *net)
97254 {
97255- atomic_set(&net->ipv4.rt_genid, 0);
97256- atomic_set(&net->fnhe_genid, 0);
97257+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
97258+ atomic_set_unchecked(&net->fnhe_genid, 0);
97259 get_random_bytes(&net->ipv4.dev_addr_genid,
97260 sizeof(net->ipv4.dev_addr_genid));
97261 return 0;
97262diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
97263index 3d69ec8..57207b4 100644
97264--- a/net/ipv4/sysctl_net_ipv4.c
97265+++ b/net/ipv4/sysctl_net_ipv4.c
97266@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
97267 container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
97268 int ret;
97269 int range[2];
97270- struct ctl_table tmp = {
97271+ ctl_table_no_const tmp = {
97272 .data = &range,
97273 .maxlen = sizeof(range),
97274 .mode = table->mode,
97275@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
97276 int ret;
97277 gid_t urange[2];
97278 kgid_t low, high;
97279- struct ctl_table tmp = {
97280+ ctl_table_no_const tmp = {
97281 .data = &urange,
97282 .maxlen = sizeof(urange),
97283 .mode = table->mode,
97284@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
97285 void __user *buffer, size_t *lenp, loff_t *ppos)
97286 {
97287 char val[TCP_CA_NAME_MAX];
97288- struct ctl_table tbl = {
97289+ ctl_table_no_const tbl = {
97290 .data = val,
97291 .maxlen = TCP_CA_NAME_MAX,
97292 };
97293@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
97294 void __user *buffer, size_t *lenp,
97295 loff_t *ppos)
97296 {
97297- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
97298+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
97299 int ret;
97300
97301 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
97302@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
97303 void __user *buffer, size_t *lenp,
97304 loff_t *ppos)
97305 {
97306- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
97307+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
97308 int ret;
97309
97310 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
97311@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
97312 void __user *buffer, size_t *lenp,
97313 loff_t *ppos)
97314 {
97315- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
97316+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
97317 struct tcp_fastopen_context *ctxt;
97318 int ret;
97319 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
97320@@ -445,7 +445,7 @@ static struct ctl_table ipv4_table[] = {
97321 },
97322 {
97323 .procname = "ip_local_reserved_ports",
97324- .data = NULL, /* initialized in sysctl_ipv4_init */
97325+ .data = sysctl_local_reserved_ports,
97326 .maxlen = 65536,
97327 .mode = 0644,
97328 .proc_handler = proc_do_large_bitmap,
97329@@ -827,13 +827,12 @@ static struct ctl_table ipv4_net_table[] = {
97330
97331 static __net_init int ipv4_sysctl_init_net(struct net *net)
97332 {
97333- struct ctl_table *table;
97334+ ctl_table_no_const *table = NULL;
97335
97336- table = ipv4_net_table;
97337 if (!net_eq(net, &init_net)) {
97338 int i;
97339
97340- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
97341+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
97342 if (table == NULL)
97343 goto err_alloc;
97344
97345@@ -856,15 +855,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
97346 net->ipv4.sysctl_local_ports.range[0] = 32768;
97347 net->ipv4.sysctl_local_ports.range[1] = 61000;
97348
97349- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
97350+ if (!net_eq(net, &init_net))
97351+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
97352+ else
97353+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
97354 if (net->ipv4.ipv4_hdr == NULL)
97355 goto err_reg;
97356
97357 return 0;
97358
97359 err_reg:
97360- if (!net_eq(net, &init_net))
97361- kfree(table);
97362+ kfree(table);
97363 err_alloc:
97364 return -ENOMEM;
97365 }
97366@@ -886,16 +887,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
97367 static __init int sysctl_ipv4_init(void)
97368 {
97369 struct ctl_table_header *hdr;
97370- struct ctl_table *i;
97371-
97372- for (i = ipv4_table; i->procname; i++) {
97373- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
97374- i->data = sysctl_local_reserved_ports;
97375- break;
97376- }
97377- }
97378- if (!i->procname)
97379- return -EINVAL;
97380
97381 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
97382 if (hdr == NULL)
97383diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
97384index c53b7f3..a89aadd 100644
97385--- a/net/ipv4/tcp_input.c
97386+++ b/net/ipv4/tcp_input.c
97387@@ -759,7 +759,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
97388 * without any lock. We want to make sure compiler wont store
97389 * intermediate values in this location.
97390 */
97391- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
97392+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
97393 sk->sk_max_pacing_rate);
97394 }
97395
97396@@ -4482,7 +4482,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
97397 * simplifies code)
97398 */
97399 static void
97400-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
97401+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
97402 struct sk_buff *head, struct sk_buff *tail,
97403 u32 start, u32 end)
97404 {
97405@@ -5559,6 +5559,7 @@ discard:
97406 tcp_paws_reject(&tp->rx_opt, 0))
97407 goto discard_and_undo;
97408
97409+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
97410 if (th->syn) {
97411 /* We see SYN without ACK. It is attempt of
97412 * simultaneous connect with crossed SYNs.
97413@@ -5609,6 +5610,7 @@ discard:
97414 goto discard;
97415 #endif
97416 }
97417+#endif
97418 /* "fifth, if neither of the SYN or RST bits is set then
97419 * drop the segment and return."
97420 */
97421@@ -5655,7 +5657,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
97422 goto discard;
97423
97424 if (th->syn) {
97425- if (th->fin)
97426+ if (th->fin || th->urg || th->psh)
97427 goto discard;
97428 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
97429 return 1;
97430diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
97431index 0672139..cacc17d 100644
97432--- a/net/ipv4/tcp_ipv4.c
97433+++ b/net/ipv4/tcp_ipv4.c
97434@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
97435 EXPORT_SYMBOL(sysctl_tcp_low_latency);
97436
97437
97438+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97439+extern int grsec_enable_blackhole;
97440+#endif
97441+
97442 #ifdef CONFIG_TCP_MD5SIG
97443 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
97444 __be32 daddr, __be32 saddr, const struct tcphdr *th);
97445@@ -1830,6 +1834,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
97446 return 0;
97447
97448 reset:
97449+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97450+ if (!grsec_enable_blackhole)
97451+#endif
97452 tcp_v4_send_reset(rsk, skb);
97453 discard:
97454 kfree_skb(skb);
97455@@ -1975,12 +1982,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
97456 TCP_SKB_CB(skb)->sacked = 0;
97457
97458 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
97459- if (!sk)
97460+ if (!sk) {
97461+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97462+ ret = 1;
97463+#endif
97464 goto no_tcp_socket;
97465-
97466+ }
97467 process:
97468- if (sk->sk_state == TCP_TIME_WAIT)
97469+ if (sk->sk_state == TCP_TIME_WAIT) {
97470+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97471+ ret = 2;
97472+#endif
97473 goto do_time_wait;
97474+ }
97475
97476 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
97477 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
97478@@ -2034,6 +2048,10 @@ csum_error:
97479 bad_packet:
97480 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
97481 } else {
97482+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97483+ if (!grsec_enable_blackhole || (ret == 1 &&
97484+ (skb->dev->flags & IFF_LOOPBACK)))
97485+#endif
97486 tcp_v4_send_reset(NULL, skb);
97487 }
97488
97489diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
97490index 97b6841..0893357 100644
97491--- a/net/ipv4/tcp_minisocks.c
97492+++ b/net/ipv4/tcp_minisocks.c
97493@@ -27,6 +27,10 @@
97494 #include <net/inet_common.h>
97495 #include <net/xfrm.h>
97496
97497+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97498+extern int grsec_enable_blackhole;
97499+#endif
97500+
97501 int sysctl_tcp_syncookies __read_mostly = 1;
97502 EXPORT_SYMBOL(sysctl_tcp_syncookies);
97503
97504@@ -708,7 +712,10 @@ embryonic_reset:
97505 * avoid becoming vulnerable to outside attack aiming at
97506 * resetting legit local connections.
97507 */
97508- req->rsk_ops->send_reset(sk, skb);
97509+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97510+ if (!grsec_enable_blackhole)
97511+#endif
97512+ req->rsk_ops->send_reset(sk, skb);
97513 } else if (fastopen) { /* received a valid RST pkt */
97514 reqsk_fastopen_remove(sk, req, true);
97515 tcp_reset(sk);
97516diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
97517index 8b97d71..9d7ccf5 100644
97518--- a/net/ipv4/tcp_probe.c
97519+++ b/net/ipv4/tcp_probe.c
97520@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
97521 if (cnt + width >= len)
97522 break;
97523
97524- if (copy_to_user(buf + cnt, tbuf, width))
97525+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
97526 return -EFAULT;
97527 cnt += width;
97528 }
97529diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
97530index 64f0354..a81b39d 100644
97531--- a/net/ipv4/tcp_timer.c
97532+++ b/net/ipv4/tcp_timer.c
97533@@ -22,6 +22,10 @@
97534 #include <linux/gfp.h>
97535 #include <net/tcp.h>
97536
97537+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97538+extern int grsec_lastack_retries;
97539+#endif
97540+
97541 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
97542 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
97543 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
97544@@ -189,6 +193,13 @@ static int tcp_write_timeout(struct sock *sk)
97545 }
97546 }
97547
97548+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97549+ if ((sk->sk_state == TCP_LAST_ACK) &&
97550+ (grsec_lastack_retries > 0) &&
97551+ (grsec_lastack_retries < retry_until))
97552+ retry_until = grsec_lastack_retries;
97553+#endif
97554+
97555 if (retransmits_timed_out(sk, retry_until,
97556 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
97557 /* Has it gone just too far? */
97558diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
97559index a7e4729..2758946 100644
97560--- a/net/ipv4/udp.c
97561+++ b/net/ipv4/udp.c
97562@@ -87,6 +87,7 @@
97563 #include <linux/types.h>
97564 #include <linux/fcntl.h>
97565 #include <linux/module.h>
97566+#include <linux/security.h>
97567 #include <linux/socket.h>
97568 #include <linux/sockios.h>
97569 #include <linux/igmp.h>
97570@@ -113,6 +114,10 @@
97571 #include <net/busy_poll.h>
97572 #include "udp_impl.h"
97573
97574+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97575+extern int grsec_enable_blackhole;
97576+#endif
97577+
97578 struct udp_table udp_table __read_mostly;
97579 EXPORT_SYMBOL(udp_table);
97580
97581@@ -615,6 +620,9 @@ found:
97582 return s;
97583 }
97584
97585+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
97586+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
97587+
97588 /*
97589 * This routine is called by the ICMP module when it gets some
97590 * sort of error condition. If err < 0 then the socket should
97591@@ -914,9 +922,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97592 dport = usin->sin_port;
97593 if (dport == 0)
97594 return -EINVAL;
97595+
97596+ err = gr_search_udp_sendmsg(sk, usin);
97597+ if (err)
97598+ return err;
97599 } else {
97600 if (sk->sk_state != TCP_ESTABLISHED)
97601 return -EDESTADDRREQ;
97602+
97603+ err = gr_search_udp_sendmsg(sk, NULL);
97604+ if (err)
97605+ return err;
97606+
97607 daddr = inet->inet_daddr;
97608 dport = inet->inet_dport;
97609 /* Open fast path for connected socket.
97610@@ -1163,7 +1180,7 @@ static unsigned int first_packet_length(struct sock *sk)
97611 IS_UDPLITE(sk));
97612 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
97613 IS_UDPLITE(sk));
97614- atomic_inc(&sk->sk_drops);
97615+ atomic_inc_unchecked(&sk->sk_drops);
97616 __skb_unlink(skb, rcvq);
97617 __skb_queue_tail(&list_kill, skb);
97618 }
97619@@ -1234,6 +1251,12 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97620 int is_udplite = IS_UDPLITE(sk);
97621 bool slow;
97622
97623+ /*
97624+ * Check any passed addresses
97625+ */
97626+ if (addr_len)
97627+ *addr_len = sizeof(*sin);
97628+
97629 if (flags & MSG_ERRQUEUE)
97630 return ip_recv_error(sk, msg, len, addr_len);
97631
97632@@ -1243,6 +1266,10 @@ try_again:
97633 if (!skb)
97634 goto out;
97635
97636+ err = gr_search_udp_recvmsg(sk, skb);
97637+ if (err)
97638+ goto out_free;
97639+
97640 ulen = skb->len - sizeof(struct udphdr);
97641 copied = len;
97642 if (copied > ulen)
97643@@ -1276,7 +1303,7 @@ try_again:
97644 if (unlikely(err)) {
97645 trace_kfree_skb(skb, udp_recvmsg);
97646 if (!peeked) {
97647- atomic_inc(&sk->sk_drops);
97648+ atomic_inc_unchecked(&sk->sk_drops);
97649 UDP_INC_STATS_USER(sock_net(sk),
97650 UDP_MIB_INERRORS, is_udplite);
97651 }
97652@@ -1295,7 +1322,6 @@ try_again:
97653 sin->sin_port = udp_hdr(skb)->source;
97654 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
97655 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
97656- *addr_len = sizeof(*sin);
97657 }
97658 if (inet->cmsg_flags)
97659 ip_cmsg_recv(msg, skb);
97660@@ -1566,7 +1592,7 @@ csum_error:
97661 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
97662 drop:
97663 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
97664- atomic_inc(&sk->sk_drops);
97665+ atomic_inc_unchecked(&sk->sk_drops);
97666 kfree_skb(skb);
97667 return -1;
97668 }
97669@@ -1585,7 +1611,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
97670 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
97671
97672 if (!skb1) {
97673- atomic_inc(&sk->sk_drops);
97674+ atomic_inc_unchecked(&sk->sk_drops);
97675 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
97676 IS_UDPLITE(sk));
97677 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
97678@@ -1786,6 +1812,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
97679 goto csum_error;
97680
97681 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
97682+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
97683+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
97684+#endif
97685 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
97686
97687 /*
97688@@ -2350,7 +2379,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
97689 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
97690 0, sock_i_ino(sp),
97691 atomic_read(&sp->sk_refcnt), sp,
97692- atomic_read(&sp->sk_drops));
97693+ atomic_read_unchecked(&sp->sk_drops));
97694 }
97695
97696 int udp4_seq_show(struct seq_file *seq, void *v)
97697diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
97698index e1a6393..f634ce5 100644
97699--- a/net/ipv4/xfrm4_policy.c
97700+++ b/net/ipv4/xfrm4_policy.c
97701@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
97702 fl4->flowi4_tos = iph->tos;
97703 }
97704
97705-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
97706+static int xfrm4_garbage_collect(struct dst_ops *ops)
97707 {
97708 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
97709
97710- xfrm4_policy_afinfo.garbage_collect(net);
97711+ xfrm_garbage_collect_deferred(net);
97712 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
97713 }
97714
97715@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
97716
97717 static int __net_init xfrm4_net_init(struct net *net)
97718 {
97719- struct ctl_table *table;
97720+ ctl_table_no_const *table = NULL;
97721 struct ctl_table_header *hdr;
97722
97723- table = xfrm4_policy_table;
97724 if (!net_eq(net, &init_net)) {
97725- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
97726+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
97727 if (!table)
97728 goto err_alloc;
97729
97730 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
97731- }
97732-
97733- hdr = register_net_sysctl(net, "net/ipv4", table);
97734+ hdr = register_net_sysctl(net, "net/ipv4", table);
97735+ } else
97736+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
97737 if (!hdr)
97738 goto err_reg;
97739
97740@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
97741 return 0;
97742
97743 err_reg:
97744- if (!net_eq(net, &init_net))
97745- kfree(table);
97746+ kfree(table);
97747 err_alloc:
97748 return -ENOMEM;
97749 }
97750diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
97751index 4b6b720..272c0c5 100644
97752--- a/net/ipv6/addrconf.c
97753+++ b/net/ipv6/addrconf.c
97754@@ -589,7 +589,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
97755 idx = 0;
97756 head = &net->dev_index_head[h];
97757 rcu_read_lock();
97758- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
97759+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
97760 net->dev_base_seq;
97761 hlist_for_each_entry_rcu(dev, head, index_hlist) {
97762 if (idx < s_idx)
97763@@ -2334,7 +2334,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
97764 p.iph.ihl = 5;
97765 p.iph.protocol = IPPROTO_IPV6;
97766 p.iph.ttl = 64;
97767- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
97768+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
97769
97770 if (ops->ndo_do_ioctl) {
97771 mm_segment_t oldfs = get_fs();
97772@@ -3962,7 +3962,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
97773 s_ip_idx = ip_idx = cb->args[2];
97774
97775 rcu_read_lock();
97776- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
97777+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
97778 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
97779 idx = 0;
97780 head = &net->dev_index_head[h];
97781@@ -4569,7 +4569,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
97782 dst_free(&ifp->rt->dst);
97783 break;
97784 }
97785- atomic_inc(&net->ipv6.dev_addr_genid);
97786+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
97787 rt_genid_bump_ipv6(net);
97788 }
97789
97790@@ -4590,7 +4590,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
97791 int *valp = ctl->data;
97792 int val = *valp;
97793 loff_t pos = *ppos;
97794- struct ctl_table lctl;
97795+ ctl_table_no_const lctl;
97796 int ret;
97797
97798 /*
97799@@ -4675,7 +4675,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
97800 int *valp = ctl->data;
97801 int val = *valp;
97802 loff_t pos = *ppos;
97803- struct ctl_table lctl;
97804+ ctl_table_no_const lctl;
97805 int ret;
97806
97807 /*
97808diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
97809index 4fbdb70..f6411f2 100644
97810--- a/net/ipv6/af_inet6.c
97811+++ b/net/ipv6/af_inet6.c
97812@@ -776,7 +776,7 @@ static int __net_init inet6_net_init(struct net *net)
97813
97814 net->ipv6.sysctl.bindv6only = 0;
97815 net->ipv6.sysctl.icmpv6_time = 1*HZ;
97816- atomic_set(&net->ipv6.rt_genid, 0);
97817+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
97818
97819 err = ipv6_init_mibs(net);
97820 if (err)
97821diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
97822index 93b1aa3..e902855 100644
97823--- a/net/ipv6/datagram.c
97824+++ b/net/ipv6/datagram.c
97825@@ -906,5 +906,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
97826 0,
97827 sock_i_ino(sp),
97828 atomic_read(&sp->sk_refcnt), sp,
97829- atomic_read(&sp->sk_drops));
97830+ atomic_read_unchecked(&sp->sk_drops));
97831 }
97832diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
97833index eef8d94..cfa1852 100644
97834--- a/net/ipv6/icmp.c
97835+++ b/net/ipv6/icmp.c
97836@@ -997,7 +997,7 @@ struct ctl_table ipv6_icmp_table_template[] = {
97837
97838 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
97839 {
97840- struct ctl_table *table;
97841+ ctl_table_no_const *table;
97842
97843 table = kmemdup(ipv6_icmp_table_template,
97844 sizeof(ipv6_icmp_table_template),
97845diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
97846index 8acb286..840dd06 100644
97847--- a/net/ipv6/ip6_gre.c
97848+++ b/net/ipv6/ip6_gre.c
97849@@ -74,7 +74,7 @@ struct ip6gre_net {
97850 struct net_device *fb_tunnel_dev;
97851 };
97852
97853-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
97854+static struct rtnl_link_ops ip6gre_link_ops;
97855 static int ip6gre_tunnel_init(struct net_device *dev);
97856 static void ip6gre_tunnel_setup(struct net_device *dev);
97857 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
97858@@ -1294,7 +1294,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
97859 }
97860
97861
97862-static struct inet6_protocol ip6gre_protocol __read_mostly = {
97863+static struct inet6_protocol ip6gre_protocol = {
97864 .handler = ip6gre_rcv,
97865 .err_handler = ip6gre_err,
97866 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
97867@@ -1637,7 +1637,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
97868 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
97869 };
97870
97871-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
97872+static struct rtnl_link_ops ip6gre_link_ops = {
97873 .kind = "ip6gre",
97874 .maxtype = IFLA_GRE_MAX,
97875 .policy = ip6gre_policy,
97876@@ -1650,7 +1650,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
97877 .fill_info = ip6gre_fill_info,
97878 };
97879
97880-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
97881+static struct rtnl_link_ops ip6gre_tap_ops = {
97882 .kind = "ip6gretap",
97883 .maxtype = IFLA_GRE_MAX,
97884 .policy = ip6gre_policy,
97885diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
97886index 7881965..9cf62c4 100644
97887--- a/net/ipv6/ip6_tunnel.c
97888+++ b/net/ipv6/ip6_tunnel.c
97889@@ -89,7 +89,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
97890
97891 static int ip6_tnl_dev_init(struct net_device *dev);
97892 static void ip6_tnl_dev_setup(struct net_device *dev);
97893-static struct rtnl_link_ops ip6_link_ops __read_mostly;
97894+static struct rtnl_link_ops ip6_link_ops;
97895
97896 static int ip6_tnl_net_id __read_mostly;
97897 struct ip6_tnl_net {
97898@@ -1717,7 +1717,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
97899 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
97900 };
97901
97902-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
97903+static struct rtnl_link_ops ip6_link_ops = {
97904 .kind = "ip6tnl",
97905 .maxtype = IFLA_IPTUN_MAX,
97906 .policy = ip6_tnl_policy,
97907diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
97908index 7b42d5e..1eff693 100644
97909--- a/net/ipv6/ip6_vti.c
97910+++ b/net/ipv6/ip6_vti.c
97911@@ -63,7 +63,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
97912
97913 static int vti6_dev_init(struct net_device *dev);
97914 static void vti6_dev_setup(struct net_device *dev);
97915-static struct rtnl_link_ops vti6_link_ops __read_mostly;
97916+static struct rtnl_link_ops vti6_link_ops;
97917
97918 static int vti6_net_id __read_mostly;
97919 struct vti6_net {
97920@@ -902,7 +902,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
97921 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
97922 };
97923
97924-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
97925+static struct rtnl_link_ops vti6_link_ops = {
97926 .kind = "vti6",
97927 .maxtype = IFLA_VTI_MAX,
97928 .policy = vti6_policy,
97929diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
97930index 1c6ce31..299e566 100644
97931--- a/net/ipv6/ipv6_sockglue.c
97932+++ b/net/ipv6/ipv6_sockglue.c
97933@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
97934 if (sk->sk_type != SOCK_STREAM)
97935 return -ENOPROTOOPT;
97936
97937- msg.msg_control = optval;
97938+ msg.msg_control = (void __force_kernel *)optval;
97939 msg.msg_controllen = len;
97940 msg.msg_flags = flags;
97941
97942diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
97943index 710238f..0fd1816 100644
97944--- a/net/ipv6/netfilter/ip6_tables.c
97945+++ b/net/ipv6/netfilter/ip6_tables.c
97946@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
97947 #endif
97948
97949 static int get_info(struct net *net, void __user *user,
97950- const int *len, int compat)
97951+ int len, int compat)
97952 {
97953 char name[XT_TABLE_MAXNAMELEN];
97954 struct xt_table *t;
97955 int ret;
97956
97957- if (*len != sizeof(struct ip6t_getinfo)) {
97958- duprintf("length %u != %zu\n", *len,
97959+ if (len != sizeof(struct ip6t_getinfo)) {
97960+ duprintf("length %u != %zu\n", len,
97961 sizeof(struct ip6t_getinfo));
97962 return -EINVAL;
97963 }
97964@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
97965 info.size = private->size;
97966 strcpy(info.name, name);
97967
97968- if (copy_to_user(user, &info, *len) != 0)
97969+ if (copy_to_user(user, &info, len) != 0)
97970 ret = -EFAULT;
97971 else
97972 ret = 0;
97973@@ -1981,7 +1981,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
97974
97975 switch (cmd) {
97976 case IP6T_SO_GET_INFO:
97977- ret = get_info(sock_net(sk), user, len, 1);
97978+ ret = get_info(sock_net(sk), user, *len, 1);
97979 break;
97980 case IP6T_SO_GET_ENTRIES:
97981 ret = compat_get_entries(sock_net(sk), user, len);
97982@@ -2028,7 +2028,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
97983
97984 switch (cmd) {
97985 case IP6T_SO_GET_INFO:
97986- ret = get_info(sock_net(sk), user, len, 0);
97987+ ret = get_info(sock_net(sk), user, *len, 0);
97988 break;
97989
97990 case IP6T_SO_GET_ENTRIES:
97991diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
97992index 767ab8d..c5ec70a 100644
97993--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
97994+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
97995@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
97996
97997 static int nf_ct_frag6_sysctl_register(struct net *net)
97998 {
97999- struct ctl_table *table;
98000+ ctl_table_no_const *table = NULL;
98001 struct ctl_table_header *hdr;
98002
98003- table = nf_ct_frag6_sysctl_table;
98004 if (!net_eq(net, &init_net)) {
98005- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
98006+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
98007 GFP_KERNEL);
98008 if (table == NULL)
98009 goto err_alloc;
98010@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
98011 table[0].data = &net->nf_frag.frags.timeout;
98012 table[1].data = &net->nf_frag.frags.low_thresh;
98013 table[2].data = &net->nf_frag.frags.high_thresh;
98014- }
98015-
98016- hdr = register_net_sysctl(net, "net/netfilter", table);
98017+ hdr = register_net_sysctl(net, "net/netfilter", table);
98018+ } else
98019+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
98020 if (hdr == NULL)
98021 goto err_reg;
98022
98023@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
98024 return 0;
98025
98026 err_reg:
98027- if (!net_eq(net, &init_net))
98028- kfree(table);
98029+ kfree(table);
98030 err_alloc:
98031 return -ENOMEM;
98032 }
98033diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
98034index 827f795..7e28e82 100644
98035--- a/net/ipv6/output_core.c
98036+++ b/net/ipv6/output_core.c
98037@@ -9,8 +9,8 @@
98038
98039 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
98040 {
98041- static atomic_t ipv6_fragmentation_id;
98042- int old, new;
98043+ static atomic_unchecked_t ipv6_fragmentation_id;
98044+ int id;
98045
98046 #if IS_ENABLED(CONFIG_IPV6)
98047 if (rt && !(rt->dst.flags & DST_NOPEER)) {
98048@@ -26,13 +26,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
98049 }
98050 }
98051 #endif
98052- do {
98053- old = atomic_read(&ipv6_fragmentation_id);
98054- new = old + 1;
98055- if (!new)
98056- new = 1;
98057- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
98058- fhdr->identification = htonl(new);
98059+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
98060+ if (!id)
98061+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
98062+ fhdr->identification = htonl(id);
98063 }
98064 EXPORT_SYMBOL(ipv6_select_ident);
98065
98066diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
98067index a83243c..a1ca589 100644
98068--- a/net/ipv6/ping.c
98069+++ b/net/ipv6/ping.c
98070@@ -246,6 +246,22 @@ static struct pernet_operations ping_v6_net_ops = {
98071 };
98072 #endif
98073
98074+static struct pingv6_ops real_pingv6_ops = {
98075+ .ipv6_recv_error = ipv6_recv_error,
98076+ .ip6_datagram_recv_ctl = ip6_datagram_recv_ctl,
98077+ .icmpv6_err_convert = icmpv6_err_convert,
98078+ .ipv6_icmp_error = ipv6_icmp_error,
98079+ .ipv6_chk_addr = ipv6_chk_addr,
98080+};
98081+
98082+static struct pingv6_ops dummy_pingv6_ops = {
98083+ .ipv6_recv_error = dummy_ipv6_recv_error,
98084+ .ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl,
98085+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
98086+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
98087+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
98088+};
98089+
98090 int __init pingv6_init(void)
98091 {
98092 #ifdef CONFIG_PROC_FS
98093@@ -253,11 +269,7 @@ int __init pingv6_init(void)
98094 if (ret)
98095 return ret;
98096 #endif
98097- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
98098- pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
98099- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
98100- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
98101- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
98102+ pingv6_ops = &real_pingv6_ops;
98103 return inet6_register_protosw(&pingv6_protosw);
98104 }
98105
98106@@ -266,11 +278,7 @@ int __init pingv6_init(void)
98107 */
98108 void pingv6_exit(void)
98109 {
98110- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
98111- pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
98112- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
98113- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
98114- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
98115+ pingv6_ops = &dummy_pingv6_ops;
98116 #ifdef CONFIG_PROC_FS
98117 unregister_pernet_subsys(&ping_v6_net_ops);
98118 #endif
98119diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
98120index b6bb87e..06cc9ed 100644
98121--- a/net/ipv6/raw.c
98122+++ b/net/ipv6/raw.c
98123@@ -384,7 +384,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
98124 {
98125 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
98126 skb_checksum_complete(skb)) {
98127- atomic_inc(&sk->sk_drops);
98128+ atomic_inc_unchecked(&sk->sk_drops);
98129 kfree_skb(skb);
98130 return NET_RX_DROP;
98131 }
98132@@ -412,7 +412,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
98133 struct raw6_sock *rp = raw6_sk(sk);
98134
98135 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
98136- atomic_inc(&sk->sk_drops);
98137+ atomic_inc_unchecked(&sk->sk_drops);
98138 kfree_skb(skb);
98139 return NET_RX_DROP;
98140 }
98141@@ -436,7 +436,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
98142
98143 if (inet->hdrincl) {
98144 if (skb_checksum_complete(skb)) {
98145- atomic_inc(&sk->sk_drops);
98146+ atomic_inc_unchecked(&sk->sk_drops);
98147 kfree_skb(skb);
98148 return NET_RX_DROP;
98149 }
98150@@ -465,6 +465,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
98151 if (flags & MSG_OOB)
98152 return -EOPNOTSUPP;
98153
98154+ if (addr_len)
98155+ *addr_len=sizeof(*sin6);
98156+
98157 if (flags & MSG_ERRQUEUE)
98158 return ipv6_recv_error(sk, msg, len, addr_len);
98159
98160@@ -503,7 +506,6 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
98161 sin6->sin6_flowinfo = 0;
98162 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
98163 IP6CB(skb)->iif);
98164- *addr_len = sizeof(*sin6);
98165 }
98166
98167 sock_recv_ts_and_drops(msg, sk, skb);
98168@@ -606,7 +608,7 @@ out:
98169 return err;
98170 }
98171
98172-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
98173+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
98174 struct flowi6 *fl6, struct dst_entry **dstp,
98175 unsigned int flags)
98176 {
98177@@ -918,12 +920,15 @@ do_confirm:
98178 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
98179 char __user *optval, int optlen)
98180 {
98181+ struct icmp6_filter filter;
98182+
98183 switch (optname) {
98184 case ICMPV6_FILTER:
98185 if (optlen > sizeof(struct icmp6_filter))
98186 optlen = sizeof(struct icmp6_filter);
98187- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
98188+ if (copy_from_user(&filter, optval, optlen))
98189 return -EFAULT;
98190+ raw6_sk(sk)->filter = filter;
98191 return 0;
98192 default:
98193 return -ENOPROTOOPT;
98194@@ -936,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
98195 char __user *optval, int __user *optlen)
98196 {
98197 int len;
98198+ struct icmp6_filter filter;
98199
98200 switch (optname) {
98201 case ICMPV6_FILTER:
98202@@ -947,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
98203 len = sizeof(struct icmp6_filter);
98204 if (put_user(len, optlen))
98205 return -EFAULT;
98206- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
98207+ filter = raw6_sk(sk)->filter;
98208+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
98209 return -EFAULT;
98210 return 0;
98211 default:
98212diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
98213index cc85a9b..526a133 100644
98214--- a/net/ipv6/reassembly.c
98215+++ b/net/ipv6/reassembly.c
98216@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
98217
98218 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
98219 {
98220- struct ctl_table *table;
98221+ ctl_table_no_const *table = NULL;
98222 struct ctl_table_header *hdr;
98223
98224- table = ip6_frags_ns_ctl_table;
98225 if (!net_eq(net, &init_net)) {
98226- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
98227+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
98228 if (table == NULL)
98229 goto err_alloc;
98230
98231@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
98232 /* Don't export sysctls to unprivileged users */
98233 if (net->user_ns != &init_user_ns)
98234 table[0].procname = NULL;
98235- }
98236+ hdr = register_net_sysctl(net, "net/ipv6", table);
98237+ } else
98238+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
98239
98240- hdr = register_net_sysctl(net, "net/ipv6", table);
98241 if (hdr == NULL)
98242 goto err_reg;
98243
98244@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
98245 return 0;
98246
98247 err_reg:
98248- if (!net_eq(net, &init_net))
98249- kfree(table);
98250+ kfree(table);
98251 err_alloc:
98252 return -ENOMEM;
98253 }
98254diff --git a/net/ipv6/route.c b/net/ipv6/route.c
98255index 4b4944c..4580b91 100644
98256--- a/net/ipv6/route.c
98257+++ b/net/ipv6/route.c
98258@@ -2954,7 +2954,7 @@ struct ctl_table ipv6_route_table_template[] = {
98259
98260 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
98261 {
98262- struct ctl_table *table;
98263+ ctl_table_no_const *table;
98264
98265 table = kmemdup(ipv6_route_table_template,
98266 sizeof(ipv6_route_table_template),
98267diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
98268index d3005b3..b36df4a 100644
98269--- a/net/ipv6/sit.c
98270+++ b/net/ipv6/sit.c
98271@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
98272 static void ipip6_dev_free(struct net_device *dev);
98273 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
98274 __be32 *v4dst);
98275-static struct rtnl_link_ops sit_link_ops __read_mostly;
98276+static struct rtnl_link_ops sit_link_ops;
98277
98278 static int sit_net_id __read_mostly;
98279 struct sit_net {
98280@@ -1664,7 +1664,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
98281 unregister_netdevice_queue(dev, head);
98282 }
98283
98284-static struct rtnl_link_ops sit_link_ops __read_mostly = {
98285+static struct rtnl_link_ops sit_link_ops = {
98286 .kind = "sit",
98287 .maxtype = IFLA_IPTUN_MAX,
98288 .policy = ipip6_policy,
98289diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
98290index 107b2f1..72741a9 100644
98291--- a/net/ipv6/sysctl_net_ipv6.c
98292+++ b/net/ipv6/sysctl_net_ipv6.c
98293@@ -40,7 +40,7 @@ static struct ctl_table ipv6_rotable[] = {
98294
98295 static int __net_init ipv6_sysctl_net_init(struct net *net)
98296 {
98297- struct ctl_table *ipv6_table;
98298+ ctl_table_no_const *ipv6_table;
98299 struct ctl_table *ipv6_route_table;
98300 struct ctl_table *ipv6_icmp_table;
98301 int err;
98302diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
98303index f67033b..6f974fc 100644
98304--- a/net/ipv6/tcp_ipv6.c
98305+++ b/net/ipv6/tcp_ipv6.c
98306@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
98307 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
98308 }
98309
98310+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98311+extern int grsec_enable_blackhole;
98312+#endif
98313+
98314 static void tcp_v6_hash(struct sock *sk)
98315 {
98316 if (sk->sk_state != TCP_CLOSE) {
98317@@ -1397,6 +1401,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
98318 return 0;
98319
98320 reset:
98321+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98322+ if (!grsec_enable_blackhole)
98323+#endif
98324 tcp_v6_send_reset(sk, skb);
98325 discard:
98326 if (opt_skb)
98327@@ -1479,12 +1486,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
98328 TCP_SKB_CB(skb)->sacked = 0;
98329
98330 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
98331- if (!sk)
98332+ if (!sk) {
98333+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98334+ ret = 1;
98335+#endif
98336 goto no_tcp_socket;
98337+ }
98338
98339 process:
98340- if (sk->sk_state == TCP_TIME_WAIT)
98341+ if (sk->sk_state == TCP_TIME_WAIT) {
98342+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98343+ ret = 2;
98344+#endif
98345 goto do_time_wait;
98346+ }
98347
98348 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
98349 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
98350@@ -1536,6 +1551,10 @@ csum_error:
98351 bad_packet:
98352 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
98353 } else {
98354+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98355+ if (!grsec_enable_blackhole || (ret == 1 &&
98356+ (skb->dev->flags & IFF_LOOPBACK)))
98357+#endif
98358 tcp_v6_send_reset(NULL, skb);
98359 }
98360
98361diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
98362index 089c741..cfee117 100644
98363--- a/net/ipv6/udp.c
98364+++ b/net/ipv6/udp.c
98365@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
98366 udp_ipv6_hash_secret + net_hash_mix(net));
98367 }
98368
98369+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98370+extern int grsec_enable_blackhole;
98371+#endif
98372+
98373 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
98374 {
98375 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
98376@@ -392,6 +396,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
98377 int is_udp4;
98378 bool slow;
98379
98380+ if (addr_len)
98381+ *addr_len = sizeof(struct sockaddr_in6);
98382+
98383 if (flags & MSG_ERRQUEUE)
98384 return ipv6_recv_error(sk, msg, len, addr_len);
98385
98386@@ -435,7 +442,7 @@ try_again:
98387 if (unlikely(err)) {
98388 trace_kfree_skb(skb, udpv6_recvmsg);
98389 if (!peeked) {
98390- atomic_inc(&sk->sk_drops);
98391+ atomic_inc_unchecked(&sk->sk_drops);
98392 if (is_udp4)
98393 UDP_INC_STATS_USER(sock_net(sk),
98394 UDP_MIB_INERRORS,
98395@@ -477,7 +484,7 @@ try_again:
98396 ipv6_iface_scope_id(&sin6->sin6_addr,
98397 IP6CB(skb)->iif);
98398 }
98399- *addr_len = sizeof(*sin6);
98400+
98401 }
98402 if (is_udp4) {
98403 if (inet->cmsg_flags)
98404@@ -685,7 +692,7 @@ csum_error:
98405 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
98406 drop:
98407 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
98408- atomic_inc(&sk->sk_drops);
98409+ atomic_inc_unchecked(&sk->sk_drops);
98410 kfree_skb(skb);
98411 return -1;
98412 }
98413@@ -742,7 +749,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
98414 if (likely(skb1 == NULL))
98415 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
98416 if (!skb1) {
98417- atomic_inc(&sk->sk_drops);
98418+ atomic_inc_unchecked(&sk->sk_drops);
98419 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
98420 IS_UDPLITE(sk));
98421 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
98422@@ -881,6 +888,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
98423 goto csum_error;
98424
98425 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
98426+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98427+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
98428+#endif
98429 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
98430
98431 kfree_skb(skb);
98432diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
98433index 5f8e128..865d38e 100644
98434--- a/net/ipv6/xfrm6_policy.c
98435+++ b/net/ipv6/xfrm6_policy.c
98436@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
98437 }
98438 }
98439
98440-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
98441+static int xfrm6_garbage_collect(struct dst_ops *ops)
98442 {
98443 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
98444
98445- xfrm6_policy_afinfo.garbage_collect(net);
98446+ xfrm_garbage_collect_deferred(net);
98447 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
98448 }
98449
98450@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
98451
98452 static int __net_init xfrm6_net_init(struct net *net)
98453 {
98454- struct ctl_table *table;
98455+ ctl_table_no_const *table = NULL;
98456 struct ctl_table_header *hdr;
98457
98458- table = xfrm6_policy_table;
98459 if (!net_eq(net, &init_net)) {
98460- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
98461+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
98462 if (!table)
98463 goto err_alloc;
98464
98465 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
98466- }
98467+ hdr = register_net_sysctl(net, "net/ipv6", table);
98468+ } else
98469+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
98470
98471- hdr = register_net_sysctl(net, "net/ipv6", table);
98472 if (!hdr)
98473 goto err_reg;
98474
98475@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
98476 return 0;
98477
98478 err_reg:
98479- if (!net_eq(net, &init_net))
98480- kfree(table);
98481+ kfree(table);
98482 err_alloc:
98483 return -ENOMEM;
98484 }
98485diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
98486index 41ac7938..75e3bb1 100644
98487--- a/net/irda/ircomm/ircomm_tty.c
98488+++ b/net/irda/ircomm/ircomm_tty.c
98489@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
98490 add_wait_queue(&port->open_wait, &wait);
98491
98492 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
98493- __FILE__, __LINE__, tty->driver->name, port->count);
98494+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
98495
98496 spin_lock_irqsave(&port->lock, flags);
98497 if (!tty_hung_up_p(filp))
98498- port->count--;
98499+ atomic_dec(&port->count);
98500 port->blocked_open++;
98501 spin_unlock_irqrestore(&port->lock, flags);
98502
98503@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
98504 }
98505
98506 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
98507- __FILE__, __LINE__, tty->driver->name, port->count);
98508+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
98509
98510 schedule();
98511 }
98512@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
98513
98514 spin_lock_irqsave(&port->lock, flags);
98515 if (!tty_hung_up_p(filp))
98516- port->count++;
98517+ atomic_inc(&port->count);
98518 port->blocked_open--;
98519 spin_unlock_irqrestore(&port->lock, flags);
98520
98521 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
98522- __FILE__, __LINE__, tty->driver->name, port->count);
98523+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
98524
98525 if (!retval)
98526 port->flags |= ASYNC_NORMAL_ACTIVE;
98527@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
98528
98529 /* ++ is not atomic, so this should be protected - Jean II */
98530 spin_lock_irqsave(&self->port.lock, flags);
98531- self->port.count++;
98532+ atomic_inc(&self->port.count);
98533 spin_unlock_irqrestore(&self->port.lock, flags);
98534 tty_port_tty_set(&self->port, tty);
98535
98536 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
98537- self->line, self->port.count);
98538+ self->line, atomic_read(&self->port.count));
98539
98540 /* Not really used by us, but lets do it anyway */
98541 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
98542@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
98543 tty_kref_put(port->tty);
98544 }
98545 port->tty = NULL;
98546- port->count = 0;
98547+ atomic_set(&port->count, 0);
98548 spin_unlock_irqrestore(&port->lock, flags);
98549
98550 wake_up_interruptible(&port->open_wait);
98551@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
98552 seq_putc(m, '\n');
98553
98554 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
98555- seq_printf(m, "Open count: %d\n", self->port.count);
98556+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
98557 seq_printf(m, "Max data size: %d\n", self->max_data_size);
98558 seq_printf(m, "Max header size: %d\n", self->max_header_size);
98559
98560diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
98561index c4b7218..3e83259 100644
98562--- a/net/iucv/af_iucv.c
98563+++ b/net/iucv/af_iucv.c
98564@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
98565
98566 write_lock_bh(&iucv_sk_list.lock);
98567
98568- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
98569+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
98570 while (__iucv_get_sock_by_name(name)) {
98571 sprintf(name, "%08x",
98572- atomic_inc_return(&iucv_sk_list.autobind_name));
98573+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
98574 }
98575
98576 write_unlock_bh(&iucv_sk_list.lock);
98577diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
98578index cd5b8ec..f205e6b 100644
98579--- a/net/iucv/iucv.c
98580+++ b/net/iucv/iucv.c
98581@@ -690,7 +690,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
98582 return NOTIFY_OK;
98583 }
98584
98585-static struct notifier_block __refdata iucv_cpu_notifier = {
98586+static struct notifier_block iucv_cpu_notifier = {
98587 .notifier_call = iucv_cpu_notify,
98588 };
98589
98590diff --git a/net/key/af_key.c b/net/key/af_key.c
98591index 545f047..9757a9d 100644
98592--- a/net/key/af_key.c
98593+++ b/net/key/af_key.c
98594@@ -3041,10 +3041,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
98595 static u32 get_acqseq(void)
98596 {
98597 u32 res;
98598- static atomic_t acqseq;
98599+ static atomic_unchecked_t acqseq;
98600
98601 do {
98602- res = atomic_inc_return(&acqseq);
98603+ res = atomic_inc_return_unchecked(&acqseq);
98604 } while (!res);
98605 return res;
98606 }
98607diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
98608index da1a1ce..571db8d 100644
98609--- a/net/l2tp/l2tp_ip.c
98610+++ b/net/l2tp/l2tp_ip.c
98611@@ -518,6 +518,9 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
98612 if (flags & MSG_OOB)
98613 goto out;
98614
98615+ if (addr_len)
98616+ *addr_len = sizeof(*sin);
98617+
98618 skb = skb_recv_datagram(sk, flags, noblock, &err);
98619 if (!skb)
98620 goto out;
98621@@ -540,7 +543,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
98622 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
98623 sin->sin_port = 0;
98624 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
98625- *addr_len = sizeof(*sin);
98626 }
98627 if (inet->cmsg_flags)
98628 ip_cmsg_recv(msg, skb);
98629diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
98630index 364ce0c..3ebb5a4 100644
98631--- a/net/mac80211/cfg.c
98632+++ b/net/mac80211/cfg.c
98633@@ -826,7 +826,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
98634 ret = ieee80211_vif_use_channel(sdata, chandef,
98635 IEEE80211_CHANCTX_EXCLUSIVE);
98636 }
98637- } else if (local->open_count == local->monitors) {
98638+ } else if (local_read(&local->open_count) == local->monitors) {
98639 local->_oper_chandef = *chandef;
98640 ieee80211_hw_config(local, 0);
98641 }
98642@@ -3308,7 +3308,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
98643 else
98644 local->probe_req_reg--;
98645
98646- if (!local->open_count)
98647+ if (!local_read(&local->open_count))
98648 break;
98649
98650 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
98651@@ -3771,8 +3771,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
98652 if (chanctx_conf) {
98653 *chandef = chanctx_conf->def;
98654 ret = 0;
98655- } else if (local->open_count > 0 &&
98656- local->open_count == local->monitors &&
98657+ } else if (local_read(&local->open_count) > 0 &&
98658+ local_read(&local->open_count) == local->monitors &&
98659 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
98660 if (local->use_chanctx)
98661 *chandef = local->monitor_chandef;
98662diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
98663index 4aea4e7..9e698d1 100644
98664--- a/net/mac80211/ieee80211_i.h
98665+++ b/net/mac80211/ieee80211_i.h
98666@@ -28,6 +28,7 @@
98667 #include <net/ieee80211_radiotap.h>
98668 #include <net/cfg80211.h>
98669 #include <net/mac80211.h>
98670+#include <asm/local.h>
98671 #include "key.h"
98672 #include "sta_info.h"
98673 #include "debug.h"
98674@@ -961,7 +962,7 @@ struct ieee80211_local {
98675 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
98676 spinlock_t queue_stop_reason_lock;
98677
98678- int open_count;
98679+ local_t open_count;
98680 int monitors, cooked_mntrs;
98681 /* number of interfaces with corresponding FIF_ flags */
98682 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
98683diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
98684index a075791..1d0027f 100644
98685--- a/net/mac80211/iface.c
98686+++ b/net/mac80211/iface.c
98687@@ -519,7 +519,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
98688 break;
98689 }
98690
98691- if (local->open_count == 0) {
98692+ if (local_read(&local->open_count) == 0) {
98693 res = drv_start(local);
98694 if (res)
98695 goto err_del_bss;
98696@@ -566,7 +566,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
98697 res = drv_add_interface(local, sdata);
98698 if (res)
98699 goto err_stop;
98700- } else if (local->monitors == 0 && local->open_count == 0) {
98701+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
98702 res = ieee80211_add_virtual_monitor(local);
98703 if (res)
98704 goto err_stop;
98705@@ -675,7 +675,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
98706 atomic_inc(&local->iff_promiscs);
98707
98708 if (coming_up)
98709- local->open_count++;
98710+ local_inc(&local->open_count);
98711
98712 if (hw_reconf_flags)
98713 ieee80211_hw_config(local, hw_reconf_flags);
98714@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
98715 err_del_interface:
98716 drv_remove_interface(local, sdata);
98717 err_stop:
98718- if (!local->open_count)
98719+ if (!local_read(&local->open_count))
98720 drv_stop(local);
98721 err_del_bss:
98722 sdata->bss = NULL;
98723@@ -856,7 +856,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
98724 }
98725
98726 if (going_down)
98727- local->open_count--;
98728+ local_dec(&local->open_count);
98729
98730 switch (sdata->vif.type) {
98731 case NL80211_IFTYPE_AP_VLAN:
98732@@ -923,7 +923,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
98733 }
98734 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
98735
98736- if (local->open_count == 0)
98737+ if (local_read(&local->open_count) == 0)
98738 ieee80211_clear_tx_pending(local);
98739
98740 /*
98741@@ -963,7 +963,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
98742
98743 ieee80211_recalc_ps(local, -1);
98744
98745- if (local->open_count == 0) {
98746+ if (local_read(&local->open_count) == 0) {
98747 ieee80211_stop_device(local);
98748
98749 /* no reconfiguring after stop! */
98750@@ -974,7 +974,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
98751 ieee80211_configure_filter(local);
98752 ieee80211_hw_config(local, hw_reconf_flags);
98753
98754- if (local->monitors == local->open_count)
98755+ if (local->monitors == local_read(&local->open_count))
98756 ieee80211_add_virtual_monitor(local);
98757 }
98758
98759diff --git a/net/mac80211/main.c b/net/mac80211/main.c
98760index 7d1c3ac..b62dd29 100644
98761--- a/net/mac80211/main.c
98762+++ b/net/mac80211/main.c
98763@@ -172,7 +172,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
98764 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
98765 IEEE80211_CONF_CHANGE_POWER);
98766
98767- if (changed && local->open_count) {
98768+ if (changed && local_read(&local->open_count)) {
98769 ret = drv_config(local, changed);
98770 /*
98771 * Goal:
98772diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
98773index 3401262..d5cd68d 100644
98774--- a/net/mac80211/pm.c
98775+++ b/net/mac80211/pm.c
98776@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
98777 struct ieee80211_sub_if_data *sdata;
98778 struct sta_info *sta;
98779
98780- if (!local->open_count)
98781+ if (!local_read(&local->open_count))
98782 goto suspend;
98783
98784 ieee80211_scan_cancel(local);
98785@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
98786 cancel_work_sync(&local->dynamic_ps_enable_work);
98787 del_timer_sync(&local->dynamic_ps_timer);
98788
98789- local->wowlan = wowlan && local->open_count;
98790+ local->wowlan = wowlan && local_read(&local->open_count);
98791 if (local->wowlan) {
98792 int err = drv_suspend(local, wowlan);
98793 if (err < 0) {
98794@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
98795 WARN_ON(!list_empty(&local->chanctx_list));
98796
98797 /* stop hardware - this must stop RX */
98798- if (local->open_count)
98799+ if (local_read(&local->open_count))
98800 ieee80211_stop_device(local);
98801
98802 suspend:
98803diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
98804index 22b223f..ab70070 100644
98805--- a/net/mac80211/rate.c
98806+++ b/net/mac80211/rate.c
98807@@ -734,7 +734,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
98808
98809 ASSERT_RTNL();
98810
98811- if (local->open_count)
98812+ if (local_read(&local->open_count))
98813 return -EBUSY;
98814
98815 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
98816diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
98817index 6ff1346..936ca9a 100644
98818--- a/net/mac80211/rc80211_pid_debugfs.c
98819+++ b/net/mac80211/rc80211_pid_debugfs.c
98820@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
98821
98822 spin_unlock_irqrestore(&events->lock, status);
98823
98824- if (copy_to_user(buf, pb, p))
98825+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
98826 return -EFAULT;
98827
98828 return p;
98829diff --git a/net/mac80211/util.c b/net/mac80211/util.c
98830index 9f9b9bd..d6fcf59 100644
98831--- a/net/mac80211/util.c
98832+++ b/net/mac80211/util.c
98833@@ -1474,7 +1474,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
98834 }
98835 #endif
98836 /* everything else happens only if HW was up & running */
98837- if (!local->open_count)
98838+ if (!local_read(&local->open_count))
98839 goto wake_up;
98840
98841 /*
98842@@ -1699,7 +1699,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
98843 local->in_reconfig = false;
98844 barrier();
98845
98846- if (local->monitors == local->open_count && local->monitors > 0)
98847+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
98848 ieee80211_add_virtual_monitor(local);
98849
98850 /*
98851diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
98852index c3398cd..98ad3b4 100644
98853--- a/net/netfilter/Kconfig
98854+++ b/net/netfilter/Kconfig
98855@@ -1002,6 +1002,16 @@ config NETFILTER_XT_MATCH_ESP
98856
98857 To compile it as a module, choose M here. If unsure, say N.
98858
98859+config NETFILTER_XT_MATCH_GRADM
98860+ tristate '"gradm" match support'
98861+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
98862+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
98863+ ---help---
98864+ The gradm match allows to match on grsecurity RBAC being enabled.
98865+ It is useful when iptables rules are applied early on bootup to
98866+ prevent connections to the machine (except from a trusted host)
98867+ while the RBAC system is disabled.
98868+
98869 config NETFILTER_XT_MATCH_HASHLIMIT
98870 tristate '"hashlimit" match support'
98871 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
98872diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
98873index 394483b..ed51f2d 100644
98874--- a/net/netfilter/Makefile
98875+++ b/net/netfilter/Makefile
98876@@ -130,6 +130,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
98877 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
98878 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
98879 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
98880+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
98881 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
98882 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
98883 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
98884diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
98885index bac7e01..1d7a31a 100644
98886--- a/net/netfilter/ipset/ip_set_core.c
98887+++ b/net/netfilter/ipset/ip_set_core.c
98888@@ -1950,7 +1950,7 @@ done:
98889 return ret;
98890 }
98891
98892-static struct nf_sockopt_ops so_set __read_mostly = {
98893+static struct nf_sockopt_ops so_set = {
98894 .pf = PF_INET,
98895 .get_optmin = SO_IP_SET,
98896 .get_optmax = SO_IP_SET + 1,
98897diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
98898index 4c8e5c0..5a79b4d 100644
98899--- a/net/netfilter/ipvs/ip_vs_conn.c
98900+++ b/net/netfilter/ipvs/ip_vs_conn.c
98901@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
98902 /* Increase the refcnt counter of the dest */
98903 ip_vs_dest_hold(dest);
98904
98905- conn_flags = atomic_read(&dest->conn_flags);
98906+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
98907 if (cp->protocol != IPPROTO_UDP)
98908 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
98909 flags = cp->flags;
98910@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
98911
98912 cp->control = NULL;
98913 atomic_set(&cp->n_control, 0);
98914- atomic_set(&cp->in_pkts, 0);
98915+ atomic_set_unchecked(&cp->in_pkts, 0);
98916
98917 cp->packet_xmit = NULL;
98918 cp->app = NULL;
98919@@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
98920
98921 /* Don't drop the entry if its number of incoming packets is not
98922 located in [0, 8] */
98923- i = atomic_read(&cp->in_pkts);
98924+ i = atomic_read_unchecked(&cp->in_pkts);
98925 if (i > 8 || i < 0) return 0;
98926
98927 if (!todrop_rate[i]) return 0;
98928diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
98929index 4f26ee4..6a9d7c3 100644
98930--- a/net/netfilter/ipvs/ip_vs_core.c
98931+++ b/net/netfilter/ipvs/ip_vs_core.c
98932@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
98933 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
98934 /* do not touch skb anymore */
98935
98936- atomic_inc(&cp->in_pkts);
98937+ atomic_inc_unchecked(&cp->in_pkts);
98938 ip_vs_conn_put(cp);
98939 return ret;
98940 }
98941@@ -1706,7 +1706,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
98942 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
98943 pkts = sysctl_sync_threshold(ipvs);
98944 else
98945- pkts = atomic_add_return(1, &cp->in_pkts);
98946+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
98947
98948 if (ipvs->sync_state & IP_VS_STATE_MASTER)
98949 ip_vs_sync_conn(net, cp, pkts);
98950diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
98951index 35be035..dad174b 100644
98952--- a/net/netfilter/ipvs/ip_vs_ctl.c
98953+++ b/net/netfilter/ipvs/ip_vs_ctl.c
98954@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
98955 */
98956 ip_vs_rs_hash(ipvs, dest);
98957 }
98958- atomic_set(&dest->conn_flags, conn_flags);
98959+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
98960
98961 /* bind the service */
98962 old_svc = rcu_dereference_protected(dest->svc, 1);
98963@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
98964 * align with netns init in ip_vs_control_net_init()
98965 */
98966
98967-static struct ctl_table vs_vars[] = {
98968+static ctl_table_no_const vs_vars[] __read_only = {
98969 {
98970 .procname = "amemthresh",
98971 .maxlen = sizeof(int),
98972@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
98973 " %-7s %-6d %-10d %-10d\n",
98974 &dest->addr.in6,
98975 ntohs(dest->port),
98976- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
98977+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
98978 atomic_read(&dest->weight),
98979 atomic_read(&dest->activeconns),
98980 atomic_read(&dest->inactconns));
98981@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
98982 "%-7s %-6d %-10d %-10d\n",
98983 ntohl(dest->addr.ip),
98984 ntohs(dest->port),
98985- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
98986+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
98987 atomic_read(&dest->weight),
98988 atomic_read(&dest->activeconns),
98989 atomic_read(&dest->inactconns));
98990@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
98991
98992 entry.addr = dest->addr.ip;
98993 entry.port = dest->port;
98994- entry.conn_flags = atomic_read(&dest->conn_flags);
98995+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
98996 entry.weight = atomic_read(&dest->weight);
98997 entry.u_threshold = dest->u_threshold;
98998 entry.l_threshold = dest->l_threshold;
98999@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
99000 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
99001 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
99002 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
99003- (atomic_read(&dest->conn_flags) &
99004+ (atomic_read_unchecked(&dest->conn_flags) &
99005 IP_VS_CONN_F_FWD_MASK)) ||
99006 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
99007 atomic_read(&dest->weight)) ||
99008@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
99009 {
99010 int idx;
99011 struct netns_ipvs *ipvs = net_ipvs(net);
99012- struct ctl_table *tbl;
99013+ ctl_table_no_const *tbl;
99014
99015 atomic_set(&ipvs->dropentry, 0);
99016 spin_lock_init(&ipvs->dropentry_lock);
99017diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
99018index ca056a3..9cf01ef 100644
99019--- a/net/netfilter/ipvs/ip_vs_lblc.c
99020+++ b/net/netfilter/ipvs/ip_vs_lblc.c
99021@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
99022 * IPVS LBLC sysctl table
99023 */
99024 #ifdef CONFIG_SYSCTL
99025-static struct ctl_table vs_vars_table[] = {
99026+static ctl_table_no_const vs_vars_table[] __read_only = {
99027 {
99028 .procname = "lblc_expiration",
99029 .data = NULL,
99030diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
99031index 3f21a2f..a112e85 100644
99032--- a/net/netfilter/ipvs/ip_vs_lblcr.c
99033+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
99034@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
99035 * IPVS LBLCR sysctl table
99036 */
99037
99038-static struct ctl_table vs_vars_table[] = {
99039+static ctl_table_no_const vs_vars_table[] __read_only = {
99040 {
99041 .procname = "lblcr_expiration",
99042 .data = NULL,
99043diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
99044index f63c238..1b87f8a 100644
99045--- a/net/netfilter/ipvs/ip_vs_sync.c
99046+++ b/net/netfilter/ipvs/ip_vs_sync.c
99047@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
99048 cp = cp->control;
99049 if (cp) {
99050 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
99051- pkts = atomic_add_return(1, &cp->in_pkts);
99052+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
99053 else
99054 pkts = sysctl_sync_threshold(ipvs);
99055 ip_vs_sync_conn(net, cp->control, pkts);
99056@@ -771,7 +771,7 @@ control:
99057 if (!cp)
99058 return;
99059 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
99060- pkts = atomic_add_return(1, &cp->in_pkts);
99061+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
99062 else
99063 pkts = sysctl_sync_threshold(ipvs);
99064 goto sloop;
99065@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
99066
99067 if (opt)
99068 memcpy(&cp->in_seq, opt, sizeof(*opt));
99069- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
99070+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
99071 cp->state = state;
99072 cp->old_state = cp->state;
99073 /*
99074diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
99075index c47444e..b0961c6 100644
99076--- a/net/netfilter/ipvs/ip_vs_xmit.c
99077+++ b/net/netfilter/ipvs/ip_vs_xmit.c
99078@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
99079 else
99080 rc = NF_ACCEPT;
99081 /* do not touch skb anymore */
99082- atomic_inc(&cp->in_pkts);
99083+ atomic_inc_unchecked(&cp->in_pkts);
99084 goto out;
99085 }
99086
99087@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
99088 else
99089 rc = NF_ACCEPT;
99090 /* do not touch skb anymore */
99091- atomic_inc(&cp->in_pkts);
99092+ atomic_inc_unchecked(&cp->in_pkts);
99093 goto out;
99094 }
99095
99096diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
99097index a4b5e2a..13b1de3 100644
99098--- a/net/netfilter/nf_conntrack_acct.c
99099+++ b/net/netfilter/nf_conntrack_acct.c
99100@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
99101 #ifdef CONFIG_SYSCTL
99102 static int nf_conntrack_acct_init_sysctl(struct net *net)
99103 {
99104- struct ctl_table *table;
99105+ ctl_table_no_const *table;
99106
99107 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
99108 GFP_KERNEL);
99109diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
99110index 43549eb..0bbeace 100644
99111--- a/net/netfilter/nf_conntrack_core.c
99112+++ b/net/netfilter/nf_conntrack_core.c
99113@@ -1605,6 +1605,10 @@ void nf_conntrack_init_end(void)
99114 #define DYING_NULLS_VAL ((1<<30)+1)
99115 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
99116
99117+#ifdef CONFIG_GRKERNSEC_HIDESYM
99118+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
99119+#endif
99120+
99121 int nf_conntrack_init_net(struct net *net)
99122 {
99123 int ret;
99124@@ -1619,7 +1623,11 @@ int nf_conntrack_init_net(struct net *net)
99125 goto err_stat;
99126 }
99127
99128+#ifdef CONFIG_GRKERNSEC_HIDESYM
99129+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
99130+#else
99131 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
99132+#endif
99133 if (!net->ct.slabname) {
99134 ret = -ENOMEM;
99135 goto err_slabname;
99136diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
99137index 1df1761..ce8b88a 100644
99138--- a/net/netfilter/nf_conntrack_ecache.c
99139+++ b/net/netfilter/nf_conntrack_ecache.c
99140@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
99141 #ifdef CONFIG_SYSCTL
99142 static int nf_conntrack_event_init_sysctl(struct net *net)
99143 {
99144- struct ctl_table *table;
99145+ ctl_table_no_const *table;
99146
99147 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
99148 GFP_KERNEL);
99149diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
99150index 974a2a4..52cc6ff 100644
99151--- a/net/netfilter/nf_conntrack_helper.c
99152+++ b/net/netfilter/nf_conntrack_helper.c
99153@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
99154
99155 static int nf_conntrack_helper_init_sysctl(struct net *net)
99156 {
99157- struct ctl_table *table;
99158+ ctl_table_no_const *table;
99159
99160 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
99161 GFP_KERNEL);
99162diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
99163index ce30041..3861b5d 100644
99164--- a/net/netfilter/nf_conntrack_proto.c
99165+++ b/net/netfilter/nf_conntrack_proto.c
99166@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
99167
99168 static void
99169 nf_ct_unregister_sysctl(struct ctl_table_header **header,
99170- struct ctl_table **table,
99171+ ctl_table_no_const **table,
99172 unsigned int users)
99173 {
99174 if (users > 0)
99175diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
99176index a99b6c3..cb372f9 100644
99177--- a/net/netfilter/nf_conntrack_proto_dccp.c
99178+++ b/net/netfilter/nf_conntrack_proto_dccp.c
99179@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
99180 const char *msg;
99181 u_int8_t state;
99182
99183- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
99184+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
99185 BUG_ON(dh == NULL);
99186
99187 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
99188@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
99189 out_invalid:
99190 if (LOG_INVALID(net, IPPROTO_DCCP))
99191 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
99192- NULL, msg);
99193+ NULL, "%s", msg);
99194 return false;
99195 }
99196
99197@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
99198 u_int8_t type, old_state, new_state;
99199 enum ct_dccp_roles role;
99200
99201- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
99202+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
99203 BUG_ON(dh == NULL);
99204 type = dh->dccph_type;
99205
99206@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
99207 unsigned int cscov;
99208 const char *msg;
99209
99210- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
99211+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
99212 if (dh == NULL) {
99213 msg = "nf_ct_dccp: short packet ";
99214 goto out_invalid;
99215@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
99216
99217 out_invalid:
99218 if (LOG_INVALID(net, IPPROTO_DCCP))
99219- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
99220+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
99221 return -NF_ACCEPT;
99222 }
99223
99224diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
99225index f641751..d3c5b51 100644
99226--- a/net/netfilter/nf_conntrack_standalone.c
99227+++ b/net/netfilter/nf_conntrack_standalone.c
99228@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
99229
99230 static int nf_conntrack_standalone_init_sysctl(struct net *net)
99231 {
99232- struct ctl_table *table;
99233+ ctl_table_no_const *table;
99234
99235 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
99236 GFP_KERNEL);
99237diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
99238index 7a394df..bd91a8a 100644
99239--- a/net/netfilter/nf_conntrack_timestamp.c
99240+++ b/net/netfilter/nf_conntrack_timestamp.c
99241@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
99242 #ifdef CONFIG_SYSCTL
99243 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
99244 {
99245- struct ctl_table *table;
99246+ ctl_table_no_const *table;
99247
99248 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
99249 GFP_KERNEL);
99250diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
99251index 85296d4..8becdec 100644
99252--- a/net/netfilter/nf_log.c
99253+++ b/net/netfilter/nf_log.c
99254@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
99255
99256 #ifdef CONFIG_SYSCTL
99257 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
99258-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
99259+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
99260
99261 static int nf_log_proc_dostring(struct ctl_table *table, int write,
99262 void __user *buffer, size_t *lenp, loff_t *ppos)
99263@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
99264 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
99265 mutex_unlock(&nf_log_mutex);
99266 } else {
99267+ ctl_table_no_const nf_log_table = *table;
99268+
99269 mutex_lock(&nf_log_mutex);
99270 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
99271 lockdep_is_held(&nf_log_mutex));
99272 if (!logger)
99273- table->data = "NONE";
99274+ nf_log_table.data = "NONE";
99275 else
99276- table->data = logger->name;
99277- r = proc_dostring(table, write, buffer, lenp, ppos);
99278+ nf_log_table.data = logger->name;
99279+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
99280 mutex_unlock(&nf_log_mutex);
99281 }
99282
99283diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
99284index f042ae5..30ea486 100644
99285--- a/net/netfilter/nf_sockopt.c
99286+++ b/net/netfilter/nf_sockopt.c
99287@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
99288 }
99289 }
99290
99291- list_add(&reg->list, &nf_sockopts);
99292+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
99293 out:
99294 mutex_unlock(&nf_sockopt_mutex);
99295 return ret;
99296@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
99297 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
99298 {
99299 mutex_lock(&nf_sockopt_mutex);
99300- list_del(&reg->list);
99301+ pax_list_del((struct list_head *)&reg->list);
99302 mutex_unlock(&nf_sockopt_mutex);
99303 }
99304 EXPORT_SYMBOL(nf_unregister_sockopt);
99305diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
99306index a155d19..726b0f2 100644
99307--- a/net/netfilter/nfnetlink_log.c
99308+++ b/net/netfilter/nfnetlink_log.c
99309@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
99310 struct nfnl_log_net {
99311 spinlock_t instances_lock;
99312 struct hlist_head instance_table[INSTANCE_BUCKETS];
99313- atomic_t global_seq;
99314+ atomic_unchecked_t global_seq;
99315 };
99316
99317 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
99318@@ -564,7 +564,7 @@ __build_packet_message(struct nfnl_log_net *log,
99319 /* global sequence number */
99320 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
99321 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
99322- htonl(atomic_inc_return(&log->global_seq))))
99323+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
99324 goto nla_put_failure;
99325
99326 if (data_len) {
99327diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
99328index da0c1f4..f79737a 100644
99329--- a/net/netfilter/nft_compat.c
99330+++ b/net/netfilter/nft_compat.c
99331@@ -216,7 +216,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
99332 /* We want to reuse existing compat_to_user */
99333 old_fs = get_fs();
99334 set_fs(KERNEL_DS);
99335- t->compat_to_user(out, in);
99336+ t->compat_to_user((void __force_user *)out, in);
99337 set_fs(old_fs);
99338 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
99339 kfree(out);
99340@@ -403,7 +403,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
99341 /* We want to reuse existing compat_to_user */
99342 old_fs = get_fs();
99343 set_fs(KERNEL_DS);
99344- m->compat_to_user(out, in);
99345+ m->compat_to_user((void __force_user *)out, in);
99346 set_fs(old_fs);
99347 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
99348 kfree(out);
99349diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
99350new file mode 100644
99351index 0000000..c566332
99352--- /dev/null
99353+++ b/net/netfilter/xt_gradm.c
99354@@ -0,0 +1,51 @@
99355+/*
99356+ * gradm match for netfilter
99357