diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index b89a739..e289b9b 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -2,9 +2,11 @@
 *.aux
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -14,6 +16,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -48,14 +51,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -69,9 +75,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -80,6 +88,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -92,32 +101,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -125,12 +142,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -145,14 +165,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -162,14 +182,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -185,6 +206,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -194,6 +217,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -203,7 +227,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -213,8 +242,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -224,6 +257,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -235,13 +269,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -249,9 +287,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b9e9bd8..bf49b92 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1033,6 +1033,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024
 
+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2018,6 +2022,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 noexec=on: enable non-executable mappings (default)
 noexec=off: disable non-executable mappings
 
+ nopcid [X86-64]
+ Disable PCID (Process-Context IDentifier) even if it
+ is supported by the processor.
+
 nosmap [X86]
 Disable SMAP (Supervisor Mode Access Prevention)
 even if it is supported by processor.
@@ -2285,6 +2293,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.
 
+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ 0/1 to disable/enable slab object sanitization (enabled by
+ default).
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
+ when the processor supports PCID.
+
 pcbit= [HW,ISDN]
 
 pcd. [PARIDE]
diff --git a/Makefile b/Makefile
index 9f214b4..8c9c622 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
 
 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
 
 # Decide whether to build built-in, modular, or both.
 # Normally, just do built-in.
@@ -311,9 +312,15 @@ endif
 # If the user is running make -s (silent mode), suppress echoing of
 # commands
 
+ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+ quiet=silent_
+endif
+else # make-3.8x
 ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
 quiet=silent_
 endif
+endif
 
 export quiet Q KBUILD_VERBOSE
 
@@ -417,8 +424,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets
 
 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount
 
@@ -579,6 +586,72 @@ else
 KBUILD_CFLAGS += -O2
 endif
 
+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 include $(srctree)/arch/$(SRCARCH)/Makefile
 
 ifdef CONFIG_READABLE_ASM
@@ -619,7 +692,7 @@ endif
 
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS += -g
-KBUILD_AFLAGS += -gdwarf-2
+KBUILD_AFLAGS += -Wa,--gdwarf-2
 endif
 
 ifdef CONFIG_DEBUG_INFO_REDUCED
@@ -754,7 +827,7 @@ export mod_sign_cmd
 
 
 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
 
 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -803,6 +876,8 @@ endif
 
 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 
 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -812,7 +887,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language
 
 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@
 
 define filechk_kernel.release
@@ -855,10 +930,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
 
 archprepare: archheaders archscripts prepare1 scripts_basic
 
+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.
 
 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0
 
 # Generate some files
@@ -966,6 +1044,8 @@ all: modules
 # using awk while concatenating to the final file.
 
 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -981,7 +1061,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
 
 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts
 
 # Target to install modules
 PHONY += modules_install
@@ -1047,7 +1127,8 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer
+ signing_key.x509.signer tools/gcc/size_overflow_hash.h \
+ tools/gcc/randomize_layout_seed.h
 
 # clean - Delete most, but leave enough to build external modules
 #
@@ -1087,6 +1168,7 @@ distclean: mrproper
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
 -o -name '.*.rej' \
+ -o -name '.*.rej' -o -name '*.so' \
 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
 -type f -print | xargs rm -f
 
@@ -1248,6 +1330,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
 
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1387,17 +1471,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif
 
-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1407,11 +1495,15 @@ endif
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 78b03ef..da28a51 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -292,6 +292,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))
 
+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
 #define smp_mb__before_atomic_inc() smp_mb()
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H
 
+#include <linux/const.h>
 
 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif
 
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
 #endif
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a0..b4fa3e7 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }
 
+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 
 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index d8f9b7e..f6222fa 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
 
 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
 
 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;
 
 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 1402fcc..0b1abd2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
 
 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
 info.flags = 0;
 info.length = len;
@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }
 
@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */
 
+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }
 
 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 
 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
 
 return addr;
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a0..b304fb4 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif
 
 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 47085a0..f975a53 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1830,7 +1830,7 @@ config ALIGNMENT_TRAP
 
 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
@@ -2102,6 +2102,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 bool "Kexec system call (EXPERIMENTAL)"
 depends on (!SMP || PM_SLEEP_SMP)
+ depends on !GRKERNSEC_KMEM
 help
 kexec is a system call that implements the ability to shutdown your
 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 62d2cb5..09d45e3 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }
 
 #ifdef __KERNEL__
 
+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return v->counter;
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}
 
 #if __LINUX_ARM_ARCH__ >= 6
 
@@ -44,6 +62,36 @@ static inline void atomic_add(int i, atomic_t *v)
 
 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -62,6 +110,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
 smp_mb();
 
 __asm__ __volatile__("@ atomic_add_return\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -83,6 +167,36 @@ static inline void atomic_sub(int i, atomic_t *v)
 
 prefetchw(&v->counter);
 __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " sub %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -101,11 +215,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 smp_mb();
 
 __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %1, %0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "Ir" (i)
 : "cc");
@@ -138,6 +266,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 return oldval;
 }
 
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -156,7 +306,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v) (void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_add_return(i, v);
+}
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -171,6 +331,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 return val;
 }
 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_sub_return(i, v);
+}
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -186,9 +350,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }
 
+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg(v, old, new);
+}
+
 #endif /* __LINUX_ARM_ARCH__ */
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -201,11 +374,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}
 
 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
@@ -221,6 +410,14 @@ typedef struct {
 long long counter;
 } atomic64_t;
 
+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }
 
 #ifdef CONFIG_ARM_LPAE
@@ -237,6 +434,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }
 
+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -245,6 +455,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -259,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }
 
+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 long long tmp;
@@ -273,6 +505,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 : "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ long long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
 #endif
 
 static inline void atomic64_add(long long i, atomic64_t *v)
@@ -284,6 +531,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %Q0, %Q0, %Q4\n"
+" adcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
+{
+ long long result;
+ unsigned long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %Q0, %Q0, %Q4\n"
 " adc %R0, %R0, %R4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -303,6 +581,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %Q0, %Q0, %Q4\n"
+" adcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
+{
+ long long result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %Q0, %Q0, %Q4\n"
 " adc %R0, %R0, %R4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -325,6 +641,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %Q0, %Q0, %Q4\n"
+" sbcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
+{
+ long long result;
+ unsigned long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" subs %Q0, %Q0, %Q4\n"
 " sbc %R0, %R0, %R4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -344,16 +691,29 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub_return\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %Q0, %Q0, %Q4\n"
-" sbc %R0, %R0, %R4\n"
+" sbcs %R0, %R0, %R4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (i)
 : "cc");
 
- smp_mb();
-
 return result;
 }
 
@@ -382,6 +742,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 return oldval;
 }
 
+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
+ long long new)
+{
+ long long oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
 long long result;
@@ -406,20 +791,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 long long result;
- unsigned long tmp;
+ u64 tmp;
 
 smp_mb();
 
 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, #1\n"
-" sbc %R0, %R0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %Q0, %Q1, #1\n"
+" sbcs %R0, %R1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %Q0, %Q1\n"
+" mov %R0, %R1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " teq %R0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -442,13 +841,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
+" adcs %R0, %R0, %R6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -461,10 +872,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 
 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H
 
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))
 
 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index ab91ebb..2c2afeb 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);
 
 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;
 
 /*
 * Select the calling method
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 6dcc164..b14d917 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+
 
 /*
 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index df2fbba..63fe3e1 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 
 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 #include <asm-generic/cmpxchg-local.h>
 
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif
 
 #define domain_val(dom,type) ((type) << (2*(dom)))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }
 
-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index f4b46d3..abc9b2b 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */
 
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif
 
 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
 
-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf59..7b94b8f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;
 
+ pax_open_userland();
+
 smp_mb();
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: ldrex %1, [%4]\n"
@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "cc", "memory");
 smp_mb();
 
+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;
 
+ pax_open_userland();
+
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: " TUSER(ldr) " %1, [%4]\n"
 " teq %1, %2\n"
@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 : "cc", "memory");
 
+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 return -EFAULT;
 
 pagefault_disable(); /* implies preempt_disable() */
+ pax_open_userland();
 
 switch (op) {
 case FUTEX_OP_SET:
@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 ret = -ENOSYS;
 }
 
+ pax_close_userland();
 pagefault_enable(); /* subsumes preempt_enable() */
 
 if (!ret) {
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 83eb2f7..ed77159 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,6 +4,6 @@
 /*
 * This is the "bare minimum". AIO seems to require this.
 */
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17
 
 #endif
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 9e614a1..3302cca 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -22,7 +22,7 @@ struct dma_ops {
 int (*residue)(unsigned int, dma_t *); /* optional */
 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
 const char *type;
-};
+} __do_const;
 
 struct dma_struct {
 void *addr; /* single DMA address */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141f..192dc01 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -27,13 +27,16 @@ struct map_desc {
 #define MT_MINICLEAN 6
 #define MT_LOW_VECTORS 7
 #define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
+#define MT_MEMORY_RWX 9
 #define MT_ROM 10
-#define MT_MEMORY_NONCACHED 11
+#define MT_MEMORY_NONCACHED_RX 11
 #define MT_MEMORY_DTCM 12
 #define MT_MEMORY_ITCM 13
 #define MT_MEMORY_SO 14
 #define MT_MEMORY_DMA_READY 15
+#define MT_MEMORY_RW 16
+#define MT_MEMORY_RX 17
+#define MT_MEMORY_NONCACHED_RW 18
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index f94784f..9a09a4a 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -35,7 +35,7 @@ struct outer_cache_fns {
 #endif
 void (*set_debug)(unsigned long);
 void (*resume)(void);
-};
+} __no_const;
 
 extern struct outer_cache_fns outer_cache;
 
1847diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1848index 4355f0e..cd9168e 100644
1849--- a/arch/arm/include/asm/page.h
1850+++ b/arch/arm/include/asm/page.h
1851@@ -23,6 +23,7 @@
1852
1853 #else
1854
1855+#include <linux/compiler.h>
1856 #include <asm/glue.h>
1857
1858 /*
1859@@ -114,7 +115,7 @@ struct cpu_user_fns {
1860 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1861 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1862 unsigned long vaddr, struct vm_area_struct *vma);
1863-};
1864+} __no_const;
1865
1866 #ifdef MULTI_USER
1867 extern struct cpu_user_fns cpu_user;
1868diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1869index 78a7793..e3dc06c 100644
1870--- a/arch/arm/include/asm/pgalloc.h
1871+++ b/arch/arm/include/asm/pgalloc.h
1872@@ -17,6 +17,7 @@
1873 #include <asm/processor.h>
1874 #include <asm/cacheflush.h>
1875 #include <asm/tlbflush.h>
1876+#include <asm/system_info.h>
1877
1878 #define check_pgt_cache() do { } while (0)
1879
1880@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1881 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1882 }
1883
1884+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1885+{
1886+ pud_populate(mm, pud, pmd);
1887+}
1888+
1889 #else /* !CONFIG_ARM_LPAE */
1890
1891 /*
1892@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1893 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1894 #define pmd_free(mm, pmd) do { } while (0)
1895 #define pud_populate(mm,pmd,pte) BUG()
1896+#define pud_populate_kernel(mm,pmd,pte) BUG()
1897
1898 #endif /* CONFIG_ARM_LPAE */
1899
1900@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1901 __free_page(pte);
1902 }
1903
1904+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1905+{
1906+#ifdef CONFIG_ARM_LPAE
1907+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1908+#else
1909+ if (addr & SECTION_SIZE)
1910+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1911+ else
1912+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1913+#endif
1914+ flush_pmd_entry(pmdp);
1915+}
1916+
1917 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1918 pmdval_t prot)
1919 {
1920@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1921 static inline void
1922 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1923 {
1924- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1925+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1926 }
1927 #define pmd_pgtable(pmd) pmd_page(pmd)
1928
1929diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1930index 5cfba15..f415e1a 100644
1931--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1932+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1933@@ -20,12 +20,15 @@
1934 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1935 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1936 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1937+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1938 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1939 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1940 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1941+
1942 /*
1943 * - section
1944 */
1945+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1946 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1947 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1948 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1949@@ -37,6 +40,7 @@
1950 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1951 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1952 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1953+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1954
1955 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1956 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1957@@ -66,6 +70,7 @@
1958 * - extended small page/tiny page
1959 */
1960 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1961+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1962 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1963 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1964 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1965diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1966index 86a659a..70e0120 100644
1967--- a/arch/arm/include/asm/pgtable-2level.h
1968+++ b/arch/arm/include/asm/pgtable-2level.h
1969@@ -126,6 +126,9 @@
1970 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1971 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1972
1973+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1974+#define L_PTE_PXN (_AT(pteval_t, 0))
1975+
1976 /*
1977 * These are the memory types, defined to be compatible with
1978 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1979diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1980index 626989f..9d67a33 100644
1981--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1982+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1983@@ -75,6 +75,7 @@
1984 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1985 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1986 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1987+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1988 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1989
1990 /*
1991diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1992index 1d15673..04d626a 100644
1993--- a/arch/arm/include/asm/pgtable-3level.h
1994+++ b/arch/arm/include/asm/pgtable-3level.h
1995@@ -82,6 +82,7 @@
1996 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1997 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1998 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1999+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2000 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2001 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
2002 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
2003@@ -95,6 +96,7 @@
2004 /*
2005 * To be used in assembly code with the upper page attributes.
2006 */
2007+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2008 #define L_PTE_XN_HIGH (1 << (54 - 32))
2009 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2010
2011diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2012index 1571d12..b8a9b43 100644
2013--- a/arch/arm/include/asm/pgtable.h
2014+++ b/arch/arm/include/asm/pgtable.h
2015@@ -33,6 +33,9 @@
2016 #include <asm/pgtable-2level.h>
2017 #endif
2018
2019+#define ktla_ktva(addr) (addr)
2020+#define ktva_ktla(addr) (addr)
2021+
2022 /*
2023 * Just any arbitrary offset to the start of the vmalloc VM area: the
2024 * current 8MB value just means that there will be a 8MB "hole" after the
2025@@ -48,6 +51,9 @@
2026 #define LIBRARY_TEXT_START 0x0c000000
2027
2028 #ifndef __ASSEMBLY__
2029+extern pteval_t __supported_pte_mask;
2030+extern pmdval_t __supported_pmd_mask;
2031+
2032 extern void __pte_error(const char *file, int line, pte_t);
2033 extern void __pmd_error(const char *file, int line, pmd_t);
2034 extern void __pgd_error(const char *file, int line, pgd_t);
2035@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2036 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2037 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2038
2039+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2040+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2041+
2042+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2043+#include <asm/domain.h>
2044+#include <linux/thread_info.h>
2045+#include <linux/preempt.h>
2046+
2047+static inline int test_domain(int domain, int domaintype)
2048+{
2049+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2050+}
2051+#endif
2052+
2053+#ifdef CONFIG_PAX_KERNEXEC
2054+static inline unsigned long pax_open_kernel(void) {
2055+#ifdef CONFIG_ARM_LPAE
2056+ /* TODO */
2057+#else
2058+ preempt_disable();
2059+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2060+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2061+#endif
2062+ return 0;
2063+}
2064+
2065+static inline unsigned long pax_close_kernel(void) {
2066+#ifdef CONFIG_ARM_LPAE
2067+ /* TODO */
2068+#else
2069+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2070+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2071+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2072+ preempt_enable_no_resched();
2073+#endif
2074+ return 0;
2075+}
2076+#else
2077+static inline unsigned long pax_open_kernel(void) { return 0; }
2078+static inline unsigned long pax_close_kernel(void) { return 0; }
2079+#endif
2080+
2081 /*
2082 * This is the lowest virtual address we can permit any user space
2083 * mapping to be mapped at. This is particularly important for
2084@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2085 /*
2086 * The pgprot_* and protection_map entries will be fixed up in runtime
2087 * to include the cachable and bufferable bits based on memory policy,
2088- * as well as any architecture dependent bits like global/ASID and SMP
2089- * shared mapping bits.
2090+ * as well as any architecture dependent bits like global/ASID, PXN,
2091+ * and SMP shared mapping bits.
2092 */
2093 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2094
2095@@ -260,7 +308,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2096 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2097 {
2098 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2099- L_PTE_NONE | L_PTE_VALID;
2100+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2101 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2102 return pte;
2103 }
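The pax_open_kernel()/pax_close_kernel() pair defined above is the KERNEXEC counterpart: kernel text and other normally read-only memory are shielded through DOMAIN_KERNEL, and a legitimate writer must switch that domain to DOMAIN_KERNEXEC for the duration of the write, with preemption disabled so the modified DACR cannot leak into another task. The pattern as this patch uses it later (fiq.c, patch.c, traps.c); 'slot' is a hypothetical target:

    /* a minimal usage sketch, not a new API: wrap any write to
     * write-protected kernel memory in the open/close pair */
    static void update_ro_word(unsigned int *slot, unsigned int insn)
    {
            pax_open_kernel();      /* DOMAIN_KERNEL -> DOMAIN_KERNEXEC, preempt off */
            *slot = insn;           /* the store now bypasses the read-only mapping */
            pax_close_kernel();     /* back to client domain, preempt on */
    }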
2104diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2105index c4ae171..ea0c0c2 100644
2106--- a/arch/arm/include/asm/psci.h
2107+++ b/arch/arm/include/asm/psci.h
2108@@ -29,7 +29,7 @@ struct psci_operations {
2109 int (*cpu_off)(struct psci_power_state state);
2110 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2111 int (*migrate)(unsigned long cpuid);
2112-};
2113+} __no_const;
2114
2115 extern struct psci_operations psci_ops;
2116 extern struct smp_operations psci_smp_ops;
2117diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2118index 22a3b9b..7f214ee 100644
2119--- a/arch/arm/include/asm/smp.h
2120+++ b/arch/arm/include/asm/smp.h
2121@@ -112,7 +112,7 @@ struct smp_operations {
2122 int (*cpu_disable)(unsigned int cpu);
2123 #endif
2124 #endif
2125-};
2126+} __no_const;
2127
2128 /*
2129 * set platform specific SMP operations
2130diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2131index 71a06b2..8bb9ae1 100644
2132--- a/arch/arm/include/asm/thread_info.h
2133+++ b/arch/arm/include/asm/thread_info.h
2134@@ -88,9 +88,9 @@ struct thread_info {
2135 .flags = 0, \
2136 .preempt_count = INIT_PREEMPT_COUNT, \
2137 .addr_limit = KERNEL_DS, \
2138- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2139- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2140- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2141+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2142+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2143+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2144 .restart_block = { \
2145 .fn = do_no_restart_syscall, \
2146 }, \
2147@@ -157,7 +157,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2148 #define TIF_SYSCALL_AUDIT 9
2149 #define TIF_SYSCALL_TRACEPOINT 10
2150 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2151-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2152+/* within 8 bits of TIF_SYSCALL_TRACE
2153+ * to meet flexible second operand requirements
2154+ */
2155+#define TIF_GRSEC_SETXID 12
2156+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2157 #define TIF_USING_IWMMXT 17
2158 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2159 #define TIF_RESTORE_SIGMASK 20
2160@@ -170,10 +174,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2161 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2162 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2163 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2164+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2165
2166 /* Checks for any syscall work in entry-common.S */
2167 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2168- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2169+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2170
2171 /*
2172 * Change these and you break ASM code in entry-common.S
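The "flexible second operand" comment above refers to the ARM data-processing immediate format: an immediate is an 8-bit value rotated right by an even amount, so a mask tested with a single tst/bic must fit in one such window. Keeping TIF_GRSEC_SETXID at bit 12 leaves _TIF_SYSCALL_WORK as bits 8..12 (0x1f00, i.e. 0x1f ror 24), still one instruction; the same constraint explains the pgtbl rewrite in head.S further down. A self-contained checker for the encoding rule:

    /* returns nonzero if 'imm' is encodable as an ARM data-processing
     * immediate: an 8-bit value rotated right by an even amount */
    static int arm_imm_encodable(unsigned int imm)
    {
            unsigned int rot;

            for (rot = 0; rot < 32; rot += 2) {
                    /* rotate left by rot == rotate right by (32 - rot) */
                    unsigned int v = (imm << rot) | (imm >> ((32 - rot) & 31));

                    if (!(v & ~0xffU))
                            return 1;
            }
            return 0;
    }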
2173diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2174index 72abdc5..35acac1 100644
2175--- a/arch/arm/include/asm/uaccess.h
2176+++ b/arch/arm/include/asm/uaccess.h
2177@@ -18,6 +18,7 @@
2178 #include <asm/domain.h>
2179 #include <asm/unified.h>
2180 #include <asm/compiler.h>
2181+#include <asm/pgtable.h>
2182
2183 #if __LINUX_ARM_ARCH__ < 6
2184 #include <asm-generic/uaccess-unaligned.h>
2185@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2186 static inline void set_fs(mm_segment_t fs)
2187 {
2188 current_thread_info()->addr_limit = fs;
2189- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2190+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2191 }
2192
2193 #define segment_eq(a,b) ((a) == (b))
2194
2195+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2196+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2197+
2198+static inline void pax_open_userland(void)
2199+{
2200+
2201+#ifdef CONFIG_PAX_MEMORY_UDEREF
2202+ if (segment_eq(get_fs(), USER_DS)) {
2203+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2204+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2205+ }
2206+#endif
2207+
2208+}
2209+
2210+static inline void pax_close_userland(void)
2211+{
2212+
2213+#ifdef CONFIG_PAX_MEMORY_UDEREF
2214+ if (segment_eq(get_fs(), USER_DS)) {
2215+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2216+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2217+ }
2218+#endif
2219+
2220+}
2221+
2222 #define __addr_ok(addr) ({ \
2223 unsigned long flag; \
2224 __asm__("cmp %2, %0; movlo %0, #0" \
2225@@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2226
2227 #define get_user(x,p) \
2228 ({ \
2229+ int __e; \
2230 might_fault(); \
2231- __get_user_check(x,p); \
2232+ pax_open_userland(); \
2233+ __e = __get_user_check(x,p); \
2234+ pax_close_userland(); \
2235+ __e; \
2236 })
2237
2238 extern int __put_user_1(void *, unsigned int);
2239@@ -195,8 +227,12 @@ extern int __put_user_8(void *, unsigned long long);
2240
2241 #define put_user(x,p) \
2242 ({ \
2243+ int __e; \
2244 might_fault(); \
2245- __put_user_check(x,p); \
2246+ pax_open_userland(); \
2247+ __e = __put_user_check(x,p); \
2248+ pax_close_userland(); \
2249+ __e; \
2250 })
2251
2252 #else /* CONFIG_MMU */
2253@@ -220,6 +256,7 @@ static inline void set_fs(mm_segment_t fs)
2254
2255 #endif /* CONFIG_MMU */
2256
2257+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2258 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2259
2260 #define user_addr_max() \
2261@@ -237,13 +274,17 @@ static inline void set_fs(mm_segment_t fs)
2262 #define __get_user(x,ptr) \
2263 ({ \
2264 long __gu_err = 0; \
2265+ pax_open_userland(); \
2266 __get_user_err((x),(ptr),__gu_err); \
2267+ pax_close_userland(); \
2268 __gu_err; \
2269 })
2270
2271 #define __get_user_error(x,ptr,err) \
2272 ({ \
2273+ pax_open_userland(); \
2274 __get_user_err((x),(ptr),err); \
2275+ pax_close_userland(); \
2276 (void) 0; \
2277 })
2278
2279@@ -319,13 +360,17 @@ do { \
2280 #define __put_user(x,ptr) \
2281 ({ \
2282 long __pu_err = 0; \
2283+ pax_open_userland(); \
2284 __put_user_err((x),(ptr),__pu_err); \
2285+ pax_close_userland(); \
2286 __pu_err; \
2287 })
2288
2289 #define __put_user_error(x,ptr,err) \
2290 ({ \
2291+ pax_open_userland(); \
2292 __put_user_err((x),(ptr),err); \
2293+ pax_close_userland(); \
2294 (void) 0; \
2295 })
2296
2297@@ -425,11 +470,44 @@ do { \
2298
2299
2300 #ifdef CONFIG_MMU
2301-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2302-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2303+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2304+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2305+
2306+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2307+{
2308+ unsigned long ret;
2309+
2310+ check_object_size(to, n, false);
2311+ pax_open_userland();
2312+ ret = ___copy_from_user(to, from, n);
2313+ pax_close_userland();
2314+ return ret;
2315+}
2316+
2317+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2318+{
2319+ unsigned long ret;
2320+
2321+ check_object_size(from, n, true);
2322+ pax_open_userland();
2323+ ret = ___copy_to_user(to, from, n);
2324+ pax_close_userland();
2325+ return ret;
2326+}
2327+
2328 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2329-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2330+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2331 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2332+
2333+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2334+{
2335+ unsigned long ret;
2336+ pax_open_userland();
2337+ ret = ___clear_user(addr, n);
2338+ pax_close_userland();
2339+ return ret;
2340+}
2341+
2342 #else
2343 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2344 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2345@@ -438,6 +516,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2346
2347 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2348 {
2349+ if ((long)n < 0)
2350+ return n;
2351+
2352 if (access_ok(VERIFY_READ, from, n))
2353 n = __copy_from_user(to, from, n);
2354 else /* security hole - plug it */
2355@@ -447,6 +528,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2356
2357 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2358 {
2359+ if ((long)n < 0)
2360+ return n;
2361+
2362 if (access_ok(VERIFY_WRITE, to, n))
2363 n = __copy_to_user(to, from, n);
2364 return n;
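The copy_to_user()/copy_from_user() changes add a cheap sanity check: a length whose sign bit is set can only come from an underflowed size computation, so it is rejected before access_ok() and the actual copy ever see it, and the full count is reported back as uncopied. An illustration of the failure mode the guard catches (hypothetical structure and helper):

    struct hdr { u32 type; u32 len; };

    /* if a caller passes len < sizeof(struct hdr), n wraps to a huge
     * unsigned value; viewed as a signed long it is negative, and the
     * (long)n < 0 guard added above makes copy_from_user() bail out */
    static unsigned long parse_payload(void *dst, const void __user *src,
                                       unsigned long len)
    {
            unsigned long n = len - sizeof(struct hdr);     /* may underflow */

            return copy_from_user(dst, src + sizeof(struct hdr), n);
    }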
2365diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2366index 5af0ed1..cea83883 100644
2367--- a/arch/arm/include/uapi/asm/ptrace.h
2368+++ b/arch/arm/include/uapi/asm/ptrace.h
2369@@ -92,7 +92,7 @@
2370 * ARMv7 groups of PSR bits
2371 */
2372 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2373-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2374+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2375 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2376 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2377
2378diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2379index 1f031dd..d9b5e4a 100644
2380--- a/arch/arm/kernel/armksyms.c
2381+++ b/arch/arm/kernel/armksyms.c
2382@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2383
2384 /* networking */
2385 EXPORT_SYMBOL(csum_partial);
2386-EXPORT_SYMBOL(csum_partial_copy_from_user);
2387+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2388 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2389 EXPORT_SYMBOL(__csum_ipv6_magic);
2390
2391@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2392 #ifdef CONFIG_MMU
2393 EXPORT_SYMBOL(copy_page);
2394
2395-EXPORT_SYMBOL(__copy_from_user);
2396-EXPORT_SYMBOL(__copy_to_user);
2397-EXPORT_SYMBOL(__clear_user);
2398+EXPORT_SYMBOL(___copy_from_user);
2399+EXPORT_SYMBOL(___copy_to_user);
2400+EXPORT_SYMBOL(___clear_user);
2401
2402 EXPORT_SYMBOL(__get_user_1);
2403 EXPORT_SYMBOL(__get_user_2);
2404diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2405index b3fb8c9..59cfab2 100644
2406--- a/arch/arm/kernel/entry-armv.S
2407+++ b/arch/arm/kernel/entry-armv.S
2408@@ -47,6 +47,87 @@
2409 9997:
2410 .endm
2411
2412+ .macro pax_enter_kernel
2413+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2414+ @ make aligned space for saved DACR
2415+ sub sp, sp, #8
2416+ @ save regs
2417+ stmdb sp!, {r1, r2}
2418+ @ read DACR from cpu_domain into r1
2419+ mov r2, sp
2420+ @ assume 8K pages, since we have to split the immediate in two
2421+ bic r2, r2, #(0x1fc0)
2422+ bic r2, r2, #(0x3f)
2423+ ldr r1, [r2, #TI_CPU_DOMAIN]
2424+ @ store old DACR on stack
2425+ str r1, [sp, #8]
2426+#ifdef CONFIG_PAX_KERNEXEC
2427+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2428+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2429+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2430+#endif
2431+#ifdef CONFIG_PAX_MEMORY_UDEREF
2432+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2433+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2434+#endif
2435+ @ write r1 to current_thread_info()->cpu_domain
2436+ str r1, [r2, #TI_CPU_DOMAIN]
2437+ @ write r1 to DACR
2438+ mcr p15, 0, r1, c3, c0, 0
2439+ @ instruction sync
2440+ instr_sync
2441+ @ restore regs
2442+ ldmia sp!, {r1, r2}
2443+#endif
2444+ .endm
2445+
2446+ .macro pax_open_userland
2447+#ifdef CONFIG_PAX_MEMORY_UDEREF
2448+ @ save regs
2449+ stmdb sp!, {r0, r1}
2450+ @ read DACR from cpu_domain into r1
2451+ mov r0, sp
2452+ @ assume 8K pages, since we have to split the immediate in two
2453+ bic r0, r0, #(0x1fc0)
2454+ bic r0, r0, #(0x3f)
2455+ ldr r1, [r0, #TI_CPU_DOMAIN]
2456+	@ set current DOMAIN_USER to DOMAIN_UDEREF (userland client)
2457+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2458+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2459+ @ write r1 to current_thread_info()->cpu_domain
2460+ str r1, [r0, #TI_CPU_DOMAIN]
2461+ @ write r1 to DACR
2462+ mcr p15, 0, r1, c3, c0, 0
2463+ @ instruction sync
2464+ instr_sync
2465+ @ restore regs
2466+ ldmia sp!, {r0, r1}
2467+#endif
2468+ .endm
2469+
2470+ .macro pax_close_userland
2471+#ifdef CONFIG_PAX_MEMORY_UDEREF
2472+ @ save regs
2473+ stmdb sp!, {r0, r1}
2474+ @ read DACR from cpu_domain into r1
2475+ mov r0, sp
2476+ @ assume 8K pages, since we have to split the immediate in two
2477+ bic r0, r0, #(0x1fc0)
2478+ bic r0, r0, #(0x3f)
2479+ ldr r1, [r0, #TI_CPU_DOMAIN]
2480+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2481+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2482+ @ write r1 to current_thread_info()->cpu_domain
2483+ str r1, [r0, #TI_CPU_DOMAIN]
2484+ @ write r1 to DACR
2485+ mcr p15, 0, r1, c3, c0, 0
2486+ @ instruction sync
2487+ instr_sync
2488+ @ restore regs
2489+ ldmia sp!, {r0, r1}
2490+#endif
2491+ .endm
2492+
2493 .macro pabt_helper
2494 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2495 #ifdef MULTI_PABORT
2496@@ -89,11 +170,15 @@
2497 * Invalid mode handlers
2498 */
2499 .macro inv_entry, reason
2500+
2501+ pax_enter_kernel
2502+
2503 sub sp, sp, #S_FRAME_SIZE
2504 ARM( stmib sp, {r1 - lr} )
2505 THUMB( stmia sp, {r0 - r12} )
2506 THUMB( str sp, [sp, #S_SP] )
2507 THUMB( str lr, [sp, #S_LR] )
2508+
2509 mov r1, #\reason
2510 .endm
2511
2512@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2513 .macro svc_entry, stack_hole=0
2514 UNWIND(.fnstart )
2515 UNWIND(.save {r0 - pc} )
2516+
2517+ pax_enter_kernel
2518+
2519 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2520+
2521 #ifdef CONFIG_THUMB2_KERNEL
2522 SPFIX( str r0, [sp] ) @ temporarily saved
2523 SPFIX( mov r0, sp )
2524@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2525 ldmia r0, {r3 - r5}
2526 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2527 mov r6, #-1 @ "" "" "" ""
2528+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2529+ @ offset sp by 8 as done in pax_enter_kernel
2530+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2531+#else
2532 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2533+#endif
2534 SPFIX( addeq r2, r2, #4 )
2535 str r3, [sp, #-4]! @ save the "real" r0 copied
2536 @ from the exception stack
2537@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2538 .macro usr_entry
2539 UNWIND(.fnstart )
2540 UNWIND(.cantunwind ) @ don't unwind the user space
2541+
2542+ pax_enter_kernel_user
2543+
2544 sub sp, sp, #S_FRAME_SIZE
2545 ARM( stmib sp, {r1 - r12} )
2546 THUMB( stmia sp, {r0 - r12} )
2547@@ -416,7 +513,9 @@ __und_usr:
2548 tst r3, #PSR_T_BIT @ Thumb mode?
2549 bne __und_usr_thumb
2550 sub r4, r2, #4 @ ARM instr at LR - 4
2551+ pax_open_userland
2552 1: ldrt r0, [r4]
2553+ pax_close_userland
2554 ARM_BE8(rev r0, r0) @ little endian instruction
2555
2556 @ r0 = 32-bit ARM instruction which caused the exception
2557@@ -450,10 +549,14 @@ __und_usr_thumb:
2558 */
2559 .arch armv6t2
2560 #endif
2561+ pax_open_userland
2562 2: ldrht r5, [r4]
2563+ pax_close_userland
2564 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2565 blo __und_usr_fault_16 @ 16bit undefined instruction
2566+ pax_open_userland
2567 3: ldrht r0, [r2]
2568+ pax_close_userland
2569 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2570 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2571 orr r0, r0, r5, lsl #16
2572@@ -482,7 +585,8 @@ ENDPROC(__und_usr)
2573 */
2574 .pushsection .fixup, "ax"
2575 .align 2
2576-4: mov pc, r9
2577+4: pax_close_userland
2578+ mov pc, r9
2579 .popsection
2580 .pushsection __ex_table,"a"
2581 .long 1b, 4b
2582@@ -692,7 +796,7 @@ ENTRY(__switch_to)
2583 THUMB( str lr, [ip], #4 )
2584 ldr r4, [r2, #TI_TP_VALUE]
2585 ldr r5, [r2, #TI_TP_VALUE + 4]
2586-#ifdef CONFIG_CPU_USE_DOMAINS
2587+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2588 ldr r6, [r2, #TI_CPU_DOMAIN]
2589 #endif
2590 switch_tls r1, r4, r5, r3, r7
2591@@ -701,7 +805,7 @@ ENTRY(__switch_to)
2592 ldr r8, =__stack_chk_guard
2593 ldr r7, [r7, #TSK_STACK_CANARY]
2594 #endif
2595-#ifdef CONFIG_CPU_USE_DOMAINS
2596+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2597 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2598 #endif
2599 mov r5, r0
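The "assume 8K pages" comments in the pax_* macros above are about locating struct thread_info without loading a 13-bit constant: clearing the low bits of sp with two BIC instructions (0x1fc0 then 0x3f, because 0x1fff is not a valid ARM immediate) rounds down to the base of the 8K kernel stack, where thread_info lives. The C equivalent, assuming THREAD_SIZE == 8192 as those comments state:

    /* mirrors what current_thread_info() does on ARM; the two-BIC
     * sequence in the assembly computes exactly this mask */
    #define TI_SKETCH_THREAD_SIZE 8192UL

    static inline struct thread_info *ti_from_sp(unsigned long sp)
    {
            return (struct thread_info *)(sp & ~(TI_SKETCH_THREAD_SIZE - 1));
    }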
2600diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2601index a2dcafd..1048b5a 100644
2602--- a/arch/arm/kernel/entry-common.S
2603+++ b/arch/arm/kernel/entry-common.S
2604@@ -10,18 +10,46 @@
2605
2606 #include <asm/unistd.h>
2607 #include <asm/ftrace.h>
2608+#include <asm/domain.h>
2609 #include <asm/unwind.h>
2610
2611+#include "entry-header.S"
2612+
2613 #ifdef CONFIG_NEED_RET_TO_USER
2614 #include <mach/entry-macro.S>
2615 #else
2616 .macro arch_ret_to_user, tmp1, tmp2
2617+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2618+ @ save regs
2619+ stmdb sp!, {r1, r2}
2620+ @ read DACR from cpu_domain into r1
2621+ mov r2, sp
2622+ @ assume 8K pages, since we have to split the immediate in two
2623+ bic r2, r2, #(0x1fc0)
2624+ bic r2, r2, #(0x3f)
2625+ ldr r1, [r2, #TI_CPU_DOMAIN]
2626+#ifdef CONFIG_PAX_KERNEXEC
2627+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2628+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2629+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2630+#endif
2631+#ifdef CONFIG_PAX_MEMORY_UDEREF
2632+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2633+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2635+#endif
2636+ @ write r1 to current_thread_info()->cpu_domain
2637+ str r1, [r2, #TI_CPU_DOMAIN]
2638+ @ write r1 to DACR
2639+ mcr p15, 0, r1, c3, c0, 0
2640+ @ instruction sync
2641+ instr_sync
2642+ @ restore regs
2643+ ldmia sp!, {r1, r2}
2644+#endif
2645 .endm
2646 #endif
2647
2648-#include "entry-header.S"
2649-
2650-
2651 .align 5
2652 /*
2653 * This is the fast syscall return path. We do as little as
2654@@ -411,6 +439,12 @@ ENTRY(vector_swi)
2655 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2656 #endif
2657
2658+ /*
2659+ * do this here to avoid a performance hit of wrapping the code above
2660+ * that directly dereferences userland to parse the SWI instruction
2661+ */
2662+ pax_enter_kernel_user
2663+
2664 adr tbl, sys_call_table @ load syscall table pointer
2665
2666 #if defined(CONFIG_OABI_COMPAT)
2667diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2668index 39f89fb..d612bd9 100644
2669--- a/arch/arm/kernel/entry-header.S
2670+++ b/arch/arm/kernel/entry-header.S
2671@@ -184,6 +184,60 @@
2672 msr cpsr_c, \rtemp @ switch back to the SVC mode
2673 .endm
2674
2675+ .macro pax_enter_kernel_user
2676+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2677+ @ save regs
2678+ stmdb sp!, {r0, r1}
2679+ @ read DACR from cpu_domain into r1
2680+ mov r0, sp
2681+ @ assume 8K pages, since we have to split the immediate in two
2682+ bic r0, r0, #(0x1fc0)
2683+ bic r0, r0, #(0x3f)
2684+ ldr r1, [r0, #TI_CPU_DOMAIN]
2685+#ifdef CONFIG_PAX_MEMORY_UDEREF
2686+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2687+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2688+#endif
2689+#ifdef CONFIG_PAX_KERNEXEC
2690+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2691+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2692+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2693+#endif
2694+ @ write r1 to current_thread_info()->cpu_domain
2695+ str r1, [r0, #TI_CPU_DOMAIN]
2696+ @ write r1 to DACR
2697+ mcr p15, 0, r1, c3, c0, 0
2698+ @ instruction sync
2699+ instr_sync
2700+ @ restore regs
2701+ ldmia sp!, {r0, r1}
2702+#endif
2703+ .endm
2704+
2705+ .macro pax_exit_kernel
2706+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2707+ @ save regs
2708+ stmdb sp!, {r0, r1}
2709+ @ read old DACR from stack into r1
2710+ ldr r1, [sp, #(8 + S_SP)]
2711+ sub r1, r1, #8
2712+ ldr r1, [r1]
2713+
2714+ @ write r1 to current_thread_info()->cpu_domain
2715+ mov r0, sp
2716+ @ assume 8K pages, since we have to split the immediate in two
2717+ bic r0, r0, #(0x1fc0)
2718+ bic r0, r0, #(0x3f)
2719+ str r1, [r0, #TI_CPU_DOMAIN]
2720+ @ write r1 to DACR
2721+ mcr p15, 0, r1, c3, c0, 0
2722+ @ instruction sync
2723+ instr_sync
2724+ @ restore regs
2725+ ldmia sp!, {r0, r1}
2726+#endif
2727+ .endm
2728+
2729 #ifndef CONFIG_THUMB2_KERNEL
2730 .macro svc_exit, rpsr, irq = 0
2731 .if \irq != 0
2732@@ -203,6 +257,9 @@
2733 blne trace_hardirqs_off
2734 #endif
2735 .endif
2736+
2737+ pax_exit_kernel
2738+
2739 msr spsr_cxsf, \rpsr
2740 #if defined(CONFIG_CPU_V6)
2741 ldr r0, [sp]
2742@@ -266,6 +323,9 @@
2743 blne trace_hardirqs_off
2744 #endif
2745 .endif
2746+
2747+ pax_exit_kernel
2748+
2749 ldr lr, [sp, #S_SP] @ top of the stack
2750 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2751 clrex @ clear the exclusive monitor
2752diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2753index 918875d..cd5fa27 100644
2754--- a/arch/arm/kernel/fiq.c
2755+++ b/arch/arm/kernel/fiq.c
2756@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2757 void *base = vectors_page;
2758 unsigned offset = FIQ_OFFSET;
2759
2760+ pax_open_kernel();
2761 memcpy(base + offset, start, length);
2762+ pax_close_kernel();
2763+
2764 if (!cache_is_vipt_nonaliasing())
2765 flush_icache_range((unsigned long)base + offset, offset +
2766 length);
2767diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2768index 32f317e..710ae07 100644
2769--- a/arch/arm/kernel/head.S
2770+++ b/arch/arm/kernel/head.S
2771@@ -52,7 +52,9 @@
2772 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2773
2774 .macro pgtbl, rd, phys
2775- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2776+ mov \rd, #TEXT_OFFSET
2777+ sub \rd, #PG_DIR_SIZE
2778+ add \rd, \rd, \phys
2779 .endm
2780
2781 /*
2782@@ -436,7 +438,7 @@ __enable_mmu:
2783 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2784 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2785 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2786- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2787+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2788 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2789 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2790 #endif
2791diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2792index 45e4781..8eac93d 100644
2793--- a/arch/arm/kernel/module.c
2794+++ b/arch/arm/kernel/module.c
2795@@ -38,12 +38,39 @@
2796 #endif
2797
2798 #ifdef CONFIG_MMU
2799-void *module_alloc(unsigned long size)
2800+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2801 {
2802+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2803+ return NULL;
2804 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2805- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2806+ GFP_KERNEL, prot, NUMA_NO_NODE,
2807 __builtin_return_address(0));
2808 }
2809+
2810+void *module_alloc(unsigned long size)
2811+{
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+ return __module_alloc(size, PAGE_KERNEL);
2815+#else
2816+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2817+#endif
2818+
2819+}
2820+
2821+#ifdef CONFIG_PAX_KERNEXEC
2822+void module_free_exec(struct module *mod, void *module_region)
2823+{
2824+ module_free(mod, module_region);
2825+}
2826+EXPORT_SYMBOL(module_free_exec);
2827+
2828+void *module_alloc_exec(unsigned long size)
2829+{
2830+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2831+}
2832+EXPORT_SYMBOL(module_alloc_exec);
2833+#endif
2834 #endif
2835
2836 int
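Under KERNEXEC the module loader stops handing out writable-and-executable memory in one allocation: module_alloc() now returns PAGE_KERNEL (writable, non-executable) mappings for module data, while the new module_alloc_exec() provides PAGE_KERNEL_EXEC for code, so no module mapping is ever W+X. A sketch of how a caller keeps the two apart (sizes are hypothetical parameters):

    /* with KERNEXEC, text and data come from different allocators;
     * without it, module_alloc() is already executable */
    static int alloc_module_regions(unsigned long text_size,
                                    unsigned long data_size,
                                    void **text, void **data)
    {
            *data = module_alloc(data_size);        /* RW, never executable */
    #ifdef CONFIG_PAX_KERNEXEC
            *text = module_alloc_exec(text_size);   /* RX mapping for code */
    #else
            *text = module_alloc(text_size);
    #endif
            return (*text && *data) ? 0 : -ENOMEM;
    }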
2837diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2838index 07314af..c46655c 100644
2839--- a/arch/arm/kernel/patch.c
2840+++ b/arch/arm/kernel/patch.c
2841@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2842 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2843 int size;
2844
2845+ pax_open_kernel();
2846 if (thumb2 && __opcode_is_thumb16(insn)) {
2847 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2848 size = sizeof(u16);
2849@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2850 *(u32 *)addr = insn;
2851 size = sizeof(u32);
2852 }
2853+ pax_close_kernel();
2854
2855 flush_icache_range((uintptr_t)(addr),
2856 (uintptr_t)(addr) + size);
2857diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2858index 92f7b15..7048500 100644
2859--- a/arch/arm/kernel/process.c
2860+++ b/arch/arm/kernel/process.c
2861@@ -217,6 +217,7 @@ void machine_power_off(void)
2862
2863 if (pm_power_off)
2864 pm_power_off();
2865+ BUG();
2866 }
2867
2868 /*
2869@@ -230,7 +231,7 @@ void machine_power_off(void)
2870 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2871 * to use. Implementing such co-ordination would be essentially impossible.
2872 */
2873-void machine_restart(char *cmd)
2874+__noreturn void machine_restart(char *cmd)
2875 {
2876 local_irq_disable();
2877 smp_send_stop();
2878@@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs)
2879
2880 show_regs_print_info(KERN_DEFAULT);
2881
2882- print_symbol("PC is at %s\n", instruction_pointer(regs));
2883- print_symbol("LR is at %s\n", regs->ARM_lr);
2884+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2885+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2886 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2887 "sp : %08lx ip : %08lx fp : %08lx\n",
2888 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2889@@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
2890 return 0;
2891 }
2892
2893-unsigned long arch_randomize_brk(struct mm_struct *mm)
2894-{
2895- unsigned long range_end = mm->brk + 0x02000000;
2896- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2897-}
2898-
2899 #ifdef CONFIG_MMU
2900 #ifdef CONFIG_KUSER_HELPERS
2901 /*
2902@@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
2903
2904 static int __init gate_vma_init(void)
2905 {
2906- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2907+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2908 return 0;
2909 }
2910 arch_initcall(gate_vma_init);
2911@@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
2912
2913 const char *arch_vma_name(struct vm_area_struct *vma)
2914 {
2915- return is_gate_vma(vma) ? "[vectors]" :
2916- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2917- "[sigpage]" : NULL;
2918+ return is_gate_vma(vma) ? "[vectors]" : NULL;
2919 }
2920
2921-static struct page *signal_page;
2922-extern struct page *get_signal_page(void);
2923-
2924 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2925 {
2926 struct mm_struct *mm = current->mm;
2927- unsigned long addr;
2928- int ret;
2929-
2930- if (!signal_page)
2931- signal_page = get_signal_page();
2932- if (!signal_page)
2933- return -ENOMEM;
2934
2935 down_write(&mm->mmap_sem);
2936- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2937- if (IS_ERR_VALUE(addr)) {
2938- ret = addr;
2939- goto up_fail;
2940- }
2941-
2942- ret = install_special_mapping(mm, addr, PAGE_SIZE,
2943- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2944- &signal_page);
2945-
2946- if (ret == 0)
2947- mm->context.sigpage = addr;
2948-
2949- up_fail:
2950+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2951 up_write(&mm->mmap_sem);
2952- return ret;
2953+ return 0;
2954 }
2955 #endif
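Two small hardening tweaks in process.c: machine_restart() is annotated __noreturn, and machine_power_off() gains a BUG() so that a failed or absent pm_power_off hook can never fall back into the caller with the machine still running. A minimal sketch of the same contract (the hook is a hypothetical stand-in for pm_power_off):

    /* a power-off path must not return; __noreturn documents that to
     * the compiler, BUG() enforces it at runtime */
    static __noreturn void power_off_or_die(void (*hook)(void))
    {
            if (hook)
                    hook();
            BUG();  /* hook failed or missing: stop rather than return */
    }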
2956diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2957index 4693188..4596c5e 100644
2958--- a/arch/arm/kernel/psci.c
2959+++ b/arch/arm/kernel/psci.c
2960@@ -24,7 +24,7 @@
2961 #include <asm/opcodes-virt.h>
2962 #include <asm/psci.h>
2963
2964-struct psci_operations psci_ops;
2965+struct psci_operations psci_ops __read_only;
2966
2967 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2968
2969diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2970index 0dd3b79..e018f64 100644
2971--- a/arch/arm/kernel/ptrace.c
2972+++ b/arch/arm/kernel/ptrace.c
2973@@ -929,10 +929,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2974 return current_thread_info()->syscall;
2975 }
2976
2977+#ifdef CONFIG_GRKERNSEC_SETXID
2978+extern void gr_delayed_cred_worker(void);
2979+#endif
2980+
2981 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2982 {
2983 current_thread_info()->syscall = scno;
2984
2985+#ifdef CONFIG_GRKERNSEC_SETXID
2986+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2987+ gr_delayed_cred_worker();
2988+#endif
2989+
2990 /* Do the secure computing check first; failures should be fast. */
2991 if (secure_computing(scno) == -1)
2992 return -1;
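TIF_GRSEC_SETXID closes a window in the setuid() semantics: a credential change must apply to every thread, so grsecurity marks the sibling threads and has each of them apply the change at its next syscall boundary, which is the test_and_clear_thread_flag()/gr_delayed_cred_worker() check inserted above. A hedged sketch of the marking side (not the actual grsecurity implementation, just the shape of it; proper tasklist locking is omitted):

    /* each marked thread picks the flag up in syscall_trace_enter()
     * above and runs gr_delayed_cred_worker() at a safe point */
    static void mark_siblings_for_setxid(struct task_struct *leader)
    {
            struct task_struct *t = leader;

            do {
                    set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
            } while_each_thread(leader, t);
    }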
2993diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2994index 987a7f5..ab0c397 100644
2995--- a/arch/arm/kernel/setup.c
2996+++ b/arch/arm/kernel/setup.c
2997@@ -100,21 +100,23 @@ EXPORT_SYMBOL(system_serial_high);
2998 unsigned int elf_hwcap __read_mostly;
2999 EXPORT_SYMBOL(elf_hwcap);
3000
3001+pteval_t __supported_pte_mask __read_only;
3002+pmdval_t __supported_pmd_mask __read_only;
3003
3004 #ifdef MULTI_CPU
3005-struct processor processor __read_mostly;
3006+struct processor processor __read_only;
3007 #endif
3008 #ifdef MULTI_TLB
3009-struct cpu_tlb_fns cpu_tlb __read_mostly;
3010+struct cpu_tlb_fns cpu_tlb __read_only;
3011 #endif
3012 #ifdef MULTI_USER
3013-struct cpu_user_fns cpu_user __read_mostly;
3014+struct cpu_user_fns cpu_user __read_only;
3015 #endif
3016 #ifdef MULTI_CACHE
3017-struct cpu_cache_fns cpu_cache __read_mostly;
3018+struct cpu_cache_fns cpu_cache __read_only;
3019 #endif
3020 #ifdef CONFIG_OUTER_CACHE
3021-struct outer_cache_fns outer_cache __read_mostly;
3022+struct outer_cache_fns outer_cache __read_only;
3023 EXPORT_SYMBOL(outer_cache);
3024 #endif
3025
3026@@ -247,9 +249,13 @@ static int __get_cpu_architecture(void)
3027 asm("mrc p15, 0, %0, c0, c1, 4"
3028 : "=r" (mmfr0));
3029 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3030- (mmfr0 & 0x000000f0) >= 0x00000030)
3031+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3032 cpu_arch = CPU_ARCH_ARMv7;
3033- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3034+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3035+ __supported_pte_mask |= L_PTE_PXN;
3036+ __supported_pmd_mask |= PMD_PXNTABLE;
3037+ }
3038+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3039 (mmfr0 & 0x000000f0) == 0x00000020)
3040 cpu_arch = CPU_ARCH_ARMv6;
3041 else
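The setup.c hunk gates the new PXN bits on hardware support: the VMSA field (bits [3:0]) of ID_MMFR0 reports 4 or 5 on VMSAv7 implementations that have PXN, and only then are L_PTE_PXN and PMD_PXNTABLE folded into the __supported_*_mask variables that pte_modify() and pmd_populate() consult. The probe in isolation:

    /* condensed from the __get_cpu_architecture() change above, which
     * additionally requires the CPU to have probed as ARMv7;
     * __supported_pte_mask/__supported_pmd_mask come from pgtable.h */
    static void __init probe_pxn(void)
    {
            unsigned int mmfr0;

            asm("mrc p15, 0, %0, c0, c1, 4" : "=r" (mmfr0));    /* ID_MMFR0 */
            if ((mmfr0 & 0xf) == 0x4 || (mmfr0 & 0xf) == 0x5) {
                    __supported_pte_mask |= L_PTE_PXN;
                    __supported_pmd_mask |= PMD_PXNTABLE;
            }
    }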
3042diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3043index 04d6388..5115238 100644
3044--- a/arch/arm/kernel/signal.c
3045+++ b/arch/arm/kernel/signal.c
3046@@ -23,8 +23,6 @@
3047
3048 extern const unsigned long sigreturn_codes[7];
3049
3050-static unsigned long signal_return_offset;
3051-
3052 #ifdef CONFIG_CRUNCH
3053 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3054 {
3055@@ -395,8 +393,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3056 * except when the MPU has protected the vectors
3057 * page from PL0
3058 */
3059- retcode = mm->context.sigpage + signal_return_offset +
3060- (idx << 2) + thumb;
3061+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3062 } else
3063 #endif
3064 {
3065@@ -600,33 +597,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3066 } while (thread_flags & _TIF_WORK_MASK);
3067 return 0;
3068 }
3069-
3070-struct page *get_signal_page(void)
3071-{
3072- unsigned long ptr;
3073- unsigned offset;
3074- struct page *page;
3075- void *addr;
3076-
3077- page = alloc_pages(GFP_KERNEL, 0);
3078-
3079- if (!page)
3080- return NULL;
3081-
3082- addr = page_address(page);
3083-
3084- /* Give the signal return code some randomness */
3085- offset = 0x200 + (get_random_int() & 0x7fc);
3086- signal_return_offset = offset;
3087-
3088- /*
3089- * Copy signal return handlers into the vector page, and
3090- * set sigreturn to be a pointer to these.
3091- */
3092- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3093-
3094- ptr = (unsigned long)addr + offset;
3095- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3096-
3097- return page;
3098-}
3099diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3100index dc894ab..f929a0d 100644
3101--- a/arch/arm/kernel/smp.c
3102+++ b/arch/arm/kernel/smp.c
3103@@ -73,7 +73,7 @@ enum ipi_msg_type {
3104
3105 static DECLARE_COMPLETION(cpu_running);
3106
3107-static struct smp_operations smp_ops;
3108+static struct smp_operations smp_ops __read_only;
3109
3110 void __init smp_set_ops(struct smp_operations *ops)
3111 {
3112diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3113index 4636d56..ce4ec3d 100644
3114--- a/arch/arm/kernel/traps.c
3115+++ b/arch/arm/kernel/traps.c
3116@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3117 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3118 {
3119 #ifdef CONFIG_KALLSYMS
3120- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3121+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3122 #else
3123 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3124 #endif
3125@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3126 static int die_owner = -1;
3127 static unsigned int die_nest_count;
3128
3129+extern void gr_handle_kernel_exploit(void);
3130+
3131 static unsigned long oops_begin(void)
3132 {
3133 int cpu;
3134@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3135 panic("Fatal exception in interrupt");
3136 if (panic_on_oops)
3137 panic("Fatal exception");
3138+
3139+ gr_handle_kernel_exploit();
3140+
3141 if (signr)
3142 do_exit(signr);
3143 }
3144@@ -642,7 +647,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3145 * The user helper at 0xffff0fe0 must be used instead.
3146 * (see entry-armv.S for details)
3147 */
3148+ pax_open_kernel();
3149 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3150+ pax_close_kernel();
3151 }
3152 return 0;
3153
3154@@ -899,7 +906,11 @@ void __init early_trap_init(void *vectors_base)
3155 kuser_init(vectors_base);
3156
3157 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3158- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3159+
3160+#ifndef CONFIG_PAX_MEMORY_UDEREF
3161+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3162+#endif
3163+
3164 #else /* ifndef CONFIG_CPU_V7M */
3165 /*
3166 * on V7-M there is no need to copy the vector table to a dedicated
3167diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3168index 7bcee5c..e2f3249 100644
3169--- a/arch/arm/kernel/vmlinux.lds.S
3170+++ b/arch/arm/kernel/vmlinux.lds.S
3171@@ -8,7 +8,11 @@
3172 #include <asm/thread_info.h>
3173 #include <asm/memory.h>
3174 #include <asm/page.h>
3175-
3176+
3177+#ifdef CONFIG_PAX_KERNEXEC
3178+#include <asm/pgtable.h>
3179+#endif
3180+
3181 #define PROC_INFO \
3182 . = ALIGN(4); \
3183 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3184@@ -34,7 +38,7 @@
3185 #endif
3186
3187 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3188- defined(CONFIG_GENERIC_BUG)
3189+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3190 #define ARM_EXIT_KEEP(x) x
3191 #define ARM_EXIT_DISCARD(x)
3192 #else
3193@@ -90,6 +94,11 @@ SECTIONS
3194 _text = .;
3195 HEAD_TEXT
3196 }
3197+
3198+#ifdef CONFIG_PAX_KERNEXEC
3199+ . = ALIGN(1<<SECTION_SHIFT);
3200+#endif
3201+
3202 .text : { /* Real text segment */
3203 _stext = .; /* Text and read-only data */
3204 __exception_text_start = .;
3205@@ -112,6 +121,8 @@ SECTIONS
3206 ARM_CPU_KEEP(PROC_INFO)
3207 }
3208
3209+ _etext = .; /* End of text section */
3210+
3211 RO_DATA(PAGE_SIZE)
3212
3213 . = ALIGN(4);
3214@@ -142,7 +153,9 @@ SECTIONS
3215
3216 NOTES
3217
3218- _etext = .; /* End of text and rodata section */
3219+#ifdef CONFIG_PAX_KERNEXEC
3220+ . = ALIGN(1<<SECTION_SHIFT);
3221+#endif
3222
3223 #ifndef CONFIG_XIP_KERNEL
3224 . = ALIGN(PAGE_SIZE);
3225@@ -220,6 +233,11 @@ SECTIONS
3226 . = PAGE_OFFSET + TEXT_OFFSET;
3227 #else
3228 __init_end = .;
3229+
3230+#ifdef CONFIG_PAX_KERNEXEC
3231+ . = ALIGN(1<<SECTION_SHIFT);
3232+#endif
3233+
3234 . = ALIGN(THREAD_SIZE);
3235 __data_loc = .;
3236 #endif
3237diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3238index 2a700e0..745b980 100644
3239--- a/arch/arm/kvm/arm.c
3240+++ b/arch/arm/kvm/arm.c
3241@@ -56,7 +56,7 @@ static unsigned long hyp_default_vectors;
3242 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3243
3244 /* The VMID used in the VTTBR */
3245-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3246+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3247 static u8 kvm_next_vmid;
3248 static DEFINE_SPINLOCK(kvm_vmid_lock);
3249
3250@@ -397,7 +397,7 @@ void force_vm_exit(const cpumask_t *mask)
3251 */
3252 static bool need_new_vmid_gen(struct kvm *kvm)
3253 {
3254- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3255+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3256 }
3257
3258 /**
3259@@ -430,7 +430,7 @@ static void update_vttbr(struct kvm *kvm)
3260
3261 /* First user of a new VMID generation? */
3262 if (unlikely(kvm_next_vmid == 0)) {
3263- atomic64_inc(&kvm_vmid_gen);
3264+ atomic64_inc_unchecked(&kvm_vmid_gen);
3265 kvm_next_vmid = 1;
3266
3267 /*
3268@@ -447,7 +447,7 @@ static void update_vttbr(struct kvm *kvm)
3269 kvm_call_hyp(__kvm_flush_vm_context);
3270 }
3271
3272- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3273+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3274 kvm->arch.vmid = kvm_next_vmid;
3275 kvm_next_vmid++;
3276
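Under PAX_REFCOUNT, plain atomic_t/atomic64_t arithmetic traps on overflow to stop reference-count wraps; counters that may legitimately wrap, like the KVM VMID generation above, are therefore converted to the *_unchecked variants. A sketch, assuming the _unchecked API this patch introduces elsewhere:

    /* a deliberately wrapping generation counter opts out of the
     * REFCOUNT overflow trap by using the _unchecked flavour */
    static atomic64_unchecked_t gen_ctr = ATOMIC64_INIT(0);

    static u64 next_generation(void)
    {
            return atomic64_inc_return_unchecked(&gen_ctr);  /* may wrap, by design */
    }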
3277diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3278index 14a0d98..7771a7d 100644
3279--- a/arch/arm/lib/clear_user.S
3280+++ b/arch/arm/lib/clear_user.S
3281@@ -12,14 +12,14 @@
3282
3283 .text
3284
3285-/* Prototype: int __clear_user(void *addr, size_t sz)
3286+/* Prototype: int ___clear_user(void *addr, size_t sz)
3287 * Purpose : clear some user memory
3288 * Params : addr - user memory address to clear
3289 * : sz - number of bytes to clear
3290 * Returns : number of bytes NOT cleared
3291 */
3292 ENTRY(__clear_user_std)
3293-WEAK(__clear_user)
3294+WEAK(___clear_user)
3295 stmfd sp!, {r1, lr}
3296 mov r2, #0
3297 cmp r1, #4
3298@@ -44,7 +44,7 @@ WEAK(__clear_user)
3299 USER( strnebt r2, [r0])
3300 mov r0, #0
3301 ldmfd sp!, {r1, pc}
3302-ENDPROC(__clear_user)
3303+ENDPROC(___clear_user)
3304 ENDPROC(__clear_user_std)
3305
3306 .pushsection .fixup,"ax"
3307diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3308index 66a477a..bee61d3 100644
3309--- a/arch/arm/lib/copy_from_user.S
3310+++ b/arch/arm/lib/copy_from_user.S
3311@@ -16,7 +16,7 @@
3312 /*
3313 * Prototype:
3314 *
3315- * size_t __copy_from_user(void *to, const void *from, size_t n)
3316+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3317 *
3318 * Purpose:
3319 *
3320@@ -84,11 +84,11 @@
3321
3322 .text
3323
3324-ENTRY(__copy_from_user)
3325+ENTRY(___copy_from_user)
3326
3327 #include "copy_template.S"
3328
3329-ENDPROC(__copy_from_user)
3330+ENDPROC(___copy_from_user)
3331
3332 .pushsection .fixup,"ax"
3333 .align 0
3334diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3335index 6ee2f67..d1cce76 100644
3336--- a/arch/arm/lib/copy_page.S
3337+++ b/arch/arm/lib/copy_page.S
3338@@ -10,6 +10,7 @@
3339 * ASM optimised string functions
3340 */
3341 #include <linux/linkage.h>
3342+#include <linux/const.h>
3343 #include <asm/assembler.h>
3344 #include <asm/asm-offsets.h>
3345 #include <asm/cache.h>
3346diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3347index d066df6..df28194 100644
3348--- a/arch/arm/lib/copy_to_user.S
3349+++ b/arch/arm/lib/copy_to_user.S
3350@@ -16,7 +16,7 @@
3351 /*
3352 * Prototype:
3353 *
3354- * size_t __copy_to_user(void *to, const void *from, size_t n)
3355+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3356 *
3357 * Purpose:
3358 *
3359@@ -88,11 +88,11 @@
3360 .text
3361
3362 ENTRY(__copy_to_user_std)
3363-WEAK(__copy_to_user)
3364+WEAK(___copy_to_user)
3365
3366 #include "copy_template.S"
3367
3368-ENDPROC(__copy_to_user)
3369+ENDPROC(___copy_to_user)
3370 ENDPROC(__copy_to_user_std)
3371
3372 .pushsection .fixup,"ax"
3373diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3374index 7d08b43..f7ca7ea 100644
3375--- a/arch/arm/lib/csumpartialcopyuser.S
3376+++ b/arch/arm/lib/csumpartialcopyuser.S
3377@@ -57,8 +57,8 @@
3378 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3379 */
3380
3381-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3382-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3383+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3384+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3385
3386 #include "csumpartialcopygeneric.S"
3387
3388diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3389index 5306de3..aed6d03 100644
3390--- a/arch/arm/lib/delay.c
3391+++ b/arch/arm/lib/delay.c
3392@@ -28,7 +28,7 @@
3393 /*
3394 * Default to the loop-based delay implementation.
3395 */
3396-struct arm_delay_ops arm_delay_ops = {
3397+struct arm_delay_ops arm_delay_ops __read_only = {
3398 .delay = __loop_delay,
3399 .const_udelay = __loop_const_udelay,
3400 .udelay = __loop_udelay,
3401diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3402index 3e58d71..029817c 100644
3403--- a/arch/arm/lib/uaccess_with_memcpy.c
3404+++ b/arch/arm/lib/uaccess_with_memcpy.c
3405@@ -136,7 +136,7 @@ out:
3406 }
3407
3408 unsigned long
3409-__copy_to_user(void __user *to, const void *from, unsigned long n)
3410+___copy_to_user(void __user *to, const void *from, unsigned long n)
3411 {
3412 /*
3413 * This test is stubbed out of the main function above to keep
3414@@ -190,7 +190,7 @@ out:
3415 return n;
3416 }
3417
3418-unsigned long __clear_user(void __user *addr, unsigned long n)
3419+unsigned long ___clear_user(void __user *addr, unsigned long n)
3420 {
3421 	/* See rationale for this in __copy_to_user() above. */
3422 if (n < 64)
3423diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3424index f3407a5..bd4256f 100644
3425--- a/arch/arm/mach-kirkwood/common.c
3426+++ b/arch/arm/mach-kirkwood/common.c
3427@@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3428 clk_gate_ops.disable(hw);
3429 }
3430
3431-static struct clk_ops clk_gate_fn_ops;
3432+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3433+{
3434+ return clk_gate_ops.is_enabled(hw);
3435+}
3436+
3437+static struct clk_ops clk_gate_fn_ops = {
3438+ .enable = clk_gate_fn_enable,
3439+ .disable = clk_gate_fn_disable,
3440+ .is_enabled = clk_gate_fn_is_enabled,
3441+};
3442
3443 static struct clk __init *clk_register_gate_fn(struct device *dev,
3444 const char *name,
3445@@ -190,14 +199,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3446 gate_fn->fn_en = fn_en;
3447 gate_fn->fn_dis = fn_dis;
3448
3449- /* ops is the gate ops, but with our enable/disable functions */
3450- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3451- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3452- clk_gate_fn_ops = clk_gate_ops;
3453- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3454- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3455- }
3456-
3457 clk = clk_register(dev, &gate_fn->gate.hw);
3458
3459 if (IS_ERR(clk))
3460diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3461index 827d1500..2885dc6 100644
3462--- a/arch/arm/mach-omap2/board-n8x0.c
3463+++ b/arch/arm/mach-omap2/board-n8x0.c
3464@@ -627,7 +627,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3465 }
3466 #endif
3467
3468-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3469+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3470 .late_init = n8x0_menelaus_late_init,
3471 };
3472
3473diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3474index ab43755..ccfa231 100644
3475--- a/arch/arm/mach-omap2/gpmc.c
3476+++ b/arch/arm/mach-omap2/gpmc.c
3477@@ -148,7 +148,6 @@ struct omap3_gpmc_regs {
3478 };
3479
3480 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3481-static struct irq_chip gpmc_irq_chip;
3482 static int gpmc_irq_start;
3483
3484 static struct resource gpmc_mem_root;
3485@@ -716,6 +715,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3486
3487 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3488
3489+static struct irq_chip gpmc_irq_chip = {
3490+ .name = "gpmc",
3491+ .irq_startup = gpmc_irq_noop_ret,
3492+ .irq_enable = gpmc_irq_enable,
3493+ .irq_disable = gpmc_irq_disable,
3494+ .irq_shutdown = gpmc_irq_noop,
3495+ .irq_ack = gpmc_irq_noop,
3496+ .irq_mask = gpmc_irq_noop,
3497+ .irq_unmask = gpmc_irq_noop,
3498+
3499+};
3500+
3501 static int gpmc_setup_irq(void)
3502 {
3503 int i;
3504@@ -730,15 +741,6 @@ static int gpmc_setup_irq(void)
3505 return gpmc_irq_start;
3506 }
3507
3508- gpmc_irq_chip.name = "gpmc";
3509- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3510- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3511- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3512- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3513- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3514- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3515- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3516-
3517 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3518 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3519
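The clk_gate_fn_ops and gpmc_irq_chip changes above share one theme with the __do_const/__no_const/__read_only annotations earlier in this patch: function-pointer tables become write-protected, so they can no longer be assembled field-by-field at probe time and must instead be fully initialized at build time. The resulting shape (hypothetical handlers; __read_only is the annotation this patch relies on):

    static void example_irq_nop(struct irq_data *d) { }

    /* all callbacks supplied up front, so the table can live in
     * read-only memory instead of being patched at runtime */
    static struct irq_chip example_chip __read_only = {
            .name       = "example",
            .irq_mask   = example_irq_nop,
            .irq_unmask = example_irq_nop,
    };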
3520diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3521index f991016..145ebeb 100644
3522--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3523+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3524@@ -84,7 +84,7 @@ struct cpu_pm_ops {
3525 int (*finish_suspend)(unsigned long cpu_state);
3526 void (*resume)(void);
3527 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3528-};
3529+} __no_const;
3530
3531 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3532 static struct powerdomain *mpuss_pd;
3533@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3534 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3535 {}
3536
3537-struct cpu_pm_ops omap_pm_ops = {
3538+static struct cpu_pm_ops omap_pm_ops __read_only = {
3539 .finish_suspend = default_finish_suspend,
3540 .resume = dummy_cpu_resume,
3541 .scu_prepare = dummy_scu_prepare,
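
struct cpu_pm_ops is reassigned during boot, so it cannot simply be made const; __no_const exempts the type from the constify gcc plugin, while __read_only places the omap_pm_ops object in a section that KERNEXEC write-protects once init completes. Simplified versions of the two annotations assumed here, as a sketch rather than the patch's verbatim definitions:

/* Sketch of the two annotations (simplified; not verbatim): */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))  /* plugin: keep type writable */
#else
#define __no_const
#endif
/* object goes into a section that is mapped read-only after boot */
#define __read_only __attribute__((__section__(".data..read_only")))
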
3542diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3543index 3664562..72f85c6 100644
3544--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3545+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3546@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3547 return NOTIFY_OK;
3548 }
3549
3550-static struct notifier_block __refdata irq_hotplug_notifier = {
3551+static struct notifier_block irq_hotplug_notifier = {
3552 .notifier_call = irq_cpu_hotplug_notify,
3553 };
3554
3555diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3556index e0a398c..a470fa5 100644
3557--- a/arch/arm/mach-omap2/omap_device.c
3558+++ b/arch/arm/mach-omap2/omap_device.c
3559@@ -508,7 +508,7 @@ void omap_device_delete(struct omap_device *od)
3560 struct platform_device __init *omap_device_build(const char *pdev_name,
3561 int pdev_id,
3562 struct omap_hwmod *oh,
3563- void *pdata, int pdata_len)
3564+ const void *pdata, int pdata_len)
3565 {
3566 struct omap_hwmod *ohs[] = { oh };
3567
3568@@ -536,7 +536,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3569 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3570 int pdev_id,
3571 struct omap_hwmod **ohs,
3572- int oh_cnt, void *pdata,
3573+ int oh_cnt, const void *pdata,
3574 int pdata_len)
3575 {
3576 int ret = -ENOMEM;
3577diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3578index 78c02b3..c94109a 100644
3579--- a/arch/arm/mach-omap2/omap_device.h
3580+++ b/arch/arm/mach-omap2/omap_device.h
3581@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3582 /* Core code interface */
3583
3584 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3585- struct omap_hwmod *oh, void *pdata,
3586+ struct omap_hwmod *oh, const void *pdata,
3587 int pdata_len);
3588
3589 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3590 struct omap_hwmod **oh, int oh_cnt,
3591- void *pdata, int pdata_len);
3592+ const void *pdata, int pdata_len);
3593
3594 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3595 struct omap_hwmod **ohs, int oh_cnt);
3596diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3597index 8a1b5e0..5f30074 100644
3598--- a/arch/arm/mach-omap2/omap_hwmod.c
3599+++ b/arch/arm/mach-omap2/omap_hwmod.c
3600@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3601 int (*init_clkdm)(struct omap_hwmod *oh);
3602 void (*update_context_lost)(struct omap_hwmod *oh);
3603 int (*get_context_lost)(struct omap_hwmod *oh);
3604-};
3605+} __no_const;
3606
3607 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3608-static struct omap_hwmod_soc_ops soc_ops;
3609+static struct omap_hwmod_soc_ops soc_ops __read_only;
3610
3611 /* omap_hwmod_list contains all registered struct omap_hwmods */
3612 static LIST_HEAD(omap_hwmod_list);
3613diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3614index 95fee54..cfa9cf1 100644
3615--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3616+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3617@@ -10,6 +10,7 @@
3618
3619 #include <linux/kernel.h>
3620 #include <linux/init.h>
3621+#include <asm/pgtable.h>
3622
3623 #include "powerdomain.h"
3624
3625@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3626
3627 void __init am43xx_powerdomains_init(void)
3628 {
3629- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3630+ pax_open_kernel();
3631+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3632+ pax_close_kernel();
3633 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3634 pwrdm_register_pwrdms(powerdomains_am43xx);
3635 pwrdm_complete_init();
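
omap4_pwrdm_operations is constified by this patch, so the one late assignment above must be bracketed with pax_open_kernel()/pax_close_kernel(), which briefly re-enable kernel writes (via the domain register on ARM, CR0.WP on x86); the *(void **)& cast defeats the const that the constify plugin adds to function-pointer members. The recurring pattern, sketched with an illustrative ops type:

/* Sketch of the write-to-read-only pattern used above; illustrative
 * struct, pax_open_kernel()/pax_close_kernel() assumed from the PaX core. */
extern void pax_open_kernel(void);
extern void pax_close_kernel(void);

struct pd_ops {
	int (*has_voltdm)(void);
};
static struct pd_ops ops;	/* imagine this object is __read_only */

static void install(int (*fn)(void))
{
	pax_open_kernel();              /* briefly permit kernel writes */
	*(void **)&ops.has_voltdm = fn; /* cast defeats plugin-added const */
	pax_close_kernel();             /* restore write protection */
}
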
3636diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3637index d15c7bb..b2d1f0c 100644
3638--- a/arch/arm/mach-omap2/wd_timer.c
3639+++ b/arch/arm/mach-omap2/wd_timer.c
3640@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3641 struct omap_hwmod *oh;
3642 char *oh_name = "wd_timer2";
3643 char *dev_name = "omap_wdt";
3644- struct omap_wd_timer_platform_data pdata;
3645+ static struct omap_wd_timer_platform_data pdata = {
3646+ .read_reset_sources = prm_read_reset_sources
3647+ };
3648
3649 if (!cpu_class_is_omap2() || of_have_populated_dt())
3650 return 0;
3651@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3652 return -EINVAL;
3653 }
3654
3655- pdata.read_reset_sources = prm_read_reset_sources;
3656-
3657 pdev = omap_device_build(dev_name, id, oh, &pdata,
3658 sizeof(struct omap_wd_timer_platform_data));
3659 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3660diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3661index b82dcae..44ee5b6 100644
3662--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3663+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3664@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3665 bool entered_lp2 = false;
3666
3667 if (tegra_pending_sgi())
3668- ACCESS_ONCE(abort_flag) = true;
3669+ ACCESS_ONCE_RW(abort_flag) = true;
3670
3671 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3672
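
Under this patch ACCESS_ONCE() gains a const-qualified pointee so that unintended writes through it fail to compile; deliberate writes, such as setting abort_flag here, use ACCESS_ONCE_RW() instead. The definitions are approximately (a sketch of the patch's compiler.h change):

#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))  /* read-only */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* writable  */
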
3673diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3674index bdb3564..cebb96f 100644
3675--- a/arch/arm/mach-ux500/setup.h
3676+++ b/arch/arm/mach-ux500/setup.h
3677@@ -39,13 +39,6 @@ extern void ux500_timer_init(void);
3678 .type = MT_DEVICE, \
3679 }
3680
3681-#define __MEM_DEV_DESC(x, sz) { \
3682- .virtual = IO_ADDRESS(x), \
3683- .pfn = __phys_to_pfn(x), \
3684- .length = sz, \
3685- .type = MT_MEMORY, \
3686-}
3687-
3688 extern struct smp_operations ux500_smp_ops;
3689 extern void ux500_cpu_die(unsigned int cpu);
3690
3691diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3692index 1f8fed9..14d7823 100644
3693--- a/arch/arm/mm/Kconfig
3694+++ b/arch/arm/mm/Kconfig
3695@@ -446,7 +446,7 @@ config CPU_32v5
3696
3697 config CPU_32v6
3698 bool
3699- select CPU_USE_DOMAINS if CPU_V6 && MMU
3700+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3701 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3702
3703 config CPU_32v6K
3704@@ -601,6 +601,7 @@ config CPU_CP15_MPU
3705
3706 config CPU_USE_DOMAINS
3707 bool
3708+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3709 help
3710 This option enables or disables the use of domain switching
3711 via the set_fs() function.
3712@@ -800,6 +801,7 @@ config NEED_KUSER_HELPERS
3713 config KUSER_HELPERS
3714 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3715 default y
3716+ depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
3717 help
3718 Warning: disabling this option may break user programs.
3719
3720@@ -812,7 +814,7 @@ config KUSER_HELPERS
3721 See Documentation/arm/kernel_user_helpers.txt for details.
3722
3723 However, the fixed address nature of these helpers can be used
3724- by ROP (return orientated programming) authors when creating
3725+ by ROP (Return Oriented Programming) authors when creating
3726 exploits.
3727
3728 If all of the binaries and libraries which run on your platform
3729diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3730index 9240364..a2b8cf3 100644
3731--- a/arch/arm/mm/alignment.c
3732+++ b/arch/arm/mm/alignment.c
3733@@ -212,10 +212,12 @@ union offset_union {
3734 #define __get16_unaligned_check(ins,val,addr) \
3735 do { \
3736 unsigned int err = 0, v, a = addr; \
3737+ pax_open_userland(); \
3738 __get8_unaligned_check(ins,v,a,err); \
3739 val = v << ((BE) ? 8 : 0); \
3740 __get8_unaligned_check(ins,v,a,err); \
3741 val |= v << ((BE) ? 0 : 8); \
3742+ pax_close_userland(); \
3743 if (err) \
3744 goto fault; \
3745 } while (0)
3746@@ -229,6 +231,7 @@ union offset_union {
3747 #define __get32_unaligned_check(ins,val,addr) \
3748 do { \
3749 unsigned int err = 0, v, a = addr; \
3750+ pax_open_userland(); \
3751 __get8_unaligned_check(ins,v,a,err); \
3752 val = v << ((BE) ? 24 : 0); \
3753 __get8_unaligned_check(ins,v,a,err); \
3754@@ -237,6 +240,7 @@ union offset_union {
3755 val |= v << ((BE) ? 8 : 16); \
3756 __get8_unaligned_check(ins,v,a,err); \
3757 val |= v << ((BE) ? 0 : 24); \
3758+ pax_close_userland(); \
3759 if (err) \
3760 goto fault; \
3761 } while (0)
3762@@ -250,6 +254,7 @@ union offset_union {
3763 #define __put16_unaligned_check(ins,val,addr) \
3764 do { \
3765 unsigned int err = 0, v = val, a = addr; \
3766+ pax_open_userland(); \
3767 __asm__( FIRST_BYTE_16 \
3768 ARM( "1: "ins" %1, [%2], #1\n" ) \
3769 THUMB( "1: "ins" %1, [%2]\n" ) \
3770@@ -269,6 +274,7 @@ union offset_union {
3771 " .popsection\n" \
3772 : "=r" (err), "=&r" (v), "=&r" (a) \
3773 : "0" (err), "1" (v), "2" (a)); \
3774+ pax_close_userland(); \
3775 if (err) \
3776 goto fault; \
3777 } while (0)
3778@@ -282,6 +288,7 @@ union offset_union {
3779 #define __put32_unaligned_check(ins,val,addr) \
3780 do { \
3781 unsigned int err = 0, v = val, a = addr; \
3782+ pax_open_userland(); \
3783 __asm__( FIRST_BYTE_32 \
3784 ARM( "1: "ins" %1, [%2], #1\n" ) \
3785 THUMB( "1: "ins" %1, [%2]\n" ) \
3786@@ -311,6 +318,7 @@ union offset_union {
3787 " .popsection\n" \
3788 : "=r" (err), "=&r" (v), "=&r" (a) \
3789 : "0" (err), "1" (v), "2" (a)); \
3790+ pax_close_userland(); \
3791 if (err) \
3792 goto fault; \
3793 } while (0)
3794diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3795index 447da6f..77a5057 100644
3796--- a/arch/arm/mm/cache-l2x0.c
3797+++ b/arch/arm/mm/cache-l2x0.c
3798@@ -45,7 +45,7 @@ struct l2x0_of_data {
3799 void (*setup)(const struct device_node *, u32 *, u32 *);
3800 void (*save)(void);
3801 struct outer_cache_fns outer_cache;
3802-};
3803+} __do_const;
3804
3805 static bool of_init = false;
3806
3807diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3808index 84e6f77..0b52f31 100644
3809--- a/arch/arm/mm/context.c
3810+++ b/arch/arm/mm/context.c
3811@@ -43,7 +43,7 @@
3812 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3813
3814 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3815-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3816+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3817 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3818
3819 static DEFINE_PER_CPU(atomic64_t, active_asids);
3820@@ -180,7 +180,7 @@ static int is_reserved_asid(u64 asid)
3821 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3822 {
3823 u64 asid = atomic64_read(&mm->context.id);
3824- u64 generation = atomic64_read(&asid_generation);
3825+ u64 generation = atomic64_read_unchecked(&asid_generation);
3826
3827 if (asid != 0 && is_reserved_asid(asid)) {
3828 /*
3829@@ -198,7 +198,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3830 */
3831 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3832 if (asid == NUM_USER_ASIDS) {
3833- generation = atomic64_add_return(ASID_FIRST_VERSION,
3834+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3835 &asid_generation);
3836 flush_context(cpu);
3837 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3838@@ -227,14 +227,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3839 cpu_set_reserved_ttbr0();
3840
3841 asid = atomic64_read(&mm->context.id);
3842- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3843+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3844 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3845 goto switch_mm_fastpath;
3846
3847 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3848 /* Check that our ASID belongs to the current generation. */
3849 asid = atomic64_read(&mm->context.id);
3850- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3851+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3852 asid = new_context(mm, cpu);
3853 atomic64_set(&mm->context.id, asid);
3854 }
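
asid_generation is expected to wrap, so it becomes an atomic64_unchecked_t: with PAX_REFCOUNT the ordinary atomic64_t operations are instrumented to trap on overflow, and counters whose wraparound is benign opt out through the *_unchecked twins. A rough model of the split (the real 32-bit ARM versions use ldrexd/strexd; this is a sketch only):

/* Sketch: same layout as atomic64_t, but the operations carry no
 * overflow instrumentation. */
typedef struct {
	long long counter;
} atomic64_unchecked_t;

static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
{
	return v->counter;          /* plain read, no overflow trap */
}

static inline long long atomic64_add_return_unchecked(long long i,
						       atomic64_unchecked_t *v)
{
	return v->counter += i;     /* sketch only: not actually atomic */
}
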
3855diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3856index eb8830a..5360ce7 100644
3857--- a/arch/arm/mm/fault.c
3858+++ b/arch/arm/mm/fault.c
3859@@ -25,6 +25,7 @@
3860 #include <asm/system_misc.h>
3861 #include <asm/system_info.h>
3862 #include <asm/tlbflush.h>
3863+#include <asm/sections.h>
3864
3865 #include "fault.h"
3866
3867@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3868 if (fixup_exception(regs))
3869 return;
3870
3871+#ifdef CONFIG_PAX_MEMORY_UDEREF
3872+ if (addr < TASK_SIZE) {
3873+ if (current->signal->curr_ip)
3874+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3875+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3876+ else
3877+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3878+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3879+ }
3880+#endif
3881+
3882+#ifdef CONFIG_PAX_KERNEXEC
3883+ if ((fsr & FSR_WRITE) &&
3884+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3885+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3886+ {
3887+ if (current->signal->curr_ip)
3888+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3889+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3890+ else
3891+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3892+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3893+ }
3894+#endif
3895+
3896 /*
3897 * No handler, we'll have to terminate things with extreme prejudice.
3898 */
3899@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3900 }
3901 #endif
3902
3903+#ifdef CONFIG_PAX_PAGEEXEC
3904+ if (fsr & FSR_LNX_PF) {
3905+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3906+ do_group_exit(SIGKILL);
3907+ }
3908+#endif
3909+
3910 tsk->thread.address = addr;
3911 tsk->thread.error_code = fsr;
3912 tsk->thread.trap_no = 14;
3913@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3914 }
3915 #endif /* CONFIG_MMU */
3916
3917+#ifdef CONFIG_PAX_PAGEEXEC
3918+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3919+{
3920+ long i;
3921+
3922+ printk(KERN_ERR "PAX: bytes at PC: ");
3923+ for (i = 0; i < 20; i++) {
3924+ unsigned char c;
3925+ if (get_user(c, (__force unsigned char __user *)pc+i))
3926+ printk(KERN_CONT "?? ");
3927+ else
3928+ printk(KERN_CONT "%02x ", c);
3929+ }
3930+ printk("\n");
3931+
3932+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3933+ for (i = -1; i < 20; i++) {
3934+ unsigned long c;
3935+ if (get_user(c, (__force unsigned long __user *)sp+i))
3936+ printk(KERN_CONT "???????? ");
3937+ else
3938+ printk(KERN_CONT "%08lx ", c);
3939+ }
3940+ printk("\n");
3941+}
3942+#endif
3943+
3944 /*
3945 * First Level Translation Fault Handler
3946 *
3947@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3948 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3949 struct siginfo info;
3950
3951+#ifdef CONFIG_PAX_MEMORY_UDEREF
3952+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3953+ if (current->signal->curr_ip)
3954+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3955+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3956+ else
3957+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3958+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3959+ goto die;
3960+ }
3961+#endif
3962+
3963 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3964 return;
3965
3966+die:
3967 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3968 inf->name, fsr, addr);
3969
3970@@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3971 ifsr_info[nr].name = name;
3972 }
3973
3974+asmlinkage int sys_sigreturn(struct pt_regs *regs);
3975+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3976+
3977 asmlinkage void __exception
3978 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3979 {
3980 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3981 struct siginfo info;
3982+ unsigned long pc = instruction_pointer(regs);
3983+
3984+ if (user_mode(regs)) {
3985+ unsigned long sigpage = current->mm->context.sigpage;
3986+
3987+ if (sigpage <= pc && pc < sigpage + 7*4) {
3988+ if (pc < sigpage + 3*4)
3989+ sys_sigreturn(regs);
3990+ else
3991+ sys_rt_sigreturn(regs);
3992+ return;
3993+ }
3994+ if (pc == 0xffff0f60UL) {
3995+ /*
3996+ * PaX: __kuser_cmpxchg64 emulation
3997+ */
3998+ // TODO
3999+ //regs->ARM_pc = regs->ARM_lr;
4000+ //return;
4001+ }
4002+ if (pc == 0xffff0fa0UL) {
4003+ /*
4004+ * PaX: __kuser_memory_barrier emulation
4005+ */
4006+ // dmb(); implied by the exception
4007+ regs->ARM_pc = regs->ARM_lr;
4008+ return;
4009+ }
4010+ if (pc == 0xffff0fc0UL) {
4011+ /*
4012+ * PaX: __kuser_cmpxchg emulation
4013+ */
4014+ // TODO
4015+ //long new;
4016+ //int op;
4017+
4018+ //op = FUTEX_OP_SET << 28;
4019+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4020+ //regs->ARM_r0 = old != new;
4021+ //regs->ARM_pc = regs->ARM_lr;
4022+ //return;
4023+ }
4024+ if (pc == 0xffff0fe0UL) {
4025+ /*
4026+ * PaX: __kuser_get_tls emulation
4027+ */
4028+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4029+ regs->ARM_pc = regs->ARM_lr;
4030+ return;
4031+ }
4032+ }
4033+
4034+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4035+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4036+ if (current->signal->curr_ip)
4037+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4038+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4039+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4040+ else
4041+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4042+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4043+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4044+ goto die;
4045+ }
4046+#endif
4047+
4048+#ifdef CONFIG_PAX_REFCOUNT
4049+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4050+ unsigned int bkpt;
4051+
4052+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4053+ current->thread.error_code = ifsr;
4054+ current->thread.trap_no = 0;
4055+ pax_report_refcount_overflow(regs);
4056+ fixup_exception(regs);
4057+ return;
4058+ }
4059+ }
4060+#endif
4061
4062 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4063 return;
4064
4065+die:
4066 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4067 inf->name, ifsr, addr);
4068
4069diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4070index cf08bdf..772656c 100644
4071--- a/arch/arm/mm/fault.h
4072+++ b/arch/arm/mm/fault.h
4073@@ -3,6 +3,7 @@
4074
4075 /*
4076 * Fault status register encodings. We steal bit 31 for our own purposes.
4077+ * Set when the FSR value is from an instruction fault.
4078 */
4079 #define FSR_LNX_PF (1 << 31)
4080 #define FSR_WRITE (1 << 11)
4081@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4082 }
4083 #endif
4084
4085+/* valid for LPAE and !LPAE */
4086+static inline int is_xn_fault(unsigned int fsr)
4087+{
4088+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4089+}
4090+
4091+static inline int is_domain_fault(unsigned int fsr)
4092+{
4093+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4094+}
4095+
4096 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4097 unsigned long search_exception_table(unsigned long addr);
4098
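
Both helpers classify a fault purely from the FS field extracted by fsr_fs(). is_xn_fault() masks with 0x3c and compares to 0xc, accepting the permission-fault encodings 0x0c through 0x0f; is_domain_fault() masks with 0xD and compares to 0x9, accepting 0b01001 (section domain fault) and 0b01011 (page domain fault). A host-side sanity check of that arithmetic (illustrative, not kernel code):

#include <assert.h>

int main(void)
{
	unsigned fs;

	/* permission faults: FS = 0b0011xx, i.e. 0x0c..0x0f */
	for (fs = 0x0c; fs <= 0x0f; fs++)
		assert((fs & 0x3c) == 0xc);

	/* domain faults: 0b01001 (section) and 0b01011 (page) */
	assert((0x09 & 0xD) == 0x9);
	assert((0x0B & 0xD) == 0x9);
	return 0;
}
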
4099diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4100index 3e8f106..a0a1fe4 100644
4101--- a/arch/arm/mm/init.c
4102+++ b/arch/arm/mm/init.c
4103@@ -30,6 +30,8 @@
4104 #include <asm/setup.h>
4105 #include <asm/tlb.h>
4106 #include <asm/fixmap.h>
4107+#include <asm/system_info.h>
4108+#include <asm/cp15.h>
4109
4110 #include <asm/mach/arch.h>
4111 #include <asm/mach/map.h>
4112@@ -681,7 +683,46 @@ void free_initmem(void)
4113 {
4114 #ifdef CONFIG_HAVE_TCM
4115 extern char __tcm_start, __tcm_end;
4116+#endif
4117
4118+#ifdef CONFIG_PAX_KERNEXEC
4119+ unsigned long addr;
4120+ pgd_t *pgd;
4121+ pud_t *pud;
4122+ pmd_t *pmd;
4123+ int cpu_arch = cpu_architecture();
4124+ unsigned int cr = get_cr();
4125+
4126+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4127+ /* make page tables, etc. before .text NX */
4128+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4129+ pgd = pgd_offset_k(addr);
4130+ pud = pud_offset(pgd, addr);
4131+ pmd = pmd_offset(pud, addr);
4132+ __section_update(pmd, addr, PMD_SECT_XN);
4133+ }
4134+ /* make init NX */
4135+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4136+ pgd = pgd_offset_k(addr);
4137+ pud = pud_offset(pgd, addr);
4138+ pmd = pmd_offset(pud, addr);
4139+ __section_update(pmd, addr, PMD_SECT_XN);
4140+ }
4141+ /* make kernel code/rodata RX */
4142+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4143+ pgd = pgd_offset_k(addr);
4144+ pud = pud_offset(pgd, addr);
4145+ pmd = pmd_offset(pud, addr);
4146+#ifdef CONFIG_ARM_LPAE
4147+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4148+#else
4149+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4150+#endif
4151+ }
4152+ }
4153+#endif
4154+
4155+#ifdef CONFIG_HAVE_TCM
4156 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4157 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4158 #endif
4159diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4160index f123d6e..04bf569 100644
4161--- a/arch/arm/mm/ioremap.c
4162+++ b/arch/arm/mm/ioremap.c
4163@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4164 unsigned int mtype;
4165
4166 if (cached)
4167- mtype = MT_MEMORY;
4168+ mtype = MT_MEMORY_RX;
4169 else
4170- mtype = MT_MEMORY_NONCACHED;
4171+ mtype = MT_MEMORY_NONCACHED_RX;
4172
4173 return __arm_ioremap_caller(phys_addr, size, mtype,
4174 __builtin_return_address(0));
4175diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4176index 5e85ed3..b10a7ed 100644
4177--- a/arch/arm/mm/mmap.c
4178+++ b/arch/arm/mm/mmap.c
4179@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4180 struct vm_area_struct *vma;
4181 int do_align = 0;
4182 int aliasing = cache_is_vipt_aliasing();
4183+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4184 struct vm_unmapped_area_info info;
4185
4186 /*
4187@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4188 if (len > TASK_SIZE)
4189 return -ENOMEM;
4190
4191+#ifdef CONFIG_PAX_RANDMMAP
4192+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4193+#endif
4194+
4195 if (addr) {
4196 if (do_align)
4197 addr = COLOUR_ALIGN(addr, pgoff);
4198@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4199 addr = PAGE_ALIGN(addr);
4200
4201 vma = find_vma(mm, addr);
4202- if (TASK_SIZE - len >= addr &&
4203- (!vma || addr + len <= vma->vm_start))
4204+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4205 return addr;
4206 }
4207
4208@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4209 info.high_limit = TASK_SIZE;
4210 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4211 info.align_offset = pgoff << PAGE_SHIFT;
4212+ info.threadstack_offset = offset;
4213 return vm_unmapped_area(&info);
4214 }
4215
4216@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4217 unsigned long addr = addr0;
4218 int do_align = 0;
4219 int aliasing = cache_is_vipt_aliasing();
4220+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4221 struct vm_unmapped_area_info info;
4222
4223 /*
4224@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4225 return addr;
4226 }
4227
4228+#ifdef CONFIG_PAX_RANDMMAP
4229+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4230+#endif
4231+
4232 /* requesting a specific address */
4233 if (addr) {
4234 if (do_align)
4235@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4236 else
4237 addr = PAGE_ALIGN(addr);
4238 vma = find_vma(mm, addr);
4239- if (TASK_SIZE - len >= addr &&
4240- (!vma || addr + len <= vma->vm_start))
4241+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4242 return addr;
4243 }
4244
4245@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4246 info.high_limit = mm->mmap_base;
4247 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4248 info.align_offset = pgoff << PAGE_SHIFT;
4249+ info.threadstack_offset = offset;
4250 addr = vm_unmapped_area(&info);
4251
4252 /*
4253@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4254 {
4255 unsigned long random_factor = 0UL;
4256
4257+#ifdef CONFIG_PAX_RANDMMAP
4258+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4259+#endif
4260+
4261 /* 8 bits of randomness in 20 address space bits */
4262 if ((current->flags & PF_RANDOMIZE) &&
4263 !(current->personality & ADDR_NO_RANDOMIZE))
4264@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4265
4266 if (mmap_is_legacy()) {
4267 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4268+
4269+#ifdef CONFIG_PAX_RANDMMAP
4270+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4271+ mm->mmap_base += mm->delta_mmap;
4272+#endif
4273+
4274 mm->get_unmapped_area = arch_get_unmapped_area;
4275 } else {
4276 mm->mmap_base = mmap_base(random_factor);
4277+
4278+#ifdef CONFIG_PAX_RANDMMAP
4279+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4280+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4281+#endif
4282+
4283 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4284 }
4285 }
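
check_heap_stack_gap() replaces the bare addr + len <= vma->vm_start test: in addition to rejecting overlap with the next VMA, it keeps the proposed mapping a randomized distance below any downward-growing stack, using the per-call offset from gr_rand_threadstack_offset() that is threaded through vm_unmapped_area_info as threadstack_offset. A much-simplified model of the check, with a stand-in vma type (the real helper also honours the heap_stack_gap sysctl and upward-growing stacks):

/* Simplified model of the gap check; illustrative types only. */
#define VM_GROWSDOWN 0x0100UL

struct vma {
	unsigned long vm_start;
	unsigned long vm_flags;
};

static int check_gap(const struct vma *vma, unsigned long addr,
		     unsigned long len, unsigned long offset)
{
	if (!vma)
		return 1;                        /* nothing above us      */
	if (addr + len > vma->vm_start)
		return 0;                        /* overlaps next mapping */
	if ((vma->vm_flags & VM_GROWSDOWN) &&
	    addr + len + offset > vma->vm_start)
		return 0;                        /* too close to a stack  */
	return 1;
}
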
4286diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4287index 911d433..8580952 100644
4288--- a/arch/arm/mm/mmu.c
4289+++ b/arch/arm/mm/mmu.c
4290@@ -38,6 +38,22 @@
4291 #include "mm.h"
4292 #include "tcm.h"
4293
4294+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4295+void modify_domain(unsigned int dom, unsigned int type)
4296+{
4297+ struct thread_info *thread = current_thread_info();
4298+ unsigned int domain = thread->cpu_domain;
4299+ /*
4300+ * DOMAIN_MANAGER might be defined to some other value,
4301+ * use the arch-defined constant
4302+ */
4303+ domain &= ~domain_val(dom, 3);
4304+ thread->cpu_domain = domain | domain_val(dom, type);
4305+ set_domain(thread->cpu_domain);
4306+}
4307+EXPORT_SYMBOL(modify_domain);
4308+#endif
4309+
4310 /*
4311 * empty_zero_page is a special page that is used for
4312 * zero-initialized data and COW.
4313@@ -230,11 +246,19 @@ __setup("noalign", noalign_setup);
4314
4315 #endif /* ifdef CONFIG_CPU_CP15 / else */
4316
4317-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4318+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4319 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4320 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4321
4322-static struct mem_type mem_types[] = {
4323+#ifdef CONFIG_PAX_KERNEXEC
4324+#define L_PTE_KERNEXEC L_PTE_RDONLY
4325+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4326+#else
4327+#define L_PTE_KERNEXEC L_PTE_DIRTY
4328+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4329+#endif
4330+
4331+static struct mem_type mem_types[] __read_only = {
4332 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4333 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4334 L_PTE_SHARED,
4335@@ -266,16 +290,16 @@ static struct mem_type mem_types[] = {
4336 [MT_UNCACHED] = {
4337 .prot_pte = PROT_PTE_DEVICE,
4338 .prot_l1 = PMD_TYPE_TABLE,
4339- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4340+ .prot_sect = PROT_SECT_DEVICE,
4341 .domain = DOMAIN_IO,
4342 },
4343 [MT_CACHECLEAN] = {
4344- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4345+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4346 .domain = DOMAIN_KERNEL,
4347 },
4348 #ifndef CONFIG_ARM_LPAE
4349 [MT_MINICLEAN] = {
4350- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4351+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4352 .domain = DOMAIN_KERNEL,
4353 },
4354 #endif
4355@@ -283,36 +307,54 @@ static struct mem_type mem_types[] = {
4356 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4357 L_PTE_RDONLY,
4358 .prot_l1 = PMD_TYPE_TABLE,
4359- .domain = DOMAIN_USER,
4360+ .domain = DOMAIN_VECTORS,
4361 },
4362 [MT_HIGH_VECTORS] = {
4363 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4364 L_PTE_USER | L_PTE_RDONLY,
4365 .prot_l1 = PMD_TYPE_TABLE,
4366- .domain = DOMAIN_USER,
4367+ .domain = DOMAIN_VECTORS,
4368 },
4369- [MT_MEMORY] = {
4370+ [MT_MEMORY_RWX] = {
4371 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4372 .prot_l1 = PMD_TYPE_TABLE,
4373 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4374 .domain = DOMAIN_KERNEL,
4375 },
4376+ [MT_MEMORY_RW] = {
4377+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4378+ .prot_l1 = PMD_TYPE_TABLE,
4379+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4380+ .domain = DOMAIN_KERNEL,
4381+ },
4382+ [MT_MEMORY_RX] = {
4383+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4384+ .prot_l1 = PMD_TYPE_TABLE,
4385+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4386+ .domain = DOMAIN_KERNEL,
4387+ },
4388 [MT_ROM] = {
4389- .prot_sect = PMD_TYPE_SECT,
4390+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4391 .domain = DOMAIN_KERNEL,
4392 },
4393- [MT_MEMORY_NONCACHED] = {
4394+ [MT_MEMORY_NONCACHED_RW] = {
4395 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4396 L_PTE_MT_BUFFERABLE,
4397 .prot_l1 = PMD_TYPE_TABLE,
4398 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4399 .domain = DOMAIN_KERNEL,
4400 },
4401+ [MT_MEMORY_NONCACHED_RX] = {
4402+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4403+ L_PTE_MT_BUFFERABLE,
4404+ .prot_l1 = PMD_TYPE_TABLE,
4405+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4406+ .domain = DOMAIN_KERNEL,
4407+ },
4408 [MT_MEMORY_DTCM] = {
4409- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4410- L_PTE_XN,
4411+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4412 .prot_l1 = PMD_TYPE_TABLE,
4413- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4414+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4415 .domain = DOMAIN_KERNEL,
4416 },
4417 [MT_MEMORY_ITCM] = {
4418@@ -322,10 +364,10 @@ static struct mem_type mem_types[] = {
4419 },
4420 [MT_MEMORY_SO] = {
4421 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4422- L_PTE_MT_UNCACHED | L_PTE_XN,
4423+ L_PTE_MT_UNCACHED,
4424 .prot_l1 = PMD_TYPE_TABLE,
4425 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4426- PMD_SECT_UNCACHED | PMD_SECT_XN,
4427+ PMD_SECT_UNCACHED,
4428 .domain = DOMAIN_KERNEL,
4429 },
4430 [MT_MEMORY_DMA_READY] = {
4431@@ -411,9 +453,35 @@ static void __init build_mem_type_table(void)
4432 * to prevent speculative instruction fetches.
4433 */
4434 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4435+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4436 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4437+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4438 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4439+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4440 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4441+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4442+
4443+ /* Mark other regions on ARMv6+ as execute-never */
4444+
4445+#ifdef CONFIG_PAX_KERNEXEC
4446+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4447+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4448+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4449+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4450+#ifndef CONFIG_ARM_LPAE
4451+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4452+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4453+#endif
4454+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4455+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4456+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4457+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
4458+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4459+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4460+#endif
4461+
4462+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4463+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4464 }
4465 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4466 /*
4467@@ -475,6 +543,9 @@ static void __init build_mem_type_table(void)
4468 * from SVC mode and no access from userspace.
4469 */
4470 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471+#ifdef CONFIG_PAX_KERNEXEC
4472+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4473+#endif
4474 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4475 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4476 #endif
4477@@ -492,11 +563,17 @@ static void __init build_mem_type_table(void)
4478 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4479 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4480 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4481- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4482- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4483+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4484+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4485+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4486+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4487+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4488+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4489 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4490- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4491- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4492+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4493+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4494+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4495+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4496 }
4497 }
4498
4499@@ -507,15 +584,20 @@ static void __init build_mem_type_table(void)
4500 if (cpu_arch >= CPU_ARCH_ARMv6) {
4501 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4502 /* Non-cacheable Normal is XCB = 001 */
4503- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4504+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4505+ PMD_SECT_BUFFERED;
4506+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4507 PMD_SECT_BUFFERED;
4508 } else {
4509 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4510- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4511+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4512+ PMD_SECT_TEX(1);
4513+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4514 PMD_SECT_TEX(1);
4515 }
4516 } else {
4517- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4518+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4519+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4520 }
4521
4522 #ifdef CONFIG_ARM_LPAE
4523@@ -531,6 +613,8 @@ static void __init build_mem_type_table(void)
4524 vecs_pgprot |= PTE_EXT_AF;
4525 #endif
4526
4527+ user_pgprot |= __supported_pte_mask;
4528+
4529 for (i = 0; i < 16; i++) {
4530 pteval_t v = pgprot_val(protection_map[i]);
4531 protection_map[i] = __pgprot(v | user_pgprot);
4532@@ -548,10 +632,15 @@ static void __init build_mem_type_table(void)
4533
4534 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4535 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4536- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4537- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4538+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4539+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4540+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4541+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4542+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4543+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4544 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4545- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4546+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4547+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4548 mem_types[MT_ROM].prot_sect |= cp->pmd;
4549
4550 switch (cp->pmd) {
4551@@ -1193,18 +1282,15 @@ void __init arm_mm_memblock_reserve(void)
4552 * called function. This means you can't use any function or debugging
4553 * method which may touch any device, otherwise the kernel _will_ crash.
4554 */
4555+
4556+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4557+
4558 static void __init devicemaps_init(const struct machine_desc *mdesc)
4559 {
4560 struct map_desc map;
4561 unsigned long addr;
4562- void *vectors;
4563
4564- /*
4565- * Allocate the vector page early.
4566- */
4567- vectors = early_alloc(PAGE_SIZE * 2);
4568-
4569- early_trap_init(vectors);
4570+ early_trap_init(&vectors);
4571
4572 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4573 pmd_clear(pmd_off_k(addr));
4574@@ -1244,7 +1330,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4575 * location (0xffff0000). If we aren't using high-vectors, also
4576 * create a mapping at the low-vectors virtual address.
4577 */
4578- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4579+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4580 map.virtual = 0xffff0000;
4581 map.length = PAGE_SIZE;
4582 #ifdef CONFIG_KUSER_HELPERS
4583@@ -1316,8 +1402,39 @@ static void __init map_lowmem(void)
4584 map.pfn = __phys_to_pfn(start);
4585 map.virtual = __phys_to_virt(start);
4586 map.length = end - start;
4587- map.type = MT_MEMORY;
4588
4589+#ifdef CONFIG_PAX_KERNEXEC
4590+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4591+ struct map_desc kernel;
4592+ struct map_desc initmap;
4593+
4594+ /* when freeing initmem we will make this RW */
4595+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4596+ initmap.virtual = (unsigned long)__init_begin;
4597+ initmap.length = _sdata - __init_begin;
4598+ initmap.type = MT_MEMORY_RWX;
4599+ create_mapping(&initmap);
4600+
4601+ /* when freeing initmem we will make this RX */
4602+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4603+ kernel.virtual = (unsigned long)_stext;
4604+ kernel.length = __init_begin - _stext;
4605+ kernel.type = MT_MEMORY_RWX;
4606+ create_mapping(&kernel);
4607+
4608+ if (map.virtual < (unsigned long)_stext) {
4609+ map.length = (unsigned long)_stext - map.virtual;
4610+ map.type = MT_MEMORY_RWX;
4611+ create_mapping(&map);
4612+ }
4613+
4614+ map.pfn = __phys_to_pfn(__pa(_sdata));
4615+ map.virtual = (unsigned long)_sdata;
4616+ map.length = end - __pa(_sdata);
4617+ }
4618+#endif
4619+
4620+ map.type = MT_MEMORY_RW;
4621 create_mapping(&map);
4622 }
4623 }
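
Taken together with the free_initmem() changes earlier in this patch, map_lowmem() now yields a lowmem layout in which only the kernel image window is ever executable: regions mapped MT_MEMORY_RW are non-executable from the start, and the temporary RWX windows are tightened once init memory is released. In outline (a sketch of the resulting layout under KERNEXEC):

/*
 * [PAGE_OFFSET  .. _stext)        MT_MEMORY_RWX, set XN by free_initmem()
 * [_stext       .. __init_begin)  MT_MEMORY_RWX, set RX by free_initmem()
 * [__init_begin .. _sdata)        MT_MEMORY_RWX, set XN when init is freed
 * [_sdata       .. end of bank)   MT_MEMORY_RW,  never executable
 */
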
4624diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4625index a5bc92d..0bb4730 100644
4626--- a/arch/arm/plat-omap/sram.c
4627+++ b/arch/arm/plat-omap/sram.c
4628@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4629 * Looks like we need to preserve some bootloader code at the
4630 * beginning of SRAM for jumping to flash for reboot to work...
4631 */
4632+ pax_open_kernel();
4633 memset_io(omap_sram_base + omap_sram_skip, 0,
4634 omap_sram_size - omap_sram_skip);
4635+ pax_close_kernel();
4636 }
4637diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4638index ce6d763..cfea917 100644
4639--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4640+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4641@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4642 int (*started)(unsigned ch);
4643 int (*flush)(unsigned ch);
4644 int (*stop)(unsigned ch);
4645-};
4646+} __no_const;
4647
4648 extern void *samsung_dmadev_get_ops(void);
4649 extern void *s3c_dma_get_ops(void);
4650diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4651index 7ecc2b2..5e56c66 100644
4652--- a/arch/arm64/include/asm/uaccess.h
4653+++ b/arch/arm64/include/asm/uaccess.h
4654@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4655 flag; \
4656 })
4657
4658+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4659 #define access_ok(type, addr, size) __range_ok(addr, size)
4660
4661 /*
4662diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4663index c3a58a1..78fbf54 100644
4664--- a/arch/avr32/include/asm/cache.h
4665+++ b/arch/avr32/include/asm/cache.h
4666@@ -1,8 +1,10 @@
4667 #ifndef __ASM_AVR32_CACHE_H
4668 #define __ASM_AVR32_CACHE_H
4669
4670+#include <linux/const.h>
4671+
4672 #define L1_CACHE_SHIFT 5
4673-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4674+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4675
4676 /*
4677 * Memory returned by kmalloc() may be used for DMA, so we must make
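
This cache.h hunk, and the matching ones for blackfin, cris, frv, hexagon and ia64 below, all make the same change: L1_CACHE_BYTES is rebuilt from L1_CACHE_SHIFT via _AC(1,UL), so the constant is unsigned long in C expressions (keeping the size arithmetic that the size_overflow plugin instruments out of signed territory) while still expanding to a bare 1 in assembly. _AC comes from the stock <linux/const.h>:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X           /* assembler: drop the suffix */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)  /* C: paste it, e.g. _AC(1,UL) -> (1UL) */
#endif
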
4678diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4679index d232888..87c8df1 100644
4680--- a/arch/avr32/include/asm/elf.h
4681+++ b/arch/avr32/include/asm/elf.h
4682@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4683 the loader. We need to make sure that it is out of the way of the program
4684 that it will "exec", and that there is sufficient room for the brk. */
4685
4686-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4687+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4688
4689+#ifdef CONFIG_PAX_ASLR
4690+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4691+
4692+#define PAX_DELTA_MMAP_LEN 15
4693+#define PAX_DELTA_STACK_LEN 15
4694+#endif
4695
4696 /* This yields a mask that user programs can use to figure out what
4697 instruction set this CPU supports. This could be done in user space,
4698diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4699index 479330b..53717a8 100644
4700--- a/arch/avr32/include/asm/kmap_types.h
4701+++ b/arch/avr32/include/asm/kmap_types.h
4702@@ -2,9 +2,9 @@
4703 #define __ASM_AVR32_KMAP_TYPES_H
4704
4705 #ifdef CONFIG_DEBUG_HIGHMEM
4706-# define KM_TYPE_NR 29
4707+# define KM_TYPE_NR 30
4708 #else
4709-# define KM_TYPE_NR 14
4710+# define KM_TYPE_NR 15
4711 #endif
4712
4713 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4714diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4715index 0eca933..eb78c7b 100644
4716--- a/arch/avr32/mm/fault.c
4717+++ b/arch/avr32/mm/fault.c
4718@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4719
4720 int exception_trace = 1;
4721
4722+#ifdef CONFIG_PAX_PAGEEXEC
4723+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4724+{
4725+ unsigned long i;
4726+
4727+ printk(KERN_ERR "PAX: bytes at PC: ");
4728+ for (i = 0; i < 20; i++) {
4729+ unsigned char c;
4730+ if (get_user(c, (unsigned char *)pc+i))
4731+ printk(KERN_CONT "?? ");
4732+ else
4733+ printk(KERN_CONT "%02x ", c);
4734+ }
4735+ printk("\n");
4736+}
4737+#endif
4738+
4739 /*
4740 * This routine handles page faults. It determines the address and the
4741 * problem, and then passes it off to one of the appropriate routines.
4742@@ -176,6 +193,16 @@ bad_area:
4743 up_read(&mm->mmap_sem);
4744
4745 if (user_mode(regs)) {
4746+
4747+#ifdef CONFIG_PAX_PAGEEXEC
4748+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4749+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4750+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4751+ do_group_exit(SIGKILL);
4752+ }
4753+ }
4754+#endif
4755+
4756 if (exception_trace && printk_ratelimit())
4757 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4758 "sp %08lx ecr %lu\n",
4759diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4760index 568885a..f8008df 100644
4761--- a/arch/blackfin/include/asm/cache.h
4762+++ b/arch/blackfin/include/asm/cache.h
4763@@ -7,6 +7,7 @@
4764 #ifndef __ARCH_BLACKFIN_CACHE_H
4765 #define __ARCH_BLACKFIN_CACHE_H
4766
4767+#include <linux/const.h>
4768 #include <linux/linkage.h> /* for asmlinkage */
4769
4770 /*
4771@@ -14,7 +15,7 @@
4772 * Blackfin loads 32 bytes for cache
4773 */
4774 #define L1_CACHE_SHIFT 5
4775-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4776+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4777 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4778
4779 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4780diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4781index aea2718..3639a60 100644
4782--- a/arch/cris/include/arch-v10/arch/cache.h
4783+++ b/arch/cris/include/arch-v10/arch/cache.h
4784@@ -1,8 +1,9 @@
4785 #ifndef _ASM_ARCH_CACHE_H
4786 #define _ASM_ARCH_CACHE_H
4787
4788+#include <linux/const.h>
4789 /* Etrax 100LX have 32-byte cache-lines. */
4790-#define L1_CACHE_BYTES 32
4791 #define L1_CACHE_SHIFT 5
4792+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4793
4794 #endif /* _ASM_ARCH_CACHE_H */
4795diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4796index 7caf25d..ee65ac5 100644
4797--- a/arch/cris/include/arch-v32/arch/cache.h
4798+++ b/arch/cris/include/arch-v32/arch/cache.h
4799@@ -1,11 +1,12 @@
4800 #ifndef _ASM_CRIS_ARCH_CACHE_H
4801 #define _ASM_CRIS_ARCH_CACHE_H
4802
4803+#include <linux/const.h>
4804 #include <arch/hwregs/dma.h>
4805
4806 /* A cache-line is 32 bytes. */
4807-#define L1_CACHE_BYTES 32
4808 #define L1_CACHE_SHIFT 5
4809+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4810
4811 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4812
4813diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4814index b86329d..6709906 100644
4815--- a/arch/frv/include/asm/atomic.h
4816+++ b/arch/frv/include/asm/atomic.h
4817@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4818 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4819 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4820
4821+#define atomic64_read_unchecked(v) atomic64_read(v)
4822+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4823+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4824+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4825+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4826+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4827+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4828+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4829+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4830+
4831 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4832 {
4833 int c, old;
4834diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4835index 2797163..c2a401d 100644
4836--- a/arch/frv/include/asm/cache.h
4837+++ b/arch/frv/include/asm/cache.h
4838@@ -12,10 +12,11 @@
4839 #ifndef __ASM_CACHE_H
4840 #define __ASM_CACHE_H
4841
4842+#include <linux/const.h>
4843
4844 /* bytes per L1 cache line */
4845 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4846-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4847+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4848
4849 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4850 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4851diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4852index 43901f2..0d8b865 100644
4853--- a/arch/frv/include/asm/kmap_types.h
4854+++ b/arch/frv/include/asm/kmap_types.h
4855@@ -2,6 +2,6 @@
4856 #ifndef _ASM_KMAP_TYPES_H
4857 #define _ASM_KMAP_TYPES_H
4858
4859-#define KM_TYPE_NR 17
4860+#define KM_TYPE_NR 18
4861
4862 #endif
4863diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4864index 836f147..4cf23f5 100644
4865--- a/arch/frv/mm/elf-fdpic.c
4866+++ b/arch/frv/mm/elf-fdpic.c
4867@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4868 {
4869 struct vm_area_struct *vma;
4870 struct vm_unmapped_area_info info;
4871+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4872
4873 if (len > TASK_SIZE)
4874 return -ENOMEM;
4875@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4876 if (addr) {
4877 addr = PAGE_ALIGN(addr);
4878 vma = find_vma(current->mm, addr);
4879- if (TASK_SIZE - len >= addr &&
4880- (!vma || addr + len <= vma->vm_start))
4881+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4882 goto success;
4883 }
4884
4885@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4886 info.high_limit = (current->mm->start_stack - 0x00200000);
4887 info.align_mask = 0;
4888 info.align_offset = 0;
4889+ info.threadstack_offset = offset;
4890 addr = vm_unmapped_area(&info);
4891 if (!(addr & ~PAGE_MASK))
4892 goto success;
4893diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4894index f4ca594..adc72fd6 100644
4895--- a/arch/hexagon/include/asm/cache.h
4896+++ b/arch/hexagon/include/asm/cache.h
4897@@ -21,9 +21,11 @@
4898 #ifndef __ASM_CACHE_H
4899 #define __ASM_CACHE_H
4900
4901+#include <linux/const.h>
4902+
4903 /* Bytes per L1 cache line */
4904-#define L1_CACHE_SHIFT (5)
4905-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4906+#define L1_CACHE_SHIFT 5
4907+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4908
4909 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4910 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4911diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
4912index 4e4119b..dd7de0a 100644
4913--- a/arch/ia64/Kconfig
4914+++ b/arch/ia64/Kconfig
4915@@ -554,6 +554,7 @@ source "drivers/sn/Kconfig"
4916 config KEXEC
4917 bool "kexec system call"
4918 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
4919+ depends on !GRKERNSEC_KMEM
4920 help
4921 kexec is a system call that implements the ability to shutdown your
4922 current kernel, and to start another kernel. It is like a reboot
4923diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4924index 6e6fe18..a6ae668 100644
4925--- a/arch/ia64/include/asm/atomic.h
4926+++ b/arch/ia64/include/asm/atomic.h
4927@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4928 #define atomic64_inc(v) atomic64_add(1, (v))
4929 #define atomic64_dec(v) atomic64_sub(1, (v))
4930
4931+#define atomic64_read_unchecked(v) atomic64_read(v)
4932+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4933+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4934+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4935+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4936+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4937+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4938+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4939+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4940+
4941 /* Atomic operations are already serializing */
4942 #define smp_mb__before_atomic_dec() barrier()
4943 #define smp_mb__after_atomic_dec() barrier()
4944diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4945index 988254a..e1ee885 100644
4946--- a/arch/ia64/include/asm/cache.h
4947+++ b/arch/ia64/include/asm/cache.h
4948@@ -1,6 +1,7 @@
4949 #ifndef _ASM_IA64_CACHE_H
4950 #define _ASM_IA64_CACHE_H
4951
4952+#include <linux/const.h>
4953
4954 /*
4955 * Copyright (C) 1998-2000 Hewlett-Packard Co
4956@@ -9,7 +10,7 @@
4957
4958 /* Bytes per L1 (data) cache line. */
4959 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4960-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4961+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4962
4963 #ifdef CONFIG_SMP
4964 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4965diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4966index 5a83c5c..4d7f553 100644
4967--- a/arch/ia64/include/asm/elf.h
4968+++ b/arch/ia64/include/asm/elf.h
4969@@ -42,6 +42,13 @@
4970 */
4971 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4972
4973+#ifdef CONFIG_PAX_ASLR
4974+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4975+
4976+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4977+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4978+#endif
4979+
4980 #define PT_IA_64_UNWIND 0x70000001
4981
4982 /* IA-64 relocations: */
4983diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4984index 5767cdf..7462574 100644
4985--- a/arch/ia64/include/asm/pgalloc.h
4986+++ b/arch/ia64/include/asm/pgalloc.h
4987@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4988 pgd_val(*pgd_entry) = __pa(pud);
4989 }
4990
4991+static inline void
4992+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4993+{
4994+ pgd_populate(mm, pgd_entry, pud);
4995+}
4996+
4997 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4998 {
4999 return quicklist_alloc(0, GFP_KERNEL, NULL);
5000@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5001 pud_val(*pud_entry) = __pa(pmd);
5002 }
5003
5004+static inline void
5005+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5006+{
5007+ pud_populate(mm, pud_entry, pmd);
5008+}
5009+
5010 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5011 {
5012 return quicklist_alloc(0, GFP_KERNEL, NULL);
5013diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5014index 7935115..c0eca6a 100644
5015--- a/arch/ia64/include/asm/pgtable.h
5016+++ b/arch/ia64/include/asm/pgtable.h
5017@@ -12,7 +12,7 @@
5018 * David Mosberger-Tang <davidm@hpl.hp.com>
5019 */
5020
5021-
5022+#include <linux/const.h>
5023 #include <asm/mman.h>
5024 #include <asm/page.h>
5025 #include <asm/processor.h>
5026@@ -142,6 +142,17 @@
5027 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5028 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5029 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5030+
5031+#ifdef CONFIG_PAX_PAGEEXEC
5032+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5033+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5034+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5035+#else
5036+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5037+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5038+# define PAGE_COPY_NOEXEC PAGE_COPY
5039+#endif
5040+
5041 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5042 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5043 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5044diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5045index 45698cd..e8e2dbc 100644
5046--- a/arch/ia64/include/asm/spinlock.h
5047+++ b/arch/ia64/include/asm/spinlock.h
5048@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5049 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5050
5051 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5052- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5053+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5054 }
5055
5056 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
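
ACCESS_ONCE_RW exists because elsewhere in this patch ACCESS_ONCE gains a const qualifier, so a stray write through it fails to compile; the unlock path above is a legitimate store and must say so. A standalone sketch of the split (these definitions mirror, not quote, the patch's compiler.h):

    #include <stdio.h>

    /* Mirrors the ACCESS_ONCE split this patch makes in
     * include/linux/compiler.h: the plain form is read-only,
     * the _RW form permits stores. */
    #define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        int lock = 40;
        int v = ACCESS_ONCE(lock);      /* reads work through either form */
        ACCESS_ONCE_RW(lock) = v + 2;   /* stores need the _RW variant */
        /* ACCESS_ONCE(lock) = 0;          would not compile: const lvalue */
        printf("%d\n", lock);
        return 0;
    }
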
5057diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5058index 449c8c0..3d4b1e9 100644
5059--- a/arch/ia64/include/asm/uaccess.h
5060+++ b/arch/ia64/include/asm/uaccess.h
5061@@ -70,6 +70,7 @@
5062 && ((segment).seg == KERNEL_DS.seg \
5063 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5064 })
5065+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5066 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5067
5068 /*
5069@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5070 static inline unsigned long
5071 __copy_to_user (void __user *to, const void *from, unsigned long count)
5072 {
5073+ if (count > INT_MAX)
5074+ return count;
5075+
5076+ if (!__builtin_constant_p(count))
5077+ check_object_size(from, count, true);
5078+
5079 return __copy_user(to, (__force void __user *) from, count);
5080 }
5081
5082 static inline unsigned long
5083 __copy_from_user (void *to, const void __user *from, unsigned long count)
5084 {
5085+ if (count > INT_MAX)
5086+ return count;
5087+
5088+ if (!__builtin_constant_p(count))
5089+ check_object_size(to, count, false);
5090+
5091 return __copy_user((__force void __user *) to, from, count);
5092 }
5093
5094@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5095 ({ \
5096 void __user *__cu_to = (to); \
5097 const void *__cu_from = (from); \
5098- long __cu_len = (n); \
5099+ unsigned long __cu_len = (n); \
5100 \
5101- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5102+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5103+ if (!__builtin_constant_p(n)) \
5104+ check_object_size(__cu_from, __cu_len, true); \
5105 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5106+ } \
5107 __cu_len; \
5108 })
5109
5110@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5111 ({ \
5112 void *__cu_to = (to); \
5113 const void __user *__cu_from = (from); \
5114- long __cu_len = (n); \
5115+ unsigned long __cu_len = (n); \
5116 \
5117 __chk_user_ptr(__cu_from); \
5118- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5119+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5120+ if (!__builtin_constant_p(n)) \
5121+ check_object_size(__cu_to, __cu_len, false); \
5122 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5123+ } \
5124 __cu_len; \
5125 })
5126
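The check_object_size() calls added above are the runtime half of the usercopy hardening: compile-time-constant sizes are presumed to be validated statically, so only variable lengths pay for the runtime bounds walk. A self-contained sketch of that gating, with check_object_size() stubbed (the real one walks slab/stack metadata for the object):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stub standing in for the kernel's usercopy bounds check. */
    static void check_object_size(const void *ptr, size_t n, bool to_user)
    {
        printf("runtime check: %zu bytes %s user\n", n, to_user ? "to" : "from");
    }

    /* The gating pattern from the patched __copy_to_user(): constant
     * sizes skip the runtime walk, variable lengths trigger it. */
    #define copy_checked(src, n, to_user) do {                \
        if (!__builtin_constant_p(n))                         \
            check_object_size((src), (n), (to_user));         \
        /* ... the actual __copy_user() would follow ... */   \
    } while (0)

    int main(void)
    {
        char buf[16];
        size_t n = 8;                 /* runtime length: checked */
        copy_checked(buf, n, true);
        copy_checked(buf, 16, true);  /* constant length: no runtime check */
        return 0;
    }
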
5127diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5128index 24603be..948052d 100644
5129--- a/arch/ia64/kernel/module.c
5130+++ b/arch/ia64/kernel/module.c
5131@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5132 void
5133 module_free (struct module *mod, void *module_region)
5134 {
5135- if (mod && mod->arch.init_unw_table &&
5136- module_region == mod->module_init) {
5137+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5138 unw_remove_unwind_table(mod->arch.init_unw_table);
5139 mod->arch.init_unw_table = NULL;
5140 }
5141@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5142 }
5143
5144 static inline int
5145+in_init_rx (const struct module *mod, uint64_t addr)
5146+{
5147+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5148+}
5149+
5150+static inline int
5151+in_init_rw (const struct module *mod, uint64_t addr)
5152+{
5153+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5154+}
5155+
5156+static inline int
5157 in_init (const struct module *mod, uint64_t addr)
5158 {
5159- return addr - (uint64_t) mod->module_init < mod->init_size;
5160+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5161+}
5162+
5163+static inline int
5164+in_core_rx (const struct module *mod, uint64_t addr)
5165+{
5166+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5167+}
5168+
5169+static inline int
5170+in_core_rw (const struct module *mod, uint64_t addr)
5171+{
5172+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5173 }
5174
5175 static inline int
5176 in_core (const struct module *mod, uint64_t addr)
5177 {
5178- return addr - (uint64_t) mod->module_core < mod->core_size;
5179+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5180 }
5181
5182 static inline int
5183@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5184 break;
5185
5186 case RV_BDREL:
5187- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5188+ if (in_init_rx(mod, val))
5189+ val -= (uint64_t) mod->module_init_rx;
5190+ else if (in_init_rw(mod, val))
5191+ val -= (uint64_t) mod->module_init_rw;
5192+ else if (in_core_rx(mod, val))
5193+ val -= (uint64_t) mod->module_core_rx;
5194+ else if (in_core_rw(mod, val))
5195+ val -= (uint64_t) mod->module_core_rw;
5196 break;
5197
5198 case RV_LTV:
5199@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5200 * addresses have been selected...
5201 */
5202 uint64_t gp;
5203- if (mod->core_size > MAX_LTOFF)
5204+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5205 /*
5206 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5207 * at the end of the module.
5208 */
5209- gp = mod->core_size - MAX_LTOFF / 2;
5210+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5211 else
5212- gp = mod->core_size / 2;
5213- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5214+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5215+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5216 mod->arch.gp = gp;
5217 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5218 }
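
The in_init_rx()/in_core_rx() helpers use a classic single-comparison bounds check: with unsigned arithmetic, addr - base < size implies both addr >= base and addr < base + size, because an address below base wraps around to a huge value. A small demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* One-comparison bounds check, as in in_init_rx()/in_core_rx():
     * when addr < base, the unsigned subtraction wraps to a huge value
     * that can never be smaller than size. */
    static int in_range(uint64_t addr, uint64_t base, uint64_t size)
    {
        return addr - base < size;
    }

    int main(void)
    {
        uint64_t base = 0xa000000000000000ULL, size = 0x1000;

        printf("%d\n", in_range(base + 0x800, base, size)); /* 1: inside   */
        printf("%d\n", in_range(base - 8, base, size));     /* 0: below    */
        printf("%d\n", in_range(base + size, base, size));  /* 0: past end */
        return 0;
    }
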
5219diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5220index ab33328..f39506c 100644
5221--- a/arch/ia64/kernel/palinfo.c
5222+++ b/arch/ia64/kernel/palinfo.c
5223@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5224 return NOTIFY_OK;
5225 }
5226
5227-static struct notifier_block __refdata palinfo_cpu_notifier =
5228+static struct notifier_block palinfo_cpu_notifier =
5229 {
5230 .notifier_call = palinfo_cpu_callback,
5231 .priority = 0,
5232diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5233index 41e33f8..65180b2a 100644
5234--- a/arch/ia64/kernel/sys_ia64.c
5235+++ b/arch/ia64/kernel/sys_ia64.c
5236@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5237 unsigned long align_mask = 0;
5238 struct mm_struct *mm = current->mm;
5239 struct vm_unmapped_area_info info;
5240+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5241
5242 if (len > RGN_MAP_LIMIT)
5243 return -ENOMEM;
5244@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5245 if (REGION_NUMBER(addr) == RGN_HPAGE)
5246 addr = 0;
5247 #endif
5248+
5249+#ifdef CONFIG_PAX_RANDMMAP
5250+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5251+ addr = mm->free_area_cache;
5252+ else
5253+#endif
5254+
5255 if (!addr)
5256 addr = TASK_UNMAPPED_BASE;
5257
5258@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5259 info.high_limit = TASK_SIZE;
5260 info.align_mask = align_mask;
5261 info.align_offset = 0;
5262+ info.threadstack_offset = offset;
5263 return vm_unmapped_area(&info);
5264 }
5265
5266diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5267index 0ccb28f..8992469 100644
5268--- a/arch/ia64/kernel/vmlinux.lds.S
5269+++ b/arch/ia64/kernel/vmlinux.lds.S
5270@@ -198,7 +198,7 @@ SECTIONS {
5271 /* Per-cpu data: */
5272 . = ALIGN(PERCPU_PAGE_SIZE);
5273 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5274- __phys_per_cpu_start = __per_cpu_load;
5275+ __phys_per_cpu_start = per_cpu_load;
5276 /*
5277 * ensure percpu data fits
5278 * into percpu page size
5279diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5280index 7225dad..2a7c8256 100644
5281--- a/arch/ia64/mm/fault.c
5282+++ b/arch/ia64/mm/fault.c
5283@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5284 return pte_present(pte);
5285 }
5286
5287+#ifdef CONFIG_PAX_PAGEEXEC
5288+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5289+{
5290+ unsigned long i;
5291+
5292+ printk(KERN_ERR "PAX: bytes at PC: ");
5293+ for (i = 0; i < 8; i++) {
5294+ unsigned int c;
5295+ if (get_user(c, (unsigned int *)pc+i))
5296+ printk(KERN_CONT "???????? ");
5297+ else
5298+ printk(KERN_CONT "%08x ", c);
5299+ }
5300+ printk("\n");
5301+}
5302+#endif
5303+
5304 # define VM_READ_BIT 0
5305 # define VM_WRITE_BIT 1
5306 # define VM_EXEC_BIT 2
5307@@ -151,8 +168,21 @@ retry:
5308 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5309 goto bad_area;
5310
5311- if ((vma->vm_flags & mask) != mask)
5312+ if ((vma->vm_flags & mask) != mask) {
5313+
5314+#ifdef CONFIG_PAX_PAGEEXEC
5315+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5316+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5317+ goto bad_area;
5318+
5319+ up_read(&mm->mmap_sem);
5320+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5321+ do_group_exit(SIGKILL);
5322+ }
5323+#endif
5324+
5325 goto bad_area;
5326+ }
5327
5328 /*
5329 * If for any reason at all we couldn't handle the fault, make
5330diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5331index 68232db..6ca80af 100644
5332--- a/arch/ia64/mm/hugetlbpage.c
5333+++ b/arch/ia64/mm/hugetlbpage.c
5334@@ -154,6 +154,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5335 unsigned long pgoff, unsigned long flags)
5336 {
5337 struct vm_unmapped_area_info info;
5338+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5339
5340 if (len > RGN_MAP_LIMIT)
5341 return -ENOMEM;
5342@@ -177,6 +178,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5343 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5344 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5345 info.align_offset = 0;
5346+ info.threadstack_offset = offset;
5347 return vm_unmapped_area(&info);
5348 }
5349
5350diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5351index 88504ab..cbb6c9f 100644
5352--- a/arch/ia64/mm/init.c
5353+++ b/arch/ia64/mm/init.c
5354@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5355 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5356 vma->vm_end = vma->vm_start + PAGE_SIZE;
5357 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5358+
5359+#ifdef CONFIG_PAX_PAGEEXEC
5360+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5361+ vma->vm_flags &= ~VM_EXEC;
5362+
5363+#ifdef CONFIG_PAX_MPROTECT
5364+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5365+ vma->vm_flags &= ~VM_MAYEXEC;
5366+#endif
5367+
5368+ }
5369+#endif
5370+
5371 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5372 down_write(&current->mm->mmap_sem);
5373 if (insert_vm_struct(current->mm, vma)) {
5374diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5375index 40b3ee9..8c2c112 100644
5376--- a/arch/m32r/include/asm/cache.h
5377+++ b/arch/m32r/include/asm/cache.h
5378@@ -1,8 +1,10 @@
5379 #ifndef _ASM_M32R_CACHE_H
5380 #define _ASM_M32R_CACHE_H
5381
5382+#include <linux/const.h>
5383+
5384 /* L1 cache line size */
5385 #define L1_CACHE_SHIFT 4
5386-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5387+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5388
5389 #endif /* _ASM_M32R_CACHE_H */
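
The _AC(1,UL) form recurs across every cache.h in this patch. It comes from linux/const.h and lets one header serve both C and assembly: the assembler rejects the UL suffix, while C code wants L1_CACHE_BYTES to be unsigned long so comparisons and shifts against it don't go through signed int. Approximately (paraphrasing the real header):

    #include <stdio.h>

    /* Paraphrase of include/uapi/linux/const.h: under __ASSEMBLY__ the
     * suffix is dropped; in C it is token-pasted onto the constant. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT 4
    #define L1_CACHE_BYTES (_AC(1, UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        /* unsigned long, not int: safe in 64-bit shifts and unsigned
         * comparisons, yet still a bare "16" to the assembler. */
        printf("L1_CACHE_BYTES = %lu, width = %zu bytes\n",
               (unsigned long)L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
        return 0;
    }
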
5390diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5391index 82abd15..d95ae5d 100644
5392--- a/arch/m32r/lib/usercopy.c
5393+++ b/arch/m32r/lib/usercopy.c
5394@@ -14,6 +14,9 @@
5395 unsigned long
5396 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5397 {
5398+ if ((long)n < 0)
5399+ return n;
5400+
5401 prefetch(from);
5402 if (access_ok(VERIFY_WRITE, to, n))
5403 __copy_user(to,from,n);
5404@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5405 unsigned long
5406 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5407 {
5408+ if ((long)n < 0)
5409+ return n;
5410+
5411 prefetchw(to);
5412 if (access_ok(VERIFY_READ, from, n))
5413 __copy_user_zeroing(to,from,n);
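
The added (long)n < 0 guard catches lengths that went negative in signed arithmetic before reaching an unsigned count parameter; unguarded, such a value would request a copy of nearly the whole address space. Returning n follows the copy_*_user convention of reporting bytes not copied. A sketch with a stand-in copy routine:

    #include <stdio.h>

    /* Mirrors the guard added to __generic_copy_to_user(): a length that
     * went negative in signed arithmetic arrives here as a huge unsigned
     * count; reject it, reporting every byte as uncopied. */
    static unsigned long copy_guarded(void *to, const void *from, unsigned long n)
    {
        if ((long)n < 0)
            return n;          /* convention: bytes NOT copied */
        /* ... access_ok() check and the real __copy_user() go here ... */
        return 0;
    }

    int main(void)
    {
        long len = 16 - 64;    /* buggy length computation: -48 */
        unsigned long left = copy_guarded(NULL, NULL, (unsigned long)len);
        printf("asked for %lu bytes, %lu left uncopied\n",
               (unsigned long)len, left);
        return 0;
    }
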
5414diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5415index 0395c51..5f26031 100644
5416--- a/arch/m68k/include/asm/cache.h
5417+++ b/arch/m68k/include/asm/cache.h
5418@@ -4,9 +4,11 @@
5419 #ifndef __ARCH_M68K_CACHE_H
5420 #define __ARCH_M68K_CACHE_H
5421
5422+#include <linux/const.h>
5423+
5424 /* bytes per L1 cache line */
5425 #define L1_CACHE_SHIFT 4
5426-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5427+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5428
5429 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5430
5431diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5432index 0424315..defcca9 100644
5433--- a/arch/metag/mm/hugetlbpage.c
5434+++ b/arch/metag/mm/hugetlbpage.c
5435@@ -205,6 +205,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5436 info.high_limit = TASK_SIZE;
5437 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5438 info.align_offset = 0;
5439+ info.threadstack_offset = 0;
5440 return vm_unmapped_area(&info);
5441 }
5442
5443diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5444index 4efe96a..60e8699 100644
5445--- a/arch/microblaze/include/asm/cache.h
5446+++ b/arch/microblaze/include/asm/cache.h
5447@@ -13,11 +13,12 @@
5448 #ifndef _ASM_MICROBLAZE_CACHE_H
5449 #define _ASM_MICROBLAZE_CACHE_H
5450
5451+#include <linux/const.h>
5452 #include <asm/registers.h>
5453
5454 #define L1_CACHE_SHIFT 5
5455 /* word-granular cache in microblaze */
5456-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5457+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5458
5459 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5460
5461diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5462index 650de39..6982b02 100644
5463--- a/arch/mips/Kconfig
5464+++ b/arch/mips/Kconfig
5465@@ -2268,6 +2268,7 @@ source "kernel/Kconfig.preempt"
5466
5467 config KEXEC
5468 bool "Kexec system call"
5469+ depends on !GRKERNSEC_KMEM
5470 help
5471 kexec is a system call that implements the ability to shutdown your
5472 current kernel, and to start another kernel. It is like a reboot
5473diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5474index 02f2444..506969c 100644
5475--- a/arch/mips/cavium-octeon/dma-octeon.c
5476+++ b/arch/mips/cavium-octeon/dma-octeon.c
5477@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5478 if (dma_release_from_coherent(dev, order, vaddr))
5479 return;
5480
5481- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5482+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5483 }
5484
5485 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5486diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5487index 7eed2f2..c4e385d 100644
5488--- a/arch/mips/include/asm/atomic.h
5489+++ b/arch/mips/include/asm/atomic.h
5490@@ -21,15 +21,39 @@
5491 #include <asm/cmpxchg.h>
5492 #include <asm/war.h>
5493
5494+#ifdef CONFIG_GENERIC_ATOMIC64
5495+#include <asm-generic/atomic64.h>
5496+#endif
5497+
5498 #define ATOMIC_INIT(i) { (i) }
5499
5500+#ifdef CONFIG_64BIT
5501+#define _ASM_EXTABLE(from, to) \
5502+" .section __ex_table,\"a\"\n" \
5503+" .dword " #from ", " #to"\n" \
5504+" .previous\n"
5505+#else
5506+#define _ASM_EXTABLE(from, to) \
5507+" .section __ex_table,\"a\"\n" \
5508+" .word " #from ", " #to"\n" \
5509+" .previous\n"
5510+#endif
5511+
5512 /*
5513 * atomic_read - read atomic variable
5514 * @v: pointer of type atomic_t
5515 *
5516 * Atomically reads the value of @v.
5517 */
5518-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5519+static inline int atomic_read(const atomic_t *v)
5520+{
5521+ return (*(volatile const int *) &v->counter);
5522+}
5523+
5524+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5525+{
5526+ return (*(volatile const int *) &v->counter);
5527+}
5528
5529 /*
5530 * atomic_set - set atomic variable
5531@@ -38,7 +62,15 @@
5532 *
5533 * Atomically sets the value of @v to @i.
5534 */
5535-#define atomic_set(v, i) ((v)->counter = (i))
5536+static inline void atomic_set(atomic_t *v, int i)
5537+{
5538+ v->counter = i;
5539+}
5540+
5541+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5542+{
5543+ v->counter = i;
5544+}
5545
5546 /*
5547 * atomic_add - add integer to atomic variable
5548@@ -47,7 +79,67 @@
5549 *
5550 * Atomically adds @i to @v.
5551 */
5552-static __inline__ void atomic_add(int i, atomic_t * v)
5553+static __inline__ void atomic_add(int i, atomic_t *v)
5554+{
5555+ int temp;
5556+
5557+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5558+ __asm__ __volatile__(
5559+ " .set mips3 \n"
5560+ "1: ll %0, %1 # atomic_add \n"
5561+#ifdef CONFIG_PAX_REFCOUNT
5562+ /* Exception on overflow. */
5563+ "2: add %0, %2 \n"
5564+#else
5565+ " addu %0, %2 \n"
5566+#endif
5567+ " sc %0, %1 \n"
5568+ " beqzl %0, 1b \n"
5569+#ifdef CONFIG_PAX_REFCOUNT
5570+ "3: \n"
5571+ _ASM_EXTABLE(2b, 3b)
5572+#endif
5573+ " .set mips0 \n"
5574+ : "=&r" (temp), "+m" (v->counter)
5575+ : "Ir" (i));
5576+ } else if (kernel_uses_llsc) {
5577+ __asm__ __volatile__(
5578+ " .set mips3 \n"
5579+ "1: ll %0, %1 # atomic_add \n"
5580+#ifdef CONFIG_PAX_REFCOUNT
5581+ /* Exception on overflow. */
5582+ "2: add %0, %2 \n"
5583+#else
5584+ " addu %0, %2 \n"
5585+#endif
5586+ " sc %0, %1 \n"
5587+ " beqz %0, 1b \n"
5588+#ifdef CONFIG_PAX_REFCOUNT
5589+ "3: \n"
5590+ _ASM_EXTABLE(2b, 3b)
5591+#endif
5592+ " .set mips0 \n"
5593+ : "=&r" (temp), "+m" (v->counter)
5594+ : "Ir" (i));
5595+ } else {
5596+ unsigned long flags;
5597+
5598+ raw_local_irq_save(flags);
5599+ __asm__ __volatile__(
5600+#ifdef CONFIG_PAX_REFCOUNT
5601+ /* Exception on overflow. */
5602+ "1: add %0, %1 \n"
5603+ "2: \n"
5604+ _ASM_EXTABLE(1b, 2b)
5605+#else
5606+ " addu %0, %1 \n"
5607+#endif
5608+ : "+r" (v->counter) : "Ir" (i));
5609+ raw_local_irq_restore(flags);
5610+ }
5611+}
5612+
5613+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5614 {
5615 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5616 int temp;
5617@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5618 *
5619 * Atomically subtracts @i from @v.
5620 */
5621-static __inline__ void atomic_sub(int i, atomic_t * v)
5622+static __inline__ void atomic_sub(int i, atomic_t *v)
5623+{
5624+ int temp;
5625+
5626+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5627+ __asm__ __volatile__(
5628+ " .set mips3 \n"
5629+ "1: ll %0, %1 # atomic_sub \n"
5630+#ifdef CONFIG_PAX_REFCOUNT
5631+ /* Exception on overflow. */
5632+ "2: sub %0, %2 \n"
5633+#else
5634+ " subu %0, %2 \n"
5635+#endif
5636+ " sc %0, %1 \n"
5637+ " beqzl %0, 1b \n"
5638+#ifdef CONFIG_PAX_REFCOUNT
5639+ "3: \n"
5640+ _ASM_EXTABLE(2b, 3b)
5641+#endif
5642+ " .set mips0 \n"
5643+ : "=&r" (temp), "+m" (v->counter)
5644+ : "Ir" (i));
5645+ } else if (kernel_uses_llsc) {
5646+ __asm__ __volatile__(
5647+ " .set mips3 \n"
5648+ "1: ll %0, %1 # atomic_sub \n"
5649+#ifdef CONFIG_PAX_REFCOUNT
5650+ /* Exception on overflow. */
5651+ "2: sub %0, %2 \n"
5652+#else
5653+ " subu %0, %2 \n"
5654+#endif
5655+ " sc %0, %1 \n"
5656+ " beqz %0, 1b \n"
5657+#ifdef CONFIG_PAX_REFCOUNT
5658+ "3: \n"
5659+ _ASM_EXTABLE(2b, 3b)
5660+#endif
5661+ " .set mips0 \n"
5662+ : "=&r" (temp), "+m" (v->counter)
5663+ : "Ir" (i));
5664+ } else {
5665+ unsigned long flags;
5666+
5667+ raw_local_irq_save(flags);
5668+ __asm__ __volatile__(
5669+#ifdef CONFIG_PAX_REFCOUNT
5670+ /* Exception on overflow. */
5671+ "1: sub %0, %1 \n"
5672+ "2: \n"
5673+ _ASM_EXTABLE(1b, 2b)
5674+#else
5675+ " subu %0, %1 \n"
5676+#endif
5677+ : "+r" (v->counter) : "Ir" (i));
5678+ raw_local_irq_restore(flags);
5679+ }
5680+}
5681+
5682+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
5683 {
5684 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5685 int temp;
5686@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5687 /*
5688 * Same as above, but return the result value
5689 */
5690-static __inline__ int atomic_add_return(int i, atomic_t * v)
5691+static __inline__ int atomic_add_return(int i, atomic_t *v)
5692+{
5693+ int result;
5694+ int temp;
5695+
5696+ smp_mb__before_llsc();
5697+
5698+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5699+ __asm__ __volatile__(
5700+ " .set mips3 \n"
5701+ "1: ll %1, %2 # atomic_add_return \n"
5702+#ifdef CONFIG_PAX_REFCOUNT
5703+ "2: add %0, %1, %3 \n"
5704+#else
5705+ " addu %0, %1, %3 \n"
5706+#endif
5707+ " sc %0, %2 \n"
5708+ " beqzl %0, 1b \n"
5709+#ifdef CONFIG_PAX_REFCOUNT
5710+ " b 4f \n"
5711+ " .set noreorder \n"
5712+ "3: b 5f \n"
5713+ " move %0, %1 \n"
5714+ " .set reorder \n"
5715+ _ASM_EXTABLE(2b, 3b)
5716+#endif
5717+ "4: addu %0, %1, %3 \n"
5718+#ifdef CONFIG_PAX_REFCOUNT
5719+ "5: \n"
5720+#endif
5721+ " .set mips0 \n"
5722+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5723+ : "Ir" (i));
5724+ } else if (kernel_uses_llsc) {
5725+ __asm__ __volatile__(
5726+ " .set mips3 \n"
5727+ "1: ll %1, %2 # atomic_add_return \n"
5728+#ifdef CONFIG_PAX_REFCOUNT
5729+ "2: add %0, %1, %3 \n"
5730+#else
5731+ " addu %0, %1, %3 \n"
5732+#endif
5733+ " sc %0, %2 \n"
5734+ " bnez %0, 4f \n"
5735+ " b 1b \n"
5736+#ifdef CONFIG_PAX_REFCOUNT
5737+ " .set noreorder \n"
5738+ "3: b 5f \n"
5739+ " move %0, %1 \n"
5740+ " .set reorder \n"
5741+ _ASM_EXTABLE(2b, 3b)
5742+#endif
5743+ "4: addu %0, %1, %3 \n"
5744+#ifdef CONFIG_PAX_REFCOUNT
5745+ "5: \n"
5746+#endif
5747+ " .set mips0 \n"
5748+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5749+ : "Ir" (i));
5750+ } else {
5751+ unsigned long flags;
5752+
5753+ raw_local_irq_save(flags);
5754+ __asm__ __volatile__(
5755+ " lw %0, %1 \n"
5756+#ifdef CONFIG_PAX_REFCOUNT
5757+ /* Exception on overflow. */
5758+ "1: add %0, %2 \n"
5759+#else
5760+ " addu %0, %2 \n"
5761+#endif
5762+ " sw %0, %1 \n"
5763+#ifdef CONFIG_PAX_REFCOUNT
5764+ /* Note: Dest reg is not modified on overflow */
5765+ "2: \n"
5766+ _ASM_EXTABLE(1b, 2b)
5767+#endif
5768+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5769+ raw_local_irq_restore(flags);
5770+ }
5771+
5772+ smp_llsc_mb();
5773+
5774+ return result;
5775+}
5776+
5777+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5778 {
5779 int result;
5780
5781@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5782 return result;
5783 }
5784
5785-static __inline__ int atomic_sub_return(int i, atomic_t * v)
5786+static __inline__ int atomic_sub_return(int i, atomic_t *v)
5787+{
5788+ int result;
5789+ int temp;
5790+
5791+ smp_mb__before_llsc();
5792+
5793+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5794+ __asm__ __volatile__(
5795+ " .set mips3 \n"
5796+ "1: ll %1, %2 # atomic_sub_return \n"
5797+#ifdef CONFIG_PAX_REFCOUNT
5798+ "2: sub %0, %1, %3 \n"
5799+#else
5800+ " subu %0, %1, %3 \n"
5801+#endif
5802+ " sc %0, %2 \n"
5803+ " beqzl %0, 1b \n"
5804+#ifdef CONFIG_PAX_REFCOUNT
5805+ " b 4f \n"
5806+ " .set noreorder \n"
5807+ "3: b 5f \n"
5808+ " move %0, %1 \n"
5809+ " .set reorder \n"
5810+ _ASM_EXTABLE(2b, 3b)
5811+#endif
5812+ "4: subu %0, %1, %3 \n"
5813+#ifdef CONFIG_PAX_REFCOUNT
5814+ "5: \n"
5815+#endif
5816+ " .set mips0 \n"
5817+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5818+ : "Ir" (i), "m" (v->counter)
5819+ : "memory");
5820+ } else if (kernel_uses_llsc) {
5821+ __asm__ __volatile__(
5822+ " .set mips3 \n"
5823+ "1: ll %1, %2 # atomic_sub_return \n"
5824+#ifdef CONFIG_PAX_REFCOUNT
5825+ "2: sub %0, %1, %3 \n"
5826+#else
5827+ " subu %0, %1, %3 \n"
5828+#endif
5829+ " sc %0, %2 \n"
5830+ " bnez %0, 4f \n"
5831+ " b 1b \n"
5832+#ifdef CONFIG_PAX_REFCOUNT
5833+ " .set noreorder \n"
5834+ "3: b 5f \n"
5835+ " move %0, %1 \n"
5836+ " .set reorder \n"
5837+ _ASM_EXTABLE(2b, 3b)
5838+#endif
5839+ "4: subu %0, %1, %3 \n"
5840+#ifdef CONFIG_PAX_REFCOUNT
5841+ "5: \n"
5842+#endif
5843+ " .set mips0 \n"
5844+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5845+ : "Ir" (i));
5846+ } else {
5847+ unsigned long flags;
5848+
5849+ raw_local_irq_save(flags);
5850+ __asm__ __volatile__(
5851+ " lw %0, %1 \n"
5852+#ifdef CONFIG_PAX_REFCOUNT
5853+ /* Exception on overflow. */
5854+ "1: sub %0, %2 \n"
5855+#else
5856+ " subu %0, %2 \n"
5857+#endif
5858+ " sw %0, %1 \n"
5859+#ifdef CONFIG_PAX_REFCOUNT
5860+ /* Note: Dest reg is not modified on overflow */
5861+ "2: \n"
5862+ _ASM_EXTABLE(1b, 2b)
5863+#endif
5864+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5865+ raw_local_irq_restore(flags);
5866+ }
5867+
5868+ smp_llsc_mb();
5869+
5870+ return result;
5871+}
5872+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5873 {
5874 int result;
5875
5876@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5877 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5878 * The function returns the old value of @v minus @i.
5879 */
5880-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5881+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5882 {
5883 int result;
5884
5885@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5886 return result;
5887 }
5888
5889-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5890-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5891+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5892+{
5893+ return cmpxchg(&v->counter, old, new);
5894+}
5895+
5896+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5897+ int new)
5898+{
5899+ return cmpxchg(&(v->counter), old, new);
5900+}
5901+
5902+static inline int atomic_xchg(atomic_t *v, int new)
5903+{
5904+ return xchg(&v->counter, new);
5905+}
5906+
5907+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5908+{
5909+ return xchg(&(v->counter), new);
5910+}
5911
5912 /**
5913 * __atomic_add_unless - add unless the number is a given value
5914@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5915
5916 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5917 #define atomic_inc_return(v) atomic_add_return(1, (v))
5918+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5919+{
5920+ return atomic_add_return_unchecked(1, v);
5921+}
5922
5923 /*
5924 * atomic_sub_and_test - subtract value from variable and test result
5925@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5926 * other cases.
5927 */
5928 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5929+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5930+{
5931+ return atomic_add_return_unchecked(1, v) == 0;
5932+}
5933
5934 /*
5935 * atomic_dec_and_test - decrement by 1 and test
5936@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5937 * Atomically increments @v by 1.
5938 */
5939 #define atomic_inc(v) atomic_add(1, (v))
5940+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5941+{
5942+ atomic_add_unchecked(1, v);
5943+}
5944
5945 /*
5946 * atomic_dec - decrement and test
5947@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5948 * Atomically decrements @v by 1.
5949 */
5950 #define atomic_dec(v) atomic_sub(1, (v))
5951+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
5952+{
5953+ atomic_sub_unchecked(1, v);
5954+}
5955
5956 /*
5957 * atomic_add_negative - add and test if negative
5958@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5959 * @v: pointer of type atomic64_t
5960 *
5961 */
5962-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
5963+static inline long atomic64_read(const atomic64_t *v)
5964+{
5965+ return (*(volatile const long *) &v->counter);
5966+}
5967+
5968+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5969+{
5970+ return (*(volatile const long *) &v->counter);
5971+}
5972
5973 /*
5974 * atomic64_set - set atomic variable
5975 * @v: pointer of type atomic64_t
5976 * @i: required value
5977 */
5978-#define atomic64_set(v, i) ((v)->counter = (i))
5979+static inline void atomic64_set(atomic64_t *v, long i)
5980+{
5981+ v->counter = i;
5982+}
5983+
5984+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5985+{
5986+ v->counter = i;
5987+}
5988
5989 /*
5990 * atomic64_add - add integer to atomic variable
5991@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5992 *
5993 * Atomically adds @i to @v.
5994 */
5995-static __inline__ void atomic64_add(long i, atomic64_t * v)
5996+static __inline__ void atomic64_add(long i, atomic64_t *v)
5997+{
5998+ long temp;
5999+
6000+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6001+ __asm__ __volatile__(
6002+ " .set mips3 \n"
6003+ "1: lld %0, %1 # atomic64_add \n"
6004+#ifdef CONFIG_PAX_REFCOUNT
6005+ /* Exception on overflow. */
6006+ "2: dadd %0, %2 \n"
6007+#else
6008+ " daddu %0, %2 \n"
6009+#endif
6010+ " scd %0, %1 \n"
6011+ " beqzl %0, 1b \n"
6012+#ifdef CONFIG_PAX_REFCOUNT
6013+ "3: \n"
6014+ _ASM_EXTABLE(2b, 3b)
6015+#endif
6016+ " .set mips0 \n"
6017+ : "=&r" (temp), "+m" (v->counter)
6018+ : "Ir" (i));
6019+ } else if (kernel_uses_llsc) {
6020+ __asm__ __volatile__(
6021+ " .set mips3 \n"
6022+ "1: lld %0, %1 # atomic64_add \n"
6023+#ifdef CONFIG_PAX_REFCOUNT
6024+ /* Exception on overflow. */
6025+ "2: dadd %0, %2 \n"
6026+#else
6027+ " daddu %0, %2 \n"
6028+#endif
6029+ " scd %0, %1 \n"
6030+ " beqz %0, 1b \n"
6031+#ifdef CONFIG_PAX_REFCOUNT
6032+ "3: \n"
6033+ _ASM_EXTABLE(2b, 3b)
6034+#endif
6035+ " .set mips0 \n"
6036+ : "=&r" (temp), "+m" (v->counter)
6037+ : "Ir" (i));
6038+ } else {
6039+ unsigned long flags;
6040+
6041+ raw_local_irq_save(flags);
6042+ __asm__ __volatile__(
6043+#ifdef CONFIG_PAX_REFCOUNT
6044+ /* Exception on overflow. */
6045+ "1: dadd %0, %1 \n"
6046+ "2: \n"
6047+ _ASM_EXTABLE(1b, 2b)
6048+#else
6049+ " daddu %0, %1 \n"
6050+#endif
6051+ : "+r" (v->counter) : "Ir" (i));
6052+ raw_local_irq_restore(flags);
6053+ }
6054+}
6055+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6056 {
6057 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6058 long temp;
6059@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6060 *
6061 * Atomically subtracts @i from @v.
6062 */
6063-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6064+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6065+{
6066+ long temp;
6067+
6068+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6069+ __asm__ __volatile__(
6070+ " .set mips3 \n"
6071+ "1: lld %0, %1 # atomic64_sub \n"
6072+#ifdef CONFIG_PAX_REFCOUNT
6073+ /* Exception on overflow. */
6074+ "2: dsub %0, %2 \n"
6075+#else
6076+ " dsubu %0, %2 \n"
6077+#endif
6078+ " scd %0, %1 \n"
6079+ " beqzl %0, 1b \n"
6080+#ifdef CONFIG_PAX_REFCOUNT
6081+ "3: \n"
6082+ _ASM_EXTABLE(2b, 3b)
6083+#endif
6084+ " .set mips0 \n"
6085+ : "=&r" (temp), "+m" (v->counter)
6086+ : "Ir" (i));
6087+ } else if (kernel_uses_llsc) {
6088+ __asm__ __volatile__(
6089+ " .set mips3 \n"
6090+ "1: lld %0, %1 # atomic64_sub \n"
6091+#ifdef CONFIG_PAX_REFCOUNT
6092+ /* Exception on overflow. */
6093+ "2: dsub %0, %2 \n"
6094+#else
6095+ " dsubu %0, %2 \n"
6096+#endif
6097+ " scd %0, %1 \n"
6098+ " beqz %0, 1b \n"
6099+#ifdef CONFIG_PAX_REFCOUNT
6100+ "3: \n"
6101+ _ASM_EXTABLE(2b, 3b)
6102+#endif
6103+ " .set mips0 \n"
6104+ : "=&r" (temp), "+m" (v->counter)
6105+ : "Ir" (i));
6106+ } else {
6107+ unsigned long flags;
6108+
6109+ raw_local_irq_save(flags);
6110+ __asm__ __volatile__(
6111+#ifdef CONFIG_PAX_REFCOUNT
6112+ /* Exception on overflow. */
6113+ "1: dsub %0, %1 \n"
6114+ "2: \n"
6115+ _ASM_EXTABLE(1b, 2b)
6116+#else
6117+ " dsubu %0, %1 \n"
6118+#endif
6119+ : "+r" (v->counter) : "Ir" (i));
6120+ raw_local_irq_restore(flags);
6121+ }
6122+}
6123+
6124+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6125 {
6126 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6127 long temp;
6128@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6129 /*
6130 * Same as above, but return the result value
6131 */
6132-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6133+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6134+{
6135+ long result;
6136+ long temp;
6137+
6138+ smp_mb__before_llsc();
6139+
6140+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6141+ __asm__ __volatile__(
6142+ " .set mips3 \n"
6143+ "1: lld %1, %2 # atomic64_add_return \n"
6144+#ifdef CONFIG_PAX_REFCOUNT
6145+ "2: dadd %0, %1, %3 \n"
6146+#else
6147+ " daddu %0, %1, %3 \n"
6148+#endif
6149+ " scd %0, %2 \n"
6150+ " beqzl %0, 1b \n"
6151+#ifdef CONFIG_PAX_REFCOUNT
6152+ " b 4f \n"
6153+ " .set noreorder \n"
6154+ "3: b 5f \n"
6155+ " move %0, %1 \n"
6156+ " .set reorder \n"
6157+ _ASM_EXTABLE(2b, 3b)
6158+#endif
6159+ "4: daddu %0, %1, %3 \n"
6160+#ifdef CONFIG_PAX_REFCOUNT
6161+ "5: \n"
6162+#endif
6163+ " .set mips0 \n"
6164+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6165+ : "Ir" (i));
6166+ } else if (kernel_uses_llsc) {
6167+ __asm__ __volatile__(
6168+ " .set mips3 \n"
6169+ "1: lld %1, %2 # atomic64_add_return \n"
6170+#ifdef CONFIG_PAX_REFCOUNT
6171+ "2: dadd %0, %1, %3 \n"
6172+#else
6173+ " daddu %0, %1, %3 \n"
6174+#endif
6175+ " scd %0, %2 \n"
6176+ " bnez %0, 4f \n"
6177+ " b 1b \n"
6178+#ifdef CONFIG_PAX_REFCOUNT
6179+ " .set noreorder \n"
6180+ "3: b 5f \n"
6181+ " move %0, %1 \n"
6182+ " .set reorder \n"
6183+ _ASM_EXTABLE(2b, 3b)
6184+#endif
6185+ "4: daddu %0, %1, %3 \n"
6186+#ifdef CONFIG_PAX_REFCOUNT
6187+ "5: \n"
6188+#endif
6189+ " .set mips0 \n"
6190+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6191+ : "Ir" (i), "m" (v->counter)
6192+ : "memory");
6193+ } else {
6194+ unsigned long flags;
6195+
6196+ raw_local_irq_save(flags);
6197+ __asm__ __volatile__(
6198+ " ld %0, %1 \n"
6199+#ifdef CONFIG_PAX_REFCOUNT
6200+ /* Exception on overflow. */
6201+ "1: dadd %0, %2 \n"
6202+#else
6203+ " daddu %0, %2 \n"
6204+#endif
6205+ " sd %0, %1 \n"
6206+#ifdef CONFIG_PAX_REFCOUNT
6207+ /* Note: Dest reg is not modified on overflow */
6208+ "2: \n"
6209+ _ASM_EXTABLE(1b, 2b)
6210+#endif
6211+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6212+ raw_local_irq_restore(flags);
6213+ }
6214+
6215+ smp_llsc_mb();
6216+
6217+ return result;
6218+}
6219+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6220 {
6221 long result;
6222
6223@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6224 return result;
6225 }
6226
6227-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6228+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6229+{
6230+ long result;
6231+ long temp;
6232+
6233+ smp_mb__before_llsc();
6234+
6235+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6236+ long temp;
6237+
6238+ __asm__ __volatile__(
6239+ " .set mips3 \n"
6240+ "1: lld %1, %2 # atomic64_sub_return \n"
6241+#ifdef CONFIG_PAX_REFCOUNT
6242+ "2: dsub %0, %1, %3 \n"
6243+#else
6244+ " dsubu %0, %1, %3 \n"
6245+#endif
6246+ " scd %0, %2 \n"
6247+ " beqzl %0, 1b \n"
6248+#ifdef CONFIG_PAX_REFCOUNT
6249+ " b 4f \n"
6250+ " .set noreorder \n"
6251+ "3: b 5f \n"
6252+ " move %0, %1 \n"
6253+ " .set reorder \n"
6254+ _ASM_EXTABLE(2b, 3b)
6255+#endif
6256+ "4: dsubu %0, %1, %3 \n"
6257+#ifdef CONFIG_PAX_REFCOUNT
6258+ "5: \n"
6259+#endif
6260+ " .set mips0 \n"
6261+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6262+ : "Ir" (i), "m" (v->counter)
6263+ : "memory");
6264+ } else if (kernel_uses_llsc) {
6265+ __asm__ __volatile__(
6266+ " .set mips3 \n"
6267+ "1: lld %1, %2 # atomic64_sub_return \n"
6268+#ifdef CONFIG_PAX_REFCOUNT
6269+ "2: dsub %0, %1, %3 \n"
6270+#else
6271+ " dsubu %0, %1, %3 \n"
6272+#endif
6273+ " scd %0, %2 \n"
6274+ " bnez %0, 4f \n"
6275+ " b 1b \n"
6276+#ifdef CONFIG_PAX_REFCOUNT
6277+ " .set noreorder \n"
6278+ "3: b 5f \n"
6279+ " move %0, %1 \n"
6280+ " .set reorder \n"
6281+ _ASM_EXTABLE(2b, 3b)
6282+#endif
6283+ "4: dsubu %0, %1, %3 \n"
6284+#ifdef CONFIG_PAX_REFCOUNT
6285+ "5: \n"
6286+#endif
6287+ " .set mips0 \n"
6288+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6289+ : "Ir" (i), "m" (v->counter)
6290+ : "memory");
6291+ } else {
6292+ unsigned long flags;
6293+
6294+ raw_local_irq_save(flags);
6295+ __asm__ __volatile__(
6296+ " ld %0, %1 \n"
6297+#ifdef CONFIG_PAX_REFCOUNT
6298+ /* Exception on overflow. */
6299+ "1: dsub %0, %2 \n"
6300+#else
6301+ " dsubu %0, %2 \n"
6302+#endif
6303+ " sd %0, %1 \n"
6304+#ifdef CONFIG_PAX_REFCOUNT
6305+ /* Note: Dest reg is not modified on overflow */
6306+ "2: \n"
6307+ _ASM_EXTABLE(1b, 2b)
6308+#endif
6309+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6310+ raw_local_irq_restore(flags);
6311+ }
6312+
6313+ smp_llsc_mb();
6314+
6315+ return result;
6316+}
6317+
6318+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6319 {
6320 long result;
6321
6322@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6323 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6324 * The function returns the old value of @v minus @i.
6325 */
6326-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6327+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6328 {
6329 long result;
6330
6331@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6332 return result;
6333 }
6334
6335-#define atomic64_cmpxchg(v, o, n) \
6336- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6337-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6338+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6339+{
6340+ return cmpxchg(&v->counter, old, new);
6341+}
6342+
6343+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6344+ long new)
6345+{
6346+ return cmpxchg(&(v->counter), old, new);
6347+}
6348+
6349+static inline long atomic64_xchg(atomic64_t *v, long new)
6350+{
6351+ return xchg(&v->counter, new);
6352+}
6353+
6354+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6355+{
6356+ return xchg(&(v->counter), new);
6357+}
6358
6359 /**
6360 * atomic64_add_unless - add unless the number is a given value
6361@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6362
6363 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6364 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6365+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6366
6367 /*
6368 * atomic64_sub_and_test - subtract value from variable and test result
6369@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6370 * other cases.
6371 */
6372 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6373+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6374
6375 /*
6376 * atomic64_dec_and_test - decrement by 1 and test
6377@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6378 * Atomically increments @v by 1.
6379 */
6380 #define atomic64_inc(v) atomic64_add(1, (v))
6381+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6382
6383 /*
6384 * atomic64_dec - decrement and test
6385@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6386 * Atomically decrements @v by 1.
6387 */
6388 #define atomic64_dec(v) atomic64_sub(1, (v))
6389+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6390
6391 /*
6392 * atomic64_add_negative - add and test if negative
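
The PAX_REFCOUNT branches above swap addu/daddu (wrapping) for add/dadd, which on MIPS raise an integer-overflow exception when the signed result wraps; the _ASM_EXTABLE(2b, 3b) entries route that trap to a fixup so the offending task can be killed rather than letting a reference count wrap back to zero. A portable C analogue of such a trapping increment, using the GCC/Clang overflow builtin in place of the hardware trap:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* C analogue of PAX_REFCOUNT's trapping add: detect signed wrap and
     * bail out instead of wrapping the counter.  The real patch gets the
     * detection for free from the MIPS add/dadd overflow exception. */
    static int checked_inc(int *counter)
    {
        int next;

        if (__builtin_add_overflow(*counter, 1, &next)) {
            fprintf(stderr, "refcount overflow detected\n");
            abort();           /* kernel: exception fixup, then kill task */
        }
        return *counter = next;
    }

    int main(void)
    {
        int refs = INT_MAX - 1;

        printf("%d\n", checked_inc(&refs));  /* INT_MAX: still fine */
        checked_inc(&refs);                  /* would wrap: aborts instead */
        return 0;
    }
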
6393diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6394index b4db69f..8f3b093 100644
6395--- a/arch/mips/include/asm/cache.h
6396+++ b/arch/mips/include/asm/cache.h
6397@@ -9,10 +9,11 @@
6398 #ifndef _ASM_CACHE_H
6399 #define _ASM_CACHE_H
6400
6401+#include <linux/const.h>
6402 #include <kmalloc.h>
6403
6404 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6405-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6406+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6407
6408 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6409 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6410diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6411index a66359e..d3d474a 100644
6412--- a/arch/mips/include/asm/elf.h
6413+++ b/arch/mips/include/asm/elf.h
6414@@ -373,13 +373,16 @@ extern const char *__elf_platform;
6415 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6416 #endif
6417
6418+#ifdef CONFIG_PAX_ASLR
6419+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6420+
6421+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6422+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6423+#endif
6424+
6425 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6426 struct linux_binprm;
6427 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6428 int uses_interp);
6429
6430-struct mm_struct;
6431-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6432-#define arch_randomize_brk arch_randomize_brk
6433-
6434 #endif /* _ASM_ELF_H */
6435diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6436index c1f6afa..38cc6e9 100644
6437--- a/arch/mips/include/asm/exec.h
6438+++ b/arch/mips/include/asm/exec.h
6439@@ -12,6 +12,6 @@
6440 #ifndef _ASM_EXEC_H
6441 #define _ASM_EXEC_H
6442
6443-extern unsigned long arch_align_stack(unsigned long sp);
6444+#define arch_align_stack(x) ((x) & ~0xfUL)
6445
6446 #endif /* _ASM_EXEC_H */
6447diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
6448index ce35c9a..434321c 100644
6449--- a/arch/mips/include/asm/ftrace.h
6450+++ b/arch/mips/include/asm/ftrace.h
6451@@ -22,12 +22,12 @@ extern void _mcount(void);
6452 #define safe_load(load, src, dst, error) \
6453 do { \
6454 asm volatile ( \
6455- "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
6456- " li %[" STR(error) "], 0\n" \
6457+ "1: " load " %[dest], 0(%[source])\n" \
6458+ " li %[err], 0\n" \
6459 "2:\n" \
6460 \
6461 ".section .fixup, \"ax\"\n" \
6462- "3: li %[" STR(error) "], 1\n" \
6463+ "3: li %[err], 1\n" \
6464 " j 2b\n" \
6465 ".previous\n" \
6466 \
6467@@ -35,8 +35,8 @@ do { \
6468 STR(PTR) "\t1b, 3b\n\t" \
6469 ".previous\n" \
6470 \
6471- : [dst] "=&r" (dst), [error] "=r" (error)\
6472- : [src] "r" (src) \
6473+ : [dest] "=&r" (dst), [err] "=r" (error)\
6474+ : [source] "r" (src) \
6475 : "memory" \
6476 ); \
6477 } while (0)
6478@@ -44,12 +44,12 @@ do { \
6479 #define safe_store(store, src, dst, error) \
6480 do { \
6481 asm volatile ( \
6482- "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
6483- " li %[" STR(error) "], 0\n" \
6484+ "1: " store " %[source], 0(%[dest])\n"\
6485+ " li %[err], 0\n" \
6486 "2:\n" \
6487 \
6488 ".section .fixup, \"ax\"\n" \
6489- "3: li %[" STR(error) "], 1\n" \
6490+ "3: li %[err], 1\n" \
6491 " j 2b\n" \
6492 ".previous\n" \
6493 \
6494@@ -57,8 +57,8 @@ do { \
6495 STR(PTR) "\t1b, 3b\n\t" \
6496 ".previous\n" \
6497 \
6498- : [error] "=r" (error) \
6499- : [dst] "r" (dst), [src] "r" (src)\
6500+ : [err] "=r" (error) \
6501+ : [dest] "r" (dst), [source] "r" (src)\
6502 : "memory" \
6503 ); \
6504 } while (0)
6505diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6506index 9e8ef59..1139d6b 100644
6507--- a/arch/mips/include/asm/hw_irq.h
6508+++ b/arch/mips/include/asm/hw_irq.h
6509@@ -10,7 +10,7 @@
6510
6511 #include <linux/atomic.h>
6512
6513-extern atomic_t irq_err_count;
6514+extern atomic_unchecked_t irq_err_count;
6515
6516 /*
6517 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6518diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6519index d44622c..64990d2 100644
6520--- a/arch/mips/include/asm/local.h
6521+++ b/arch/mips/include/asm/local.h
6522@@ -12,15 +12,25 @@ typedef struct
6523 atomic_long_t a;
6524 } local_t;
6525
6526+typedef struct {
6527+ atomic_long_unchecked_t a;
6528+} local_unchecked_t;
6529+
6530 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6531
6532 #define local_read(l) atomic_long_read(&(l)->a)
6533+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6534 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6535+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6536
6537 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6538+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6539 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6540+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6541 #define local_inc(l) atomic_long_inc(&(l)->a)
6542+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6543 #define local_dec(l) atomic_long_dec(&(l)->a)
6544+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6545
6546 /*
6547 * Same as above, but return the result value
6548@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6549 return result;
6550 }
6551
6552+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6553+{
6554+ unsigned long result;
6555+
6556+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6557+ unsigned long temp;
6558+
6559+ __asm__ __volatile__(
6560+ " .set mips3 \n"
6561+ "1:" __LL "%1, %2 # local_add_return \n"
6562+ " addu %0, %1, %3 \n"
6563+ __SC "%0, %2 \n"
6564+ " beqzl %0, 1b \n"
6565+ " addu %0, %1, %3 \n"
6566+ " .set mips0 \n"
6567+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6568+ : "Ir" (i), "m" (l->a.counter)
6569+ : "memory");
6570+ } else if (kernel_uses_llsc) {
6571+ unsigned long temp;
6572+
6573+ __asm__ __volatile__(
6574+ " .set mips3 \n"
6575+ "1:" __LL "%1, %2 # local_add_return \n"
6576+ " addu %0, %1, %3 \n"
6577+ __SC "%0, %2 \n"
6578+ " beqz %0, 1b \n"
6579+ " addu %0, %1, %3 \n"
6580+ " .set mips0 \n"
6581+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6582+ : "Ir" (i), "m" (l->a.counter)
6583+ : "memory");
6584+ } else {
6585+ unsigned long flags;
6586+
6587+ local_irq_save(flags);
6588+ result = l->a.counter;
6589+ result += i;
6590+ l->a.counter = result;
6591+ local_irq_restore(flags);
6592+ }
6593+
6594+ return result;
6595+}
6596+
6597 static __inline__ long local_sub_return(long i, local_t * l)
6598 {
6599 unsigned long result;
6600@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6601
6602 #define local_cmpxchg(l, o, n) \
6603 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6604+#define local_cmpxchg_unchecked(l, o, n) \
6605+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6606 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6607
6608 /**
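The local_add_return_unchecked() added above follows the canonical LL/SC shape: load-linked, compute, store-conditional, branch back if the reservation was lost. In portable C11 the same loop is a weak compare-exchange; a sketch:

    #include <stdatomic.h>
    #include <stdio.h>

    /* C11 analogue of the ll/addu/sc/beqz retry loop: keep retrying the
     * update until the store-conditional (here: a weak CAS) succeeds. */
    static long add_return(atomic_long *a, long i)
    {
        long old = atomic_load_explicit(a, memory_order_relaxed);

        while (!atomic_compare_exchange_weak_explicit(a, &old, old + i,
                                                      memory_order_acq_rel,
                                                      memory_order_relaxed))
            ;   /* "sc" failed: old has been refreshed, go around again */
        return old + i;
    }

    int main(void)
    {
        atomic_long v = 40;

        printf("%ld\n", add_return(&v, 2));  /* 42 */
        return 0;
    }
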
6609diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6610index f6be474..12ad554 100644
6611--- a/arch/mips/include/asm/page.h
6612+++ b/arch/mips/include/asm/page.h
6613@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6614 #ifdef CONFIG_CPU_MIPS32
6615 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6616 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6617- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6618+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6619 #else
6620 typedef struct { unsigned long long pte; } pte_t;
6621 #define pte_val(x) ((x).pte)
6622diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6623index b336037..5b874cc 100644
6624--- a/arch/mips/include/asm/pgalloc.h
6625+++ b/arch/mips/include/asm/pgalloc.h
6626@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6627 {
6628 set_pud(pud, __pud((unsigned long)pmd));
6629 }
6630+
6631+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6632+{
6633+ pud_populate(mm, pud, pmd);
6634+}
6635 #endif
6636
6637 /*
6638diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6639index 008324d..f67c239 100644
6640--- a/arch/mips/include/asm/pgtable.h
6641+++ b/arch/mips/include/asm/pgtable.h
6642@@ -20,6 +20,9 @@
6643 #include <asm/io.h>
6644 #include <asm/pgtable-bits.h>
6645
6646+#define ktla_ktva(addr) (addr)
6647+#define ktva_ktla(addr) (addr)
6648+
6649 struct mm_struct;
6650 struct vm_area_struct;
6651
6652diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
6653index 25da651..ae2a259 100644
6654--- a/arch/mips/include/asm/smtc_proc.h
6655+++ b/arch/mips/include/asm/smtc_proc.h
6656@@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6657
6658 /* Count of number of recoveries of "stolen" FPU access rights on 34K */
6659
6660-extern atomic_t smtc_fpu_recoveries;
6661+extern atomic_unchecked_t smtc_fpu_recoveries;
6662
6663 #endif /* __ASM_SMTC_PROC_H */
6664diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
6665index 81c8913..81d8432 100644
6666--- a/arch/mips/include/asm/syscall.h
6667+++ b/arch/mips/include/asm/syscall.h
6668@@ -29,7 +29,7 @@ static inline long syscall_get_nr(struct task_struct *task,
6669 static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
6670 struct task_struct *task, struct pt_regs *regs, unsigned int n)
6671 {
6672- unsigned long usp = regs->regs[29];
6673+ unsigned long usp __maybe_unused = regs->regs[29];
6674
6675 switch (n) {
6676 case 0: case 1: case 2: case 3:
6677@@ -39,14 +39,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
6678
6679 #ifdef CONFIG_32BIT
6680 case 4: case 5: case 6: case 7:
6681- return get_user(*arg, (int *)usp + 4 * n);
6682+ return get_user(*arg, (int *)usp + n);
6683 #endif
6684
6685 #ifdef CONFIG_64BIT
6686 case 4: case 5: case 6: case 7:
6687 #ifdef CONFIG_MIPS32_O32
6688 if (test_thread_flag(TIF_32BIT_REGS))
6689- return get_user(*arg, (int *)usp + 4 * n);
6690+ return get_user(*arg, (int *)usp + n);
6691 else
6692 #endif
6693 *arg = regs->regs[4 + n];
6694@@ -83,11 +83,10 @@ static inline void syscall_get_arguments(struct task_struct *task,
6695 unsigned int i, unsigned int n,
6696 unsigned long *args)
6697 {
6698- unsigned long arg;
6699 int ret;
6700
6701 while (n--)
6702- ret |= mips_get_syscall_arg(&arg, task, regs, i++);
6703+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
6704
6705 /*
6706 * No way to communicate an error because this is a void function.
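
The mips_get_syscall_arg() fix above is pure pointer arithmetic: usp is cast to int *, so + n already advances by n * sizeof(int) bytes, and the old + 4 * n quadrupled the stride, reading the wrong stack slots for arguments 5 through 8. A demonstration with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        int stack[32] = { 0, 1, 2, 3, 4, 5, 6, 7 };  /* fake user stack */
        int *usp = stack;
        unsigned int n = 4;                          /* syscall argument #5 */

        /* int * arithmetic already scales by sizeof(int): */
        printf("usp + n     -> slot %d, byte offset %zu\n",
               *(usp + n), (size_t)((char *)(usp + n) - (char *)usp));
        /* the old "+ 4 * n" landed four times too far into the stack: */
        printf("usp + 4 * n -> byte offset %zu (wrong slot)\n",
               (size_t)((char *)(usp + 4 * n) - (char *)usp));
        return 0;
    }
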
6707diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6708index 4f58ef6..5e7081b 100644
6709--- a/arch/mips/include/asm/thread_info.h
6710+++ b/arch/mips/include/asm/thread_info.h
6711@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
6712 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
6713 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6714 #define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
6715+/* li takes a 32-bit immediate */
6716+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6717 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6718
6719 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6720@@ -132,13 +134,14 @@ static inline struct thread_info *current_thread_info(void)
6721 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
6722 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6723 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6724+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6725
6726 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6727- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6728+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6729
6730 /* work to do in syscall_trace_leave() */
6731 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6732- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6733+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6734
6735 /* work to do on interrupt/exception return */
6736 #define _TIF_WORK_MASK \
6737@@ -146,7 +149,7 @@ static inline struct thread_info *current_thread_info(void)
6738 /* work to do on any return to u-space */
6739 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6740 _TIF_WORK_SYSCALL_EXIT | \
6741- _TIF_SYSCALL_TRACEPOINT)
6742+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6743
6744 /*
6745 * We stash processor id into a COP0 register to retrieve it fast
6746diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6747index f3fa375..3af6637 100644
6748--- a/arch/mips/include/asm/uaccess.h
6749+++ b/arch/mips/include/asm/uaccess.h
6750@@ -128,6 +128,7 @@ extern u64 __ua_limit;
6751 __ok == 0; \
6752 })
6753
6754+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6755 #define access_ok(type, addr, size) \
6756 likely(__access_ok((addr), (size), __access_mask))
6757
6758diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6759index 1188e00..41cf144 100644
6760--- a/arch/mips/kernel/binfmt_elfn32.c
6761+++ b/arch/mips/kernel/binfmt_elfn32.c
6762@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6763 #undef ELF_ET_DYN_BASE
6764 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6765
6766+#ifdef CONFIG_PAX_ASLR
6767+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6768+
6769+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6770+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6771+#endif
6772+
6773 #include <asm/processor.h>
6774 #include <linux/module.h>
6775 #include <linux/elfcore.h>
6776diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6777index 202e581..689ca79 100644
6778--- a/arch/mips/kernel/binfmt_elfo32.c
6779+++ b/arch/mips/kernel/binfmt_elfo32.c
6780@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6781 #undef ELF_ET_DYN_BASE
6782 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6783
6784+#ifdef CONFIG_PAX_ASLR
6785+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6786+
6787+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6788+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6789+#endif
6790+
6791 #include <asm/processor.h>
6792
6793 /*
6794diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
6795index 185ba25..374ed74 100644
6796--- a/arch/mips/kernel/ftrace.c
6797+++ b/arch/mips/kernel/ftrace.c
6798@@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
6799 safe_store_code(new_code1, ip, faulted);
6800 if (unlikely(faulted))
6801 return -EFAULT;
6802- ip += 4;
6803- safe_store_code(new_code2, ip, faulted);
6804+ safe_store_code(new_code2, ip + 4, faulted);
6805 if (unlikely(faulted))
6806 return -EFAULT;
6807- flush_icache_range(ip, ip + 8); /* original ip + 12 */
6808+ flush_icache_range(ip, ip + 8);
6809 return 0;
6810 }
6811 #endif
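
The ftrace change above is about the flushed window: the old code bumped ip between the two stores and then flushed starting at the bumped address, so [ip+4, ip+12) missed the first patched instruction and covered four bytes that were never written; with ip left alone, flush_icache_range(ip, ip + 8) spans exactly the two modified words. Illustrative arithmetic only (the real call is the arch's cache-flush routine):

    #include <stdio.h>

    int main(void)
    {
        unsigned long ip = 0x80001000UL;   /* hypothetical patch site */

        /* Two 4-byte instructions are written, at ip and ip + 4: */
        printf("modified bytes: [%#lx, %#lx)\n", ip, ip + 8);

        /* Old code bumped ip first, then flushed (ip, ip + 8): */
        unsigned long old_ip = ip + 4;
        printf("old flush:      [%#lx, %#lx)  misses the first word\n",
               old_ip, old_ip + 8);

        /* Fixed code flushes the range it actually wrote: */
        printf("new flush:      [%#lx, %#lx)\n", ip, ip + 8);
        return 0;
    }
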
6812diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6813index 2b91fe8..fe4f6b4 100644
6814--- a/arch/mips/kernel/i8259.c
6815+++ b/arch/mips/kernel/i8259.c
6816@@ -205,7 +205,7 @@ spurious_8259A_irq:
6817 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6818 spurious_irq_mask |= irqmask;
6819 }
6820- atomic_inc(&irq_err_count);
6821+ atomic_inc_unchecked(&irq_err_count);
6822 /*
6823 * Theoretically we do not have to handle this IRQ,
6824 * but in Linux this does not cause problems and is
6825diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6826index 44a1f79..2bd6aa3 100644
6827--- a/arch/mips/kernel/irq-gt641xx.c
6828+++ b/arch/mips/kernel/irq-gt641xx.c
6829@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6830 }
6831 }
6832
6833- atomic_inc(&irq_err_count);
6834+ atomic_inc_unchecked(&irq_err_count);
6835 }
6836
6837 void __init gt641xx_irq_init(void)
6838diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6839index d1fea7a..45602ea 100644
6840--- a/arch/mips/kernel/irq.c
6841+++ b/arch/mips/kernel/irq.c
6842@@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq)
6843 printk("unexpected IRQ # %d\n", irq);
6844 }
6845
6846-atomic_t irq_err_count;
6847+atomic_unchecked_t irq_err_count;
6848
6849 int arch_show_interrupts(struct seq_file *p, int prec)
6850 {
6851- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6852+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6853 return 0;
6854 }
6855
6856 asmlinkage void spurious_interrupt(void)
6857 {
6858- atomic_inc(&irq_err_count);
6859+ atomic_inc_unchecked(&irq_err_count);
6860 }
6861
6862 void __init init_IRQ(void)
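irq_err_count is a statistic, so wraparound is harmless; converting it to atomic_unchecked_t opts it out of the PAX_REFCOUNT overflow trap that protected atomic_t operations carry. Conceptually, as a simplified sketch (the real definitions live in the per-arch atomic headers):

/* Sketch: checked counters trap on signed overflow under PAX_REFCOUNT,
 * unchecked ones keep ordinary wrapping semantics for statistics. */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);  /* may wrap */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

The same substitution recurs wherever a counter is purely informational: the 8259 and GT-641xx hunks nearby, and the SMTC, R4k sync, IP27 NMI, RM200 and VR41xx hunks below.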
6863diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6864index ddc7610..8c58f17 100644
6865--- a/arch/mips/kernel/process.c
6866+++ b/arch/mips/kernel/process.c
6867@@ -566,15 +566,3 @@ unsigned long get_wchan(struct task_struct *task)
6868 out:
6869 return pc;
6870 }
6871-
6872-/*
6873- * Don't forget that the stack pointer must be aligned on a 8 bytes
6874- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6875- */
6876-unsigned long arch_align_stack(unsigned long sp)
6877-{
6878- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6879- sp -= get_random_int() & ~PAGE_MASK;
6880-
6881- return sp & ALMASK;
6882-}
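Removing the randomizing arch_align_stack() is deliberate: PaX performs its own stack randomization (see PAX_DELTA_STACK_LEN above), so the helper collapses to pure alignment. The PowerPC hunk later in this patch makes that explicit with a mask-only macro; the MIPS equivalent, assuming it mirrors that change and reuses the ALMASK from the removed code, would be:

/* Sketch: alignment only, no per-exec randomization, using the
 * existing MIPS ALMASK alignment mask. */
#define arch_align_stack(sp)    ((sp) & ALMASK)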
6883diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6884index b52e1d2..1a3ca09 100644
6885--- a/arch/mips/kernel/ptrace.c
6886+++ b/arch/mips/kernel/ptrace.c
6887@@ -652,6 +652,10 @@ long arch_ptrace(struct task_struct *child, long request,
6888 return ret;
6889 }
6890
6891+#ifdef CONFIG_GRKERNSEC_SETXID
6892+extern void gr_delayed_cred_worker(void);
6893+#endif
6894+
6895 /*
6896 * Notification of system call entry/exit
6897 * - triggered by current->work.syscall_trace
6898@@ -668,6 +672,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6899 tracehook_report_syscall_entry(regs))
6900 ret = -1;
6901
6902+#ifdef CONFIG_GRKERNSEC_SETXID
6903+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6904+ gr_delayed_cred_worker();
6905+#endif
6906+
6907 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6908 trace_sys_enter(regs, regs->regs[2]);
6909
6910diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6911index 07fc524..b9d7f28 100644
6912--- a/arch/mips/kernel/reset.c
6913+++ b/arch/mips/kernel/reset.c
6914@@ -13,6 +13,7 @@
6915 #include <linux/reboot.h>
6916
6917 #include <asm/reboot.h>
6918+#include <asm/bug.h>
6919
6920 /*
6921 * Urgs ... Too many MIPS machines to handle this in a generic way.
6922@@ -29,16 +30,19 @@ void machine_restart(char *command)
6923 {
6924 if (_machine_restart)
6925 _machine_restart(command);
6926+ BUG();
6927 }
6928
6929 void machine_halt(void)
6930 {
6931 if (_machine_halt)
6932 _machine_halt();
6933+ BUG();
6934 }
6935
6936 void machine_power_off(void)
6937 {
6938 if (pm_power_off)
6939 pm_power_off();
6940+ BUG();
6941 }
6942diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
6943index c10aa84..9ec2e60 100644
6944--- a/arch/mips/kernel/smtc-proc.c
6945+++ b/arch/mips/kernel/smtc-proc.c
6946@@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS];
6947
6948 struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6949
6950-atomic_t smtc_fpu_recoveries;
6951+atomic_unchecked_t smtc_fpu_recoveries;
6952
6953 static int smtc_proc_show(struct seq_file *m, void *v)
6954 {
6955@@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_file *m, void *v)
6956 for(i = 0; i < NR_CPUS; i++)
6957 seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
6958 seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
6959- atomic_read(&smtc_fpu_recoveries));
6960+ atomic_read_unchecked(&smtc_fpu_recoveries));
6961 return 0;
6962 }
6963
6964@@ -73,7 +73,7 @@ void init_smtc_stats(void)
6965 smtc_cpu_stats[i].selfipis = 0;
6966 }
6967
6968- atomic_set(&smtc_fpu_recoveries, 0);
6969+ atomic_set_unchecked(&smtc_fpu_recoveries, 0);
6970
6971 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
6972 }
6973diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
6974index dfc1b91..11a2c07 100644
6975--- a/arch/mips/kernel/smtc.c
6976+++ b/arch/mips/kernel/smtc.c
6977@@ -1359,7 +1359,7 @@ void smtc_soft_dump(void)
6978 }
6979 smtc_ipi_qdump();
6980 printk("%d Recoveries of \"stolen\" FPU\n",
6981- atomic_read(&smtc_fpu_recoveries));
6982+ atomic_read_unchecked(&smtc_fpu_recoveries));
6983 }
6984
6985
6986diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6987index 84536bf..79caa4d 100644
6988--- a/arch/mips/kernel/sync-r4k.c
6989+++ b/arch/mips/kernel/sync-r4k.c
6990@@ -21,8 +21,8 @@
6991 #include <asm/mipsregs.h>
6992
6993 static atomic_t count_start_flag = ATOMIC_INIT(0);
6994-static atomic_t count_count_start = ATOMIC_INIT(0);
6995-static atomic_t count_count_stop = ATOMIC_INIT(0);
6996+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6997+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6998 static atomic_t count_reference = ATOMIC_INIT(0);
6999
7000 #define COUNTON 100
7001@@ -69,13 +69,13 @@ void synchronise_count_master(int cpu)
7002
7003 for (i = 0; i < NR_LOOPS; i++) {
7004 /* slaves loop on '!= 2' */
7005- while (atomic_read(&count_count_start) != 1)
7006+ while (atomic_read_unchecked(&count_count_start) != 1)
7007 mb();
7008- atomic_set(&count_count_stop, 0);
7009+ atomic_set_unchecked(&count_count_stop, 0);
7010 smp_wmb();
7011
7012 /* this lets the slaves write their count register */
7013- atomic_inc(&count_count_start);
7014+ atomic_inc_unchecked(&count_count_start);
7015
7016 /*
7017 * Everyone initialises count in the last loop:
7018@@ -86,11 +86,11 @@ void synchronise_count_master(int cpu)
7019 /*
7020 * Wait for all slaves to leave the synchronization point:
7021 */
7022- while (atomic_read(&count_count_stop) != 1)
7023+ while (atomic_read_unchecked(&count_count_stop) != 1)
7024 mb();
7025- atomic_set(&count_count_start, 0);
7026+ atomic_set_unchecked(&count_count_start, 0);
7027 smp_wmb();
7028- atomic_inc(&count_count_stop);
7029+ atomic_inc_unchecked(&count_count_stop);
7030 }
7031 /* Arrange for an interrupt in a short while */
7032 write_c0_compare(read_c0_count() + COUNTON);
7033@@ -131,8 +131,8 @@ void synchronise_count_slave(int cpu)
7034 initcount = atomic_read(&count_reference);
7035
7036 for (i = 0; i < NR_LOOPS; i++) {
7037- atomic_inc(&count_count_start);
7038- while (atomic_read(&count_count_start) != 2)
7039+ atomic_inc_unchecked(&count_count_start);
7040+ while (atomic_read_unchecked(&count_count_start) != 2)
7041 mb();
7042
7043 /*
7044@@ -141,8 +141,8 @@ void synchronise_count_slave(int cpu)
7045 if (i == NR_LOOPS-1)
7046 write_c0_count(initcount);
7047
7048- atomic_inc(&count_count_stop);
7049- while (atomic_read(&count_count_stop) != 2)
7050+ atomic_inc_unchecked(&count_count_stop);
7051+ while (atomic_read_unchecked(&count_count_stop) != 2)
7052 mb();
7053 }
7054 /* Arrange for an interrupt in a short while */
7055diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7056index f9c8746..78b64e3 100644
7057--- a/arch/mips/kernel/traps.c
7058+++ b/arch/mips/kernel/traps.c
7059@@ -690,7 +690,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7060 siginfo_t info;
7061
7062 prev_state = exception_enter();
7063- die_if_kernel("Integer overflow", regs);
7064+ if (unlikely(!user_mode(regs))) {
7065+
7066+#ifdef CONFIG_PAX_REFCOUNT
7067+ if (fixup_exception(regs)) {
7068+ pax_report_refcount_overflow(regs);
7069+ exception_exit(prev_state);
7070+ return;
7071+ }
7072+#endif
7073+
7074+ die("Integer overflow", regs);
7075+ }
7076
7077 info.si_code = FPE_INTOVF;
7078 info.si_signo = SIGFPE;
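This hunk is the handler-side half of PAX_REFCOUNT on MIPS: the protected atomic ops are built from the trapping add instructions (add/dadd rather than addu/daddu), so a wrapping reference count raises the integer-overflow exception and arrives in do_ov(), where fixup_exception() rewinds to the op's fixup stub and the event is reported instead of silently wrapping. A simplified sketch of such a protected increment (the real version also registers an exception-table entry for the add):

/* Sketch: 'add' (unlike 'addu') traps on signed overflow, which the
 * patched do_ov() above converts into a refcount-overflow report. */
static inline void atomic_inc_checked(atomic_t *v)
{
        int temp;

        __asm__ __volatile__(
        "1:     ll      %0, %1          \n"
        "       add     %0, %2          \n"     /* traps on overflow */
        "       sc      %0, %1          \n"
        "       beqz    %0, 1b          \n"
        : "=&r" (temp), "+m" (v->counter)
        : "Ir" (1));
}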
7079diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7080index becc42b..9e43d4b 100644
7081--- a/arch/mips/mm/fault.c
7082+++ b/arch/mips/mm/fault.c
7083@@ -28,6 +28,23 @@
7084 #include <asm/highmem.h> /* For VMALLOC_END */
7085 #include <linux/kdebug.h>
7086
7087+#ifdef CONFIG_PAX_PAGEEXEC
7088+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7089+{
7090+ unsigned long i;
7091+
7092+ printk(KERN_ERR "PAX: bytes at PC: ");
7093+ for (i = 0; i < 5; i++) {
7094+ unsigned int c;
7095+ if (get_user(c, (unsigned int *)pc+i))
7096+ printk(KERN_CONT "???????? ");
7097+ else
7098+ printk(KERN_CONT "%08x ", c);
7099+ }
7100+ printk("\n");
7101+}
7102+#endif
7103+
7104 /*
7105 * This routine handles page faults. It determines the address,
7106 * and the problem, and then passes it off to one of the appropriate
7107@@ -199,6 +216,14 @@ bad_area:
7108 bad_area_nosemaphore:
7109 /* User mode accesses just cause a SIGSEGV */
7110 if (user_mode(regs)) {
7111+
7112+#ifdef CONFIG_PAX_PAGEEXEC
7113+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7114+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7115+ do_group_exit(SIGKILL);
7116+ }
7117+#endif
7118+
7119 tsk->thread.cp0_badvaddr = address;
7120 tsk->thread.error_code = write;
7121 #if 0
7122diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7123index f1baadd..5472dca 100644
7124--- a/arch/mips/mm/mmap.c
7125+++ b/arch/mips/mm/mmap.c
7126@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7127 struct vm_area_struct *vma;
7128 unsigned long addr = addr0;
7129 int do_color_align;
7130+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7131 struct vm_unmapped_area_info info;
7132
7133 if (unlikely(len > TASK_SIZE))
7134@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7135 do_color_align = 1;
7136
7137 /* requesting a specific address */
7138+
7139+#ifdef CONFIG_PAX_RANDMMAP
7140+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7141+#endif
7142+
7143 if (addr) {
7144 if (do_color_align)
7145 addr = COLOUR_ALIGN(addr, pgoff);
7146@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7147 addr = PAGE_ALIGN(addr);
7148
7149 vma = find_vma(mm, addr);
7150- if (TASK_SIZE - len >= addr &&
7151- (!vma || addr + len <= vma->vm_start))
7152+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7153 return addr;
7154 }
7155
7156 info.length = len;
7157 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7158 info.align_offset = pgoff << PAGE_SHIFT;
7159+ info.threadstack_offset = offset;
7160
7161 if (dir == DOWN) {
7162 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7163@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7164 {
7165 unsigned long random_factor = 0UL;
7166
7167+#ifdef CONFIG_PAX_RANDMMAP
7168+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7169+#endif
7170+
7171 if (current->flags & PF_RANDOMIZE) {
7172 random_factor = get_random_int();
7173 random_factor = random_factor << PAGE_SHIFT;
7174@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7175
7176 if (mmap_is_legacy()) {
7177 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7178+
7179+#ifdef CONFIG_PAX_RANDMMAP
7180+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7181+ mm->mmap_base += mm->delta_mmap;
7182+#endif
7183+
7184 mm->get_unmapped_area = arch_get_unmapped_area;
7185 } else {
7186 mm->mmap_base = mmap_base(random_factor);
7187+
7188+#ifdef CONFIG_PAX_RANDMMAP
7189+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7190+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7191+#endif
7192+
7193 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7194 }
7195 }
7196
7197-static inline unsigned long brk_rnd(void)
7198-{
7199- unsigned long rnd = get_random_int();
7200-
7201- rnd = rnd << PAGE_SHIFT;
7202- /* 8MB for 32bit, 256MB for 64bit */
7203- if (TASK_IS_32BIT_ADDR)
7204- rnd = rnd & 0x7ffffful;
7205- else
7206- rnd = rnd & 0xffffffful;
7207-
7208- return rnd;
7209-}
7210-
7211-unsigned long arch_randomize_brk(struct mm_struct *mm)
7212-{
7213- unsigned long base = mm->brk;
7214- unsigned long ret;
7215-
7216- ret = PAGE_ALIGN(base + brk_rnd());
7217-
7218- if (ret < mm->brk)
7219- return mm->brk;
7220-
7221- return ret;
7222-}
7223-
7224 int __virt_addr_valid(const volatile void *kaddr)
7225 {
7226 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
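Two things happen in this file: the legacy "!vma || addr + len <= vma->vm_start" test becomes check_heap_stack_gap(), which also accounts for the per-thread offset now carried in info.threadstack_offset, and arch_randomize_brk()/brk_rnd() disappear because PaX randomizes the brk generically in the ELF loader. A rough equivalent of the gap test, as a sketch only (the real helper additionally honors the heap-stack gap sysctl and VM_GROWSDOWN):

/* Sketch: accept addr only if the new mapping fits below the next VMA
 * with 'offset' bytes of slack reserved for thread-stack placement. */
static bool gap_ok(const struct vm_area_struct *vma, unsigned long addr,
                   unsigned long len, unsigned long offset)
{
        if (!vma)
                return true;            /* nothing mapped above us */
        return addr + len + offset <= vma->vm_start;
}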
7227diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7228index 59cccd9..f39ac2f 100644
7229--- a/arch/mips/pci/pci-octeon.c
7230+++ b/arch/mips/pci/pci-octeon.c
7231@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7232
7233
7234 static struct pci_ops octeon_pci_ops = {
7235- octeon_read_config,
7236- octeon_write_config,
7237+ .read = octeon_read_config,
7238+ .write = octeon_write_config,
7239 };
7240
7241 static struct resource octeon_pci_mem_resource = {
7242diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7243index 5e36c33..eb4a17b 100644
7244--- a/arch/mips/pci/pcie-octeon.c
7245+++ b/arch/mips/pci/pcie-octeon.c
7246@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7247 }
7248
7249 static struct pci_ops octeon_pcie0_ops = {
7250- octeon_pcie0_read_config,
7251- octeon_pcie0_write_config,
7252+ .read = octeon_pcie0_read_config,
7253+ .write = octeon_pcie0_write_config,
7254 };
7255
7256 static struct resource octeon_pcie0_mem_resource = {
7257@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7258 };
7259
7260 static struct pci_ops octeon_pcie1_ops = {
7261- octeon_pcie1_read_config,
7262- octeon_pcie1_write_config,
7263+ .read = octeon_pcie1_read_config,
7264+ .write = octeon_pcie1_write_config,
7265 };
7266
7267 static struct resource octeon_pcie1_mem_resource = {
7268@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7269 };
7270
7271 static struct pci_ops octeon_dummy_ops = {
7272- octeon_dummy_read_config,
7273- octeon_dummy_write_config,
7274+ .read = octeon_dummy_read_config,
7275+ .write = octeon_dummy_write_config,
7276 };
7277
7278 static struct resource octeon_dummy_mem_resource = {
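All four pci_ops conversions here swap positional initializers for designated ones: if struct pci_ops ever gains or reorders members, a positional form silently binds functions to the wrong slots, while a designated form either stays correct or fails to compile. The pattern, using the functions from the hunks above:

/* Sketch: designated initializers are order-independent and
 * self-documenting. */
static struct pci_ops example_pci_ops = {
        .read   = octeon_read_config,
        .write  = octeon_write_config,
};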
7279diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7280index a2358b4..7cead4f 100644
7281--- a/arch/mips/sgi-ip27/ip27-nmi.c
7282+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7283@@ -187,9 +187,9 @@ void
7284 cont_nmi_dump(void)
7285 {
7286 #ifndef REAL_NMI_SIGNAL
7287- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7288+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7289
7290- atomic_inc(&nmied_cpus);
7291+ atomic_inc_unchecked(&nmied_cpus);
7292 #endif
7293 /*
7294 * Only allow 1 cpu to proceed
7295@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7296 udelay(10000);
7297 }
7298 #else
7299- while (atomic_read(&nmied_cpus) != num_online_cpus());
7300+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7301 #endif
7302
7303 /*
7304diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7305index a046b30..6799527 100644
7306--- a/arch/mips/sni/rm200.c
7307+++ b/arch/mips/sni/rm200.c
7308@@ -270,7 +270,7 @@ spurious_8259A_irq:
7309 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7310 spurious_irq_mask |= irqmask;
7311 }
7312- atomic_inc(&irq_err_count);
7313+ atomic_inc_unchecked(&irq_err_count);
7314 /*
7315 * Theoretically we do not have to handle this IRQ,
7316 * but in Linux this does not cause problems and is
7317diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7318index 41e873b..34d33a7 100644
7319--- a/arch/mips/vr41xx/common/icu.c
7320+++ b/arch/mips/vr41xx/common/icu.c
7321@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7322
7323 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7324
7325- atomic_inc(&irq_err_count);
7326+ atomic_inc_unchecked(&irq_err_count);
7327
7328 return -1;
7329 }
7330diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7331index ae0e4ee..e8f0692 100644
7332--- a/arch/mips/vr41xx/common/irq.c
7333+++ b/arch/mips/vr41xx/common/irq.c
7334@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7335 irq_cascade_t *cascade;
7336
7337 if (irq >= NR_IRQS) {
7338- atomic_inc(&irq_err_count);
7339+ atomic_inc_unchecked(&irq_err_count);
7340 return;
7341 }
7342
7343@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7344 ret = cascade->get_irq(irq);
7345 irq = ret;
7346 if (ret < 0)
7347- atomic_inc(&irq_err_count);
7348+ atomic_inc_unchecked(&irq_err_count);
7349 else
7350 irq_dispatch(irq);
7351 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7352diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7353index 967d144..db12197 100644
7354--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7355+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7356@@ -11,12 +11,14 @@
7357 #ifndef _ASM_PROC_CACHE_H
7358 #define _ASM_PROC_CACHE_H
7359
7360+#include <linux/const.h>
7361+
7362 /* L1 cache */
7363
7364 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7365 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7366-#define L1_CACHE_BYTES 16 /* bytes per entry */
7367 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7368+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7369 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7370
7371 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
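Deriving L1_CACHE_BYTES from L1_CACHE_SHIFT with _AC(1,UL) keeps the two constants in lockstep and gives the value unsigned long type in C while still expanding to a plain shifted 1 in assembly, which cannot parse the UL suffix; that is exactly what _AC() from <linux/const.h> exists for, and the same change repeats below for mn2ws0050, OpenRISC, PA-RISC and PowerPC. In expanded form, simplified from the real header:

/* Sketch of _AC(): suffix-paste in C, bare constant in assembly. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)       X               /* assembler: no C integer suffixes */
#else
#define __AC(X, Y)      (X##Y)
#define _AC(X, Y)       __AC(X, Y)      /* C: yields 1UL, properly typed */
#endif

#define L1_CACHE_SHIFT  4
#define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)   /* 16UL in C */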
7372diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7373index bcb5df2..84fabd2 100644
7374--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7375+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7376@@ -16,13 +16,15 @@
7377 #ifndef _ASM_PROC_CACHE_H
7378 #define _ASM_PROC_CACHE_H
7379
7380+#include <linux/const.h>
7381+
7382 /*
7383 * L1 cache
7384 */
7385 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7386 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7387-#define L1_CACHE_BYTES 32 /* bytes per entry */
7388 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7389+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7390 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7391
7392 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7393diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7394index 4ce7a01..449202a 100644
7395--- a/arch/openrisc/include/asm/cache.h
7396+++ b/arch/openrisc/include/asm/cache.h
7397@@ -19,11 +19,13 @@
7398 #ifndef __ASM_OPENRISC_CACHE_H
7399 #define __ASM_OPENRISC_CACHE_H
7400
7401+#include <linux/const.h>
7402+
7403 /* FIXME: How can we replace these with values from the CPU...
7404 * they shouldn't be hard-coded!
7405 */
7406
7407-#define L1_CACHE_BYTES 16
7408 #define L1_CACHE_SHIFT 4
7409+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7410
7411 #endif /* __ASM_OPENRISC_CACHE_H */
7412diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7413index 472886c..00e7df9 100644
7414--- a/arch/parisc/include/asm/atomic.h
7415+++ b/arch/parisc/include/asm/atomic.h
7416@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7417 return dec;
7418 }
7419
7420+#define atomic64_read_unchecked(v) atomic64_read(v)
7421+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7422+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7423+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7424+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7425+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7426+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7427+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7428+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7429+
7430 #endif /* !CONFIG_64BIT */
7431
7432
7433diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7434index 47f11c7..3420df2 100644
7435--- a/arch/parisc/include/asm/cache.h
7436+++ b/arch/parisc/include/asm/cache.h
7437@@ -5,6 +5,7 @@
7438 #ifndef __ARCH_PARISC_CACHE_H
7439 #define __ARCH_PARISC_CACHE_H
7440
7441+#include <linux/const.h>
7442
7443 /*
7444 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7445@@ -15,13 +16,13 @@
7446 * just ruin performance.
7447 */
7448 #ifdef CONFIG_PA20
7449-#define L1_CACHE_BYTES 64
7450 #define L1_CACHE_SHIFT 6
7451 #else
7452-#define L1_CACHE_BYTES 32
7453 #define L1_CACHE_SHIFT 5
7454 #endif
7455
7456+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7457+
7458 #ifndef __ASSEMBLY__
7459
7460 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7461diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7462index ad2b503..bdf1651 100644
7463--- a/arch/parisc/include/asm/elf.h
7464+++ b/arch/parisc/include/asm/elf.h
7465@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7466
7467 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7468
7469+#ifdef CONFIG_PAX_ASLR
7470+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7471+
7472+#define PAX_DELTA_MMAP_LEN 16
7473+#define PAX_DELTA_STACK_LEN 16
7474+#endif
7475+
7476 /* This yields a mask that user programs can use to figure out what
7477 instruction set this CPU supports. This could be done in user space,
7478 but it's not easy, and we've already done it here. */
7479diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7480index f213f5b..0af3e8e 100644
7481--- a/arch/parisc/include/asm/pgalloc.h
7482+++ b/arch/parisc/include/asm/pgalloc.h
7483@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7484 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7485 }
7486
7487+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7488+{
7489+ pgd_populate(mm, pgd, pmd);
7490+}
7491+
7492 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7493 {
7494 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7495@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7496 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7497 #define pmd_free(mm, x) do { } while (0)
7498 #define pgd_populate(mm, pmd, pte) BUG()
7499+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7500
7501 #endif
7502
7503diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7504index 34899b5..02dd060 100644
7505--- a/arch/parisc/include/asm/pgtable.h
7506+++ b/arch/parisc/include/asm/pgtable.h
7507@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7508 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7509 #define PAGE_COPY PAGE_EXECREAD
7510 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7511+
7512+#ifdef CONFIG_PAX_PAGEEXEC
7513+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7514+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7515+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7516+#else
7517+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7518+# define PAGE_COPY_NOEXEC PAGE_COPY
7519+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7520+#endif
7521+
7522 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7523 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7524 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7525diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7526index 4006964..fcb3cc2 100644
7527--- a/arch/parisc/include/asm/uaccess.h
7528+++ b/arch/parisc/include/asm/uaccess.h
7529@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7530 const void __user *from,
7531 unsigned long n)
7532 {
7533- int sz = __compiletime_object_size(to);
7534+ size_t sz = __compiletime_object_size(to);
7535 int ret = -EFAULT;
7536
7537- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7538+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7539 ret = __copy_from_user(to, from, n);
7540 else
7541 copy_from_user_overflow();
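__compiletime_object_size() reports "unknown" as (size_t)-1, so storing it in an int both truncates genuine sizes above INT_MAX and makes the sentinel comparison depend on implicit sign conversion; widening sz to size_t makes the test exact. A self-contained demonstration of the sentinel-width point:

#include <assert.h>
#include <stddef.h>

int main(void)
{
        size_t sz = (size_t)-1;         /* "size unknown" sentinel      */
        int old = -1;                   /* previous, narrower type      */
        assert(sz == (size_t)-1);       /* exact, width-correct test    */
        assert((size_t)old == sz);      /* the old test only worked via
                                           this implicit sign extension */
        return 0;
}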
7542diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7543index 50dfafc..b9fc230 100644
7544--- a/arch/parisc/kernel/module.c
7545+++ b/arch/parisc/kernel/module.c
7546@@ -98,16 +98,38 @@
7547
7548 /* three functions to determine where in the module core
7549 * or init pieces the location is */
7550+static inline int in_init_rx(struct module *me, void *loc)
7551+{
7552+ return (loc >= me->module_init_rx &&
7553+ loc < (me->module_init_rx + me->init_size_rx));
7554+}
7555+
7556+static inline int in_init_rw(struct module *me, void *loc)
7557+{
7558+ return (loc >= me->module_init_rw &&
7559+ loc < (me->module_init_rw + me->init_size_rw));
7560+}
7561+
7562 static inline int in_init(struct module *me, void *loc)
7563 {
7564- return (loc >= me->module_init &&
7565- loc <= (me->module_init + me->init_size));
7566+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7567+}
7568+
7569+static inline int in_core_rx(struct module *me, void *loc)
7570+{
7571+ return (loc >= me->module_core_rx &&
7572+ loc < (me->module_core_rx + me->core_size_rx));
7573+}
7574+
7575+static inline int in_core_rw(struct module *me, void *loc)
7576+{
7577+ return (loc >= me->module_core_rw &&
7578+ loc < (me->module_core_rw + me->core_size_rw));
7579 }
7580
7581 static inline int in_core(struct module *me, void *loc)
7582 {
7583- return (loc >= me->module_core &&
7584- loc <= (me->module_core + me->core_size));
7585+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7586 }
7587
7588 static inline int in_local(struct module *me, void *loc)
7589@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7590 }
7591
7592 /* align things a bit */
7593- me->core_size = ALIGN(me->core_size, 16);
7594- me->arch.got_offset = me->core_size;
7595- me->core_size += gots * sizeof(struct got_entry);
7596+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7597+ me->arch.got_offset = me->core_size_rw;
7598+ me->core_size_rw += gots * sizeof(struct got_entry);
7599
7600- me->core_size = ALIGN(me->core_size, 16);
7601- me->arch.fdesc_offset = me->core_size;
7602- me->core_size += fdescs * sizeof(Elf_Fdesc);
7603+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7604+ me->arch.fdesc_offset = me->core_size_rw;
7605+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7606
7607 me->arch.got_max = gots;
7608 me->arch.fdesc_max = fdescs;
7609@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7610
7611 BUG_ON(value == 0);
7612
7613- got = me->module_core + me->arch.got_offset;
7614+ got = me->module_core_rw + me->arch.got_offset;
7615 for (i = 0; got[i].addr; i++)
7616 if (got[i].addr == value)
7617 goto out;
7618@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7619 #ifdef CONFIG_64BIT
7620 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7621 {
7622- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7623+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7624
7625 if (!value) {
7626 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7627@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7628
7629 /* Create new one */
7630 fdesc->addr = value;
7631- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7632+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7633 return (Elf_Addr)fdesc;
7634 }
7635 #endif /* CONFIG_64BIT */
7636@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7637
7638 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7639 end = table + sechdrs[me->arch.unwind_section].sh_size;
7640- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7641+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7642
7643 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7644 me->arch.unwind_section, table, end, gp);
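PaX splits each module into an RX region (code: read-only and executable) and an RW region (data: writable and non-executable), so module_core/module_init and their sizes each become a pair, "is this address inside the module?" becomes a union of two range checks, and writable bookkeeping such as the GOT and function descriptors migrates to the RW half. The containment logic reduces to a sketch like:

/* Sketch: membership test over a split RX/RW module allocation. */
static inline int in_range(const void *loc, const void *base,
                           unsigned long size)
{
        return loc >= base && loc < base + size;
}

static inline int in_core_split(const struct module *me, const void *loc)
{
        return in_range(loc, me->module_core_rx, me->core_size_rx) ||
               in_range(loc, me->module_core_rw, me->core_size_rw);
}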
7645diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7646index 0d3a9d4..20a99b0 100644
7647--- a/arch/parisc/kernel/sys_parisc.c
7648+++ b/arch/parisc/kernel/sys_parisc.c
7649@@ -33,9 +33,11 @@
7650 #include <linux/utsname.h>
7651 #include <linux/personality.h>
7652
7653-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7654+static unsigned long get_unshared_area(unsigned long addr, unsigned long len,
7655+ unsigned long flags)
7656 {
7657 struct vm_unmapped_area_info info;
7658+ unsigned long offset = gr_rand_threadstack_offset(current->mm, NULL, flags);
7659
7660 info.flags = 0;
7661 info.length = len;
7662@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7663 info.high_limit = TASK_SIZE;
7664 info.align_mask = 0;
7665 info.align_offset = 0;
7666+ info.threadstack_offset = offset;
7667 return vm_unmapped_area(&info);
7668 }
7669
7670@@ -69,9 +72,10 @@ static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
7671 }
7672
7673 static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7674- unsigned long len, unsigned long pgoff)
7675+ unsigned long len, unsigned long pgoff, unsigned long flags)
7676 {
7677 struct vm_unmapped_area_info info;
7678+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7679
7680 info.flags = 0;
7681 info.length = len;
7682@@ -79,6 +83,7 @@ static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7683 info.high_limit = TASK_SIZE;
7684 info.align_mask = PAGE_MASK & (SHMLBA - 1);
7685 info.align_offset = shared_align_offset(filp, pgoff);
7686+ info.threadstack_offset = offset;
7687 return vm_unmapped_area(&info);
7688 }
7689
7690@@ -93,13 +98,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7691 return -EINVAL;
7692 return addr;
7693 }
7694- if (!addr)
7695+ if (!addr) {
7696 addr = TASK_UNMAPPED_BASE;
7697
7698+#ifdef CONFIG_PAX_RANDMMAP
7699+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
7700+ addr += current->mm->delta_mmap;
7701+#endif
7702+
7703+ }
7704+
7705 if (filp || (flags & MAP_SHARED))
7706- addr = get_shared_area(filp, addr, len, pgoff);
7707+ addr = get_shared_area(filp, addr, len, pgoff, flags);
7708 else
7709- addr = get_unshared_area(addr, len);
7710+ addr = get_unshared_area(addr, len, flags);
7711
7712 return addr;
7713 }
7714diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7715index 1cd1d0c..44ec918 100644
7716--- a/arch/parisc/kernel/traps.c
7717+++ b/arch/parisc/kernel/traps.c
7718@@ -722,9 +722,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7719
7720 down_read(&current->mm->mmap_sem);
7721 vma = find_vma(current->mm,regs->iaoq[0]);
7722- if (vma && (regs->iaoq[0] >= vma->vm_start)
7723- && (vma->vm_flags & VM_EXEC)) {
7724-
7725+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7726 fault_address = regs->iaoq[0];
7727 fault_space = regs->iasq[0];
7728
7729diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7730index 9d08c71..e2b4d20 100644
7731--- a/arch/parisc/mm/fault.c
7732+++ b/arch/parisc/mm/fault.c
7733@@ -15,6 +15,7 @@
7734 #include <linux/sched.h>
7735 #include <linux/interrupt.h>
7736 #include <linux/module.h>
7737+#include <linux/unistd.h>
7738
7739 #include <asm/uaccess.h>
7740 #include <asm/traps.h>
7741@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
7742 static unsigned long
7743 parisc_acctyp(unsigned long code, unsigned int inst)
7744 {
7745- if (code == 6 || code == 16)
7746+ if (code == 6 || code == 7 || code == 16)
7747 return VM_EXEC;
7748
7749 switch (inst & 0xf0000000) {
7750@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7751 }
7752 #endif
7753
7754+#ifdef CONFIG_PAX_PAGEEXEC
7755+/*
7756+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7757+ *
7758+ * returns 1 when task should be killed
7759+ * 2 when rt_sigreturn trampoline was detected
7760+ * 3 when unpatched PLT trampoline was detected
7761+ */
7762+static int pax_handle_fetch_fault(struct pt_regs *regs)
7763+{
7764+
7765+#ifdef CONFIG_PAX_EMUPLT
7766+ int err;
7767+
7768+ do { /* PaX: unpatched PLT emulation */
7769+ unsigned int bl, depwi;
7770+
7771+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7772+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7773+
7774+ if (err)
7775+ break;
7776+
7777+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7778+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7779+
7780+ err = get_user(ldw, (unsigned int *)addr);
7781+ err |= get_user(bv, (unsigned int *)(addr+4));
7782+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7783+
7784+ if (err)
7785+ break;
7786+
7787+ if (ldw == 0x0E801096U &&
7788+ bv == 0xEAC0C000U &&
7789+ ldw2 == 0x0E881095U)
7790+ {
7791+ unsigned int resolver, map;
7792+
7793+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7794+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7795+ if (err)
7796+ break;
7797+
7798+ regs->gr[20] = instruction_pointer(regs)+8;
7799+ regs->gr[21] = map;
7800+ regs->gr[22] = resolver;
7801+ regs->iaoq[0] = resolver | 3UL;
7802+ regs->iaoq[1] = regs->iaoq[0] + 4;
7803+ return 3;
7804+ }
7805+ }
7806+ } while (0);
7807+#endif
7808+
7809+#ifdef CONFIG_PAX_EMUTRAMP
7810+
7811+#ifndef CONFIG_PAX_EMUSIGRT
7812+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7813+ return 1;
7814+#endif
7815+
7816+ do { /* PaX: rt_sigreturn emulation */
7817+ unsigned int ldi1, ldi2, bel, nop;
7818+
7819+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7820+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7821+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7822+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7823+
7824+ if (err)
7825+ break;
7826+
7827+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7828+ ldi2 == 0x3414015AU &&
7829+ bel == 0xE4008200U &&
7830+ nop == 0x08000240U)
7831+ {
7832+ regs->gr[25] = (ldi1 & 2) >> 1;
7833+ regs->gr[20] = __NR_rt_sigreturn;
7834+ regs->gr[31] = regs->iaoq[1] + 16;
7835+ regs->sr[0] = regs->iasq[1];
7836+ regs->iaoq[0] = 0x100UL;
7837+ regs->iaoq[1] = regs->iaoq[0] + 4;
7838+ regs->iasq[0] = regs->sr[2];
7839+ regs->iasq[1] = regs->sr[2];
7840+ return 2;
7841+ }
7842+ } while (0);
7843+#endif
7844+
7845+ return 1;
7846+}
7847+
7848+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7849+{
7850+ unsigned long i;
7851+
7852+ printk(KERN_ERR "PAX: bytes at PC: ");
7853+ for (i = 0; i < 5; i++) {
7854+ unsigned int c;
7855+ if (get_user(c, (unsigned int *)pc+i))
7856+ printk(KERN_CONT "???????? ");
7857+ else
7858+ printk(KERN_CONT "%08x ", c);
7859+ }
7860+ printk("\n");
7861+}
7862+#endif
7863+
7864 int fixup_exception(struct pt_regs *regs)
7865 {
7866 const struct exception_table_entry *fix;
7867@@ -210,8 +321,33 @@ retry:
7868
7869 good_area:
7870
7871- if ((vma->vm_flags & acc_type) != acc_type)
7872+ if ((vma->vm_flags & acc_type) != acc_type) {
7873+
7874+#ifdef CONFIG_PAX_PAGEEXEC
7875+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7876+ (address & ~3UL) == instruction_pointer(regs))
7877+ {
7878+ up_read(&mm->mmap_sem);
7879+ switch (pax_handle_fetch_fault(regs)) {
7880+
7881+#ifdef CONFIG_PAX_EMUPLT
7882+ case 3:
7883+ return;
7884+#endif
7885+
7886+#ifdef CONFIG_PAX_EMUTRAMP
7887+ case 2:
7888+ return;
7889+#endif
7890+
7891+ }
7892+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7893+ do_group_exit(SIGKILL);
7894+ }
7895+#endif
7896+
7897 goto bad_area;
7898+ }
7899
7900 /*
7901 * If for any reason at all we couldn't handle the fault, make
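The EMUPLT and EMUSIGRT paths above share one shape: fetch the instruction words around the faulting PC with get_user(), compare them against the fixed encodings of a known trampoline, and on a match perform the trampoline's effect on the register file by hand, since the page itself must stay non-executable; anything unrecognized falls through to the kill path. The skeleton, with KNOWN_STUB a hypothetical placeholder rather than a real PA-RISC opcode:

/* Sketch of the shared emulation skeleton: returns 1 = kill task,
 * >1 = emulated. The constant is a placeholder for illustration. */
#define KNOWN_STUB 0x00000000U

static int emulate_known_stub(struct pt_regs *regs)
{
        unsigned int insn;

        if (get_user(insn, (unsigned int __user *)instruction_pointer(regs)))
                return 1;               /* unreadable: kill             */
        if (insn != KNOWN_STUB)
                return 1;               /* unrecognized code: kill      */
        /* mimic the stub: adjust regs->iaoq[], regs->gr[], then resume */
        return 2;
}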
7902diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7903index b44b52c..4cd253c 100644
7904--- a/arch/powerpc/Kconfig
7905+++ b/arch/powerpc/Kconfig
7906@@ -382,6 +382,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
7907 config KEXEC
7908 bool "kexec system call"
7909 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7910+ depends on !GRKERNSEC_KMEM
7911 help
7912 kexec is a system call that implements the ability to shutdown your
7913 current kernel, and to start another kernel. It is like a reboot
7914diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7915index e3b1d41..8e81edf 100644
7916--- a/arch/powerpc/include/asm/atomic.h
7917+++ b/arch/powerpc/include/asm/atomic.h
7918@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7919 return t1;
7920 }
7921
7922+#define atomic64_read_unchecked(v) atomic64_read(v)
7923+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7924+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7925+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7926+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7927+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7928+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7929+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7930+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7931+
7932 #endif /* __powerpc64__ */
7933
7934 #endif /* __KERNEL__ */
7935diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7936index 9e495c9..b6878e5 100644
7937--- a/arch/powerpc/include/asm/cache.h
7938+++ b/arch/powerpc/include/asm/cache.h
7939@@ -3,6 +3,7 @@
7940
7941 #ifdef __KERNEL__
7942
7943+#include <linux/const.h>
7944
7945 /* bytes per L1 cache line */
7946 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7947@@ -22,7 +23,7 @@
7948 #define L1_CACHE_SHIFT 7
7949 #endif
7950
7951-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7952+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7953
7954 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7955
7956diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
7957index 935b5e7..7001d2d 100644
7958--- a/arch/powerpc/include/asm/elf.h
7959+++ b/arch/powerpc/include/asm/elf.h
7960@@ -28,8 +28,19 @@
7961 the loader. We need to make sure that it is out of the way of the program
7962 that it will "exec", and that there is sufficient room for the brk. */
7963
7964-extern unsigned long randomize_et_dyn(unsigned long base);
7965-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
7966+#define ELF_ET_DYN_BASE (0x20000000)
7967+
7968+#ifdef CONFIG_PAX_ASLR
7969+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
7970+
7971+#ifdef __powerpc64__
7972+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
7973+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
7974+#else
7975+#define PAX_DELTA_MMAP_LEN 15
7976+#define PAX_DELTA_STACK_LEN 15
7977+#endif
7978+#endif
7979
7980 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
7981
7982@@ -127,10 +138,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7983 (0x7ff >> (PAGE_SHIFT - 12)) : \
7984 (0x3ffff >> (PAGE_SHIFT - 12)))
7985
7986-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7987-#define arch_randomize_brk arch_randomize_brk
7988-
7989-
7990 #ifdef CONFIG_SPU_BASE
7991 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
7992 #define NT_SPU 1
7993diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
7994index 8196e9c..d83a9f3 100644
7995--- a/arch/powerpc/include/asm/exec.h
7996+++ b/arch/powerpc/include/asm/exec.h
7997@@ -4,6 +4,6 @@
7998 #ifndef _ASM_POWERPC_EXEC_H
7999 #define _ASM_POWERPC_EXEC_H
8000
8001-extern unsigned long arch_align_stack(unsigned long sp);
8002+#define arch_align_stack(x) ((x) & ~0xfUL)
8003
8004 #endif /* _ASM_POWERPC_EXEC_H */
8005diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8006index 5acabbd..7ea14fa 100644
8007--- a/arch/powerpc/include/asm/kmap_types.h
8008+++ b/arch/powerpc/include/asm/kmap_types.h
8009@@ -10,7 +10,7 @@
8010 * 2 of the License, or (at your option) any later version.
8011 */
8012
8013-#define KM_TYPE_NR 16
8014+#define KM_TYPE_NR 17
8015
8016 #endif /* __KERNEL__ */
8017 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8018diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8019index b8da913..60b608a 100644
8020--- a/arch/powerpc/include/asm/local.h
8021+++ b/arch/powerpc/include/asm/local.h
8022@@ -9,15 +9,26 @@ typedef struct
8023 atomic_long_t a;
8024 } local_t;
8025
8026+typedef struct
8027+{
8028+ atomic_long_unchecked_t a;
8029+} local_unchecked_t;
8030+
8031 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8032
8033 #define local_read(l) atomic_long_read(&(l)->a)
8034+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8035 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8036+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8037
8038 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8039+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8040 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8041+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8042 #define local_inc(l) atomic_long_inc(&(l)->a)
8043+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8044 #define local_dec(l) atomic_long_dec(&(l)->a)
8045+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8046
8047 static __inline__ long local_add_return(long a, local_t *l)
8048 {
8049@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l)
8050
8051 return t;
8052 }
8053+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
8054
8055 #define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
8056
8057@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l)
8058
8059 return t;
8060 }
8061+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a))
8062
8063 static __inline__ long local_inc_return(local_t *l)
8064 {
8065@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l)
8066
8067 #define local_cmpxchg(l, o, n) \
8068 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8069+#define local_cmpxchg_unchecked(l, o, n) \
8070+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8071 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8072
8073 /**
8074diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8075index 8565c25..2865190 100644
8076--- a/arch/powerpc/include/asm/mman.h
8077+++ b/arch/powerpc/include/asm/mman.h
8078@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8079 }
8080 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8081
8082-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8083+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8084 {
8085 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8086 }
8087diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8088index 32e4e21..62afb12 100644
8089--- a/arch/powerpc/include/asm/page.h
8090+++ b/arch/powerpc/include/asm/page.h
8091@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
8092 * and needs to be executable. This means the whole heap ends
8093 * up being executable.
8094 */
8095-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8096- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8097+#define VM_DATA_DEFAULT_FLAGS32 \
8098+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8099+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8100
8101 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8102 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8103@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
8104 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8105 #endif
8106
8107+#define ktla_ktva(addr) (addr)
8108+#define ktva_ktla(addr) (addr)
8109+
8110 #ifndef CONFIG_PPC_BOOK3S_64
8111 /*
8112 * Use the top bit of the higher-level page table entries to indicate whether
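Rather than granting VM_EXEC to every 32-bit data mapping, the default now keys off the READ_IMPLIES_EXEC personality bit, which the ELF loader sets only when a binary's PT_GNU_STACK header requests an executable stack; modern binaries therefore get non-executable heap and data. In effect:

/* Sketch: executable data becomes opt-in via personality. */
static unsigned long data_default_flags32(void)
{
        unsigned long flags = VM_READ | VM_WRITE |
                              VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

        if (current->personality & READ_IMPLIES_EXEC)
                flags |= VM_EXEC;       /* legacy binaries only */
        return flags;
}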
8113diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8114index 88693ce..ac6f9ab 100644
8115--- a/arch/powerpc/include/asm/page_64.h
8116+++ b/arch/powerpc/include/asm/page_64.h
8117@@ -153,15 +153,18 @@ do { \
8118 * stack by default, so in the absence of a PT_GNU_STACK program header
8119 * we turn execute permission off.
8120 */
8121-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8122- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8123+#define VM_STACK_DEFAULT_FLAGS32 \
8124+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8125+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8126
8127 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8128 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8129
8130+#ifndef CONFIG_PAX_PAGEEXEC
8131 #define VM_STACK_DEFAULT_FLAGS \
8132 (is_32bit_task() ? \
8133 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8134+#endif
8135
8136 #include <asm-generic/getorder.h>
8137
8138diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8139index 4b0be20..c15a27d 100644
8140--- a/arch/powerpc/include/asm/pgalloc-64.h
8141+++ b/arch/powerpc/include/asm/pgalloc-64.h
8142@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8143 #ifndef CONFIG_PPC_64K_PAGES
8144
8145 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8146+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8147
8148 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8149 {
8150@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8151 pud_set(pud, (unsigned long)pmd);
8152 }
8153
8154+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8155+{
8156+ pud_populate(mm, pud, pmd);
8157+}
8158+
8159 #define pmd_populate(mm, pmd, pte_page) \
8160 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8161 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8162@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8163 #endif
8164
8165 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8166+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8167
8168 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8169 pte_t *pte)
8170diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8171index 7d6eacf..14c0240 100644
8172--- a/arch/powerpc/include/asm/pgtable.h
8173+++ b/arch/powerpc/include/asm/pgtable.h
8174@@ -2,6 +2,7 @@
8175 #define _ASM_POWERPC_PGTABLE_H
8176 #ifdef __KERNEL__
8177
8178+#include <linux/const.h>
8179 #ifndef __ASSEMBLY__
8180 #include <asm/processor.h> /* For TASK_SIZE */
8181 #include <asm/mmu.h>
8182diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8183index 4aad413..85d86bf 100644
8184--- a/arch/powerpc/include/asm/pte-hash32.h
8185+++ b/arch/powerpc/include/asm/pte-hash32.h
8186@@ -21,6 +21,7 @@
8187 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8188 #define _PAGE_USER 0x004 /* usermode access allowed */
8189 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8190+#define _PAGE_EXEC _PAGE_GUARDED
8191 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8192 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8193 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
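Classic 32-bit hash PowerPC has no hardware no-execute bit, so PAGEEXEC repurposes _PAGE_GUARDED: mapping non-executable pages as guarded makes instruction fetch from them fault, and the DSISR_GUARDED flag added in reg.h below lets the fault path tell an execution attempt apart from an ordinary data access. Roughly, as a sketch assuming the handler is given the DSISR value:

/* Sketch: classify a fault on a guarded (i.e. NX-emulated) page. */
#define DSISR_GUARDED   0x10000000      /* fetch from guarded storage */

static inline int is_exec_violation(unsigned long dsisr)
{
        return (dsisr & DSISR_GUARDED) != 0;
}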
8194diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8195index fa8388e..f985549 100644
8196--- a/arch/powerpc/include/asm/reg.h
8197+++ b/arch/powerpc/include/asm/reg.h
8198@@ -239,6 +239,7 @@
8199 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8200 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8201 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8202+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8203 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8204 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8205 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8206diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8207index 084e080..9415a3d 100644
8208--- a/arch/powerpc/include/asm/smp.h
8209+++ b/arch/powerpc/include/asm/smp.h
8210@@ -51,7 +51,7 @@ struct smp_ops_t {
8211 int (*cpu_disable)(void);
8212 void (*cpu_die)(unsigned int nr);
8213 int (*cpu_bootable)(unsigned int nr);
8214-};
8215+} __no_const;
8216
8217 extern void smp_send_debugger_break(void);
8218 extern void start_secondary_resume(void);
8219diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8220index 9854c56..7517190 100644
8221--- a/arch/powerpc/include/asm/thread_info.h
8222+++ b/arch/powerpc/include/asm/thread_info.h
8223@@ -91,7 +91,6 @@ static inline struct thread_info *current_thread_info(void)
8224 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
8225 TIF_NEED_RESCHED */
8226 #define TIF_32BIT 4 /* 32 bit binary */
8227-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
8228 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
8229 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
8230 #define TIF_SINGLESTEP 8 /* singlestepping active */
8231@@ -108,6 +107,9 @@ static inline struct thread_info *current_thread_info(void)
8232 #if defined(CONFIG_PPC64)
8233 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8234 #endif
8235+#define TIF_PERFMON_WORK 19 /* work for pfm_handle_work() */
8236+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8237+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
8238
8239 /* as above, but as bit values */
8240 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8241@@ -127,9 +129,10 @@ static inline struct thread_info *current_thread_info(void)
8242 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8243 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8244 #define _TIF_NOHZ (1<<TIF_NOHZ)
8245+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8246 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8247 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8248- _TIF_NOHZ)
8249+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8250
8251 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8252 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
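TIF_GRSEC_SETXID takes bit 5 (freed by moving TIF_PERFMON_WORK up to 19) because, as the comment notes, the PowerPC entry assembly tests these flags with andi., whose immediate is an unsigned 16-bit field: any flag visible to that test must live in bits 0 through 15. A trivial compile-time restatement:

/* Sketch: flags tested via 'andi.' must fit a 16-bit immediate. */
#define TIF_GRSEC_SETXID        5
#define _TIF_GRSEC_SETXID       (1 << TIF_GRSEC_SETXID)

_Static_assert(_TIF_GRSEC_SETXID <= 0xffff,
               "mask exceeds andi. immediate range");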
8253diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8254index 9485b43..3bd3c16 100644
8255--- a/arch/powerpc/include/asm/uaccess.h
8256+++ b/arch/powerpc/include/asm/uaccess.h
8257@@ -58,6 +58,7 @@
8258
8259 #endif
8260
8261+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8262 #define access_ok(type, addr, size) \
8263 (__chk_user_ptr(addr), \
8264 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8265@@ -318,52 +319,6 @@ do { \
8266 extern unsigned long __copy_tofrom_user(void __user *to,
8267 const void __user *from, unsigned long size);
8268
8269-#ifndef __powerpc64__
8270-
8271-static inline unsigned long copy_from_user(void *to,
8272- const void __user *from, unsigned long n)
8273-{
8274- unsigned long over;
8275-
8276- if (access_ok(VERIFY_READ, from, n))
8277- return __copy_tofrom_user((__force void __user *)to, from, n);
8278- if ((unsigned long)from < TASK_SIZE) {
8279- over = (unsigned long)from + n - TASK_SIZE;
8280- return __copy_tofrom_user((__force void __user *)to, from,
8281- n - over) + over;
8282- }
8283- return n;
8284-}
8285-
8286-static inline unsigned long copy_to_user(void __user *to,
8287- const void *from, unsigned long n)
8288-{
8289- unsigned long over;
8290-
8291- if (access_ok(VERIFY_WRITE, to, n))
8292- return __copy_tofrom_user(to, (__force void __user *)from, n);
8293- if ((unsigned long)to < TASK_SIZE) {
8294- over = (unsigned long)to + n - TASK_SIZE;
8295- return __copy_tofrom_user(to, (__force void __user *)from,
8296- n - over) + over;
8297- }
8298- return n;
8299-}
8300-
8301-#else /* __powerpc64__ */
8302-
8303-#define __copy_in_user(to, from, size) \
8304- __copy_tofrom_user((to), (from), (size))
8305-
8306-extern unsigned long copy_from_user(void *to, const void __user *from,
8307- unsigned long n);
8308-extern unsigned long copy_to_user(void __user *to, const void *from,
8309- unsigned long n);
8310-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8311- unsigned long n);
8312-
8313-#endif /* __powerpc64__ */
8314-
8315 static inline unsigned long __copy_from_user_inatomic(void *to,
8316 const void __user *from, unsigned long n)
8317 {
8318@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8319 if (ret == 0)
8320 return 0;
8321 }
8322+
8323+ if (!__builtin_constant_p(n))
8324+ check_object_size(to, n, false);
8325+
8326 return __copy_tofrom_user((__force void __user *)to, from, n);
8327 }
8328
8329@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8330 if (ret == 0)
8331 return 0;
8332 }
8333+
8334+ if (!__builtin_constant_p(n))
8335+ check_object_size(from, n, true);
8336+
8337 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8338 }
8339
8340@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8341 return __copy_to_user_inatomic(to, from, size);
8342 }
8343
8344+#ifndef __powerpc64__
8345+
8346+static inline unsigned long __must_check copy_from_user(void *to,
8347+ const void __user *from, unsigned long n)
8348+{
8349+ unsigned long over;
8350+
8351+ if ((long)n < 0)
8352+ return n;
8353+
8354+ if (access_ok(VERIFY_READ, from, n)) {
8355+ if (!__builtin_constant_p(n))
8356+ check_object_size(to, n, false);
8357+ return __copy_tofrom_user((__force void __user *)to, from, n);
8358+ }
8359+ if ((unsigned long)from < TASK_SIZE) {
8360+ over = (unsigned long)from + n - TASK_SIZE;
8361+ if (!__builtin_constant_p(n - over))
8362+ check_object_size(to, n - over, false);
8363+ return __copy_tofrom_user((__force void __user *)to, from,
8364+ n - over) + over;
8365+ }
8366+ return n;
8367+}
8368+
8369+static inline unsigned long __must_check copy_to_user(void __user *to,
8370+ const void *from, unsigned long n)
8371+{
8372+ unsigned long over;
8373+
8374+ if ((long)n < 0)
8375+ return n;
8376+
8377+ if (access_ok(VERIFY_WRITE, to, n)) {
8378+ if (!__builtin_constant_p(n))
8379+ check_object_size(from, n, true);
8380+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8381+ }
8382+ if ((unsigned long)to < TASK_SIZE) {
8383+ over = (unsigned long)to + n - TASK_SIZE;
8384+ if (!__builtin_constant_p(n - over))
8385+ check_object_size(from, n - over, true);
8386+ return __copy_tofrom_user(to, (__force void __user *)from,
8387+ n - over) + over;
8388+ }
8389+ return n;
8390+}
8391+
8392+#else /* __powerpc64__ */
8393+
8394+#define __copy_in_user(to, from, size) \
8395+ __copy_tofrom_user((to), (from), (size))
8396+
8397+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8398+{
8399+ if ((long)n < 0 || n > INT_MAX)
8400+ return n;
8401+
8402+ if (!__builtin_constant_p(n))
8403+ check_object_size(to, n, false);
8404+
8405+ if (likely(access_ok(VERIFY_READ, from, n)))
8406+ n = __copy_from_user(to, from, n);
8407+ else
8408+ memset(to, 0, n);
8409+ return n;
8410+}
8411+
8412+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8413+{
8414+ if ((long)n < 0 || n > INT_MAX)
8415+ return n;
8416+
8417+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8418+ if (!__builtin_constant_p(n))
8419+ check_object_size(from, n, true);
8420+ n = __copy_to_user(to, from, n);
8421+ }
8422+ return n;
8423+}
8424+
8425+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8426+ unsigned long n);
8427+
8428+#endif /* __powerpc64__ */
8429+
8430 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8431
8432 static inline unsigned long clear_user(void __user *addr, unsigned long size)
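The rebuilt copy helpers layer two checks in front of the raw copy: a sign test that catches lengths produced by subtracting in the wrong order (a huge unsigned n shows up as a negative long), and check_object_size(), which validates non-constant lengths against the bounds of the kernel object being copied into or out of (slab object, stack frame, and so on). Distilled into a sketch:

/* Sketch: the guard ordering the patched copy_from_user() uses. */
static inline unsigned long
hardened_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if ((long)n < 0)                        /* underflowed length   */
                return n;                       /* report nothing copied */

        if (!__builtin_constant_p(n))           /* non-constant length: */
                check_object_size(to, n, false);/* runtime bounds check */

        if (access_ok(VERIFY_READ, from, n))
                return __copy_from_user(to, from, n);
        return n;
}

Note that the 64-bit variant above also zeroes the destination when access_ok() fails, so uninitialized kernel memory never leaks through a short-circuited copy.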
8433diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8434index 445cb6e..4f80c5d 100644
8435--- a/arch/powerpc/kernel/Makefile
8436+++ b/arch/powerpc/kernel/Makefile
8437@@ -26,6 +26,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8438 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8439 endif
8440
8441+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8442+
8443 obj-y := cputable.o ptrace.o syscalls.o \
8444 irq.o align.o signal_32.o pmc.o vdso.o \
8445 process.o systbl.o idle.o \
8446diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8447index e775156..af2d1c0 100644
8448--- a/arch/powerpc/kernel/exceptions-64e.S
8449+++ b/arch/powerpc/kernel/exceptions-64e.S
8450@@ -759,6 +759,7 @@ storage_fault_common:
8451 std r14,_DAR(r1)
8452 std r15,_DSISR(r1)
8453 addi r3,r1,STACK_FRAME_OVERHEAD
8454+ bl .save_nvgprs
8455 mr r4,r14
8456 mr r5,r15
8457 ld r14,PACA_EXGEN+EX_R14(r13)
8458@@ -767,8 +768,7 @@ storage_fault_common:
8459 cmpdi r3,0
8460 bne- 1f
8461 b .ret_from_except_lite
8462-1: bl .save_nvgprs
8463- mr r5,r3
8464+1: mr r5,r3
8465 addi r3,r1,STACK_FRAME_OVERHEAD
8466 ld r4,_DAR(r1)
8467 bl .bad_page_fault
8468diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8469index 9f905e4..1d6b3d2 100644
8470--- a/arch/powerpc/kernel/exceptions-64s.S
8471+++ b/arch/powerpc/kernel/exceptions-64s.S
8472@@ -1390,10 +1390,10 @@ handle_page_fault:
8473 11: ld r4,_DAR(r1)
8474 ld r5,_DSISR(r1)
8475 addi r3,r1,STACK_FRAME_OVERHEAD
8476+ bl .save_nvgprs
8477 bl .do_page_fault
8478 cmpdi r3,0
8479 beq+ 12f
8480- bl .save_nvgprs
8481 mr r5,r3
8482 addi r3,r1,STACK_FRAME_OVERHEAD
8483 lwz r4,_DAR(r1)
8484diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8485index 6cff040..74ac5d1 100644
8486--- a/arch/powerpc/kernel/module_32.c
8487+++ b/arch/powerpc/kernel/module_32.c
8488@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8489 me->arch.core_plt_section = i;
8490 }
8491 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8492- printk("Module doesn't contain .plt or .init.plt sections.\n");
8493+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8494 return -ENOEXEC;
8495 }
8496
8497@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
8498
8499 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8500 /* Init, or core PLT? */
8501- if (location >= mod->module_core
8502- && location < mod->module_core + mod->core_size)
8503+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8504+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8505 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8506- else
8507+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8508+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8509 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8510+ else {
8511+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8512+ return ~0UL;
8513+ }
8514
8515 /* Find this entry, or if that fails, the next avail. entry */
8516 while (entry->jump[0]) {
8517@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8518 }
8519 #ifdef CONFIG_DYNAMIC_FTRACE
8520 module->arch.tramp =
8521- do_plt_call(module->module_core,
8522+ do_plt_call(module->module_core_rx,
8523 (unsigned long)ftrace_caller,
8524 sechdrs, module);
8525 #endif
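
The module_32.c hunk splits the old single module_core range test into separate checks against the rx (executable, read-only) and rw (writable) halves that PaX KERNEXEC maintains, and rejects PLT targets falling in neither. A rough stand-alone sketch of that range check; struct module_sketch and its field names only mirror the patched layout and are not the real struct module:

#include <stdio.h>

struct module_sketch {
	char *core_rx; unsigned long size_rx;
	char *core_rw; unsigned long size_rw;
};

static int in_core(const struct module_sketch *m, const char *loc)
{
	return (loc >= m->core_rx && loc < m->core_rx + m->size_rx) ||
	       (loc >= m->core_rw && loc < m->core_rw + m->size_rw);
}

int main(void)
{
	static char rx[64], rw[64], other[64];
	struct module_sketch m = { rx, sizeof(rx), rw, sizeof(rw) };
	/* inside rx, inside rw, outside both: expect 1 1 0 */
	printf("%d %d %d\n", in_core(&m, rx + 8), in_core(&m, rw + 8),
	       in_core(&m, other + 8));
	return 0;
}
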
8526diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8527index ea2f6a3..dbb2be3 100644
8528--- a/arch/powerpc/kernel/process.c
8529+++ b/arch/powerpc/kernel/process.c
8530@@ -888,8 +888,8 @@ void show_regs(struct pt_regs * regs)
8531 	 * Lookup NIP late so we have the best chance of getting the
8532 * above info out without failing
8533 */
8534- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8535- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8536+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8537+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8538 #endif
8539 show_stack(current, (unsigned long *) regs->gpr[1]);
8540 if (!user_mode(regs))
8541@@ -1385,10 +1385,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8542 newsp = stack[0];
8543 ip = stack[STACK_FRAME_LR_SAVE];
8544 if (!firstframe || ip != lr) {
8545- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8546+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8547 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8548 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
8549- printk(" (%pS)",
8550+ printk(" (%pA)",
8551 (void *)current->ret_stack[curr_frame].ret);
8552 curr_frame--;
8553 }
8554@@ -1408,7 +1408,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8555 struct pt_regs *regs = (struct pt_regs *)
8556 (sp + STACK_FRAME_OVERHEAD);
8557 lr = regs->link;
8558- printk("--- Exception: %lx at %pS\n LR = %pS\n",
8559+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
8560 regs->trap, (void *)regs->nip, (void *)lr);
8561 firstframe = 1;
8562 }
8563@@ -1444,58 +1444,3 @@ void notrace __ppc64_runlatch_off(void)
8564 mtspr(SPRN_CTRLT, ctrl);
8565 }
8566 #endif /* CONFIG_PPC64 */
8567-
8568-unsigned long arch_align_stack(unsigned long sp)
8569-{
8570- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8571- sp -= get_random_int() & ~PAGE_MASK;
8572- return sp & ~0xf;
8573-}
8574-
8575-static inline unsigned long brk_rnd(void)
8576-{
8577- unsigned long rnd = 0;
8578-
8579- /* 8MB for 32bit, 1GB for 64bit */
8580- if (is_32bit_task())
8581- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8582- else
8583- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8584-
8585- return rnd << PAGE_SHIFT;
8586-}
8587-
8588-unsigned long arch_randomize_brk(struct mm_struct *mm)
8589-{
8590- unsigned long base = mm->brk;
8591- unsigned long ret;
8592-
8593-#ifdef CONFIG_PPC_STD_MMU_64
8594- /*
8595- * If we are using 1TB segments and we are allowed to randomise
8596- * the heap, we can put it above 1TB so it is backed by a 1TB
8597- * segment. Otherwise the heap will be in the bottom 1TB
8598- * which always uses 256MB segments and this may result in a
8599- * performance penalty.
8600- */
8601- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8602- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8603-#endif
8604-
8605- ret = PAGE_ALIGN(base + brk_rnd());
8606-
8607- if (ret < mm->brk)
8608- return mm->brk;
8609-
8610- return ret;
8611-}
8612-
8613-unsigned long randomize_et_dyn(unsigned long base)
8614-{
8615- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8616-
8617- if (ret < base)
8618- return base;
8619-
8620- return ret;
8621-}
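
The deleted powerpc arch_randomize_brk()/brk_rnd() pair becomes redundant because PaX applies its own delta_mmap-based randomization instead. For reference, the removed arithmetic bounds the heap offset at 8 MiB for 32-bit tasks and 1 GiB for 64-bit ones; a quick check of that arithmetic, assuming the usual 4 KiB pages (PAGE_SHIFT of 12):

#include <stdio.h>

#define PAGE_SHIFT 12UL

int main(void)
{
	/* rnd is drawn modulo 1<<(23-PAGE_SHIFT) pages (32-bit) or
	 * 1<<(30-PAGE_SHIFT) pages (64-bit), then shifted back to bytes */
	unsigned long pages32 = 1UL << (23 - PAGE_SHIFT);
	unsigned long pages64 = 1UL << (30 - PAGE_SHIFT);
	printf("32-bit: %lu pages -> %lu MiB max\n", pages32, (pages32 << PAGE_SHIFT) >> 20);
	printf("64-bit: %lu pages -> %lu MiB max\n", pages64, (pages64 << PAGE_SHIFT) >> 20);
	return 0;
}
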
8622diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
8623index 2e3d2bf..35df241 100644
8624--- a/arch/powerpc/kernel/ptrace.c
8625+++ b/arch/powerpc/kernel/ptrace.c
8626@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
8627 return ret;
8628 }
8629
8630+#ifdef CONFIG_GRKERNSEC_SETXID
8631+extern void gr_delayed_cred_worker(void);
8632+#endif
8633+
8634 /*
8635 * We must return the syscall number to actually look up in the table.
8636 * This can be -1L to skip running any syscall at all.
8637@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8638
8639 secure_computing_strict(regs->gpr[0]);
8640
8641+#ifdef CONFIG_GRKERNSEC_SETXID
8642+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8643+ gr_delayed_cred_worker();
8644+#endif
8645+
8646 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
8647 tracehook_report_syscall_entry(regs))
8648 /*
8649@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8650 {
8651 int step;
8652
8653+#ifdef CONFIG_GRKERNSEC_SETXID
8654+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8655+ gr_delayed_cred_worker();
8656+#endif
8657+
8658 audit_syscall_exit(regs);
8659
8660 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8661diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
8662index 68027bf..b26fd31 100644
8663--- a/arch/powerpc/kernel/signal_32.c
8664+++ b/arch/powerpc/kernel/signal_32.c
8665@@ -1004,7 +1004,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
8666 /* Save user registers on the stack */
8667 frame = &rt_sf->uc.uc_mcontext;
8668 addr = frame;
8669- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
8670+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8671 sigret = 0;
8672 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
8673 } else {
8674diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8675index 448245f..b9bae83 100644
8676--- a/arch/powerpc/kernel/signal_64.c
8677+++ b/arch/powerpc/kernel/signal_64.c
8678@@ -758,7 +758,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
8679 #endif
8680
8681 /* Set up to return from userspace. */
8682- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
8683+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8684 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
8685 } else {
8686 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8687diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8688index 907a472..4ba206f 100644
8689--- a/arch/powerpc/kernel/traps.c
8690+++ b/arch/powerpc/kernel/traps.c
8691@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8692 return flags;
8693 }
8694
8695+extern void gr_handle_kernel_exploit(void);
8696+
8697 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8698 int signr)
8699 {
8700@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8701 panic("Fatal exception in interrupt");
8702 if (panic_on_oops)
8703 panic("Fatal exception");
8704+
8705+ gr_handle_kernel_exploit();
8706+
8707 do_exit(signr);
8708 }
8709
8710diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8711index 094e45c..d82b848 100644
8712--- a/arch/powerpc/kernel/vdso.c
8713+++ b/arch/powerpc/kernel/vdso.c
8714@@ -35,6 +35,7 @@
8715 #include <asm/vdso.h>
8716 #include <asm/vdso_datapage.h>
8717 #include <asm/setup.h>
8718+#include <asm/mman.h>
8719
8720 #undef DEBUG
8721
8722@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8723 vdso_base = VDSO32_MBASE;
8724 #endif
8725
8726- current->mm->context.vdso_base = 0;
8727+ current->mm->context.vdso_base = ~0UL;
8728
8729 /* vDSO has a problem and was disabled, just don't "enable" it for the
8730 * process
8731@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8732 vdso_base = get_unmapped_area(NULL, vdso_base,
8733 (vdso_pages << PAGE_SHIFT) +
8734 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8735- 0, 0);
8736+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
8737 if (IS_ERR_VALUE(vdso_base)) {
8738 rc = vdso_base;
8739 goto fail_mmapsem;
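
The signal_32.c, signal_64.c and vdso.c hunks all revolve around one detail: once randomized mmap may legally place a mapping near address 0, a vdso_base of 0 no longer unambiguously means "no vDSO", so the sentinel becomes ~0UL. A compressed illustration of the resulting test; VDSO_DISABLED is an illustrative name, not one the patch defines:

#include <stdio.h>

#define VDSO_DISABLED (~0UL)	/* 0 is now a valid mapping address */

static unsigned long vdso_base = VDSO_DISABLED;

int main(void)
{
	if (vdso_base != VDSO_DISABLED)
		printf("use vDSO trampoline at %#lx\n", vdso_base);
	else
		printf("fall back to the on-stack trampoline\n");
	return 0;
}
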
8740diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
8741index 9ae9768..87c3448 100644
8742--- a/arch/powerpc/kvm/powerpc.c
8743+++ b/arch/powerpc/kvm/powerpc.c
8744@@ -1141,7 +1141,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
8745 }
8746 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
8747
8748-int kvm_arch_init(void *opaque)
8749+int kvm_arch_init(const void *opaque)
8750 {
8751 return 0;
8752 }
8753diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8754index 5eea6f3..5d10396 100644
8755--- a/arch/powerpc/lib/usercopy_64.c
8756+++ b/arch/powerpc/lib/usercopy_64.c
8757@@ -9,22 +9,6 @@
8758 #include <linux/module.h>
8759 #include <asm/uaccess.h>
8760
8761-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8762-{
8763- if (likely(access_ok(VERIFY_READ, from, n)))
8764- n = __copy_from_user(to, from, n);
8765- else
8766- memset(to, 0, n);
8767- return n;
8768-}
8769-
8770-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8771-{
8772- if (likely(access_ok(VERIFY_WRITE, to, n)))
8773- n = __copy_to_user(to, from, n);
8774- return n;
8775-}
8776-
8777 unsigned long copy_in_user(void __user *to, const void __user *from,
8778 unsigned long n)
8779 {
8780@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8781 return n;
8782 }
8783
8784-EXPORT_SYMBOL(copy_from_user);
8785-EXPORT_SYMBOL(copy_to_user);
8786 EXPORT_SYMBOL(copy_in_user);
8787
8788diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8789index 51ab9e7..7d3c78b 100644
8790--- a/arch/powerpc/mm/fault.c
8791+++ b/arch/powerpc/mm/fault.c
8792@@ -33,6 +33,10 @@
8793 #include <linux/magic.h>
8794 #include <linux/ratelimit.h>
8795 #include <linux/context_tracking.h>
8796+#include <linux/slab.h>
8797+#include <linux/pagemap.h>
8798+#include <linux/compiler.h>
8799+#include <linux/unistd.h>
8800
8801 #include <asm/firmware.h>
8802 #include <asm/page.h>
8803@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8804 }
8805 #endif
8806
8807+#ifdef CONFIG_PAX_PAGEEXEC
8808+/*
8809+ * PaX: decide what to do with offenders (regs->nip = fault address)
8810+ *
8811+ * returns 1 when task should be killed
8812+ */
8813+static int pax_handle_fetch_fault(struct pt_regs *regs)
8814+{
8815+ return 1;
8816+}
8817+
8818+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8819+{
8820+ unsigned long i;
8821+
8822+ printk(KERN_ERR "PAX: bytes at PC: ");
8823+ for (i = 0; i < 5; i++) {
8824+ unsigned int c;
8825+ if (get_user(c, (unsigned int __user *)pc+i))
8826+ printk(KERN_CONT "???????? ");
8827+ else
8828+ printk(KERN_CONT "%08x ", c);
8829+ }
8830+ printk("\n");
8831+}
8832+#endif
8833+
8834 /*
8835 * Check whether the instruction at regs->nip is a store using
8836 * an update addressing form which will update r1.
8837@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8838 * indicate errors in DSISR but can validly be set in SRR1.
8839 */
8840 if (trap == 0x400)
8841- error_code &= 0x48200000;
8842+ error_code &= 0x58200000;
8843 else
8844 is_write = error_code & DSISR_ISSTORE;
8845 #else
8846@@ -378,7 +409,7 @@ good_area:
8847 * "undefined". Of those that can be set, this is the only
8848 * one which seems bad.
8849 */
8850- if (error_code & 0x10000000)
8851+ if (error_code & DSISR_GUARDED)
8852 /* Guarded storage error. */
8853 goto bad_area;
8854 #endif /* CONFIG_8xx */
8855@@ -393,7 +424,7 @@ good_area:
8856 * processors use the same I/D cache coherency mechanism
8857 * as embedded.
8858 */
8859- if (error_code & DSISR_PROTFAULT)
8860+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8861 goto bad_area;
8862 #endif /* CONFIG_PPC_STD_MMU */
8863
8864@@ -483,6 +514,23 @@ bad_area:
8865 bad_area_nosemaphore:
8866 /* User mode accesses cause a SIGSEGV */
8867 if (user_mode(regs)) {
8868+
8869+#ifdef CONFIG_PAX_PAGEEXEC
8870+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8871+#ifdef CONFIG_PPC_STD_MMU
8872+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8873+#else
8874+ if (is_exec && regs->nip == address) {
8875+#endif
8876+ switch (pax_handle_fetch_fault(regs)) {
8877+ }
8878+
8879+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8880+ do_group_exit(SIGKILL);
8881+ }
8882+ }
8883+#endif
8884+
8885 _exception(SIGSEGV, regs, code, address);
8886 goto bail;
8887 }
8888diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
8889index cb8bdbe..cde4bc7 100644
8890--- a/arch/powerpc/mm/mmap.c
8891+++ b/arch/powerpc/mm/mmap.c
8892@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
8893 return sysctl_legacy_va_layout;
8894 }
8895
8896-static unsigned long mmap_rnd(void)
8897+static unsigned long mmap_rnd(struct mm_struct *mm)
8898 {
8899 unsigned long rnd = 0;
8900
8901+#ifdef CONFIG_PAX_RANDMMAP
8902+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8903+#endif
8904+
8905 if (current->flags & PF_RANDOMIZE) {
8906 /* 8MB for 32bit, 1GB for 64bit */
8907 if (is_32bit_task())
8908@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
8909 return rnd << PAGE_SHIFT;
8910 }
8911
8912-static inline unsigned long mmap_base(void)
8913+static inline unsigned long mmap_base(struct mm_struct *mm)
8914 {
8915 unsigned long gap = rlimit(RLIMIT_STACK);
8916
8917@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
8918 else if (gap > MAX_GAP)
8919 gap = MAX_GAP;
8920
8921- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
8922+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
8923 }
8924
8925 /*
8926@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8927 */
8928 if (mmap_is_legacy()) {
8929 mm->mmap_base = TASK_UNMAPPED_BASE;
8930+
8931+#ifdef CONFIG_PAX_RANDMMAP
8932+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8933+ mm->mmap_base += mm->delta_mmap;
8934+#endif
8935+
8936 mm->get_unmapped_area = arch_get_unmapped_area;
8937 } else {
8938- mm->mmap_base = mmap_base();
8939+ mm->mmap_base = mmap_base(mm);
8940+
8941+#ifdef CONFIG_PAX_RANDMMAP
8942+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8943+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8944+#endif
8945+
8946 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8947 }
8948 }
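
In arch_pick_mmap_layout() the PaX deltas shift the mmap base away from its architecture default: bottom-up (legacy) layouts move the base up by delta_mmap, while top-down layouts move it down by delta_mmap + delta_stack so randomized mappings cannot collide with the randomized stack. A toy calculation with invented values; in reality delta_mmap and delta_stack are per-process random page counts chosen at exec time:

#include <stdio.h>

int main(void)
{
	unsigned long TASK_UNMAPPED_BASE = 0x40000000UL;
	unsigned long top_down_base      = 0xbff00000UL;	/* TASK_SIZE - gap - rnd */
	unsigned long delta_mmap  = 0x00350000UL;
	unsigned long delta_stack = 0x0001f000UL;

	printf("legacy  base: %#lx\n", TASK_UNMAPPED_BASE + delta_mmap);
	printf("topdown base: %#lx\n", top_down_base - (delta_mmap + delta_stack));
	return 0;
}
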
8949diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8950index 7ce9cf3..a964087 100644
8951--- a/arch/powerpc/mm/slice.c
8952+++ b/arch/powerpc/mm/slice.c
8953@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8954 if ((mm->task_size - len) < addr)
8955 return 0;
8956 vma = find_vma(mm, addr);
8957- return (!vma || (addr + len) <= vma->vm_start);
8958+ return check_heap_stack_gap(vma, addr, len, 0);
8959 }
8960
8961 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
8962@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
8963 info.align_offset = 0;
8964
8965 addr = TASK_UNMAPPED_BASE;
8966+
8967+#ifdef CONFIG_PAX_RANDMMAP
8968+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8969+ addr += mm->delta_mmap;
8970+#endif
8971+
8972 while (addr < TASK_SIZE) {
8973 info.low_limit = addr;
8974 if (!slice_scan_available(addr, available, 1, &addr))
8975@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
8976 if (fixed && addr > (mm->task_size - len))
8977 return -EINVAL;
8978
8979+#ifdef CONFIG_PAX_RANDMMAP
8980+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
8981+ addr = 0;
8982+#endif
8983+
8984 /* If hint, make sure it matches our alignment restrictions */
8985 if (!fixed && addr) {
8986 addr = _ALIGN_UP(addr, 1ul << pshift);
8987diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
8988index 4278acf..67fd0e6 100644
8989--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
8990+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
8991@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
8992 }
8993
8994 static struct pci_ops scc_pciex_pci_ops = {
8995- scc_pciex_read_config,
8996- scc_pciex_write_config,
8997+ .read = scc_pciex_read_config,
8998+ .write = scc_pciex_write_config,
8999 };
9000
9001 static void pciex_clear_intr_all(unsigned int __iomem *base)
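
Rewriting scc_pciex_pci_ops with designated initializers is a recurring cleanup in this patch: binding by field name keeps the initializer correct if struct pci_ops ever gains or reorders members, and it is the form the patch's constify machinery expects. The idea in miniature; struct pci_ops_sketch is a stand-in, not the kernel's struct pci_ops:

#include <stdio.h>

struct pci_ops_sketch {
	int (*read)(int where, int size, unsigned int *val);
	int (*write)(int where, int size, unsigned int val);
};

static int my_read(int where, int size, unsigned int *val)
{
	(void)where; (void)size; *val = 0; return 0;
}

static int my_write(int where, int size, unsigned int val)
{
	(void)where; (void)size; (void)val; return 0;
}

/* designated initializers survive fields being reordered or inserted */
static struct pci_ops_sketch ops = {
	.read  = my_read,
	.write = my_write,
};

int main(void)
{
	unsigned int v;
	printf("%d\n", ops.read(0, 4, &v));
	return 0;
}
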
9002diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9003index 9098692..3d54cd1 100644
9004--- a/arch/powerpc/platforms/cell/spufs/file.c
9005+++ b/arch/powerpc/platforms/cell/spufs/file.c
9006@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9007 return VM_FAULT_NOPAGE;
9008 }
9009
9010-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9011+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9012 unsigned long address,
9013- void *buf, int len, int write)
9014+ void *buf, size_t len, int write)
9015 {
9016 struct spu_context *ctx = vma->vm_file->private_data;
9017 unsigned long offset = address - vma->vm_start;
9018diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9019index fa9aaf7..3f5d836 100644
9020--- a/arch/s390/include/asm/atomic.h
9021+++ b/arch/s390/include/asm/atomic.h
9022@@ -398,6 +398,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9023 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9024 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9025
9026+#define atomic64_read_unchecked(v) atomic64_read(v)
9027+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9028+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9029+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9030+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9031+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9032+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9033+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9034+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9035+
9036 #define smp_mb__before_atomic_dec() smp_mb()
9037 #define smp_mb__after_atomic_dec() smp_mb()
9038 #define smp_mb__before_atomic_inc() smp_mb()
9039diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9040index 4d7ccac..d03d0ad 100644
9041--- a/arch/s390/include/asm/cache.h
9042+++ b/arch/s390/include/asm/cache.h
9043@@ -9,8 +9,10 @@
9044 #ifndef __ARCH_S390_CACHE_H
9045 #define __ARCH_S390_CACHE_H
9046
9047-#define L1_CACHE_BYTES 256
9048+#include <linux/const.h>
9049+
9050 #define L1_CACHE_SHIFT 8
9051+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9052 #define NET_SKB_PAD 32
9053
9054 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
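
The cache.h hunks replace literal sizes with (_AC(1,UL) << L1_CACHE_SHIFT). _AC() comes from <linux/const.h>: in C it pastes the UL suffix onto the constant, while under __ASSEMBLY__ it leaves the bare number that assemblers can parse, so one definition serves both worlds. A minimal re-creation:

#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	8
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)

int main(void)
{
	printf("L1_CACHE_BYTES = %lu\n", L1_CACHE_BYTES);	/* 256 */
	return 0;
}
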
9055diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9056index 78f4f87..598ce39 100644
9057--- a/arch/s390/include/asm/elf.h
9058+++ b/arch/s390/include/asm/elf.h
9059@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
9060 the loader. We need to make sure that it is out of the way of the program
9061 that it will "exec", and that there is sufficient room for the brk. */
9062
9063-extern unsigned long randomize_et_dyn(unsigned long base);
9064-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9065+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9066+
9067+#ifdef CONFIG_PAX_ASLR
9068+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9069+
9070+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9071+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9072+#endif
9073
9074 /* This yields a mask that user programs can use to figure out what
9075 instruction set this CPU supports. */
9076@@ -222,9 +228,6 @@ struct linux_binprm;
9077 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9078 int arch_setup_additional_pages(struct linux_binprm *, int);
9079
9080-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9081-#define arch_randomize_brk arch_randomize_brk
9082-
9083 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
9084
9085 #endif
9086diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9087index c4a93d6..4d2a9b4 100644
9088--- a/arch/s390/include/asm/exec.h
9089+++ b/arch/s390/include/asm/exec.h
9090@@ -7,6 +7,6 @@
9091 #ifndef __ASM_EXEC_H
9092 #define __ASM_EXEC_H
9093
9094-extern unsigned long arch_align_stack(unsigned long sp);
9095+#define arch_align_stack(x) ((x) & ~0xfUL)
9096
9097 #endif /* __ASM_EXEC_H */
9098diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9099index 79330af..254cf37 100644
9100--- a/arch/s390/include/asm/uaccess.h
9101+++ b/arch/s390/include/asm/uaccess.h
9102@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9103 __range_ok((unsigned long)(addr), (size)); \
9104 })
9105
9106+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9107 #define access_ok(type, addr, size) __access_ok(addr, size)
9108
9109 /*
9110@@ -245,6 +246,10 @@ static inline unsigned long __must_check
9111 copy_to_user(void __user *to, const void *from, unsigned long n)
9112 {
9113 might_fault();
9114+
9115+ if ((long)n < 0)
9116+ return n;
9117+
9118 return __copy_to_user(to, from, n);
9119 }
9120
9121@@ -268,6 +273,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
9122 static inline unsigned long __must_check
9123 __copy_from_user(void *to, const void __user *from, unsigned long n)
9124 {
9125+ if ((long)n < 0)
9126+ return n;
9127+
9128 return uaccess.copy_from_user(n, from, to);
9129 }
9130
9131@@ -296,10 +304,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9132 static inline unsigned long __must_check
9133 copy_from_user(void *to, const void __user *from, unsigned long n)
9134 {
9135- unsigned int sz = __compiletime_object_size(to);
9136+ size_t sz = __compiletime_object_size(to);
9137
9138 might_fault();
9139- if (unlikely(sz != -1 && sz < n)) {
9140+
9141+ if ((long)n < 0)
9142+ return n;
9143+
9144+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9145 copy_from_user_overflow();
9146 return n;
9147 }
9148diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9149index b89b591..fd9609d 100644
9150--- a/arch/s390/kernel/module.c
9151+++ b/arch/s390/kernel/module.c
9152@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9153
9154 /* Increase core size by size of got & plt and set start
9155 offsets for got and plt. */
9156- me->core_size = ALIGN(me->core_size, 4);
9157- me->arch.got_offset = me->core_size;
9158- me->core_size += me->arch.got_size;
9159- me->arch.plt_offset = me->core_size;
9160- me->core_size += me->arch.plt_size;
9161+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9162+ me->arch.got_offset = me->core_size_rw;
9163+ me->core_size_rw += me->arch.got_size;
9164+ me->arch.plt_offset = me->core_size_rx;
9165+ me->core_size_rx += me->arch.plt_size;
9166 return 0;
9167 }
9168
9169@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9170 if (info->got_initialized == 0) {
9171 Elf_Addr *gotent;
9172
9173- gotent = me->module_core + me->arch.got_offset +
9174+ gotent = me->module_core_rw + me->arch.got_offset +
9175 info->got_offset;
9176 *gotent = val;
9177 info->got_initialized = 1;
9178@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9179 rc = apply_rela_bits(loc, val, 0, 64, 0);
9180 else if (r_type == R_390_GOTENT ||
9181 r_type == R_390_GOTPLTENT) {
9182- val += (Elf_Addr) me->module_core - loc;
9183+ val += (Elf_Addr) me->module_core_rw - loc;
9184 rc = apply_rela_bits(loc, val, 1, 32, 1);
9185 }
9186 break;
9187@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9188 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9189 if (info->plt_initialized == 0) {
9190 unsigned int *ip;
9191- ip = me->module_core + me->arch.plt_offset +
9192+ ip = me->module_core_rx + me->arch.plt_offset +
9193 info->plt_offset;
9194 #ifndef CONFIG_64BIT
9195 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9196@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9197 val - loc + 0xffffUL < 0x1ffffeUL) ||
9198 (r_type == R_390_PLT32DBL &&
9199 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9200- val = (Elf_Addr) me->module_core +
9201+ val = (Elf_Addr) me->module_core_rx +
9202 me->arch.plt_offset +
9203 info->plt_offset;
9204 val += rela->r_addend - loc;
9205@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9206 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9207 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9208 val = val + rela->r_addend -
9209- ((Elf_Addr) me->module_core + me->arch.got_offset);
9210+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9211 if (r_type == R_390_GOTOFF16)
9212 rc = apply_rela_bits(loc, val, 0, 16, 0);
9213 else if (r_type == R_390_GOTOFF32)
9214@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9215 break;
9216 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9217 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9218- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9219+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9220 rela->r_addend - loc;
9221 if (r_type == R_390_GOTPC)
9222 rc = apply_rela_bits(loc, val, 1, 32, 0);
9223diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9224index 7ed0d4e..1dfc145 100644
9225--- a/arch/s390/kernel/process.c
9226+++ b/arch/s390/kernel/process.c
9227@@ -242,39 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
9228 }
9229 return 0;
9230 }
9231-
9232-unsigned long arch_align_stack(unsigned long sp)
9233-{
9234- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9235- sp -= get_random_int() & ~PAGE_MASK;
9236- return sp & ~0xf;
9237-}
9238-
9239-static inline unsigned long brk_rnd(void)
9240-{
9241- /* 8MB for 32bit, 1GB for 64bit */
9242- if (is_32bit_task())
9243- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9244- else
9245- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9246-}
9247-
9248-unsigned long arch_randomize_brk(struct mm_struct *mm)
9249-{
9250- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
9251-
9252- if (ret < mm->brk)
9253- return mm->brk;
9254- return ret;
9255-}
9256-
9257-unsigned long randomize_et_dyn(unsigned long base)
9258-{
9259- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9260-
9261- if (!(current->flags & PF_RANDOMIZE))
9262- return base;
9263- if (ret < base)
9264- return base;
9265- return ret;
9266-}
9267diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9268index 9b436c2..54fbf0a 100644
9269--- a/arch/s390/mm/mmap.c
9270+++ b/arch/s390/mm/mmap.c
9271@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9272 */
9273 if (mmap_is_legacy()) {
9274 mm->mmap_base = mmap_base_legacy();
9275+
9276+#ifdef CONFIG_PAX_RANDMMAP
9277+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9278+ mm->mmap_base += mm->delta_mmap;
9279+#endif
9280+
9281 mm->get_unmapped_area = arch_get_unmapped_area;
9282 } else {
9283 mm->mmap_base = mmap_base();
9284+
9285+#ifdef CONFIG_PAX_RANDMMAP
9286+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9287+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9288+#endif
9289+
9290 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9291 }
9292 }
9293@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9294 */
9295 if (mmap_is_legacy()) {
9296 mm->mmap_base = mmap_base_legacy();
9297+
9298+#ifdef CONFIG_PAX_RANDMMAP
9299+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9300+ mm->mmap_base += mm->delta_mmap;
9301+#endif
9302+
9303 mm->get_unmapped_area = s390_get_unmapped_area;
9304 } else {
9305 mm->mmap_base = mmap_base();
9306+
9307+#ifdef CONFIG_PAX_RANDMMAP
9308+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9309+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9310+#endif
9311+
9312 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9313 }
9314 }
9315diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9316index ae3d59f..f65f075 100644
9317--- a/arch/score/include/asm/cache.h
9318+++ b/arch/score/include/asm/cache.h
9319@@ -1,7 +1,9 @@
9320 #ifndef _ASM_SCORE_CACHE_H
9321 #define _ASM_SCORE_CACHE_H
9322
9323+#include <linux/const.h>
9324+
9325 #define L1_CACHE_SHIFT 4
9326-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9327+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9328
9329 #endif /* _ASM_SCORE_CACHE_H */
9330diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9331index f9f3cd5..58ff438 100644
9332--- a/arch/score/include/asm/exec.h
9333+++ b/arch/score/include/asm/exec.h
9334@@ -1,6 +1,6 @@
9335 #ifndef _ASM_SCORE_EXEC_H
9336 #define _ASM_SCORE_EXEC_H
9337
9338-extern unsigned long arch_align_stack(unsigned long sp);
9339+#define arch_align_stack(x) (x)
9340
9341 #endif /* _ASM_SCORE_EXEC_H */
9342diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9343index a1519ad3..e8ac1ff 100644
9344--- a/arch/score/kernel/process.c
9345+++ b/arch/score/kernel/process.c
9346@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9347
9348 return task_pt_regs(task)->cp0_epc;
9349 }
9350-
9351-unsigned long arch_align_stack(unsigned long sp)
9352-{
9353- return sp;
9354-}
9355diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9356index ef9e555..331bd29 100644
9357--- a/arch/sh/include/asm/cache.h
9358+++ b/arch/sh/include/asm/cache.h
9359@@ -9,10 +9,11 @@
9360 #define __ASM_SH_CACHE_H
9361 #ifdef __KERNEL__
9362
9363+#include <linux/const.h>
9364 #include <linux/init.h>
9365 #include <cpu/cache.h>
9366
9367-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9368+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9369
9370 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9371
9372diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9373index 6777177..cb5e44f 100644
9374--- a/arch/sh/mm/mmap.c
9375+++ b/arch/sh/mm/mmap.c
9376@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9377 struct mm_struct *mm = current->mm;
9378 struct vm_area_struct *vma;
9379 int do_colour_align;
9380+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9381 struct vm_unmapped_area_info info;
9382
9383 if (flags & MAP_FIXED) {
9384@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9385 if (filp || (flags & MAP_SHARED))
9386 do_colour_align = 1;
9387
9388+#ifdef CONFIG_PAX_RANDMMAP
9389+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9390+#endif
9391+
9392 if (addr) {
9393 if (do_colour_align)
9394 addr = COLOUR_ALIGN(addr, pgoff);
9395@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9396 addr = PAGE_ALIGN(addr);
9397
9398 vma = find_vma(mm, addr);
9399- if (TASK_SIZE - len >= addr &&
9400- (!vma || addr + len <= vma->vm_start))
9401+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9402 return addr;
9403 }
9404
9405 info.flags = 0;
9406 info.length = len;
9407- info.low_limit = TASK_UNMAPPED_BASE;
9408+ info.low_limit = mm->mmap_base;
9409 info.high_limit = TASK_SIZE;
9410 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9411 info.align_offset = pgoff << PAGE_SHIFT;
9412@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9413 struct mm_struct *mm = current->mm;
9414 unsigned long addr = addr0;
9415 int do_colour_align;
9416+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9417 struct vm_unmapped_area_info info;
9418
9419 if (flags & MAP_FIXED) {
9420@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9421 if (filp || (flags & MAP_SHARED))
9422 do_colour_align = 1;
9423
9424+#ifdef CONFIG_PAX_RANDMMAP
9425+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9426+#endif
9427+
9428 /* requesting a specific address */
9429 if (addr) {
9430 if (do_colour_align)
9431@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9432 addr = PAGE_ALIGN(addr);
9433
9434 vma = find_vma(mm, addr);
9435- if (TASK_SIZE - len >= addr &&
9436- (!vma || addr + len <= vma->vm_start))
9437+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9438 return addr;
9439 }
9440
9441@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9442 VM_BUG_ON(addr != -ENOMEM);
9443 info.flags = 0;
9444 info.low_limit = TASK_UNMAPPED_BASE;
9445+
9446+#ifdef CONFIG_PAX_RANDMMAP
9447+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9448+ info.low_limit += mm->delta_mmap;
9449+#endif
9450+
9451 info.high_limit = TASK_SIZE;
9452 addr = vm_unmapped_area(&info);
9453 }
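
The sh/mm/mmap.c hunks swap the open-coded "!vma || addr + len <= vma->vm_start" test for check_heap_stack_gap(), which additionally accounts for a randomized per-thread stack offset. A hedged approximation of its core test; the real grsecurity helper also enforces the configurable heap/stack gap, which is omitted here:

#include <stdio.h>

struct vma_sketch { unsigned long vm_start; };

static int check_heap_stack_gap_sketch(const struct vma_sketch *vma,
				       unsigned long addr, unsigned long len,
				       unsigned long offset)
{
	if (!vma)	/* nothing above the candidate range */
		return 1;
	return addr + len + offset <= vma->vm_start;
}

int main(void)
{
	struct vma_sketch vma = { .vm_start = 0x500000 };
	printf("%d\n", check_heap_stack_gap_sketch(&vma, 0x400000, 0x100000, 0));	/* 1: fits */
	printf("%d\n", check_heap_stack_gap_sketch(&vma, 0x400000, 0x100000, 0x1000));	/* 0: offset violated */
	return 0;
}
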
9454diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9455index be56a24..443328f 100644
9456--- a/arch/sparc/include/asm/atomic_64.h
9457+++ b/arch/sparc/include/asm/atomic_64.h
9458@@ -14,18 +14,40 @@
9459 #define ATOMIC64_INIT(i) { (i) }
9460
9461 #define atomic_read(v) (*(volatile int *)&(v)->counter)
9462+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9463+{
9464+ return v->counter;
9465+}
9466 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
9467+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9468+{
9469+ return v->counter;
9470+}
9471
9472 #define atomic_set(v, i) (((v)->counter) = i)
9473+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9474+{
9475+ v->counter = i;
9476+}
9477 #define atomic64_set(v, i) (((v)->counter) = i)
9478+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9479+{
9480+ v->counter = i;
9481+}
9482
9483 extern void atomic_add(int, atomic_t *);
9484+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
9485 extern void atomic64_add(long, atomic64_t *);
9486+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
9487 extern void atomic_sub(int, atomic_t *);
9488+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
9489 extern void atomic64_sub(long, atomic64_t *);
9490+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
9491
9492 extern int atomic_add_ret(int, atomic_t *);
9493+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
9494 extern long atomic64_add_ret(long, atomic64_t *);
9495+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
9496 extern int atomic_sub_ret(int, atomic_t *);
9497 extern long atomic64_sub_ret(long, atomic64_t *);
9498
9499@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9500 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
9501
9502 #define atomic_inc_return(v) atomic_add_ret(1, v)
9503+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9504+{
9505+ return atomic_add_ret_unchecked(1, v);
9506+}
9507 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
9508+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9509+{
9510+ return atomic64_add_ret_unchecked(1, v);
9511+}
9512
9513 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
9514 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
9515
9516 #define atomic_add_return(i, v) atomic_add_ret(i, v)
9517+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9518+{
9519+ return atomic_add_ret_unchecked(i, v);
9520+}
9521 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
9522+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9523+{
9524+ return atomic64_add_ret_unchecked(i, v);
9525+}
9526
9527 /*
9528 * atomic_inc_and_test - increment and test
9529@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9530 * other cases.
9531 */
9532 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9533+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9534+{
9535+ return atomic_inc_return_unchecked(v) == 0;
9536+}
9537 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9538
9539 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
9540@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9541 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
9542
9543 #define atomic_inc(v) atomic_add(1, v)
9544+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9545+{
9546+ atomic_add_unchecked(1, v);
9547+}
9548 #define atomic64_inc(v) atomic64_add(1, v)
9549+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9550+{
9551+ atomic64_add_unchecked(1, v);
9552+}
9553
9554 #define atomic_dec(v) atomic_sub(1, v)
9555+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9556+{
9557+ atomic_sub_unchecked(1, v);
9558+}
9559 #define atomic64_dec(v) atomic64_sub(1, v)
9560+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9561+{
9562+ atomic64_sub_unchecked(1, v);
9563+}
9564
9565 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
9566 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
9567
9568 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9569+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9570+{
9571+ return cmpxchg(&v->counter, old, new);
9572+}
9573 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9574+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9575+{
9576+ return xchg(&v->counter, new);
9577+}
9578
9579 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9580 {
9581- int c, old;
9582+ int c, old, new;
9583 c = atomic_read(v);
9584 for (;;) {
9585- if (unlikely(c == (u)))
9586+ if (unlikely(c == u))
9587 break;
9588- old = atomic_cmpxchg((v), c, c + (a));
9589+
9590+ asm volatile("addcc %2, %0, %0\n"
9591+
9592+#ifdef CONFIG_PAX_REFCOUNT
9593+ "tvs %%icc, 6\n"
9594+#endif
9595+
9596+ : "=r" (new)
9597+ : "0" (c), "ir" (a)
9598+ : "cc");
9599+
9600+ old = atomic_cmpxchg(v, c, new);
9601 if (likely(old == c))
9602 break;
9603 c = old;
9604@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9605 #define atomic64_cmpxchg(v, o, n) \
9606 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9607 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9608+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9609+{
9610+ return xchg(&v->counter, new);
9611+}
9612
9613 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9614 {
9615- long c, old;
9616+ long c, old, new;
9617 c = atomic64_read(v);
9618 for (;;) {
9619- if (unlikely(c == (u)))
9620+ if (unlikely(c == u))
9621 break;
9622- old = atomic64_cmpxchg((v), c, c + (a));
9623+
9624+ asm volatile("addcc %2, %0, %0\n"
9625+
9626+#ifdef CONFIG_PAX_REFCOUNT
9627+ "tvs %%xcc, 6\n"
9628+#endif
9629+
9630+ : "=r" (new)
9631+ : "0" (c), "ir" (a)
9632+ : "cc");
9633+
9634+ old = atomic64_cmpxchg(v, c, new);
9635 if (likely(old == c))
9636 break;
9637 c = old;
9638 }
9639- return c != (u);
9640+ return c != u;
9641 }
9642
9643 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
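
In the sparc atomic hunks above, add becomes addcc (which sets the condition codes) and, under PAX_REFCOUNT, "tvs %icc, 6" traps the moment a signed add overflows, so a reference count can never wrap past INT_MAX into a value an attacker could exploit. A portable analogue of that trap-on-overflow behaviour, using the GCC/Clang __builtin_add_overflow() builtin in place of the sparc trap instruction:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static int refcount_add_sketch(int a, int *counter)
{
	int new;
	if (__builtin_add_overflow(*counter, a, &new)) {
		/* where the hardware would take the tvs trap */
		fprintf(stderr, "refcount overflow detected, aborting\n");
		abort();
	}
	*counter = new;
	return new;
}

int main(void)
{
	int c = INT_MAX - 1;
	printf("%d\n", refcount_add_sketch(1, &c));	/* INT_MAX */
	refcount_add_sketch(1, &c);			/* aborts instead of wrapping */
	return 0;
}
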
9644diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
9645index 5bb6991..5c2132e 100644
9646--- a/arch/sparc/include/asm/cache.h
9647+++ b/arch/sparc/include/asm/cache.h
9648@@ -7,10 +7,12 @@
9649 #ifndef _SPARC_CACHE_H
9650 #define _SPARC_CACHE_H
9651
9652+#include <linux/const.h>
9653+
9654 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
9655
9656 #define L1_CACHE_SHIFT 5
9657-#define L1_CACHE_BYTES 32
9658+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9659
9660 #ifdef CONFIG_SPARC32
9661 #define SMP_CACHE_BYTES_SHIFT 5
9662diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
9663index a24e41f..47677ff 100644
9664--- a/arch/sparc/include/asm/elf_32.h
9665+++ b/arch/sparc/include/asm/elf_32.h
9666@@ -114,6 +114,13 @@ typedef struct {
9667
9668 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
9669
9670+#ifdef CONFIG_PAX_ASLR
9671+#define PAX_ELF_ET_DYN_BASE 0x10000UL
9672+
9673+#define PAX_DELTA_MMAP_LEN 16
9674+#define PAX_DELTA_STACK_LEN 16
9675+#endif
9676+
9677 /* This yields a mask that user programs can use to figure out what
9678 instruction set this cpu supports. This can NOT be done in userspace
9679 on Sparc. */
9680diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9681index 370ca1e..d4f4a98 100644
9682--- a/arch/sparc/include/asm/elf_64.h
9683+++ b/arch/sparc/include/asm/elf_64.h
9684@@ -189,6 +189,13 @@ typedef struct {
9685 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9686 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9687
9688+#ifdef CONFIG_PAX_ASLR
9689+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9690+
9691+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9692+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9693+#endif
9694+
9695 extern unsigned long sparc64_elf_hwcap;
9696 #define ELF_HWCAP sparc64_elf_hwcap
9697
9698diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9699index 9b1c36d..209298b 100644
9700--- a/arch/sparc/include/asm/pgalloc_32.h
9701+++ b/arch/sparc/include/asm/pgalloc_32.h
9702@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9703 }
9704
9705 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9706+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9707
9708 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9709 unsigned long address)
9710diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9711index bcfe063..b333142 100644
9712--- a/arch/sparc/include/asm/pgalloc_64.h
9713+++ b/arch/sparc/include/asm/pgalloc_64.h
9714@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9715 }
9716
9717 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9718+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9719
9720 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9721 {
9722diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
9723index 59ba6f6..4518128 100644
9724--- a/arch/sparc/include/asm/pgtable.h
9725+++ b/arch/sparc/include/asm/pgtable.h
9726@@ -5,4 +5,8 @@
9727 #else
9728 #include <asm/pgtable_32.h>
9729 #endif
9730+
9731+#define ktla_ktva(addr) (addr)
9732+#define ktva_ktla(addr) (addr)
9733+
9734 #endif
9735diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9736index 502f632..da1917f 100644
9737--- a/arch/sparc/include/asm/pgtable_32.h
9738+++ b/arch/sparc/include/asm/pgtable_32.h
9739@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9740 #define PAGE_SHARED SRMMU_PAGE_SHARED
9741 #define PAGE_COPY SRMMU_PAGE_COPY
9742 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9743+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9744+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9745+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9746 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9747
9748 /* Top-level page directory - dummy used by init-mm.
9749@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9750
9751 /* xwr */
9752 #define __P000 PAGE_NONE
9753-#define __P001 PAGE_READONLY
9754-#define __P010 PAGE_COPY
9755-#define __P011 PAGE_COPY
9756+#define __P001 PAGE_READONLY_NOEXEC
9757+#define __P010 PAGE_COPY_NOEXEC
9758+#define __P011 PAGE_COPY_NOEXEC
9759 #define __P100 PAGE_READONLY
9760 #define __P101 PAGE_READONLY
9761 #define __P110 PAGE_COPY
9762 #define __P111 PAGE_COPY
9763
9764 #define __S000 PAGE_NONE
9765-#define __S001 PAGE_READONLY
9766-#define __S010 PAGE_SHARED
9767-#define __S011 PAGE_SHARED
9768+#define __S001 PAGE_READONLY_NOEXEC
9769+#define __S010 PAGE_SHARED_NOEXEC
9770+#define __S011 PAGE_SHARED_NOEXEC
9771 #define __S100 PAGE_READONLY
9772 #define __S101 PAGE_READONLY
9773 #define __S110 PAGE_SHARED
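
The __Pxwr/__Sxwr tables are indexed by a mapping's execute/write/read permission bits, and under PAGEEXEC every entry whose x bit is clear now resolves to one of the new *_NOEXEC protections. Decoding the index makes the pattern in the table above explicit:

#include <stdio.h>

int main(void)
{
	for (unsigned i = 0; i < 8; i++) {
		unsigned x = (i >> 2) & 1, w = (i >> 1) & 1, r = i & 1;
		printf("__P%u%u%u -> %s\n", x, w, r,
		       i == 0 ? "PAGE_NONE" :
		       x ? "executable protection" : "*_NOEXEC protection");
	}
	return 0;
}
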
9774diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9775index 79da178..c2eede8 100644
9776--- a/arch/sparc/include/asm/pgtsrmmu.h
9777+++ b/arch/sparc/include/asm/pgtsrmmu.h
9778@@ -115,6 +115,11 @@
9779 SRMMU_EXEC | SRMMU_REF)
9780 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
9781 SRMMU_EXEC | SRMMU_REF)
9782+
9783+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
9784+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9785+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9786+
9787 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
9788 SRMMU_DIRTY | SRMMU_REF)
9789
9790diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
9791index 9689176..63c18ea 100644
9792--- a/arch/sparc/include/asm/spinlock_64.h
9793+++ b/arch/sparc/include/asm/spinlock_64.h
9794@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
9795
9796 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
9797
9798-static void inline arch_read_lock(arch_rwlock_t *lock)
9799+static inline void arch_read_lock(arch_rwlock_t *lock)
9800 {
9801 unsigned long tmp1, tmp2;
9802
9803 __asm__ __volatile__ (
9804 "1: ldsw [%2], %0\n"
9805 " brlz,pn %0, 2f\n"
9806-"4: add %0, 1, %1\n"
9807+"4: addcc %0, 1, %1\n"
9808+
9809+#ifdef CONFIG_PAX_REFCOUNT
9810+" tvs %%icc, 6\n"
9811+#endif
9812+
9813 " cas [%2], %0, %1\n"
9814 " cmp %0, %1\n"
9815 " bne,pn %%icc, 1b\n"
9816@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
9817 " .previous"
9818 : "=&r" (tmp1), "=&r" (tmp2)
9819 : "r" (lock)
9820- : "memory");
9821+ : "memory", "cc");
9822 }
9823
9824-static int inline arch_read_trylock(arch_rwlock_t *lock)
9825+static inline int arch_read_trylock(arch_rwlock_t *lock)
9826 {
9827 int tmp1, tmp2;
9828
9829@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9830 "1: ldsw [%2], %0\n"
9831 " brlz,a,pn %0, 2f\n"
9832 " mov 0, %0\n"
9833-" add %0, 1, %1\n"
9834+" addcc %0, 1, %1\n"
9835+
9836+#ifdef CONFIG_PAX_REFCOUNT
9837+" tvs %%icc, 6\n"
9838+#endif
9839+
9840 " cas [%2], %0, %1\n"
9841 " cmp %0, %1\n"
9842 " bne,pn %%icc, 1b\n"
9843@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9844 return tmp1;
9845 }
9846
9847-static void inline arch_read_unlock(arch_rwlock_t *lock)
9848+static inline void arch_read_unlock(arch_rwlock_t *lock)
9849 {
9850 unsigned long tmp1, tmp2;
9851
9852 __asm__ __volatile__(
9853 "1: lduw [%2], %0\n"
9854-" sub %0, 1, %1\n"
9855+" subcc %0, 1, %1\n"
9856+
9857+#ifdef CONFIG_PAX_REFCOUNT
9858+" tvs %%icc, 6\n"
9859+#endif
9860+
9861 " cas [%2], %0, %1\n"
9862 " cmp %0, %1\n"
9863 " bne,pn %%xcc, 1b\n"
9864@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
9865 : "memory");
9866 }
9867
9868-static void inline arch_write_lock(arch_rwlock_t *lock)
9869+static inline void arch_write_lock(arch_rwlock_t *lock)
9870 {
9871 unsigned long mask, tmp1, tmp2;
9872
9873@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
9874 : "memory");
9875 }
9876
9877-static void inline arch_write_unlock(arch_rwlock_t *lock)
9878+static inline void arch_write_unlock(arch_rwlock_t *lock)
9879 {
9880 __asm__ __volatile__(
9881 " stw %%g0, [%0]"
9882@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
9883 : "memory");
9884 }
9885
9886-static int inline arch_write_trylock(arch_rwlock_t *lock)
9887+static inline int arch_write_trylock(arch_rwlock_t *lock)
9888 {
9889 unsigned long mask, tmp1, tmp2, result;
9890
9891diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
9892index 96efa7a..16858bf 100644
9893--- a/arch/sparc/include/asm/thread_info_32.h
9894+++ b/arch/sparc/include/asm/thread_info_32.h
9895@@ -49,6 +49,8 @@ struct thread_info {
9896 unsigned long w_saved;
9897
9898 struct restart_block restart_block;
9899+
9900+ unsigned long lowest_stack;
9901 };
9902
9903 /*
9904diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
9905index a5f01ac..703b554 100644
9906--- a/arch/sparc/include/asm/thread_info_64.h
9907+++ b/arch/sparc/include/asm/thread_info_64.h
9908@@ -63,6 +63,8 @@ struct thread_info {
9909 struct pt_regs *kern_una_regs;
9910 unsigned int kern_una_insn;
9911
9912+ unsigned long lowest_stack;
9913+
9914 unsigned long fpregs[0] __attribute__ ((aligned(64)));
9915 };
9916
9917@@ -188,12 +190,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
9918 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
9919 /* flag bit 4 is available */
9920 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
9921-/* flag bit 6 is available */
9922+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
9923 #define TIF_32BIT 7 /* 32-bit binary */
9924 #define TIF_NOHZ 8 /* in adaptive nohz mode */
9925 #define TIF_SECCOMP 9 /* secure computing */
9926 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
9927 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
9928+
9929 /* NOTE: Thread flags >= 12 should be ones we have no interest
9930 * in using in assembly, else we can't use the mask as
9931 * an immediate value in instructions such as andcc.
9932@@ -213,12 +216,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
9933 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
9934 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9935 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
9936+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9937
9938 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
9939 _TIF_DO_NOTIFY_RESUME_MASK | \
9940 _TIF_NEED_RESCHED)
9941 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
9942
9943+#define _TIF_WORK_SYSCALL \
9944+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
9945+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
9946+
9947+
9948 /*
9949 * Thread-synchronous status.
9950 *
9951diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
9952index 0167d26..767bb0c 100644
9953--- a/arch/sparc/include/asm/uaccess.h
9954+++ b/arch/sparc/include/asm/uaccess.h
9955@@ -1,5 +1,6 @@
9956 #ifndef ___ASM_SPARC_UACCESS_H
9957 #define ___ASM_SPARC_UACCESS_H
9958+
9959 #if defined(__sparc__) && defined(__arch64__)
9960 #include <asm/uaccess_64.h>
9961 #else
9962diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
9963index 53a28dd..50c38c3 100644
9964--- a/arch/sparc/include/asm/uaccess_32.h
9965+++ b/arch/sparc/include/asm/uaccess_32.h
9966@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
9967
9968 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9969 {
9970- if (n && __access_ok((unsigned long) to, n))
9971+ if ((long)n < 0)
9972+ return n;
9973+
9974+ if (n && __access_ok((unsigned long) to, n)) {
9975+ if (!__builtin_constant_p(n))
9976+ check_object_size(from, n, true);
9977 return __copy_user(to, (__force void __user *) from, n);
9978- else
9979+ } else
9980 return n;
9981 }
9982
9983 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
9984 {
9985+ if ((long)n < 0)
9986+ return n;
9987+
9988+ if (!__builtin_constant_p(n))
9989+ check_object_size(from, n, true);
9990+
9991 return __copy_user(to, (__force void __user *) from, n);
9992 }
9993
9994 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9995 {
9996- if (n && __access_ok((unsigned long) from, n))
9997+ if ((long)n < 0)
9998+ return n;
9999+
10000+ if (n && __access_ok((unsigned long) from, n)) {
10001+ if (!__builtin_constant_p(n))
10002+ check_object_size(to, n, false);
10003 return __copy_user((__force void __user *) to, from, n);
10004- else
10005+ } else
10006 return n;
10007 }
10008
10009 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10010 {
10011+ if ((long)n < 0)
10012+ return n;
10013+
10014 return __copy_user((__force void __user *) to, from, n);
10015 }
10016
10017diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10018index ad7e178..c9e7423 100644
10019--- a/arch/sparc/include/asm/uaccess_64.h
10020+++ b/arch/sparc/include/asm/uaccess_64.h
10021@@ -10,6 +10,7 @@
10022 #include <linux/compiler.h>
10023 #include <linux/string.h>
10024 #include <linux/thread_info.h>
10025+#include <linux/kernel.h>
10026 #include <asm/asi.h>
10027 #include <asm/spitfire.h>
10028 #include <asm-generic/uaccess-unaligned.h>
10029@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
10030 static inline unsigned long __must_check
10031 copy_from_user(void *to, const void __user *from, unsigned long size)
10032 {
10033- unsigned long ret = ___copy_from_user(to, from, size);
10034+ unsigned long ret;
10035
10036+ if ((long)size < 0 || size > INT_MAX)
10037+ return size;
10038+
10039+ if (!__builtin_constant_p(size))
10040+ check_object_size(to, size, false);
10041+
10042+ ret = ___copy_from_user(to, from, size);
10043 if (unlikely(ret))
10044 ret = copy_from_user_fixup(to, from, size);
10045
10046@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
10047 static inline unsigned long __must_check
10048 copy_to_user(void __user *to, const void *from, unsigned long size)
10049 {
10050- unsigned long ret = ___copy_to_user(to, from, size);
10051+ unsigned long ret;
10052
10053+ if ((long)size < 0 || size > INT_MAX)
10054+ return size;
10055+
10056+ if (!__builtin_constant_p(size))
10057+ check_object_size(from, size, true);
10058+
10059+ ret = ___copy_to_user(to, from, size);
10060 if (unlikely(ret))
10061 ret = copy_to_user_fixup(to, from, size);
10062 return ret;
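
The 64-bit variants add the same sign check plus an explicit size > INT_MAX cap, and again call check_object_size() only when the length is not a compile-time constant: constant sizes can be validated at build time by the usercopy/size_overflow compiler plugins, so the runtime walk of slab and stack bounds is reserved for variable lengths. A sketch of that gating; the signature below is inferred from the call sites above, check_nonconst_copy() is a hypothetical wrapper, and the real body of check_object_size() is added elsewhere in this patch:

	/* ptr is the kernel-side buffer, n the length, to_user the direction. */
	extern void check_object_size(const void *ptr, unsigned long n, bool to_user);

	static inline void check_nonconst_copy(const void *ptr, unsigned long n,
					       bool to_user)
	{
		if (!__builtin_constant_p(n))
			check_object_size(ptr, n, to_user);
	}
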
10063diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10064index d15cc17..d0ae796 100644
10065--- a/arch/sparc/kernel/Makefile
10066+++ b/arch/sparc/kernel/Makefile
10067@@ -4,7 +4,7 @@
10068 #
10069
10070 asflags-y := -ansi
10071-ccflags-y := -Werror
10072+#ccflags-y := -Werror
10073
10074 extra-y := head_$(BITS).o
10075
10076diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10077index fdd819d..5af08c8 100644
10078--- a/arch/sparc/kernel/process_32.c
10079+++ b/arch/sparc/kernel/process_32.c
10080@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
10081
10082 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10083 r->psr, r->pc, r->npc, r->y, print_tainted());
10084- printk("PC: <%pS>\n", (void *) r->pc);
10085+ printk("PC: <%pA>\n", (void *) r->pc);
10086 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10087 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10088 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10089 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10090 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10091 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10092- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10093+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10094
10095 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10096 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10097@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10098 rw = (struct reg_window32 *) fp;
10099 pc = rw->ins[7];
10100 printk("[%08lx : ", pc);
10101- printk("%pS ] ", (void *) pc);
10102+ printk("%pA ] ", (void *) pc);
10103 fp = rw->ins[6];
10104 } while (++count < 16);
10105 printk("\n");
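
This file, process_64.c, traps_32.c, traps_64.c, unaligned_64.c and fault_64.c below all make the same substitution: %pS becomes %pA in printk() calls that symbolize a kernel address. %pA is a specifier this patch adds to lib/vsprintf.c (a hunk outside this excerpt); judging by the unchanged arguments it is a drop-in replacement for the stock %pS symbol+offset renderer. A minimal sketch of how such %p<ext> dispatch works; treating 'A' as a variant of the symbol case is an assumption here:

	/* Loosely after lib/vsprintf.c:pointer(), which switches on the
	 * character following %p to pick a renderer. */
	static const char *ptr_ext(char ext)
	{
		switch (ext) {
		case 'S': return "symbol+offset";	/* stock specifier */
		case 'A': return "symbol+offset";	/* grsecurity-added variant */
		default:  return "raw pointer";
		}
	}
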
10106diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10107index 32a280e..84fc6a9 100644
10108--- a/arch/sparc/kernel/process_64.c
10109+++ b/arch/sparc/kernel/process_64.c
10110@@ -159,7 +159,7 @@ static void show_regwindow(struct pt_regs *regs)
10111 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10112 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10113 if (regs->tstate & TSTATE_PRIV)
10114- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10115+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10116 }
10117
10118 void show_regs(struct pt_regs *regs)
10119@@ -168,7 +168,7 @@ void show_regs(struct pt_regs *regs)
10120
10121 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10122 regs->tpc, regs->tnpc, regs->y, print_tainted());
10123- printk("TPC: <%pS>\n", (void *) regs->tpc);
10124+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10125 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10126 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10127 regs->u_regs[3]);
10128@@ -181,7 +181,7 @@ void show_regs(struct pt_regs *regs)
10129 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10130 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10131 regs->u_regs[15]);
10132- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10133+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10134 show_regwindow(regs);
10135 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10136 }
10137@@ -270,7 +270,7 @@ void arch_trigger_all_cpu_backtrace(void)
10138 ((tp && tp->task) ? tp->task->pid : -1));
10139
10140 if (gp->tstate & TSTATE_PRIV) {
10141- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10142+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10143 (void *) gp->tpc,
10144 (void *) gp->o7,
10145 (void *) gp->i7,
10146diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10147index 79cc0d1..ec62734 100644
10148--- a/arch/sparc/kernel/prom_common.c
10149+++ b/arch/sparc/kernel/prom_common.c
10150@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10151
10152 unsigned int prom_early_allocated __initdata;
10153
10154-static struct of_pdt_ops prom_sparc_ops __initdata = {
10155+static struct of_pdt_ops prom_sparc_ops __initconst = {
10156 .nextprop = prom_common_nextprop,
10157 .getproplen = prom_getproplen,
10158 .getproperty = prom_getproperty,
10159diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10160index c13c9f2..d572c34 100644
10161--- a/arch/sparc/kernel/ptrace_64.c
10162+++ b/arch/sparc/kernel/ptrace_64.c
10163@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10164 return ret;
10165 }
10166
10167+#ifdef CONFIG_GRKERNSEC_SETXID
10168+extern void gr_delayed_cred_worker(void);
10169+#endif
10170+
10171 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10172 {
10173 int ret = 0;
10174@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10175 if (test_thread_flag(TIF_NOHZ))
10176 user_exit();
10177
10178+#ifdef CONFIG_GRKERNSEC_SETXID
10179+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10180+ gr_delayed_cred_worker();
10181+#endif
10182+
10183 if (test_thread_flag(TIF_SYSCALL_TRACE))
10184 ret = tracehook_report_syscall_entry(regs);
10185
10186@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10187 if (test_thread_flag(TIF_NOHZ))
10188 user_exit();
10189
10190+#ifdef CONFIG_GRKERNSEC_SETXID
10191+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10192+ gr_delayed_cred_worker();
10193+#endif
10194+
10195 audit_syscall_exit(regs);
10196
10197 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
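
GRKERNSEC_SETXID defers credential changes for a traced task: some other context arms TIF_GRSEC_SETXID, and the next syscall entry or exit notices the flag, clears it, and applies the postponed change via gr_delayed_cred_worker(). Because test_and_clear_thread_flag() reads and clears in one atomic step, the worker runs exactly once per arming even though both the entry and exit paths test it. A sketch of the pattern as the two hunks above use it; run_delayed_cred_work() is a hypothetical name for the repeated fragment:

	static inline void run_delayed_cred_work(void)
	{
	#ifdef CONFIG_GRKERNSEC_SETXID
		if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
			gr_delayed_cred_worker();	/* apply the postponed uid/gid switch */
	#endif
	}
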
10198diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10199index b085311..6f885f7 100644
10200--- a/arch/sparc/kernel/smp_64.c
10201+++ b/arch/sparc/kernel/smp_64.c
10202@@ -870,8 +870,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
10203 extern unsigned long xcall_flush_dcache_page_spitfire;
10204
10205 #ifdef CONFIG_DEBUG_DCFLUSH
10206-extern atomic_t dcpage_flushes;
10207-extern atomic_t dcpage_flushes_xcall;
10208+extern atomic_unchecked_t dcpage_flushes;
10209+extern atomic_unchecked_t dcpage_flushes_xcall;
10210 #endif
10211
10212 static inline void __local_flush_dcache_page(struct page *page)
10213@@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10214 return;
10215
10216 #ifdef CONFIG_DEBUG_DCFLUSH
10217- atomic_inc(&dcpage_flushes);
10218+ atomic_inc_unchecked(&dcpage_flushes);
10219 #endif
10220
10221 this_cpu = get_cpu();
10222@@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10223 xcall_deliver(data0, __pa(pg_addr),
10224 (u64) pg_addr, cpumask_of(cpu));
10225 #ifdef CONFIG_DEBUG_DCFLUSH
10226- atomic_inc(&dcpage_flushes_xcall);
10227+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10228 #endif
10229 }
10230 }
10231@@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10232 preempt_disable();
10233
10234 #ifdef CONFIG_DEBUG_DCFLUSH
10235- atomic_inc(&dcpage_flushes);
10236+ atomic_inc_unchecked(&dcpage_flushes);
10237 #endif
10238 data0 = 0;
10239 pg_addr = page_address(page);
10240@@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10241 xcall_deliver(data0, __pa(pg_addr),
10242 (u64) pg_addr, cpu_online_mask);
10243 #ifdef CONFIG_DEBUG_DCFLUSH
10244- atomic_inc(&dcpage_flushes_xcall);
10245+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10246 #endif
10247 }
10248 __local_flush_dcache_page(page);
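
Under PAX_REFCOUNT the ordinary atomic_t operations trap on signed overflow (see the atomic_64.S hunks further down). Counters that may legitimately wrap, such as these debug statistics, are therefore moved to atomic_unchecked_t and the *_unchecked helpers, which keep the original non-trapping code. A sketch of the type, assuming the shape grsecurity gives it in include/linux/types.h:

	/* Same layout as atomic_t, but a distinct type, so the compiler forces
	 * every call site to choose checked or unchecked operations. */
	typedef struct {
		int counter;
	} atomic_unchecked_t;
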
10249diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10250index 3a8d184..49498a8 100644
10251--- a/arch/sparc/kernel/sys_sparc_32.c
10252+++ b/arch/sparc/kernel/sys_sparc_32.c
10253@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10254 if (len > TASK_SIZE - PAGE_SIZE)
10255 return -ENOMEM;
10256 if (!addr)
10257- addr = TASK_UNMAPPED_BASE;
10258+ addr = current->mm->mmap_base;
10259
10260 info.flags = 0;
10261 info.length = len;
10262diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10263index beb0b5a..5a153f7 100644
10264--- a/arch/sparc/kernel/sys_sparc_64.c
10265+++ b/arch/sparc/kernel/sys_sparc_64.c
10266@@ -88,13 +88,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10267 struct vm_area_struct * vma;
10268 unsigned long task_size = TASK_SIZE;
10269 int do_color_align;
10270+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10271 struct vm_unmapped_area_info info;
10272
10273 if (flags & MAP_FIXED) {
10274 /* We do not accept a shared mapping if it would violate
10275 * cache aliasing constraints.
10276 */
10277- if ((flags & MAP_SHARED) &&
10278+ if ((filp || (flags & MAP_SHARED)) &&
10279 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10280 return -EINVAL;
10281 return addr;
10282@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10283 if (filp || (flags & MAP_SHARED))
10284 do_color_align = 1;
10285
10286+#ifdef CONFIG_PAX_RANDMMAP
10287+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10288+#endif
10289+
10290 if (addr) {
10291 if (do_color_align)
10292 addr = COLOR_ALIGN(addr, pgoff);
10293@@ -116,22 +121,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10294 addr = PAGE_ALIGN(addr);
10295
10296 vma = find_vma(mm, addr);
10297- if (task_size - len >= addr &&
10298- (!vma || addr + len <= vma->vm_start))
10299+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10300 return addr;
10301 }
10302
10303 info.flags = 0;
10304 info.length = len;
10305- info.low_limit = TASK_UNMAPPED_BASE;
10306+ info.low_limit = mm->mmap_base;
10307 info.high_limit = min(task_size, VA_EXCLUDE_START);
10308 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10309 info.align_offset = pgoff << PAGE_SHIFT;
10310+ info.threadstack_offset = offset;
10311 addr = vm_unmapped_area(&info);
10312
10313 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10314 VM_BUG_ON(addr != -ENOMEM);
10315 info.low_limit = VA_EXCLUDE_END;
10316+
10317+#ifdef CONFIG_PAX_RANDMMAP
10318+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10319+ info.low_limit += mm->delta_mmap;
10320+#endif
10321+
10322 info.high_limit = task_size;
10323 addr = vm_unmapped_area(&info);
10324 }
10325@@ -149,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10326 unsigned long task_size = STACK_TOP32;
10327 unsigned long addr = addr0;
10328 int do_color_align;
10329+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10330 struct vm_unmapped_area_info info;
10331
10332 /* This should only ever run for 32-bit processes. */
10333@@ -158,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10334 /* We do not accept a shared mapping if it would violate
10335 * cache aliasing constraints.
10336 */
10337- if ((flags & MAP_SHARED) &&
10338+ if ((filp || (flags & MAP_SHARED)) &&
10339 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10340 return -EINVAL;
10341 return addr;
10342@@ -171,6 +183,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10343 if (filp || (flags & MAP_SHARED))
10344 do_color_align = 1;
10345
10346+#ifdef CONFIG_PAX_RANDMMAP
10347+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10348+#endif
10349+
10350 /* requesting a specific address */
10351 if (addr) {
10352 if (do_color_align)
10353@@ -179,8 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10354 addr = PAGE_ALIGN(addr);
10355
10356 vma = find_vma(mm, addr);
10357- if (task_size - len >= addr &&
10358- (!vma || addr + len <= vma->vm_start))
10359+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10360 return addr;
10361 }
10362
10363@@ -190,6 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10364 info.high_limit = mm->mmap_base;
10365 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10366 info.align_offset = pgoff << PAGE_SHIFT;
10367+ info.threadstack_offset = offset;
10368 addr = vm_unmapped_area(&info);
10369
10370 /*
10371@@ -202,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10372 VM_BUG_ON(addr != -ENOMEM);
10373 info.flags = 0;
10374 info.low_limit = TASK_UNMAPPED_BASE;
10375+
10376+#ifdef CONFIG_PAX_RANDMMAP
10377+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10378+ info.low_limit += mm->delta_mmap;
10379+#endif
10380+
10381 info.high_limit = STACK_TOP32;
10382 addr = vm_unmapped_area(&info);
10383 }
10384@@ -258,10 +280,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10385 EXPORT_SYMBOL(get_fb_unmapped_area);
10386
10387 /* Essentially the same as PowerPC. */
10388-static unsigned long mmap_rnd(void)
10389+static unsigned long mmap_rnd(struct mm_struct *mm)
10390 {
10391 unsigned long rnd = 0UL;
10392
10393+#ifdef CONFIG_PAX_RANDMMAP
10394+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10395+#endif
10396+
10397 if (current->flags & PF_RANDOMIZE) {
10398 unsigned long val = get_random_int();
10399 if (test_thread_flag(TIF_32BIT))
10400@@ -274,7 +300,7 @@ static unsigned long mmap_rnd(void)
10401
10402 void arch_pick_mmap_layout(struct mm_struct *mm)
10403 {
10404- unsigned long random_factor = mmap_rnd();
10405+ unsigned long random_factor = mmap_rnd(mm);
10406 unsigned long gap;
10407
10408 /*
10409@@ -287,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10410 gap == RLIM_INFINITY ||
10411 sysctl_legacy_va_layout) {
10412 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10413+
10414+#ifdef CONFIG_PAX_RANDMMAP
10415+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10416+ mm->mmap_base += mm->delta_mmap;
10417+#endif
10418+
10419 mm->get_unmapped_area = arch_get_unmapped_area;
10420 } else {
10421 /* We know it's 32-bit */
10422@@ -298,6 +330,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10423 gap = (task_size / 6 * 5);
10424
10425 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10426+
10427+#ifdef CONFIG_PAX_RANDMMAP
10428+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10429+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10430+#endif
10431+
10432 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10433 }
10434 }
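
Taken together, the mmap hunks (here and in sys_sparc_32.c above) do three things: they base searches on mm->mmap_base, which PAX_RANDMMAP shifts by delta_mmap, instead of the fixed TASK_UNMAPPED_BASE; they thread a per-mapping randomization offset from gr_rand_threadstack_offset() into vm_unmapped_area(); and they replace the open-coded "does it fit below the next vma" test with check_heap_stack_gap(), which additionally keeps a guard gap clear below stack-like mappings. The MAP_FIXED hunks also extend the SHMLBA cache-aliasing check from shared mappings to all file mappings. A simplified paraphrase of what the gap check verifies, assuming grsecurity's semantics; gap_ok() is a hypothetical name and the real helper also consumes the threadstack offset:

	static bool gap_ok(const struct vm_area_struct *vma,
			   unsigned long addr, unsigned long len,
			   unsigned long gap_pages)
	{
		if (!vma)
			return true;			/* nothing above the candidate */
		if (addr + len > vma->vm_start)
			return false;			/* overlaps the next vma */
		if (vma->vm_flags & VM_GROWSDOWN)	/* keep clear of a stack */
			return addr + len + (gap_pages << PAGE_SHIFT) <= vma->vm_start;
		return true;
	}
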
10435diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10436index 87729ff..d87fb1f 100644
10437--- a/arch/sparc/kernel/syscalls.S
10438+++ b/arch/sparc/kernel/syscalls.S
10439@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
10440 #endif
10441 .align 32
10442 1: ldx [%g6 + TI_FLAGS], %l5
10443- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10444+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10445 be,pt %icc, rtrap
10446 nop
10447 call syscall_trace_leave
10448@@ -184,12 +184,13 @@ linux_sparc_syscall32:
10449
10450 srl %i3, 0, %o3 ! IEU0
10451 srl %i2, 0, %o2 ! IEU0 Group
10452- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10453+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10454 bne,pn %icc, linux_syscall_trace32 ! CTI
10455 mov %i0, %l5 ! IEU1
10456 5: call %l7 ! CTI Group brk forced
10457 srl %i5, 0, %o5 ! IEU1
10458- ba,a,pt %xcc, 3f
10459+ ba,pt %xcc, 3f
10460+ sra %o0, 0, %o0
10461
10462 /* Linux native system calls enter here... */
10463 .align 32
10464@@ -207,7 +208,7 @@ linux_sparc_syscall:
10465
10466 mov %i3, %o3 ! IEU1
10467 mov %i4, %o4 ! IEU0 Group
10468- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10469+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10470 bne,pn %icc, linux_syscall_trace ! CTI Group
10471 mov %i0, %l5 ! IEU0
10472 2: call %l7 ! CTI Group brk forced
10473@@ -217,13 +218,12 @@ linux_sparc_syscall:
10474 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
10475 ret_sys_call:
10476 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
10477- sra %o0, 0, %o0
10478 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
10479 sllx %g2, 32, %g2
10480
10481 cmp %o0, -ERESTART_RESTARTBLOCK
10482 bgeu,pn %xcc, 1f
10483- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10484+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10485 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10486
10487 2:
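
Two separate changes here. First, the four copies of the long thread-flag mask collapse into a single _TIF_WORK_SYSCALL test; the point is that the grsecurity-added TIF_GRSEC_SETXID (handled in ptrace_64.c above) must also force the slow path, and one shared macro cannot drift out of sync across the test sites. Second, the sra that sign-extended every syscall return value moves out of the shared ret_sys_call path and into the delay slot of the compat branch, so it now applies only to 32-bit syscalls and native 64-bit syscalls keep their full-width result (ba,a annuls the delay slot, plain ba executes it, which is why the branch annotation changes). A sketch of the mask; the exact definition lives in the asm/thread_info_64.h hunk outside this excerpt, and including _TIF_GRSEC_SETXID is the assumed motivation:

	#define _TIF_WORK_SYSCALL \
		(_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
		 _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
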
10488diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10489index 6629829..036032d 100644
10490--- a/arch/sparc/kernel/traps_32.c
10491+++ b/arch/sparc/kernel/traps_32.c
10492@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10493 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10494 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10495
10496+extern void gr_handle_kernel_exploit(void);
10497+
10498 void die_if_kernel(char *str, struct pt_regs *regs)
10499 {
10500 static int die_counter;
10501@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10502 count++ < 30 &&
10503 (((unsigned long) rw) >= PAGE_OFFSET) &&
10504 !(((unsigned long) rw) & 0x7)) {
10505- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10506+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10507 (void *) rw->ins[7]);
10508 rw = (struct reg_window32 *)rw->ins[6];
10509 }
10510 }
10511 printk("Instruction DUMP:");
10512 instruction_dump ((unsigned long *) regs->pc);
10513- if(regs->psr & PSR_PS)
10514+ if(regs->psr & PSR_PS) {
10515+ gr_handle_kernel_exploit();
10516 do_exit(SIGKILL);
10517+ }
10518 do_exit(SIGSEGV);
10519 }
10520
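
die_if_kernel() now calls gr_handle_kernel_exploit() before killing a task that crashed in privileged mode (PSR_PS set means the trap came from kernel state). This is grsecurity's active-response hook: a kernel oops in the context of an unprivileged user is treated as a likely failed exploit attempt, so the offending uid can be banned and its processes killed rather than letting it retry. A sketch of the hook's role, not its real body, which lives in the grsecurity/ tree added by this patch:

	void gr_handle_kernel_exploit(void)
	{
		const struct cred *cred = current_cred();

		if (uid_eq(cred->uid, GLOBAL_ROOT_UID))
			return;		/* banning root would change nothing */
		pr_alert("grsec: banning uid %u after kernel oops\n",
			 from_kuid_munged(&init_user_ns, cred->uid));
		/* assumed behaviour: mark the uid banned for a timeout and
		 * kill its remaining tasks */
	}
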
10521diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10522index 4ced92f..965eeed 100644
10523--- a/arch/sparc/kernel/traps_64.c
10524+++ b/arch/sparc/kernel/traps_64.c
10525@@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10526 i + 1,
10527 p->trapstack[i].tstate, p->trapstack[i].tpc,
10528 p->trapstack[i].tnpc, p->trapstack[i].tt);
10529- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10530+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10531 }
10532 }
10533
10534@@ -97,6 +97,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10535
10536 lvl -= 0x100;
10537 if (regs->tstate & TSTATE_PRIV) {
10538+
10539+#ifdef CONFIG_PAX_REFCOUNT
10540+ if (lvl == 6)
10541+ pax_report_refcount_overflow(regs);
10542+#endif
10543+
10544 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10545 die_if_kernel(buffer, regs);
10546 }
10547@@ -115,11 +121,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10548 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10549 {
10550 char buffer[32];
10551-
10552+
10553 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10554 0, lvl, SIGTRAP) == NOTIFY_STOP)
10555 return;
10556
10557+#ifdef CONFIG_PAX_REFCOUNT
10558+ if (lvl == 6)
10559+ pax_report_refcount_overflow(regs);
10560+#endif
10561+
10562 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10563
10564 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10565@@ -1149,7 +1160,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10566 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10567 printk("%s" "ERROR(%d): ",
10568 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10569- printk("TPC<%pS>\n", (void *) regs->tpc);
10570+ printk("TPC<%pA>\n", (void *) regs->tpc);
10571 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10572 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10573 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10574@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10575 smp_processor_id(),
10576 (type & 0x1) ? 'I' : 'D',
10577 regs->tpc);
10578- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
10579+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
10580 panic("Irrecoverable Cheetah+ parity error.");
10581 }
10582
10583@@ -1764,7 +1775,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10584 smp_processor_id(),
10585 (type & 0x1) ? 'I' : 'D',
10586 regs->tpc);
10587- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
10588+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
10589 }
10590
10591 struct sun4v_error_entry {
10592@@ -1837,8 +1848,8 @@ struct sun4v_error_entry {
10593 /*0x38*/u64 reserved_5;
10594 };
10595
10596-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10597-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10598+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10599+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10600
10601 static const char *sun4v_err_type_to_str(u8 type)
10602 {
10603@@ -1930,7 +1941,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
10604 }
10605
10606 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10607- int cpu, const char *pfx, atomic_t *ocnt)
10608+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
10609 {
10610 u64 *raw_ptr = (u64 *) ent;
10611 u32 attrs;
10612@@ -1988,8 +1999,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10613
10614 show_regs(regs);
10615
10616- if ((cnt = atomic_read(ocnt)) != 0) {
10617- atomic_set(ocnt, 0);
10618+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
10619+ atomic_set_unchecked(ocnt, 0);
10620 wmb();
10621 printk("%s: Queue overflowed %d times.\n",
10622 pfx, cnt);
10623@@ -2046,7 +2057,7 @@ out:
10624 */
10625 void sun4v_resum_overflow(struct pt_regs *regs)
10626 {
10627- atomic_inc(&sun4v_resum_oflow_cnt);
10628+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
10629 }
10630
10631 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
10632@@ -2099,7 +2110,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
10633 /* XXX Actually even this can make not that much sense. Perhaps
10634 * XXX we should just pull the plug and panic directly from here?
10635 */
10636- atomic_inc(&sun4v_nonresum_oflow_cnt);
10637+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
10638 }
10639
10640 unsigned long sun4v_err_itlb_vaddr;
10641@@ -2114,9 +2125,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
10642
10643 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
10644 regs->tpc, tl);
10645- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
10646+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
10647 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10648- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
10649+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
10650 (void *) regs->u_regs[UREG_I7]);
10651 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
10652 "pte[%lx] error[%lx]\n",
10653@@ -2138,9 +2149,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
10654
10655 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
10656 regs->tpc, tl);
10657- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
10658+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
10659 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10660- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
10661+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
10662 (void *) regs->u_regs[UREG_I7]);
10663 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
10664 "pte[%lx] error[%lx]\n",
10665@@ -2359,13 +2370,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10666 fp = (unsigned long)sf->fp + STACK_BIAS;
10667 }
10668
10669- printk(" [%016lx] %pS\n", pc, (void *) pc);
10670+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10671 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10672 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
10673 int index = tsk->curr_ret_stack;
10674 if (tsk->ret_stack && index >= graph) {
10675 pc = tsk->ret_stack[index - graph].ret;
10676- printk(" [%016lx] %pS\n", pc, (void *) pc);
10677+ printk(" [%016lx] %pA\n", pc, (void *) pc);
10678 graph++;
10679 }
10680 }
10681@@ -2383,6 +2394,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
10682 return (struct reg_window *) (fp + STACK_BIAS);
10683 }
10684
10685+extern void gr_handle_kernel_exploit(void);
10686+
10687 void die_if_kernel(char *str, struct pt_regs *regs)
10688 {
10689 static int die_counter;
10690@@ -2411,7 +2424,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10691 while (rw &&
10692 count++ < 30 &&
10693 kstack_valid(tp, (unsigned long) rw)) {
10694- printk("Caller[%016lx]: %pS\n", rw->ins[7],
10695+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
10696 (void *) rw->ins[7]);
10697
10698 rw = kernel_stack_up(rw);
10699@@ -2424,8 +2437,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10700 }
10701 user_instruction_dump ((unsigned int __user *) regs->tpc);
10702 }
10703- if (regs->tstate & TSTATE_PRIV)
10704+ if (regs->tstate & TSTATE_PRIV) {
10705+ gr_handle_kernel_exploit();
10706 do_exit(SIGKILL);
10707+ }
10708 do_exit(SIGSEGV);
10709 }
10710 EXPORT_SYMBOL(die_if_kernel);
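
Beyond repeating the %pA and die_if_kernel() changes for 64-bit, this file wires up PAX_REFCOUNT's reporting path: the tvs %icc, 6 instructions added in atomic_64.S below raise software trap 6, so after bad_trap() subtracts 0x100 from the trap level, lvl == 6 identifies a refcount overflow and is routed to pax_report_refcount_overflow() instead of the generic "bad sw trap" death. The sun4v queue-overflow counters move to the unchecked API for the same reason as the dcache statistics earlier. A sketch of the accessors they rely on, assuming grsecurity mirrors the stock 3.13 one-liners on the unchecked type:

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return ACCESS_ONCE(v->counter);
	}

	static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
	{
		v->counter = i;
	}
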
10711diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
10712index 3c1a7cb..73e1923 100644
10713--- a/arch/sparc/kernel/unaligned_64.c
10714+++ b/arch/sparc/kernel/unaligned_64.c
10715@@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs *regs)
10716 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
10717
10718 if (__ratelimit(&ratelimit)) {
10719- printk("Kernel unaligned access at TPC[%lx] %pS\n",
10720+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
10721 regs->tpc, (void *) regs->tpc);
10722 }
10723 }
10724diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
10725index dbe119b..089c7c1 100644
10726--- a/arch/sparc/lib/Makefile
10727+++ b/arch/sparc/lib/Makefile
10728@@ -2,7 +2,7 @@
10729 #
10730
10731 asflags-y := -ansi -DST_DIV0=0x02
10732-ccflags-y := -Werror
10733+#ccflags-y := -Werror
10734
10735 lib-$(CONFIG_SPARC32) += ashrdi3.o
10736 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
10737diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
10738index 85c233d..68500e0 100644
10739--- a/arch/sparc/lib/atomic_64.S
10740+++ b/arch/sparc/lib/atomic_64.S
10741@@ -17,7 +17,12 @@
10742 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10743 BACKOFF_SETUP(%o2)
10744 1: lduw [%o1], %g1
10745- add %g1, %o0, %g7
10746+ addcc %g1, %o0, %g7
10747+
10748+#ifdef CONFIG_PAX_REFCOUNT
10749+ tvs %icc, 6
10750+#endif
10751+
10752 cas [%o1], %g1, %g7
10753 cmp %g1, %g7
10754 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10755@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10756 2: BACKOFF_SPIN(%o2, %o3, 1b)
10757 ENDPROC(atomic_add)
10758
10759+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10760+ BACKOFF_SETUP(%o2)
10761+1: lduw [%o1], %g1
10762+ add %g1, %o0, %g7
10763+ cas [%o1], %g1, %g7
10764+ cmp %g1, %g7
10765+ bne,pn %icc, 2f
10766+ nop
10767+ retl
10768+ nop
10769+2: BACKOFF_SPIN(%o2, %o3, 1b)
10770+ENDPROC(atomic_add_unchecked)
10771+
10772 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10773 BACKOFF_SETUP(%o2)
10774 1: lduw [%o1], %g1
10775- sub %g1, %o0, %g7
10776+ subcc %g1, %o0, %g7
10777+
10778+#ifdef CONFIG_PAX_REFCOUNT
10779+ tvs %icc, 6
10780+#endif
10781+
10782 cas [%o1], %g1, %g7
10783 cmp %g1, %g7
10784 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10785@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10786 2: BACKOFF_SPIN(%o2, %o3, 1b)
10787 ENDPROC(atomic_sub)
10788
10789+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10790+ BACKOFF_SETUP(%o2)
10791+1: lduw [%o1], %g1
10792+ sub %g1, %o0, %g7
10793+ cas [%o1], %g1, %g7
10794+ cmp %g1, %g7
10795+ bne,pn %icc, 2f
10796+ nop
10797+ retl
10798+ nop
10799+2: BACKOFF_SPIN(%o2, %o3, 1b)
10800+ENDPROC(atomic_sub_unchecked)
10801+
10802 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10803 BACKOFF_SETUP(%o2)
10804 1: lduw [%o1], %g1
10805- add %g1, %o0, %g7
10806+ addcc %g1, %o0, %g7
10807+
10808+#ifdef CONFIG_PAX_REFCOUNT
10809+ tvs %icc, 6
10810+#endif
10811+
10812 cas [%o1], %g1, %g7
10813 cmp %g1, %g7
10814 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10815@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10816 2: BACKOFF_SPIN(%o2, %o3, 1b)
10817 ENDPROC(atomic_add_ret)
10818
10819+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10820+ BACKOFF_SETUP(%o2)
10821+1: lduw [%o1], %g1
10822+ addcc %g1, %o0, %g7
10823+ cas [%o1], %g1, %g7
10824+ cmp %g1, %g7
10825+ bne,pn %icc, 2f
10826+ add %g7, %o0, %g7
10827+ sra %g7, 0, %o0
10828+ retl
10829+ nop
10830+2: BACKOFF_SPIN(%o2, %o3, 1b)
10831+ENDPROC(atomic_add_ret_unchecked)
10832+
10833 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10834 BACKOFF_SETUP(%o2)
10835 1: lduw [%o1], %g1
10836- sub %g1, %o0, %g7
10837+ subcc %g1, %o0, %g7
10838+
10839+#ifdef CONFIG_PAX_REFCOUNT
10840+ tvs %icc, 6
10841+#endif
10842+
10843 cas [%o1], %g1, %g7
10844 cmp %g1, %g7
10845 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10846@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
10847 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10848 BACKOFF_SETUP(%o2)
10849 1: ldx [%o1], %g1
10850- add %g1, %o0, %g7
10851+ addcc %g1, %o0, %g7
10852+
10853+#ifdef CONFIG_PAX_REFCOUNT
10854+ tvs %xcc, 6
10855+#endif
10856+
10857 casx [%o1], %g1, %g7
10858 cmp %g1, %g7
10859 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10860@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10861 2: BACKOFF_SPIN(%o2, %o3, 1b)
10862 ENDPROC(atomic64_add)
10863
10864+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10865+ BACKOFF_SETUP(%o2)
10866+1: ldx [%o1], %g1
10867+ addcc %g1, %o0, %g7
10868+ casx [%o1], %g1, %g7
10869+ cmp %g1, %g7
10870+ bne,pn %xcc, 2f
10871+ nop
10872+ retl
10873+ nop
10874+2: BACKOFF_SPIN(%o2, %o3, 1b)
10875+ENDPROC(atomic64_add_unchecked)
10876+
10877 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10878 BACKOFF_SETUP(%o2)
10879 1: ldx [%o1], %g1
10880- sub %g1, %o0, %g7
10881+ subcc %g1, %o0, %g7
10882+
10883+#ifdef CONFIG_PAX_REFCOUNT
10884+ tvs %xcc, 6
10885+#endif
10886+
10887 casx [%o1], %g1, %g7
10888 cmp %g1, %g7
10889 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10890@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10891 2: BACKOFF_SPIN(%o2, %o3, 1b)
10892 ENDPROC(atomic64_sub)
10893
10894+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10895+ BACKOFF_SETUP(%o2)
10896+1: ldx [%o1], %g1
10897+ subcc %g1, %o0, %g7
10898+ casx [%o1], %g1, %g7
10899+ cmp %g1, %g7
10900+ bne,pn %xcc, 2f
10901+ nop
10902+ retl
10903+ nop
10904+2: BACKOFF_SPIN(%o2, %o3, 1b)
10905+ENDPROC(atomic64_sub_unchecked)
10906+
10907 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10908 BACKOFF_SETUP(%o2)
10909 1: ldx [%o1], %g1
10910- add %g1, %o0, %g7
10911+ addcc %g1, %o0, %g7
10912+
10913+#ifdef CONFIG_PAX_REFCOUNT
10914+ tvs %xcc, 6
10915+#endif
10916+
10917 casx [%o1], %g1, %g7
10918 cmp %g1, %g7
10919 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10920@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10921 2: BACKOFF_SPIN(%o2, %o3, 1b)
10922 ENDPROC(atomic64_add_ret)
10923
10924+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10925+ BACKOFF_SETUP(%o2)
10926+1: ldx [%o1], %g1
10927+ addcc %g1, %o0, %g7
10928+ casx [%o1], %g1, %g7
10929+ cmp %g1, %g7
10930+ bne,pn %xcc, 2f
10931+ add %g7, %o0, %g7
10932+ mov %g7, %o0
10933+ retl
10934+ nop
10935+2: BACKOFF_SPIN(%o2, %o3, 1b)
10936+ENDPROC(atomic64_add_ret_unchecked)
10937+
10938 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10939 BACKOFF_SETUP(%o2)
10940 1: ldx [%o1], %g1
10941- sub %g1, %o0, %g7
10942+ subcc %g1, %o0, %g7
10943+
10944+#ifdef CONFIG_PAX_REFCOUNT
10945+ tvs %xcc, 6
10946+#endif
10947+
10948 casx [%o1], %g1, %g7
10949 cmp %g1, %g7
10950 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
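
The checked atomics change one instruction and add one: add becomes addcc, which updates the integer condition codes, and tvs %icc, 6 ("trap on overflow set") then raises software trap 6 whenever the addition overflowed, before the cas retry loop can publish the wrapped value; the 64-bit variants test %xcc instead. The new *_unchecked entry points keep the original non-trapping sequence and are exported for modules in ksyms.c below. A portable, runnable C analogue of the checked add, using the GCC/Clang overflow builtin; abort() stands in for the kernel's lvl-6 trap handler:

	#include <stdio.h>
	#include <stdlib.h>

	static int checked_add(int old, int inc)
	{
		int sum;

		if (__builtin_add_overflow(old, inc, &sum))	/* ~ addcc + tvs */
			abort();				/* ~ sw trap 6 */
		return sum;
	}

	int main(void)
	{
		printf("%d\n", checked_add(1, 2));	/* prints 3 */
		/* checked_add(0x7fffffff, 1) would abort instead of wrapping */
		return 0;
	}
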
10951diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
10952index 323335b..ed85ea2 100644
10953--- a/arch/sparc/lib/ksyms.c
10954+++ b/arch/sparc/lib/ksyms.c
10955@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
10956
10957 /* Atomic counter implementation. */
10958 EXPORT_SYMBOL(atomic_add);
10959+EXPORT_SYMBOL(atomic_add_unchecked);
10960 EXPORT_SYMBOL(atomic_add_ret);
10961+EXPORT_SYMBOL(atomic_add_ret_unchecked);
10962 EXPORT_SYMBOL(atomic_sub);
10963+EXPORT_SYMBOL(atomic_sub_unchecked);
10964 EXPORT_SYMBOL(atomic_sub_ret);
10965 EXPORT_SYMBOL(atomic64_add);
10966+EXPORT_SYMBOL(atomic64_add_unchecked);
10967 EXPORT_SYMBOL(atomic64_add_ret);
10968+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
10969 EXPORT_SYMBOL(atomic64_sub);
10970+EXPORT_SYMBOL(atomic64_sub_unchecked);
10971 EXPORT_SYMBOL(atomic64_sub_ret);
10972 EXPORT_SYMBOL(atomic64_dec_if_positive);
10973
10974diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
10975index 30c3ecc..736f015 100644
10976--- a/arch/sparc/mm/Makefile
10977+++ b/arch/sparc/mm/Makefile
10978@@ -2,7 +2,7 @@
10979 #
10980
10981 asflags-y := -ansi
10982-ccflags-y := -Werror
10983+#ccflags-y := -Werror
10984
10985 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
10986 obj-y += fault_$(BITS).o
10987diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
10988index 59dbd46..1dd7f5e 100644
10989--- a/arch/sparc/mm/fault_32.c
10990+++ b/arch/sparc/mm/fault_32.c
10991@@ -21,6 +21,9 @@
10992 #include <linux/perf_event.h>
10993 #include <linux/interrupt.h>
10994 #include <linux/kdebug.h>
10995+#include <linux/slab.h>
10996+#include <linux/pagemap.h>
10997+#include <linux/compiler.h>
10998
10999 #include <asm/page.h>
11000 #include <asm/pgtable.h>
11001@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11002 return safe_compute_effective_address(regs, insn);
11003 }
11004
11005+#ifdef CONFIG_PAX_PAGEEXEC
11006+#ifdef CONFIG_PAX_DLRESOLVE
11007+static void pax_emuplt_close(struct vm_area_struct *vma)
11008+{
11009+ vma->vm_mm->call_dl_resolve = 0UL;
11010+}
11011+
11012+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11013+{
11014+ unsigned int *kaddr;
11015+
11016+ vmf->page = alloc_page(GFP_HIGHUSER);
11017+ if (!vmf->page)
11018+ return VM_FAULT_OOM;
11019+
11020+ kaddr = kmap(vmf->page);
11021+ memset(kaddr, 0, PAGE_SIZE);
11022+ kaddr[0] = 0x9DE3BFA8U; /* save */
11023+ flush_dcache_page(vmf->page);
11024+ kunmap(vmf->page);
11025+ return VM_FAULT_MAJOR;
11026+}
11027+
11028+static const struct vm_operations_struct pax_vm_ops = {
11029+ .close = pax_emuplt_close,
11030+ .fault = pax_emuplt_fault
11031+};
11032+
11033+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11034+{
11035+ int ret;
11036+
11037+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11038+ vma->vm_mm = current->mm;
11039+ vma->vm_start = addr;
11040+ vma->vm_end = addr + PAGE_SIZE;
11041+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11042+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11043+ vma->vm_ops = &pax_vm_ops;
11044+
11045+ ret = insert_vm_struct(current->mm, vma);
11046+ if (ret)
11047+ return ret;
11048+
11049+ ++current->mm->total_vm;
11050+ return 0;
11051+}
11052+#endif
11053+
11054+/*
11055+ * PaX: decide what to do with offenders (regs->pc = fault address)
11056+ *
11057+ * returns 1 when task should be killed
11058+ * 2 when patched PLT trampoline was detected
11059+ * 3 when unpatched PLT trampoline was detected
11060+ */
11061+static int pax_handle_fetch_fault(struct pt_regs *regs)
11062+{
11063+
11064+#ifdef CONFIG_PAX_EMUPLT
11065+ int err;
11066+
11067+ do { /* PaX: patched PLT emulation #1 */
11068+ unsigned int sethi1, sethi2, jmpl;
11069+
11070+ err = get_user(sethi1, (unsigned int *)regs->pc);
11071+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11072+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11073+
11074+ if (err)
11075+ break;
11076+
11077+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11078+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11079+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11080+ {
11081+ unsigned int addr;
11082+
11083+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11084+ addr = regs->u_regs[UREG_G1];
11085+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11086+ regs->pc = addr;
11087+ regs->npc = addr+4;
11088+ return 2;
11089+ }
11090+ } while (0);
11091+
11092+ do { /* PaX: patched PLT emulation #2 */
11093+ unsigned int ba;
11094+
11095+ err = get_user(ba, (unsigned int *)regs->pc);
11096+
11097+ if (err)
11098+ break;
11099+
11100+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11101+ unsigned int addr;
11102+
11103+ if ((ba & 0xFFC00000U) == 0x30800000U)
11104+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11105+ else
11106+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11107+ regs->pc = addr;
11108+ regs->npc = addr+4;
11109+ return 2;
11110+ }
11111+ } while (0);
11112+
11113+ do { /* PaX: patched PLT emulation #3 */
11114+ unsigned int sethi, bajmpl, nop;
11115+
11116+ err = get_user(sethi, (unsigned int *)regs->pc);
11117+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11118+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11119+
11120+ if (err)
11121+ break;
11122+
11123+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11124+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11125+ nop == 0x01000000U)
11126+ {
11127+ unsigned int addr;
11128+
11129+ addr = (sethi & 0x003FFFFFU) << 10;
11130+ regs->u_regs[UREG_G1] = addr;
11131+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11132+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11133+ else
11134+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11135+ regs->pc = addr;
11136+ regs->npc = addr+4;
11137+ return 2;
11138+ }
11139+ } while (0);
11140+
11141+ do { /* PaX: unpatched PLT emulation step 1 */
11142+ unsigned int sethi, ba, nop;
11143+
11144+ err = get_user(sethi, (unsigned int *)regs->pc);
11145+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11146+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11147+
11148+ if (err)
11149+ break;
11150+
11151+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11152+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11153+ nop == 0x01000000U)
11154+ {
11155+ unsigned int addr, save, call;
11156+
11157+ if ((ba & 0xFFC00000U) == 0x30800000U)
11158+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11159+ else
11160+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11161+
11162+ err = get_user(save, (unsigned int *)addr);
11163+ err |= get_user(call, (unsigned int *)(addr+4));
11164+ err |= get_user(nop, (unsigned int *)(addr+8));
11165+ if (err)
11166+ break;
11167+
11168+#ifdef CONFIG_PAX_DLRESOLVE
11169+ if (save == 0x9DE3BFA8U &&
11170+ (call & 0xC0000000U) == 0x40000000U &&
11171+ nop == 0x01000000U)
11172+ {
11173+ struct vm_area_struct *vma;
11174+ unsigned long call_dl_resolve;
11175+
11176+ down_read(&current->mm->mmap_sem);
11177+ call_dl_resolve = current->mm->call_dl_resolve;
11178+ up_read(&current->mm->mmap_sem);
11179+ if (likely(call_dl_resolve))
11180+ goto emulate;
11181+
11182+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11183+
11184+ down_write(&current->mm->mmap_sem);
11185+ if (current->mm->call_dl_resolve) {
11186+ call_dl_resolve = current->mm->call_dl_resolve;
11187+ up_write(&current->mm->mmap_sem);
11188+ if (vma)
11189+ kmem_cache_free(vm_area_cachep, vma);
11190+ goto emulate;
11191+ }
11192+
11193+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11194+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11195+ up_write(&current->mm->mmap_sem);
11196+ if (vma)
11197+ kmem_cache_free(vm_area_cachep, vma);
11198+ return 1;
11199+ }
11200+
11201+ if (pax_insert_vma(vma, call_dl_resolve)) {
11202+ up_write(&current->mm->mmap_sem);
11203+ kmem_cache_free(vm_area_cachep, vma);
11204+ return 1;
11205+ }
11206+
11207+ current->mm->call_dl_resolve = call_dl_resolve;
11208+ up_write(&current->mm->mmap_sem);
11209+
11210+emulate:
11211+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11212+ regs->pc = call_dl_resolve;
11213+ regs->npc = addr+4;
11214+ return 3;
11215+ }
11216+#endif
11217+
11218+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11219+ if ((save & 0xFFC00000U) == 0x05000000U &&
11220+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11221+ nop == 0x01000000U)
11222+ {
11223+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11224+ regs->u_regs[UREG_G2] = addr + 4;
11225+ addr = (save & 0x003FFFFFU) << 10;
11226+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11227+ regs->pc = addr;
11228+ regs->npc = addr+4;
11229+ return 3;
11230+ }
11231+ }
11232+ } while (0);
11233+
11234+ do { /* PaX: unpatched PLT emulation step 2 */
11235+ unsigned int save, call, nop;
11236+
11237+ err = get_user(save, (unsigned int *)(regs->pc-4));
11238+ err |= get_user(call, (unsigned int *)regs->pc);
11239+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11240+ if (err)
11241+ break;
11242+
11243+ if (save == 0x9DE3BFA8U &&
11244+ (call & 0xC0000000U) == 0x40000000U &&
11245+ nop == 0x01000000U)
11246+ {
11247+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11248+
11249+ regs->u_regs[UREG_RETPC] = regs->pc;
11250+ regs->pc = dl_resolve;
11251+ regs->npc = dl_resolve+4;
11252+ return 3;
11253+ }
11254+ } while (0);
11255+#endif
11256+
11257+ return 1;
11258+}
11259+
11260+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11261+{
11262+ unsigned long i;
11263+
11264+ printk(KERN_ERR "PAX: bytes at PC: ");
11265+ for (i = 0; i < 8; i++) {
11266+ unsigned int c;
11267+ if (get_user(c, (unsigned int *)pc+i))
11268+ printk(KERN_CONT "???????? ");
11269+ else
11270+ printk(KERN_CONT "%08x ", c);
11271+ }
11272+ printk("\n");
11273+}
11274+#endif
11275+
11276 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11277 int text_fault)
11278 {
11279@@ -229,6 +503,24 @@ good_area:
11280 if (!(vma->vm_flags & VM_WRITE))
11281 goto bad_area;
11282 } else {
11283+
11284+#ifdef CONFIG_PAX_PAGEEXEC
11285+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11286+ up_read(&mm->mmap_sem);
11287+ switch (pax_handle_fetch_fault(regs)) {
11288+
11289+#ifdef CONFIG_PAX_EMUPLT
11290+ case 2:
11291+ case 3:
11292+ return;
11293+#endif
11294+
11295+ }
11296+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11297+ do_group_exit(SIGKILL);
11298+ }
11299+#endif
11300+
11301 /* Allow reads even for write-only mappings */
11302 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11303 goto bad_area;
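
Under PAX_PAGEEXEC an instruction fetch from a non-executable page faults, and pax_handle_fetch_fault() pattern-matches the faulting code against the PLT trampoline shapes the SPARC toolchain emits, emulating them instead of killing the task; without this, non-executable data segments would break lazy dynamic linking, whose trampolines live in writable data. The magic constants are instruction encodings: for example (insn & 0xFFC00000U) == 0x03000000U recognizes sethi %hi(imm22), %g1, and (insn & 0x003FFFFFU) << 10 extracts the 22-bit immediate that sethi deposits in the upper bits of the register. The CONFIG_PAX_DLRESOLVE branch goes further and maps a fresh page whose first word is 0x9DE3BFA8 (a save instruction) so an unpatched trampoline can be restarted through it. A self-contained decode check for the sethi matcher:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* SPARC format 2: op=00, rd (5 bits), op2=100, imm22.
	 * 0x03000000 is sethi with rd = %g1. */
	static bool is_sethi_g1(uint32_t insn)
	{
		return (insn & 0xFFC00000u) == 0x03000000u;
	}

	static uint32_t sethi_value(uint32_t insn)
	{
		return (insn & 0x003FFFFFu) << 10;	/* imm22 fills bits 31..10 */
	}

	int main(void)
	{
		uint32_t insn = 0x03000000u | 0x12345u;	/* sethi %hi(0x48d1400), %g1 */

		if (is_sethi_g1(insn))
			printf("sets %%g1 to %#x\n", sethi_value(insn));	/* 0x48d1400 */
		return 0;
	}
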
11304diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11305index 69bb818..6ca35c8 100644
11306--- a/arch/sparc/mm/fault_64.c
11307+++ b/arch/sparc/mm/fault_64.c
11308@@ -22,6 +22,9 @@
11309 #include <linux/kdebug.h>
11310 #include <linux/percpu.h>
11311 #include <linux/context_tracking.h>
11312+#include <linux/slab.h>
11313+#include <linux/pagemap.h>
11314+#include <linux/compiler.h>
11315
11316 #include <asm/page.h>
11317 #include <asm/pgtable.h>
11318@@ -75,7 +78,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11319 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11320 regs->tpc);
11321 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11322- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11323+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11324 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11325 dump_stack();
11326 unhandled_fault(regs->tpc, current, regs);
11327@@ -271,6 +274,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
11328 show_regs(regs);
11329 }
11330
11331+#ifdef CONFIG_PAX_PAGEEXEC
11332+#ifdef CONFIG_PAX_DLRESOLVE
11333+static void pax_emuplt_close(struct vm_area_struct *vma)
11334+{
11335+ vma->vm_mm->call_dl_resolve = 0UL;
11336+}
11337+
11338+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11339+{
11340+ unsigned int *kaddr;
11341+
11342+ vmf->page = alloc_page(GFP_HIGHUSER);
11343+ if (!vmf->page)
11344+ return VM_FAULT_OOM;
11345+
11346+ kaddr = kmap(vmf->page);
11347+ memset(kaddr, 0, PAGE_SIZE);
11348+ kaddr[0] = 0x9DE3BFA8U; /* save */
11349+ flush_dcache_page(vmf->page);
11350+ kunmap(vmf->page);
11351+ return VM_FAULT_MAJOR;
11352+}
11353+
11354+static const struct vm_operations_struct pax_vm_ops = {
11355+ .close = pax_emuplt_close,
11356+ .fault = pax_emuplt_fault
11357+};
11358+
11359+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11360+{
11361+ int ret;
11362+
11363+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11364+ vma->vm_mm = current->mm;
11365+ vma->vm_start = addr;
11366+ vma->vm_end = addr + PAGE_SIZE;
11367+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11368+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11369+ vma->vm_ops = &pax_vm_ops;
11370+
11371+ ret = insert_vm_struct(current->mm, vma);
11372+ if (ret)
11373+ return ret;
11374+
11375+ ++current->mm->total_vm;
11376+ return 0;
11377+}
11378+#endif
11379+
11380+/*
11381+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11382+ *
11383+ * returns 1 when task should be killed
11384+ * 2 when patched PLT trampoline was detected
11385+ * 3 when unpatched PLT trampoline was detected
11386+ */
11387+static int pax_handle_fetch_fault(struct pt_regs *regs)
11388+{
11389+
11390+#ifdef CONFIG_PAX_EMUPLT
11391+ int err;
11392+
11393+ do { /* PaX: patched PLT emulation #1 */
11394+ unsigned int sethi1, sethi2, jmpl;
11395+
11396+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11397+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11398+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11399+
11400+ if (err)
11401+ break;
11402+
11403+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11404+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11405+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11406+ {
11407+ unsigned long addr;
11408+
11409+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11410+ addr = regs->u_regs[UREG_G1];
11411+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11412+
11413+ if (test_thread_flag(TIF_32BIT))
11414+ addr &= 0xFFFFFFFFUL;
11415+
11416+ regs->tpc = addr;
11417+ regs->tnpc = addr+4;
11418+ return 2;
11419+ }
11420+ } while (0);
11421+
11422+ do { /* PaX: patched PLT emulation #2 */
11423+ unsigned int ba;
11424+
11425+ err = get_user(ba, (unsigned int *)regs->tpc);
11426+
11427+ if (err)
11428+ break;
11429+
11430+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11431+ unsigned long addr;
11432+
11433+ if ((ba & 0xFFC00000U) == 0x30800000U)
11434+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11435+ else
11436+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11437+
11438+ if (test_thread_flag(TIF_32BIT))
11439+ addr &= 0xFFFFFFFFUL;
11440+
11441+ regs->tpc = addr;
11442+ regs->tnpc = addr+4;
11443+ return 2;
11444+ }
11445+ } while (0);
11446+
11447+ do { /* PaX: patched PLT emulation #3 */
11448+ unsigned int sethi, bajmpl, nop;
11449+
11450+ err = get_user(sethi, (unsigned int *)regs->tpc);
11451+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11452+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11453+
11454+ if (err)
11455+ break;
11456+
11457+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11458+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11459+ nop == 0x01000000U)
11460+ {
11461+ unsigned long addr;
11462+
11463+ addr = (sethi & 0x003FFFFFU) << 10;
11464+ regs->u_regs[UREG_G1] = addr;
11465+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11466+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11467+ else
11468+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11469+
11470+ if (test_thread_flag(TIF_32BIT))
11471+ addr &= 0xFFFFFFFFUL;
11472+
11473+ regs->tpc = addr;
11474+ regs->tnpc = addr+4;
11475+ return 2;
11476+ }
11477+ } while (0);
11478+
11479+ do { /* PaX: patched PLT emulation #4 */
11480+ unsigned int sethi, mov1, call, mov2;
11481+
11482+ err = get_user(sethi, (unsigned int *)regs->tpc);
11483+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11484+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11485+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11486+
11487+ if (err)
11488+ break;
11489+
11490+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11491+ mov1 == 0x8210000FU &&
11492+ (call & 0xC0000000U) == 0x40000000U &&
11493+ mov2 == 0x9E100001U)
11494+ {
11495+ unsigned long addr;
11496+
11497+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11498+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11499+
11500+ if (test_thread_flag(TIF_32BIT))
11501+ addr &= 0xFFFFFFFFUL;
11502+
11503+ regs->tpc = addr;
11504+ regs->tnpc = addr+4;
11505+ return 2;
11506+ }
11507+ } while (0);
11508+
11509+ do { /* PaX: patched PLT emulation #5 */
11510+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11511+
11512+ err = get_user(sethi, (unsigned int *)regs->tpc);
11513+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11514+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11515+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11516+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11517+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11518+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11519+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11520+
11521+ if (err)
11522+ break;
11523+
11524+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11525+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11526+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11527+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11528+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11529+ sllx == 0x83287020U &&
11530+ jmpl == 0x81C04005U &&
11531+ nop == 0x01000000U)
11532+ {
11533+ unsigned long addr;
11534+
11535+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11536+ regs->u_regs[UREG_G1] <<= 32;
11537+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11538+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11539+ regs->tpc = addr;
11540+ regs->tnpc = addr+4;
11541+ return 2;
11542+ }
11543+ } while (0);
11544+
11545+ do { /* PaX: patched PLT emulation #6 */
11546+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11547+
11548+ err = get_user(sethi, (unsigned int *)regs->tpc);
11549+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11550+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11551+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11552+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11553+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11554+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11555+
11556+ if (err)
11557+ break;
11558+
11559+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11560+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11561+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11562+ sllx == 0x83287020U &&
11563+ (or & 0xFFFFE000U) == 0x8A116000U &&
11564+ jmpl == 0x81C04005U &&
11565+ nop == 0x01000000U)
11566+ {
11567+ unsigned long addr;
11568+
11569+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11570+ regs->u_regs[UREG_G1] <<= 32;
11571+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11572+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11573+ regs->tpc = addr;
11574+ regs->tnpc = addr+4;
11575+ return 2;
11576+ }
11577+ } while (0);
11578+
11579+ do { /* PaX: unpatched PLT emulation step 1 */
11580+ unsigned int sethi, ba, nop;
11581+
11582+ err = get_user(sethi, (unsigned int *)regs->tpc);
11583+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11584+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11585+
11586+ if (err)
11587+ break;
11588+
11589+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11590+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11591+ nop == 0x01000000U)
11592+ {
11593+ unsigned long addr;
11594+ unsigned int save, call;
11595+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11596+
11597+ if ((ba & 0xFFC00000U) == 0x30800000U)
11598+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11599+ else
11600+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11601+
11602+ if (test_thread_flag(TIF_32BIT))
11603+ addr &= 0xFFFFFFFFUL;
11604+
11605+ err = get_user(save, (unsigned int *)addr);
11606+ err |= get_user(call, (unsigned int *)(addr+4));
11607+ err |= get_user(nop, (unsigned int *)(addr+8));
11608+ if (err)
11609+ break;
11610+
11611+#ifdef CONFIG_PAX_DLRESOLVE
11612+ if (save == 0x9DE3BFA8U &&
11613+ (call & 0xC0000000U) == 0x40000000U &&
11614+ nop == 0x01000000U)
11615+ {
11616+ struct vm_area_struct *vma;
11617+ unsigned long call_dl_resolve;
11618+
11619+ down_read(&current->mm->mmap_sem);
11620+ call_dl_resolve = current->mm->call_dl_resolve;
11621+ up_read(&current->mm->mmap_sem);
11622+ if (likely(call_dl_resolve))
11623+ goto emulate;
11624+
11625+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11626+
11627+ down_write(&current->mm->mmap_sem);
11628+ if (current->mm->call_dl_resolve) {
11629+ call_dl_resolve = current->mm->call_dl_resolve;
11630+ up_write(&current->mm->mmap_sem);
11631+ if (vma)
11632+ kmem_cache_free(vm_area_cachep, vma);
11633+ goto emulate;
11634+ }
11635+
11636+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11637+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11638+ up_write(&current->mm->mmap_sem);
11639+ if (vma)
11640+ kmem_cache_free(vm_area_cachep, vma);
11641+ return 1;
11642+ }
11643+
11644+ if (pax_insert_vma(vma, call_dl_resolve)) {
11645+ up_write(&current->mm->mmap_sem);
11646+ kmem_cache_free(vm_area_cachep, vma);
11647+ return 1;
11648+ }
11649+
11650+ current->mm->call_dl_resolve = call_dl_resolve;
11651+ up_write(&current->mm->mmap_sem);
11652+
11653+emulate:
11654+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11655+ regs->tpc = call_dl_resolve;
11656+ regs->tnpc = addr+4;
11657+ return 3;
11658+ }
11659+#endif
11660+
11661+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11662+ if ((save & 0xFFC00000U) == 0x05000000U &&
11663+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11664+ nop == 0x01000000U)
11665+ {
11666+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11667+ regs->u_regs[UREG_G2] = addr + 4;
11668+ addr = (save & 0x003FFFFFU) << 10;
11669+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11670+
11671+ if (test_thread_flag(TIF_32BIT))
11672+ addr &= 0xFFFFFFFFUL;
11673+
11674+ regs->tpc = addr;
11675+ regs->tnpc = addr+4;
11676+ return 3;
11677+ }
11678+
11679+ /* PaX: 64-bit PLT stub */
11680+ err = get_user(sethi1, (unsigned int *)addr);
11681+ err |= get_user(sethi2, (unsigned int *)(addr+4));
11682+ err |= get_user(or1, (unsigned int *)(addr+8));
11683+ err |= get_user(or2, (unsigned int *)(addr+12));
11684+ err |= get_user(sllx, (unsigned int *)(addr+16));
11685+ err |= get_user(add, (unsigned int *)(addr+20));
11686+ err |= get_user(jmpl, (unsigned int *)(addr+24));
11687+ err |= get_user(nop, (unsigned int *)(addr+28));
11688+ if (err)
11689+ break;
11690+
11691+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
11692+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11693+ (or1 & 0xFFFFE000U) == 0x88112000U &&
11694+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11695+ sllx == 0x89293020U &&
11696+ add == 0x8A010005U &&
11697+ jmpl == 0x89C14000U &&
11698+ nop == 0x01000000U)
11699+ {
11700+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11701+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11702+ regs->u_regs[UREG_G4] <<= 32;
11703+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11704+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
11705+ regs->u_regs[UREG_G4] = addr + 24;
11706+ addr = regs->u_regs[UREG_G5];
11707+ regs->tpc = addr;
11708+ regs->tnpc = addr+4;
11709+ return 3;
11710+ }
11711+ }
11712+ } while (0);
11713+
11714+#ifdef CONFIG_PAX_DLRESOLVE
11715+ do { /* PaX: unpatched PLT emulation step 2 */
11716+ unsigned int save, call, nop;
11717+
11718+ err = get_user(save, (unsigned int *)(regs->tpc-4));
11719+ err |= get_user(call, (unsigned int *)regs->tpc);
11720+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
11721+ if (err)
11722+ break;
11723+
11724+ if (save == 0x9DE3BFA8U &&
11725+ (call & 0xC0000000U) == 0x40000000U &&
11726+ nop == 0x01000000U)
11727+ {
11728+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11729+
11730+ if (test_thread_flag(TIF_32BIT))
11731+ dl_resolve &= 0xFFFFFFFFUL;
11732+
11733+ regs->u_regs[UREG_RETPC] = regs->tpc;
11734+ regs->tpc = dl_resolve;
11735+ regs->tnpc = dl_resolve+4;
11736+ return 3;
11737+ }
11738+ } while (0);
11739+#endif
11740+
11741+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
11742+ unsigned int sethi, ba, nop;
11743+
11744+ err = get_user(sethi, (unsigned int *)regs->tpc);
11745+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11746+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11747+
11748+ if (err)
11749+ break;
11750+
11751+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11752+ (ba & 0xFFF00000U) == 0x30600000U &&
11753+ nop == 0x01000000U)
11754+ {
11755+ unsigned long addr;
11756+
11757+ addr = (sethi & 0x003FFFFFU) << 10;
11758+ regs->u_regs[UREG_G1] = addr;
11759+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11760+
11761+ if (test_thread_flag(TIF_32BIT))
11762+ addr &= 0xFFFFFFFFUL;
11763+
11764+ regs->tpc = addr;
11765+ regs->tnpc = addr+4;
11766+ return 2;
11767+ }
11768+ } while (0);
11769+
11770+#endif
11771+
11772+ return 1;
11773+}
11774+
11775+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11776+{
11777+ unsigned long i;
11778+
11779+ printk(KERN_ERR "PAX: bytes at PC: ");
11780+ for (i = 0; i < 8; i++) {
11781+ unsigned int c;
11782+ if (get_user(c, (unsigned int *)pc+i))
11783+ printk(KERN_CONT "???????? ");
11784+ else
11785+ printk(KERN_CONT "%08x ", c);
11786+ }
11787+ printk("\n");
11788+}
11789+#endif
11790+
11791 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
11792 {
11793 enum ctx_state prev_state = exception_enter();
11794@@ -344,6 +807,29 @@ retry:
11795 if (!vma)
11796 goto bad_area;
11797
11798+#ifdef CONFIG_PAX_PAGEEXEC
11799+ /* PaX: detect ITLB misses on non-exec pages */
11800+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
11801+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
11802+ {
11803+ if (address != regs->tpc)
11804+ goto good_area;
11805+
11806+ up_read(&mm->mmap_sem);
11807+ switch (pax_handle_fetch_fault(regs)) {
11808+
11809+#ifdef CONFIG_PAX_EMUPLT
11810+ case 2:
11811+ case 3:
11812+ return;
11813+#endif
11814+
11815+ }
11816+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
11817+ do_group_exit(SIGKILL);
11818+ }
11819+#endif
11820+
11821 /* Pure DTLB misses do not tell us whether the fault causing
11822 * load/store/atomic was a write or not, it only says that there
11823 * was no match. So in such a case we (carefully) read the
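
All of the emulation paths above decode SPARC branch and call displacements with the same idiom: OR the bits above the field to ones, XOR the sign bit, then add the sign bit back, which sign-extends the field whether it is negative or not. A minimal user-space sketch of that arithmetic for the 19-bit "ba" displacement used by patched PLT emulation #7 (the helper name is hypothetical, not kernel code):

#include <stdio.h>

/* Sign-extend a 19-bit SPARC branch displacement the way the fault
 * handler above does: force bits 19..63 to one, then flip and re-add
 * the sign bit (bit 18).  The final shift by 2 turns the word
 * displacement into a byte offset relative to %tpc.  Opcode bits in
 * the instruction word are wiped by the initial OR. */
static unsigned long ba_target(unsigned long tpc, unsigned int ba)
{
	unsigned long disp = ba | 0xFFFFFFFFFFF80000UL;
	disp = (disp ^ 0x00040000UL) + 0x00040000UL;	/* sign-extend bit 18 */
	return tpc + (disp << 2);
}

int main(void)
{
	/* 0x7FFFC is -4 in 19 bits: four words back from 0x1000 is 0xff0 */
	printf("%#lx\n", ba_target(0x1000UL, 0x7FFFCU));
	return 0;
}
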
11824diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
11825index 3096317..a7b7654 100644
11826--- a/arch/sparc/mm/hugetlbpage.c
11827+++ b/arch/sparc/mm/hugetlbpage.c
11828@@ -26,7 +26,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11829 unsigned long addr,
11830 unsigned long len,
11831 unsigned long pgoff,
11832- unsigned long flags)
11833+ unsigned long flags,
11834+ unsigned long offset)
11835 {
11836 unsigned long task_size = TASK_SIZE;
11837 struct vm_unmapped_area_info info;
11838@@ -36,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11839
11840 info.flags = 0;
11841 info.length = len;
11842- info.low_limit = TASK_UNMAPPED_BASE;
11843+ info.low_limit = mm->mmap_base;
11844 info.high_limit = min(task_size, VA_EXCLUDE_START);
11845 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11846 info.align_offset = 0;
11847+ info.threadstack_offset = offset;
11848 addr = vm_unmapped_area(&info);
11849
11850 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11851 VM_BUG_ON(addr != -ENOMEM);
11852 info.low_limit = VA_EXCLUDE_END;
11853+
11854+#ifdef CONFIG_PAX_RANDMMAP
11855+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11856+ info.low_limit += mm->delta_mmap;
11857+#endif
11858+
11859 info.high_limit = task_size;
11860 addr = vm_unmapped_area(&info);
11861 }
11862@@ -56,7 +64,8 @@ static unsigned long
11863 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11864 const unsigned long len,
11865 const unsigned long pgoff,
11866- const unsigned long flags)
11867+ const unsigned long flags,
11868+ const unsigned long offset)
11869 {
11870 struct mm_struct *mm = current->mm;
11871 unsigned long addr = addr0;
11872@@ -71,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11873 info.high_limit = mm->mmap_base;
11874 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11875 info.align_offset = 0;
11876+ info.threadstack_offset = offset;
11877 addr = vm_unmapped_area(&info);
11878
11879 /*
11880@@ -83,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11881 VM_BUG_ON(addr != -ENOMEM);
11882 info.flags = 0;
11883 info.low_limit = TASK_UNMAPPED_BASE;
11884+
11885+#ifdef CONFIG_PAX_RANDMMAP
11886+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11887+ info.low_limit += mm->delta_mmap;
11888+#endif
11889+
11890 info.high_limit = STACK_TOP32;
11891 addr = vm_unmapped_area(&info);
11892 }
11893@@ -97,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11894 struct mm_struct *mm = current->mm;
11895 struct vm_area_struct *vma;
11896 unsigned long task_size = TASK_SIZE;
11897+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
11898
11899 if (test_thread_flag(TIF_32BIT))
11900 task_size = STACK_TOP32;
11901@@ -112,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11902 return addr;
11903 }
11904
11905+#ifdef CONFIG_PAX_RANDMMAP
11906+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11907+#endif
11908+
11909 if (addr) {
11910 addr = ALIGN(addr, HPAGE_SIZE);
11911 vma = find_vma(mm, addr);
11912- if (task_size - len >= addr &&
11913- (!vma || addr + len <= vma->vm_start))
11914+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11915 return addr;
11916 }
11917 if (mm->get_unmapped_area == arch_get_unmapped_area)
11918 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
11919- pgoff, flags);
11920+ pgoff, flags, offset);
11921 else
11922 return hugetlb_get_unmapped_area_topdown(file, addr, len,
11923- pgoff, flags);
11924+ pgoff, flags, offset);
11925 }
11926
11927 pte_t *huge_pte_alloc(struct mm_struct *mm,
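
The hugetlb hunks above swap the open-coded "!vma || addr + len <= vma->vm_start" test for check_heap_stack_gap() and thread a randomized thread-stack offset through both allocators. A simplified, hypothetical model of what such a predicate decides is sketched below; the real helper is defined elsewhere in this patch and also has to account for stack guard pages:

#include <stdio.h>

/* Hypothetical stand-in for check_heap_stack_gap(): the requested
 * [addr, addr+len) range, padded by the thread-stack offset, must
 * end below the start of the next mapping. */
struct vma_model {
	unsigned long vm_start;
	unsigned long vm_end;
};

static int gap_ok(const struct vma_model *next_vma, unsigned long addr,
		  unsigned long len, unsigned long offset)
{
	if (!next_vma)
		return 1;	/* nothing mapped above the range */
	return addr + len + offset <= next_vma->vm_start;
}

int main(void)
{
	struct vma_model stack = { 0x7f0000000000UL, 0x7f0000800000UL };

	printf("%d\n", gap_ok(&stack, 0x7eff00000000UL, 0x10000UL, 0x2000UL));
	return 0;
}
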
11928diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
11929index 5322e53..f820c5e 100644
11930--- a/arch/sparc/mm/init_64.c
11931+++ b/arch/sparc/mm/init_64.c
11932@@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
11933 int num_kernel_image_mappings;
11934
11935 #ifdef CONFIG_DEBUG_DCFLUSH
11936-atomic_t dcpage_flushes = ATOMIC_INIT(0);
11937+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
11938 #ifdef CONFIG_SMP
11939-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11940+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11941 #endif
11942 #endif
11943
11944@@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page)
11945 {
11946 BUG_ON(tlb_type == hypervisor);
11947 #ifdef CONFIG_DEBUG_DCFLUSH
11948- atomic_inc(&dcpage_flushes);
11949+ atomic_inc_unchecked(&dcpage_flushes);
11950 #endif
11951
11952 #ifdef DCACHE_ALIASING_POSSIBLE
11953@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
11954
11955 #ifdef CONFIG_DEBUG_DCFLUSH
11956 seq_printf(m, "DCPageFlushes\t: %d\n",
11957- atomic_read(&dcpage_flushes));
11958+ atomic_read_unchecked(&dcpage_flushes));
11959 #ifdef CONFIG_SMP
11960 seq_printf(m, "DCPageFlushesXC\t: %d\n",
11961- atomic_read(&dcpage_flushes_xcall));
11962+ atomic_read_unchecked(&dcpage_flushes_xcall));
11963 #endif /* CONFIG_SMP */
11964 #endif /* CONFIG_DEBUG_DCFLUSH */
11965 }
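
The dcpage_flushes conversion is one instance of a patch-wide split: under PaX's REFCOUNT hardening, ordinary atomic_t arithmetic is overflow-checked, so counters that are mere statistics move to the _unchecked variants to opt out. On architectures without the instrumentation, such as tile in the hunks below, the _unchecked names simply alias the plain operations. A toy user-space model of the split, with invented function names:

#include <stdio.h>
#include <limits.h>

/* Toy model only: the checked increment stands in for the REFCOUNT
 * overflow trap, the unchecked one for a statistics counter that may
 * wrap.  Real kernel atomics are architecture asm, not plain ints. */
static int inc_checked(unsigned int *v)
{
	if (*v == UINT_MAX) {
		fprintf(stderr, "refcount saturation: increment refused\n");
		return -1;
	}
	(*v)++;
	return 0;
}

static void inc_unchecked(unsigned int *v)
{
	(*v)++;		/* wraps to 0; harmless for a flush counter */
}

int main(void)
{
	unsigned int refs = UINT_MAX, flushes = UINT_MAX;

	inc_checked(&refs);		/* refused, refs unchanged */
	inc_unchecked(&flushes);	/* wraps */
	printf("refs=%u flushes=%u\n", refs, flushes);
	return 0;
}
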
11966diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
11967index b3692ce..e4517c9 100644
11968--- a/arch/tile/Kconfig
11969+++ b/arch/tile/Kconfig
11970@@ -184,6 +184,7 @@ source "kernel/Kconfig.hz"
11971
11972 config KEXEC
11973 bool "kexec system call"
11974+ depends on !GRKERNSEC_KMEM
11975 ---help---
11976 kexec is a system call that implements the ability to shutdown your
11977 current kernel, and to start another kernel. It is like a reboot
11978diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
11979index ad220ee..2f537b3 100644
11980--- a/arch/tile/include/asm/atomic_64.h
11981+++ b/arch/tile/include/asm/atomic_64.h
11982@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
11983
11984 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
11985
11986+#define atomic64_read_unchecked(v) atomic64_read(v)
11987+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
11988+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
11989+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
11990+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
11991+#define atomic64_inc_unchecked(v) atomic64_inc(v)
11992+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
11993+#define atomic64_dec_unchecked(v) atomic64_dec(v)
11994+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
11995+
11996 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
11997 #define smp_mb__before_atomic_dec() smp_mb()
11998 #define smp_mb__after_atomic_dec() smp_mb()
11999diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12000index 6160761..00cac88 100644
12001--- a/arch/tile/include/asm/cache.h
12002+++ b/arch/tile/include/asm/cache.h
12003@@ -15,11 +15,12 @@
12004 #ifndef _ASM_TILE_CACHE_H
12005 #define _ASM_TILE_CACHE_H
12006
12007+#include <linux/const.h>
12008 #include <arch/chip.h>
12009
12010 /* bytes per L1 data cache line */
12011 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12012-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12013+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12014
12015 /* bytes per L2 cache line */
12016 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
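
This cache.h hunk, like the identical um and unicore32 ones further down, replaces "1 << L1_CACHE_SHIFT" with "_AC(1,UL) << L1_CACHE_SHIFT": the constant becomes unsigned long in C while _AC() (from the newly included <linux/const.h>) still expands to a bare 1 in assembly. L1_CACHE_SHIFT is small here, so this reads as uniform hygiene rather than a live bug; the sketch below shows the signed-int hazard the pattern guards against:

#include <stdio.h>

int main(void)
{
	/* A bare 1 is a signed 32-bit int: shifting into bit 31 is
	 * undefined behaviour, and the typical result sign-extends
	 * when widened to 64 bits.  The UL literal is well defined
	 * and stays unsigned long throughout. */
	unsigned long from_int = 1 << 31;	/* UB; often 0xffffffff80000000 */
	unsigned long from_ul = 1UL << 31;	/* 0x0000000080000000 */

	printf("%#lx vs %#lx\n", from_int, from_ul);
	return 0;
}
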
12017diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12018index b6cde32..c0cb736 100644
12019--- a/arch/tile/include/asm/uaccess.h
12020+++ b/arch/tile/include/asm/uaccess.h
12021@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12022 const void __user *from,
12023 unsigned long n)
12024 {
12025- int sz = __compiletime_object_size(to);
12026+ size_t sz = __compiletime_object_size(to);
12027
12028- if (likely(sz == -1 || sz >= n))
12029+ if (likely(sz == (size_t)-1 || sz >= n))
12030 n = _copy_from_user(to, from, n);
12031 else
12032 copy_from_user_overflow();
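
The uaccess.h hunk widens sz from int to size_t and spells the "size unknown" sentinel returned by __compiletime_object_size() as (size_t)-1. The old code worked only through integer promotion; the sketch below shows how an int holding -1 quietly compares as the maximum value against an unsigned length:

#include <stdio.h>

int main(void)
{
	int sz_int = -1;	/* "object size unknown" sentinel */
	size_t sz = (size_t)-1;	/* the patch's explicit spelling */
	size_t n = 1UL << 40;	/* a requested copy length */

	/* the usual arithmetic conversions turn -1 into SIZE_MAX,
	 * so any length passes this check */
	if ((size_t)sz_int >= n)
		printf("int sentinel passes only via promotion\n");

	if (sz == (size_t)-1 || sz >= n)
		printf("the size_t form says what it means\n");
	return 0;
}
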
12033diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12034index 0cb3bba..7338b2d 100644
12035--- a/arch/tile/mm/hugetlbpage.c
12036+++ b/arch/tile/mm/hugetlbpage.c
12037@@ -212,6 +212,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12038 info.high_limit = TASK_SIZE;
12039 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12040 info.align_offset = 0;
12041+ info.threadstack_offset = 0;
12042 return vm_unmapped_area(&info);
12043 }
12044
12045@@ -229,6 +230,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12046 info.high_limit = current->mm->mmap_base;
12047 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12048 info.align_offset = 0;
12049+ info.threadstack_offset = 0;
12050 addr = vm_unmapped_area(&info);
12051
12052 /*
12053diff --git a/arch/um/Makefile b/arch/um/Makefile
12054index 36e658a..71a5c5a 100644
12055--- a/arch/um/Makefile
12056+++ b/arch/um/Makefile
12057@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12058 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12059 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12060
12061+ifdef CONSTIFY_PLUGIN
12062+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12063+endif
12064+
12065 #This will adjust *FLAGS accordingly to the platform.
12066 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12067
12068diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12069index 19e1bdd..3665b77 100644
12070--- a/arch/um/include/asm/cache.h
12071+++ b/arch/um/include/asm/cache.h
12072@@ -1,6 +1,7 @@
12073 #ifndef __UM_CACHE_H
12074 #define __UM_CACHE_H
12075
12076+#include <linux/const.h>
12077
12078 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12079 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12080@@ -12,6 +13,6 @@
12081 # define L1_CACHE_SHIFT 5
12082 #endif
12083
12084-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12085+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12086
12087 #endif
12088diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12089index 2e0a6b1..a64d0f5 100644
12090--- a/arch/um/include/asm/kmap_types.h
12091+++ b/arch/um/include/asm/kmap_types.h
12092@@ -8,6 +8,6 @@
12093
12094 /* No more #include "asm/arch/kmap_types.h" ! */
12095
12096-#define KM_TYPE_NR 14
12097+#define KM_TYPE_NR 15
12098
12099 #endif
12100diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12101index 5ff53d9..5850cdf 100644
12102--- a/arch/um/include/asm/page.h
12103+++ b/arch/um/include/asm/page.h
12104@@ -14,6 +14,9 @@
12105 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12106 #define PAGE_MASK (~(PAGE_SIZE-1))
12107
12108+#define ktla_ktva(addr) (addr)
12109+#define ktva_ktla(addr) (addr)
12110+
12111 #ifndef __ASSEMBLY__
12112
12113 struct page;
12114diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12115index 0032f92..cd151e0 100644
12116--- a/arch/um/include/asm/pgtable-3level.h
12117+++ b/arch/um/include/asm/pgtable-3level.h
12118@@ -58,6 +58,7 @@
12119 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12120 #define pud_populate(mm, pud, pmd) \
12121 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12122+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12123
12124 #ifdef CONFIG_64BIT
12125 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12126diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12127index eecc414..48adb87 100644
12128--- a/arch/um/kernel/process.c
12129+++ b/arch/um/kernel/process.c
12130@@ -356,22 +356,6 @@ int singlestepping(void * t)
12131 return 2;
12132 }
12133
12134-/*
12135- * Only x86 and x86_64 have an arch_align_stack().
12136- * All other arches have "#define arch_align_stack(x) (x)"
12137- * in their asm/system.h
12138- * As this is included in UML from asm-um/system-generic.h,
12139- * we can use it to behave as the subarch does.
12140- */
12141-#ifndef arch_align_stack
12142-unsigned long arch_align_stack(unsigned long sp)
12143-{
12144- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12145- sp -= get_random_int() % 8192;
12146- return sp & ~0xf;
12147-}
12148-#endif
12149-
12150 unsigned long get_wchan(struct task_struct *p)
12151 {
12152 unsigned long stack_page, sp, ip;
12153diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12154index ad8f795..2c7eec6 100644
12155--- a/arch/unicore32/include/asm/cache.h
12156+++ b/arch/unicore32/include/asm/cache.h
12157@@ -12,8 +12,10 @@
12158 #ifndef __UNICORE_CACHE_H__
12159 #define __UNICORE_CACHE_H__
12160
12161-#define L1_CACHE_SHIFT (5)
12162-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12163+#include <linux/const.h>
12164+
12165+#define L1_CACHE_SHIFT 5
12166+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12167
12168 /*
12169 * Memory returned by kmalloc() may be used for DMA, so we must make
12170diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12171index 0952ecd..9cf578c 100644
12172--- a/arch/x86/Kconfig
12173+++ b/arch/x86/Kconfig
12174@@ -249,7 +249,7 @@ config X86_HT
12175
12176 config X86_32_LAZY_GS
12177 def_bool y
12178- depends on X86_32 && !CC_STACKPROTECTOR
12179+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12180
12181 config ARCH_HWEIGHT_CFLAGS
12182 string
12183@@ -602,6 +602,7 @@ config SCHED_OMIT_FRAME_POINTER
12184
12185 menuconfig HYPERVISOR_GUEST
12186 bool "Linux guest support"
12187+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12188 ---help---
12189 Say Y here to enable options for running Linux under various hyper-
12190 visors. This option enables basic hypervisor detection and platform
12191@@ -1127,7 +1128,7 @@ choice
12192
12193 config NOHIGHMEM
12194 bool "off"
12195- depends on !X86_NUMAQ
12196+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12197 ---help---
12198 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12199 However, the address space of 32-bit x86 processors is only 4
12200@@ -1164,7 +1165,7 @@ config NOHIGHMEM
12201
12202 config HIGHMEM4G
12203 bool "4GB"
12204- depends on !X86_NUMAQ
12205+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12206 ---help---
12207 Select this if you have a 32-bit processor and between 1 and 4
12208 gigabytes of physical RAM.
12209@@ -1217,7 +1218,7 @@ config PAGE_OFFSET
12210 hex
12211 default 0xB0000000 if VMSPLIT_3G_OPT
12212 default 0x80000000 if VMSPLIT_2G
12213- default 0x78000000 if VMSPLIT_2G_OPT
12214+ default 0x70000000 if VMSPLIT_2G_OPT
12215 default 0x40000000 if VMSPLIT_1G
12216 default 0xC0000000
12217 depends on X86_32
12218@@ -1619,6 +1620,7 @@ config SECCOMP
12219
12220 config CC_STACKPROTECTOR
12221 bool "Enable -fstack-protector buffer overflow detection"
12222+ depends on X86_64 || !PAX_MEMORY_UDEREF
12223 ---help---
12224 This option turns on the -fstack-protector GCC feature. This
12225 feature puts, at the beginning of functions, a canary value on
12226@@ -1637,6 +1639,7 @@ source kernel/Kconfig.hz
12227
12228 config KEXEC
12229 bool "kexec system call"
12230+ depends on !GRKERNSEC_KMEM
12231 ---help---
12232 kexec is a system call that implements the ability to shutdown your
12233 current kernel, and to start another kernel. It is like a reboot
12234@@ -1738,6 +1741,8 @@ config X86_NEED_RELOCS
12235 config PHYSICAL_ALIGN
12236 hex "Alignment value to which kernel should be aligned"
12237 default "0x1000000"
12238+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12239+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12240 range 0x2000 0x1000000 if X86_32
12241 range 0x200000 0x1000000 if X86_64
12242 ---help---
12243@@ -1817,9 +1822,10 @@ config DEBUG_HOTPLUG_CPU0
12244 If unsure, say N.
12245
12246 config COMPAT_VDSO
12247- def_bool y
12248+ def_bool n
12249 prompt "Compat VDSO support"
12250 depends on X86_32 || IA32_EMULATION
12251+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12252 ---help---
12253 Map the 32-bit VDSO to the predictable old-style address too.
12254
12255diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12256index c026cca..14657ae 100644
12257--- a/arch/x86/Kconfig.cpu
12258+++ b/arch/x86/Kconfig.cpu
12259@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12260
12261 config X86_F00F_BUG
12262 def_bool y
12263- depends on M586MMX || M586TSC || M586 || M486
12264+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12265
12266 config X86_INVD_BUG
12267 def_bool y
12268@@ -327,7 +327,7 @@ config X86_INVD_BUG
12269
12270 config X86_ALIGNMENT_16
12271 def_bool y
12272- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12273+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12274
12275 config X86_INTEL_USERCOPY
12276 def_bool y
12277@@ -373,7 +373,7 @@ config X86_CMPXCHG64
12278 # generates cmov.
12279 config X86_CMOV
12280 def_bool y
12281- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12282+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12283
12284 config X86_MINIMUM_CPU_FAMILY
12285 int
12286diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12287index 0f3621e..282f24b 100644
12288--- a/arch/x86/Kconfig.debug
12289+++ b/arch/x86/Kconfig.debug
12290@@ -84,7 +84,7 @@ config X86_PTDUMP
12291 config DEBUG_RODATA
12292 bool "Write protect kernel read-only data structures"
12293 default y
12294- depends on DEBUG_KERNEL
12295+ depends on DEBUG_KERNEL && BROKEN
12296 ---help---
12297 Mark the kernel read-only data as write-protected in the pagetables,
12298 in order to catch accidental (and incorrect) writes to such const
12299@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
12300
12301 config DEBUG_SET_MODULE_RONX
12302 bool "Set loadable kernel module data as NX and text as RO"
12303- depends on MODULES
12304+ depends on MODULES && BROKEN
12305 ---help---
12306 This option helps catch unintended modifications to loadable
12307 kernel module's text and read-only data. It also prevents execution
12308diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12309index 57d0215..b4373fb 100644
12310--- a/arch/x86/Makefile
12311+++ b/arch/x86/Makefile
12312@@ -49,14 +49,12 @@ ifeq ($(CONFIG_X86_32),y)
12313 # CPU-specific tuning. Anything which can be shared with UML should go here.
12314 include $(srctree)/arch/x86/Makefile_32.cpu
12315 KBUILD_CFLAGS += $(cflags-y)
12316-
12317- # temporary until string.h is fixed
12318- KBUILD_CFLAGS += -ffreestanding
12319 else
12320 BITS := 64
12321 UTS_MACHINE := x86_64
12322 CHECKFLAGS += -D__x86_64__ -m64
12323
12324+ biarch := $(call cc-option,-m64)
12325 KBUILD_AFLAGS += -m64
12326 KBUILD_CFLAGS += -m64
12327
12328@@ -89,6 +87,9 @@ else
12329 KBUILD_CFLAGS += -maccumulate-outgoing-args
12330 endif
12331
12332+# temporary until string.h is fixed
12333+KBUILD_CFLAGS += -ffreestanding
12334+
12335 ifdef CONFIG_CC_STACKPROTECTOR
12336 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12337 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
12338@@ -247,3 +248,12 @@ define archhelp
12339 echo ' FDINITRD=file initrd for the booted kernel'
12340 echo ' kvmconfig - Enable additional options for guest kernel support'
12341 endef
12342+
12343+define OLD_LD
12344+
12345+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12346+*** Please upgrade your binutils to 2.18 or newer
12347+endef
12348+
12349+archprepare:
12350+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12351diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12352index d9c1195..a26ca0d 100644
12353--- a/arch/x86/boot/Makefile
12354+++ b/arch/x86/boot/Makefile
12355@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
12356 $(call cc-option, -fno-unit-at-a-time)) \
12357 $(call cc-option, -fno-stack-protector) \
12358 $(call cc-option, -mpreferred-stack-boundary=2)
12359+ifdef CONSTIFY_PLUGIN
12360+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12361+endif
12362 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12363 GCOV_PROFILE := n
12364
12365diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12366index 878e4b9..20537ab 100644
12367--- a/arch/x86/boot/bitops.h
12368+++ b/arch/x86/boot/bitops.h
12369@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12370 u8 v;
12371 const u32 *p = (const u32 *)addr;
12372
12373- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12374+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12375 return v;
12376 }
12377
12378@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12379
12380 static inline void set_bit(int nr, void *addr)
12381 {
12382- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12383+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12384 }
12385
12386 #endif /* BOOT_BITOPS_H */
12387diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12388index ef72bae..353a184 100644
12389--- a/arch/x86/boot/boot.h
12390+++ b/arch/x86/boot/boot.h
12391@@ -85,7 +85,7 @@ static inline void io_delay(void)
12392 static inline u16 ds(void)
12393 {
12394 u16 seg;
12395- asm("movw %%ds,%0" : "=rm" (seg));
12396+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12397 return seg;
12398 }
12399
12400@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
12401 static inline int memcmp(const void *s1, const void *s2, size_t len)
12402 {
12403 u8 diff;
12404- asm("repe; cmpsb; setnz %0"
12405+ asm volatile("repe; cmpsb; setnz %0"
12406 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
12407 return diff;
12408 }
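
This boot.h hunk and the cpucheck.c one further down add "volatile" to every inline asm that reads mutable machine state or has side effects. Without it, GCC treats an asm with outputs as a pure function of its inputs and may hoist, merge, or delete it when the output looks unused. A minimal sketch mirroring the ds() change above (x86, GNU C inline asm):

#include <stdio.h>

/* Without "volatile" this may be elided at -O2 if seg goes unused,
 * or hoisted out of a loop even though %ds can change. */
static inline unsigned short ds_racy(void)
{
	unsigned short seg;
	asm("movw %%ds,%0" : "=rm" (seg));
	return seg;
}

/* "volatile" pins the instruction: always emitted, never reordered
 * past other volatile asm. */
static inline unsigned short ds_pinned(void)
{
	unsigned short seg;
	asm volatile("movw %%ds,%0" : "=rm" (seg));
	return seg;
}

int main(void)
{
	printf("ds=%#x\n", (unsigned)ds_pinned());
	return 0;
}
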
12409diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12410index c8a6792..2402765 100644
12411--- a/arch/x86/boot/compressed/Makefile
12412+++ b/arch/x86/boot/compressed/Makefile
12413@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
12414 KBUILD_CFLAGS += -mno-mmx -mno-sse
12415 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12416 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12417+ifdef CONSTIFY_PLUGIN
12418+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12419+endif
12420
12421 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12422 GCOV_PROFILE := n
12423diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12424index a53440e..c3dbf1e 100644
12425--- a/arch/x86/boot/compressed/efi_stub_32.S
12426+++ b/arch/x86/boot/compressed/efi_stub_32.S
12427@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12428 * parameter 2, ..., param n. To make things easy, we save the return
12429 * address of efi_call_phys in a global variable.
12430 */
12431- popl %ecx
12432- movl %ecx, saved_return_addr(%edx)
12433- /* get the function pointer into ECX*/
12434- popl %ecx
12435- movl %ecx, efi_rt_function_ptr(%edx)
12436+ popl saved_return_addr(%edx)
12437+ popl efi_rt_function_ptr(%edx)
12438
12439 /*
12440 * 3. Call the physical function.
12441 */
12442- call *%ecx
12443+ call *efi_rt_function_ptr(%edx)
12444
12445 /*
12446 * 4. Balance the stack. And because EAX contain the return value,
12447@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12448 1: popl %edx
12449 subl $1b, %edx
12450
12451- movl efi_rt_function_ptr(%edx), %ecx
12452- pushl %ecx
12453+ pushl efi_rt_function_ptr(%edx)
12454
12455 /*
12456 * 10. Push the saved return address onto the stack and return.
12457 */
12458- movl saved_return_addr(%edx), %ecx
12459- pushl %ecx
12460- ret
12461+ jmpl *saved_return_addr(%edx)
12462 ENDPROC(efi_call_phys)
12463 .previous
12464
12465diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12466index 5d6f689..9d06730 100644
12467--- a/arch/x86/boot/compressed/head_32.S
12468+++ b/arch/x86/boot/compressed/head_32.S
12469@@ -118,7 +118,7 @@ preferred_addr:
12470 notl %eax
12471 andl %eax, %ebx
12472 #else
12473- movl $LOAD_PHYSICAL_ADDR, %ebx
12474+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12475 #endif
12476
12477 /* Target address to relocate to for decompression */
12478diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12479index c337422..2c5be72 100644
12480--- a/arch/x86/boot/compressed/head_64.S
12481+++ b/arch/x86/boot/compressed/head_64.S
12482@@ -95,7 +95,7 @@ ENTRY(startup_32)
12483 notl %eax
12484 andl %eax, %ebx
12485 #else
12486- movl $LOAD_PHYSICAL_ADDR, %ebx
12487+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12488 #endif
12489
12490 /* Target address to relocate to for decompression */
12491@@ -270,7 +270,7 @@ preferred_addr:
12492 notq %rax
12493 andq %rax, %rbp
12494 #else
12495- movq $LOAD_PHYSICAL_ADDR, %rbp
12496+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12497 #endif
12498
12499 /* Target address to relocate to for decompression */
12500@@ -362,8 +362,8 @@ gdt:
12501 .long gdt
12502 .word 0
12503 .quad 0x0000000000000000 /* NULL descriptor */
12504- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12505- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12506+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12507+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12508 .quad 0x0080890000000000 /* TS descriptor */
12509 .quad 0x0000000000000000 /* TS continued */
12510 gdt_end:
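
The GDT change above flips 0x...9a... to 0x...9b... and 0x...92... to 0x...93..., i.e. it presets the "accessed" bit (bit 40) in the __KERNEL_CS and __KERNEL_DS descriptors, evidently so the CPU never has to write the bit back into the descriptor table, a write that would fault once KERNEXEC maps the GDT read-only. A small decoder, verifying which bit changed:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cs_old = 0x00af9a000000ffffULL;	/* original __KERNEL_CS */
	uint64_t cs_new = 0x00af9b000000ffffULL;	/* patched __KERNEL_CS */

	/* bits 40..47 form the access byte; bit 40 is "accessed" */
	printf("access byte %#x -> %#x, accessed %u -> %u\n",
	       (unsigned)((cs_old >> 40) & 0xff),
	       (unsigned)((cs_new >> 40) & 0xff),
	       (unsigned)((cs_old >> 40) & 1),
	       (unsigned)((cs_new >> 40) & 1));
	return 0;
}
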
12511diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12512index 434f077..b6b4b38 100644
12513--- a/arch/x86/boot/compressed/misc.c
12514+++ b/arch/x86/boot/compressed/misc.c
12515@@ -283,7 +283,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12516 * Calculate the delta between where vmlinux was linked to load
12517 * and where it was actually loaded.
12518 */
12519- delta = min_addr - LOAD_PHYSICAL_ADDR;
12520+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12521 if (!delta) {
12522 debug_putstr("No relocation needed... ");
12523 return;
12524@@ -380,7 +380,7 @@ static void parse_elf(void *output)
12525 case PT_LOAD:
12526 #ifdef CONFIG_RELOCATABLE
12527 dest = output;
12528- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12529+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12530 #else
12531 dest = (void *)(phdr->p_paddr);
12532 #endif
12533@@ -432,7 +432,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
12534 error("Destination address too large");
12535 #endif
12536 #ifndef CONFIG_RELOCATABLE
12537- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12538+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12539 error("Wrong destination address");
12540 #endif
12541
12542diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12543index 4d3ff03..e4972ff 100644
12544--- a/arch/x86/boot/cpucheck.c
12545+++ b/arch/x86/boot/cpucheck.c
12546@@ -74,7 +74,7 @@ static int has_fpu(void)
12547 u16 fcw = -1, fsw = -1;
12548 u32 cr0;
12549
12550- asm("movl %%cr0,%0" : "=r" (cr0));
12551+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
12552 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
12553 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
12554 asm volatile("movl %0,%%cr0" : : "r" (cr0));
12555@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
12556 {
12557 u32 f0, f1;
12558
12559- asm("pushfl ; "
12560+ asm volatile("pushfl ; "
12561 "pushfl ; "
12562 "popl %0 ; "
12563 "movl %0,%1 ; "
12564@@ -115,7 +115,7 @@ static void get_flags(void)
12565 set_bit(X86_FEATURE_FPU, cpu.flags);
12566
12567 if (has_eflag(X86_EFLAGS_ID)) {
12568- asm("cpuid"
12569+ asm volatile("cpuid"
12570 : "=a" (max_intel_level),
12571 "=b" (cpu_vendor[0]),
12572 "=d" (cpu_vendor[1]),
12573@@ -124,7 +124,7 @@ static void get_flags(void)
12574
12575 if (max_intel_level >= 0x00000001 &&
12576 max_intel_level <= 0x0000ffff) {
12577- asm("cpuid"
12578+ asm volatile("cpuid"
12579 : "=a" (tfms),
12580 "=c" (cpu.flags[4]),
12581 "=d" (cpu.flags[0])
12582@@ -136,7 +136,7 @@ static void get_flags(void)
12583 cpu.model += ((tfms >> 16) & 0xf) << 4;
12584 }
12585
12586- asm("cpuid"
12587+ asm volatile("cpuid"
12588 : "=a" (max_amd_level)
12589 : "a" (0x80000000)
12590 : "ebx", "ecx", "edx");
12591@@ -144,7 +144,7 @@ static void get_flags(void)
12592 if (max_amd_level >= 0x80000001 &&
12593 max_amd_level <= 0x8000ffff) {
12594 u32 eax = 0x80000001;
12595- asm("cpuid"
12596+ asm volatile("cpuid"
12597 : "+a" (eax),
12598 "=c" (cpu.flags[6]),
12599 "=d" (cpu.flags[1])
12600@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12601 u32 ecx = MSR_K7_HWCR;
12602 u32 eax, edx;
12603
12604- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12605+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12606 eax &= ~(1 << 15);
12607- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12608+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12609
12610 get_flags(); /* Make sure it really did something */
12611 err = check_flags();
12612@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12613 u32 ecx = MSR_VIA_FCR;
12614 u32 eax, edx;
12615
12616- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12617+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12618 eax |= (1<<1)|(1<<7);
12619- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12620+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12621
12622 set_bit(X86_FEATURE_CX8, cpu.flags);
12623 err = check_flags();
12624@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12625 u32 eax, edx;
12626 u32 level = 1;
12627
12628- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12629- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12630- asm("cpuid"
12631+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12632+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12633+ asm volatile("cpuid"
12634 : "+a" (level), "=d" (cpu.flags[0])
12635 : : "ecx", "ebx");
12636- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12637+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12638
12639 err = check_flags();
12640 }
12641diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12642index 9ec06a1..2c25e79 100644
12643--- a/arch/x86/boot/header.S
12644+++ b/arch/x86/boot/header.S
12645@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12646 # single linked list of
12647 # struct setup_data
12648
12649-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12650+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12651
12652 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12653+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12654+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12655+#else
12656 #define VO_INIT_SIZE (VO__end - VO__text)
12657+#endif
12658 #if ZO_INIT_SIZE > VO_INIT_SIZE
12659 #define INIT_SIZE ZO_INIT_SIZE
12660 #else
12661diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12662index db75d07..8e6d0af 100644
12663--- a/arch/x86/boot/memory.c
12664+++ b/arch/x86/boot/memory.c
12665@@ -19,7 +19,7 @@
12666
12667 static int detect_memory_e820(void)
12668 {
12669- int count = 0;
12670+ unsigned int count = 0;
12671 struct biosregs ireg, oreg;
12672 struct e820entry *desc = boot_params.e820_map;
12673 static struct e820entry buf; /* static so it is zeroed */
12674diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12675index 11e8c6e..fdbb1ed 100644
12676--- a/arch/x86/boot/video-vesa.c
12677+++ b/arch/x86/boot/video-vesa.c
12678@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
12679
12680 boot_params.screen_info.vesapm_seg = oreg.es;
12681 boot_params.screen_info.vesapm_off = oreg.di;
12682+ boot_params.screen_info.vesapm_size = oreg.cx;
12683 }
12684
12685 /*
12686diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
12687index 43eda28..5ab5fdb 100644
12688--- a/arch/x86/boot/video.c
12689+++ b/arch/x86/boot/video.c
12690@@ -96,7 +96,7 @@ static void store_mode_params(void)
12691 static unsigned int get_entry(void)
12692 {
12693 char entry_buf[4];
12694- int i, len = 0;
12695+ unsigned int i, len = 0;
12696 int key;
12697 unsigned int v;
12698
12699diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
12700index 9105655..41779c1 100644
12701--- a/arch/x86/crypto/aes-x86_64-asm_64.S
12702+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
12703@@ -8,6 +8,8 @@
12704 * including this sentence is retained in full.
12705 */
12706
12707+#include <asm/alternative-asm.h>
12708+
12709 .extern crypto_ft_tab
12710 .extern crypto_it_tab
12711 .extern crypto_fl_tab
12712@@ -70,6 +72,8 @@
12713 je B192; \
12714 leaq 32(r9),r9;
12715
12716+#define ret pax_force_retaddr; ret
12717+
12718 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
12719 movq r1,r2; \
12720 movq r3,r4; \
12721diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
12722index 477e9d7..c92c7d8 100644
12723--- a/arch/x86/crypto/aesni-intel_asm.S
12724+++ b/arch/x86/crypto/aesni-intel_asm.S
12725@@ -31,6 +31,7 @@
12726
12727 #include <linux/linkage.h>
12728 #include <asm/inst.h>
12729+#include <asm/alternative-asm.h>
12730
12731 #ifdef __x86_64__
12732 .data
12733@@ -205,7 +206,7 @@ enc: .octa 0x2
12734 * num_initial_blocks = b mod 4
12735 * encrypt the initial num_initial_blocks blocks and apply ghash on
12736 * the ciphertext
12737-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12738+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12739 * are clobbered
12740 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12741 */
12742@@ -214,8 +215,8 @@ enc: .octa 0x2
12743 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12744 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12745 mov arg7, %r10 # %r10 = AAD
12746- mov arg8, %r12 # %r12 = aadLen
12747- mov %r12, %r11
12748+ mov arg8, %r15 # %r15 = aadLen
12749+ mov %r15, %r11
12750 pxor %xmm\i, %xmm\i
12751 _get_AAD_loop\num_initial_blocks\operation:
12752 movd (%r10), \TMP1
12753@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12754 psrldq $4, %xmm\i
12755 pxor \TMP1, %xmm\i
12756 add $4, %r10
12757- sub $4, %r12
12758+ sub $4, %r15
12759 jne _get_AAD_loop\num_initial_blocks\operation
12760 cmp $16, %r11
12761 je _get_AAD_loop2_done\num_initial_blocks\operation
12762- mov $16, %r12
12763+ mov $16, %r15
12764 _get_AAD_loop2\num_initial_blocks\operation:
12765 psrldq $4, %xmm\i
12766- sub $4, %r12
12767- cmp %r11, %r12
12768+ sub $4, %r15
12769+ cmp %r11, %r15
12770 jne _get_AAD_loop2\num_initial_blocks\operation
12771 _get_AAD_loop2_done\num_initial_blocks\operation:
12772 movdqa SHUF_MASK(%rip), %xmm14
12773@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
12774 * num_initial_blocks = b mod 4
12775 * encrypt the initial num_initial_blocks blocks and apply ghash on
12776 * the ciphertext
12777-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12778+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12779 * are clobbered
12780 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12781 */
12782@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
12783 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12784 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12785 mov arg7, %r10 # %r10 = AAD
12786- mov arg8, %r12 # %r12 = aadLen
12787- mov %r12, %r11
12788+ mov arg8, %r15 # %r15 = aadLen
12789+ mov %r15, %r11
12790 pxor %xmm\i, %xmm\i
12791 _get_AAD_loop\num_initial_blocks\operation:
12792 movd (%r10), \TMP1
12793@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12794 psrldq $4, %xmm\i
12795 pxor \TMP1, %xmm\i
12796 add $4, %r10
12797- sub $4, %r12
12798+ sub $4, %r15
12799 jne _get_AAD_loop\num_initial_blocks\operation
12800 cmp $16, %r11
12801 je _get_AAD_loop2_done\num_initial_blocks\operation
12802- mov $16, %r12
12803+ mov $16, %r15
12804 _get_AAD_loop2\num_initial_blocks\operation:
12805 psrldq $4, %xmm\i
12806- sub $4, %r12
12807- cmp %r11, %r12
12808+ sub $4, %r15
12809+ cmp %r11, %r15
12810 jne _get_AAD_loop2\num_initial_blocks\operation
12811 _get_AAD_loop2_done\num_initial_blocks\operation:
12812 movdqa SHUF_MASK(%rip), %xmm14
12813@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
12814 *
12815 *****************************************************************************/
12816 ENTRY(aesni_gcm_dec)
12817- push %r12
12818+ push %r15
12819 push %r13
12820 push %r14
12821 mov %rsp, %r14
12822@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
12823 */
12824 sub $VARIABLE_OFFSET, %rsp
12825 and $~63, %rsp # align rsp to 64 bytes
12826- mov %arg6, %r12
12827- movdqu (%r12), %xmm13 # %xmm13 = HashKey
12828+ mov %arg6, %r15
12829+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
12830 movdqa SHUF_MASK(%rip), %xmm2
12831 PSHUFB_XMM %xmm2, %xmm13
12832
12833@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
12834 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
12835 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
12836 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
12837- mov %r13, %r12
12838- and $(3<<4), %r12
12839+ mov %r13, %r15
12840+ and $(3<<4), %r15
12841 jz _initial_num_blocks_is_0_decrypt
12842- cmp $(2<<4), %r12
12843+ cmp $(2<<4), %r15
12844 jb _initial_num_blocks_is_1_decrypt
12845 je _initial_num_blocks_is_2_decrypt
12846 _initial_num_blocks_is_3_decrypt:
12847@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
12848 sub $16, %r11
12849 add %r13, %r11
12850 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
12851- lea SHIFT_MASK+16(%rip), %r12
12852- sub %r13, %r12
12853+ lea SHIFT_MASK+16(%rip), %r15
12854+ sub %r13, %r15
12855 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
12856 # (%r13 is the number of bytes in plaintext mod 16)
12857- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12858+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12859 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
12860
12861 movdqa %xmm1, %xmm2
12862 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
12863- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12864+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12865 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
12866 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
12867 pand %xmm1, %xmm2
12868@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
12869 sub $1, %r13
12870 jne _less_than_8_bytes_left_decrypt
12871 _multiple_of_16_bytes_decrypt:
12872- mov arg8, %r12 # %r13 = aadLen (number of bytes)
12873- shl $3, %r12 # convert into number of bits
12874- movd %r12d, %xmm15 # len(A) in %xmm15
12875+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
12876+ shl $3, %r15 # convert into number of bits
12877+ movd %r15d, %xmm15 # len(A) in %xmm15
12878 shl $3, %arg4 # len(C) in bits (*128)
12879 MOVQ_R64_XMM %arg4, %xmm1
12880 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12881@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
12882 mov %r14, %rsp
12883 pop %r14
12884 pop %r13
12885- pop %r12
12886+ pop %r15
12887+ pax_force_retaddr
12888 ret
12889 ENDPROC(aesni_gcm_dec)
12890
12891@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
12892 * poly = x^128 + x^127 + x^126 + x^121 + 1
12893 ***************************************************************************/
12894 ENTRY(aesni_gcm_enc)
12895- push %r12
12896+ push %r15
12897 push %r13
12898 push %r14
12899 mov %rsp, %r14
12900@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
12901 #
12902 sub $VARIABLE_OFFSET, %rsp
12903 and $~63, %rsp
12904- mov %arg6, %r12
12905- movdqu (%r12), %xmm13
12906+ mov %arg6, %r15
12907+ movdqu (%r15), %xmm13
12908 movdqa SHUF_MASK(%rip), %xmm2
12909 PSHUFB_XMM %xmm2, %xmm13
12910
12911@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
12912 movdqa %xmm13, HashKey(%rsp)
12913 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
12914 and $-16, %r13
12915- mov %r13, %r12
12916+ mov %r13, %r15
12917
12918 # Encrypt first few blocks
12919
12920- and $(3<<4), %r12
12921+ and $(3<<4), %r15
12922 jz _initial_num_blocks_is_0_encrypt
12923- cmp $(2<<4), %r12
12924+ cmp $(2<<4), %r15
12925 jb _initial_num_blocks_is_1_encrypt
12926 je _initial_num_blocks_is_2_encrypt
12927 _initial_num_blocks_is_3_encrypt:
12928@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
12929 sub $16, %r11
12930 add %r13, %r11
12931 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
12932- lea SHIFT_MASK+16(%rip), %r12
12933- sub %r13, %r12
12934+ lea SHIFT_MASK+16(%rip), %r15
12935+ sub %r13, %r15
12936 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
12937 # (%r13 is the number of bytes in plaintext mod 16)
12938- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12939+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12940 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
12941 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
12942- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12943+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12944 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
12945 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
12946 movdqa SHUF_MASK(%rip), %xmm10
12947@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
12948 sub $1, %r13
12949 jne _less_than_8_bytes_left_encrypt
12950 _multiple_of_16_bytes_encrypt:
12951- mov arg8, %r12 # %r12 = addLen (number of bytes)
12952- shl $3, %r12
12953- movd %r12d, %xmm15 # len(A) in %xmm15
12954+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
12955+ shl $3, %r15
12956+ movd %r15d, %xmm15 # len(A) in %xmm15
12957 shl $3, %arg4 # len(C) in bits (*128)
12958 MOVQ_R64_XMM %arg4, %xmm1
12959 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12960@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
12961 mov %r14, %rsp
12962 pop %r14
12963 pop %r13
12964- pop %r12
12965+ pop %r15
12966+ pax_force_retaddr
12967 ret
12968 ENDPROC(aesni_gcm_enc)
12969
12970@@ -1722,6 +1725,7 @@ _key_expansion_256a:
12971 pxor %xmm1, %xmm0
12972 movaps %xmm0, (TKEYP)
12973 add $0x10, TKEYP
12974+ pax_force_retaddr
12975 ret
12976 ENDPROC(_key_expansion_128)
12977 ENDPROC(_key_expansion_256a)
12978@@ -1748,6 +1752,7 @@ _key_expansion_192a:
12979 shufps $0b01001110, %xmm2, %xmm1
12980 movaps %xmm1, 0x10(TKEYP)
12981 add $0x20, TKEYP
12982+ pax_force_retaddr
12983 ret
12984 ENDPROC(_key_expansion_192a)
12985
12986@@ -1768,6 +1773,7 @@ _key_expansion_192b:
12987
12988 movaps %xmm0, (TKEYP)
12989 add $0x10, TKEYP
12990+ pax_force_retaddr
12991 ret
12992 ENDPROC(_key_expansion_192b)
12993
12994@@ -1781,6 +1787,7 @@ _key_expansion_256b:
12995 pxor %xmm1, %xmm2
12996 movaps %xmm2, (TKEYP)
12997 add $0x10, TKEYP
12998+ pax_force_retaddr
12999 ret
13000 ENDPROC(_key_expansion_256b)
13001
13002@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13003 #ifndef __x86_64__
13004 popl KEYP
13005 #endif
13006+ pax_force_retaddr
13007 ret
13008 ENDPROC(aesni_set_key)
13009
13010@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13011 popl KLEN
13012 popl KEYP
13013 #endif
13014+ pax_force_retaddr
13015 ret
13016 ENDPROC(aesni_enc)
13017
13018@@ -1974,6 +1983,7 @@ _aesni_enc1:
13019 AESENC KEY STATE
13020 movaps 0x70(TKEYP), KEY
13021 AESENCLAST KEY STATE
13022+ pax_force_retaddr
13023 ret
13024 ENDPROC(_aesni_enc1)
13025
13026@@ -2083,6 +2093,7 @@ _aesni_enc4:
13027 AESENCLAST KEY STATE2
13028 AESENCLAST KEY STATE3
13029 AESENCLAST KEY STATE4
13030+ pax_force_retaddr
13031 ret
13032 ENDPROC(_aesni_enc4)
13033
13034@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13035 popl KLEN
13036 popl KEYP
13037 #endif
13038+ pax_force_retaddr
13039 ret
13040 ENDPROC(aesni_dec)
13041
13042@@ -2164,6 +2176,7 @@ _aesni_dec1:
13043 AESDEC KEY STATE
13044 movaps 0x70(TKEYP), KEY
13045 AESDECLAST KEY STATE
13046+ pax_force_retaddr
13047 ret
13048 ENDPROC(_aesni_dec1)
13049
13050@@ -2273,6 +2286,7 @@ _aesni_dec4:
13051 AESDECLAST KEY STATE2
13052 AESDECLAST KEY STATE3
13053 AESDECLAST KEY STATE4
13054+ pax_force_retaddr
13055 ret
13056 ENDPROC(_aesni_dec4)
13057
13058@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13059 popl KEYP
13060 popl LEN
13061 #endif
13062+ pax_force_retaddr
13063 ret
13064 ENDPROC(aesni_ecb_enc)
13065
13066@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13067 popl KEYP
13068 popl LEN
13069 #endif
13070+ pax_force_retaddr
13071 ret
13072 ENDPROC(aesni_ecb_dec)
13073
13074@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13075 popl LEN
13076 popl IVP
13077 #endif
13078+ pax_force_retaddr
13079 ret
13080 ENDPROC(aesni_cbc_enc)
13081
13082@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13083 popl LEN
13084 popl IVP
13085 #endif
13086+ pax_force_retaddr
13087 ret
13088 ENDPROC(aesni_cbc_dec)
13089
13090@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13091 mov $1, TCTR_LOW
13092 MOVQ_R64_XMM TCTR_LOW INC
13093 MOVQ_R64_XMM CTR TCTR_LOW
13094+ pax_force_retaddr
13095 ret
13096 ENDPROC(_aesni_inc_init)
13097
13098@@ -2579,6 +2598,7 @@ _aesni_inc:
13099 .Linc_low:
13100 movaps CTR, IV
13101 PSHUFB_XMM BSWAP_MASK IV
13102+ pax_force_retaddr
13103 ret
13104 ENDPROC(_aesni_inc)
13105
13106@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13107 .Lctr_enc_ret:
13108 movups IV, (IVP)
13109 .Lctr_enc_just_ret:
13110+ pax_force_retaddr
13111 ret
13112 ENDPROC(aesni_ctr_enc)
13113
13114@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13115 pxor INC, STATE4
13116 movdqu STATE4, 0x70(OUTP)
13117
13118+ pax_force_retaddr
13119 ret
13120 ENDPROC(aesni_xts_crypt8)
13121
13122diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13123index 246c670..466e2d6 100644
13124--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13125+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13126@@ -21,6 +21,7 @@
13127 */
13128
13129 #include <linux/linkage.h>
13130+#include <asm/alternative-asm.h>
13131
13132 .file "blowfish-x86_64-asm.S"
13133 .text
13134@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13135 jnz .L__enc_xor;
13136
13137 write_block();
13138+ pax_force_retaddr
13139 ret;
13140 .L__enc_xor:
13141 xor_block();
13142+ pax_force_retaddr
13143 ret;
13144 ENDPROC(__blowfish_enc_blk)
13145
13146@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13147
13148 movq %r11, %rbp;
13149
13150+ pax_force_retaddr
13151 ret;
13152 ENDPROC(blowfish_dec_blk)
13153
13154@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13155
13156 popq %rbx;
13157 popq %rbp;
13158+ pax_force_retaddr
13159 ret;
13160
13161 .L__enc_xor4:
13162@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13163
13164 popq %rbx;
13165 popq %rbp;
13166+ pax_force_retaddr
13167 ret;
13168 ENDPROC(__blowfish_enc_blk_4way)
13169
13170@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13171 popq %rbx;
13172 popq %rbp;
13173
13174+ pax_force_retaddr
13175 ret;
13176 ENDPROC(blowfish_dec_blk_4way)
13177diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13178index ce71f92..1dce7ec 100644
13179--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13180+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13181@@ -16,6 +16,7 @@
13182 */
13183
13184 #include <linux/linkage.h>
13185+#include <asm/alternative-asm.h>
13186
13187 #define CAMELLIA_TABLE_BYTE_LEN 272
13188
13189@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13190 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13191 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13192 %rcx, (%r9));
13193+ pax_force_retaddr
13194 ret;
13195 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13196
13197@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13198 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13199 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13200 %rax, (%r9));
13201+ pax_force_retaddr
13202 ret;
13203 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13204
13205@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13206 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13207 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13208
13209+ pax_force_retaddr
13210 ret;
13211
13212 .align 8
13213@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13214 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13215 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13216
13217+ pax_force_retaddr
13218 ret;
13219
13220 .align 8
13221@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13222 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13223 %xmm8, %rsi);
13224
13225+ pax_force_retaddr
13226 ret;
13227 ENDPROC(camellia_ecb_enc_16way)
13228
13229@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13230 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13231 %xmm8, %rsi);
13232
13233+ pax_force_retaddr
13234 ret;
13235 ENDPROC(camellia_ecb_dec_16way)
13236
13237@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13238 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13239 %xmm8, %rsi);
13240
13241+ pax_force_retaddr
13242 ret;
13243 ENDPROC(camellia_cbc_dec_16way)
13244
13245@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13246 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13247 %xmm8, %rsi);
13248
13249+ pax_force_retaddr
13250 ret;
13251 ENDPROC(camellia_ctr_16way)
13252
13253@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13254 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13255 %xmm8, %rsi);
13256
13257+ pax_force_retaddr
13258 ret;
13259 ENDPROC(camellia_xts_crypt_16way)
13260
13261diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13262index 0e0b886..5a3123c 100644
13263--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13264+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13265@@ -11,6 +11,7 @@
13266 */
13267
13268 #include <linux/linkage.h>
13269+#include <asm/alternative-asm.h>
13270
13271 #define CAMELLIA_TABLE_BYTE_LEN 272
13272
13273@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13274 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13275 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13276 %rcx, (%r9));
13277+ pax_force_retaddr
13278 ret;
13279 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13280
13281@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13282 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13283 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13284 %rax, (%r9));
13285+ pax_force_retaddr
13286 ret;
13287 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13288
13289@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13290 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13291 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13292
13293+ pax_force_retaddr
13294 ret;
13295
13296 .align 8
13297@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13298 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13299 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13300
13301+ pax_force_retaddr
13302 ret;
13303
13304 .align 8
13305@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13306
13307 vzeroupper;
13308
13309+ pax_force_retaddr
13310 ret;
13311 ENDPROC(camellia_ecb_enc_32way)
13312
13313@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13314
13315 vzeroupper;
13316
13317+ pax_force_retaddr
13318 ret;
13319 ENDPROC(camellia_ecb_dec_32way)
13320
13321@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13322
13323 vzeroupper;
13324
13325+ pax_force_retaddr
13326 ret;
13327 ENDPROC(camellia_cbc_dec_32way)
13328
13329@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13330
13331 vzeroupper;
13332
13333+ pax_force_retaddr
13334 ret;
13335 ENDPROC(camellia_ctr_32way)
13336
13337@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13338
13339 vzeroupper;
13340
13341+ pax_force_retaddr
13342 ret;
13343 ENDPROC(camellia_xts_crypt_32way)
13344
13345diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13346index 310319c..db3d7b5 100644
13347--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13348+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13349@@ -21,6 +21,7 @@
13350 */
13351
13352 #include <linux/linkage.h>
13353+#include <asm/alternative-asm.h>
13354
13355 .file "camellia-x86_64-asm_64.S"
13356 .text
13357@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13358 enc_outunpack(mov, RT1);
13359
13360 movq RRBP, %rbp;
13361+ pax_force_retaddr
13362 ret;
13363
13364 .L__enc_xor:
13365 enc_outunpack(xor, RT1);
13366
13367 movq RRBP, %rbp;
13368+ pax_force_retaddr
13369 ret;
13370 ENDPROC(__camellia_enc_blk)
13371
13372@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13373 dec_outunpack();
13374
13375 movq RRBP, %rbp;
13376+ pax_force_retaddr
13377 ret;
13378 ENDPROC(camellia_dec_blk)
13379
13380@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13381
13382 movq RRBP, %rbp;
13383 popq %rbx;
13384+ pax_force_retaddr
13385 ret;
13386
13387 .L__enc2_xor:
13388@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13389
13390 movq RRBP, %rbp;
13391 popq %rbx;
13392+ pax_force_retaddr
13393 ret;
13394 ENDPROC(__camellia_enc_blk_2way)
13395
13396@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13397
13398 movq RRBP, %rbp;
13399 movq RXOR, %rbx;
13400+ pax_force_retaddr
13401 ret;
13402 ENDPROC(camellia_dec_blk_2way)
13403diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13404index c35fd5d..2d8c7db 100644
13405--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13406+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13407@@ -24,6 +24,7 @@
13408 */
13409
13410 #include <linux/linkage.h>
13411+#include <asm/alternative-asm.h>
13412
13413 .file "cast5-avx-x86_64-asm_64.S"
13414
13415@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13416 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13417 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13418
13419+ pax_force_retaddr
13420 ret;
13421 ENDPROC(__cast5_enc_blk16)
13422
13423@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13424 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13425 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13426
13427+ pax_force_retaddr
13428 ret;
13429
13430 .L__skip_dec:
13431@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13432 vmovdqu RR4, (6*4*4)(%r11);
13433 vmovdqu RL4, (7*4*4)(%r11);
13434
13435+ pax_force_retaddr
13436 ret;
13437 ENDPROC(cast5_ecb_enc_16way)
13438
13439@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13440 vmovdqu RR4, (6*4*4)(%r11);
13441 vmovdqu RL4, (7*4*4)(%r11);
13442
13443+ pax_force_retaddr
13444 ret;
13445 ENDPROC(cast5_ecb_dec_16way)
13446
13447@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13448 * %rdx: src
13449 */
13450
13451- pushq %r12;
13452+ pushq %r14;
13453
13454 movq %rsi, %r11;
13455- movq %rdx, %r12;
13456+ movq %rdx, %r14;
13457
13458 vmovdqu (0*16)(%rdx), RL1;
13459 vmovdqu (1*16)(%rdx), RR1;
13460@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13461 call __cast5_dec_blk16;
13462
13463 /* xor with src */
13464- vmovq (%r12), RX;
13465+ vmovq (%r14), RX;
13466 vpshufd $0x4f, RX, RX;
13467 vpxor RX, RR1, RR1;
13468- vpxor 0*16+8(%r12), RL1, RL1;
13469- vpxor 1*16+8(%r12), RR2, RR2;
13470- vpxor 2*16+8(%r12), RL2, RL2;
13471- vpxor 3*16+8(%r12), RR3, RR3;
13472- vpxor 4*16+8(%r12), RL3, RL3;
13473- vpxor 5*16+8(%r12), RR4, RR4;
13474- vpxor 6*16+8(%r12), RL4, RL4;
13475+ vpxor 0*16+8(%r14), RL1, RL1;
13476+ vpxor 1*16+8(%r14), RR2, RR2;
13477+ vpxor 2*16+8(%r14), RL2, RL2;
13478+ vpxor 3*16+8(%r14), RR3, RR3;
13479+ vpxor 4*16+8(%r14), RL3, RL3;
13480+ vpxor 5*16+8(%r14), RR4, RR4;
13481+ vpxor 6*16+8(%r14), RL4, RL4;
13482
13483 vmovdqu RR1, (0*16)(%r11);
13484 vmovdqu RL1, (1*16)(%r11);
13485@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13486 vmovdqu RR4, (6*16)(%r11);
13487 vmovdqu RL4, (7*16)(%r11);
13488
13489- popq %r12;
13490+ popq %r14;
13491
13492+ pax_force_retaddr
13493 ret;
13494 ENDPROC(cast5_cbc_dec_16way)
13495
13496@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13497 * %rcx: iv (big endian, 64bit)
13498 */
13499
13500- pushq %r12;
13501+ pushq %r14;
13502
13503 movq %rsi, %r11;
13504- movq %rdx, %r12;
13505+ movq %rdx, %r14;
13506
13507 vpcmpeqd RTMP, RTMP, RTMP;
13508 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13509@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13510 call __cast5_enc_blk16;
13511
13512 /* dst = src ^ iv */
13513- vpxor (0*16)(%r12), RR1, RR1;
13514- vpxor (1*16)(%r12), RL1, RL1;
13515- vpxor (2*16)(%r12), RR2, RR2;
13516- vpxor (3*16)(%r12), RL2, RL2;
13517- vpxor (4*16)(%r12), RR3, RR3;
13518- vpxor (5*16)(%r12), RL3, RL3;
13519- vpxor (6*16)(%r12), RR4, RR4;
13520- vpxor (7*16)(%r12), RL4, RL4;
13521+ vpxor (0*16)(%r14), RR1, RR1;
13522+ vpxor (1*16)(%r14), RL1, RL1;
13523+ vpxor (2*16)(%r14), RR2, RR2;
13524+ vpxor (3*16)(%r14), RL2, RL2;
13525+ vpxor (4*16)(%r14), RR3, RR3;
13526+ vpxor (5*16)(%r14), RL3, RL3;
13527+ vpxor (6*16)(%r14), RR4, RR4;
13528+ vpxor (7*16)(%r14), RL4, RL4;
13529 vmovdqu RR1, (0*16)(%r11);
13530 vmovdqu RL1, (1*16)(%r11);
13531 vmovdqu RR2, (2*16)(%r11);
13532@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13533 vmovdqu RR4, (6*16)(%r11);
13534 vmovdqu RL4, (7*16)(%r11);
13535
13536- popq %r12;
13537+ popq %r14;
13538
13539+ pax_force_retaddr
13540 ret;
13541 ENDPROC(cast5_ctr_16way)
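Note the %r12 to %r14 renames in cast5_cbc_dec_16way and cast5_ctr_16way above; the same substitution recurs in the cast6 and twofish files below. The KERNEXEC OR method (see the alternative-asm.h hunk later in this patch) dedicates %r12 to the address mask via pax_set_fptr_mask, so hand-written assembly must give it up as a scratch register. %r14 is callee-saved just like %r12, which is why the surrounding push/pop pairs are renamed rather than restructured. GCC can pin a register in the same spirit; a minimal sketch, assuming an x86-64 GCC (the variable name is illustrative):

    /* Build with something like: gcc -ffixed-r12 demo.c */
    #include <stdio.h>

    register unsigned long fptr_mask asm("r12");  /* pinned to %r12 */

    int main(void)
    {
        fptr_mask = 0x8000000000000000UL;
        printf("mask held in %%r12: %#lx\n", fptr_mask);
        return 0;
    }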
13542diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13543index e3531f8..e123f35 100644
13544--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13545+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13546@@ -24,6 +24,7 @@
13547 */
13548
13549 #include <linux/linkage.h>
13550+#include <asm/alternative-asm.h>
13551 #include "glue_helper-asm-avx.S"
13552
13553 .file "cast6-avx-x86_64-asm_64.S"
13554@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13555 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13556 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13557
13558+ pax_force_retaddr
13559 ret;
13560 ENDPROC(__cast6_enc_blk8)
13561
13562@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13563 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13564 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13565
13566+ pax_force_retaddr
13567 ret;
13568 ENDPROC(__cast6_dec_blk8)
13569
13570@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13571
13572 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13573
13574+ pax_force_retaddr
13575 ret;
13576 ENDPROC(cast6_ecb_enc_8way)
13577
13578@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13579
13580 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13581
13582+ pax_force_retaddr
13583 ret;
13584 ENDPROC(cast6_ecb_dec_8way)
13585
13586@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13587 * %rdx: src
13588 */
13589
13590- pushq %r12;
13591+ pushq %r14;
13592
13593 movq %rsi, %r11;
13594- movq %rdx, %r12;
13595+ movq %rdx, %r14;
13596
13597 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13598
13599 call __cast6_dec_blk8;
13600
13601- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13602+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13603
13604- popq %r12;
13605+ popq %r14;
13606
13607+ pax_force_retaddr
13608 ret;
13609 ENDPROC(cast6_cbc_dec_8way)
13610
13611@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13612 * %rcx: iv (little endian, 128bit)
13613 */
13614
13615- pushq %r12;
13616+ pushq %r14;
13617
13618 movq %rsi, %r11;
13619- movq %rdx, %r12;
13620+ movq %rdx, %r14;
13621
13622 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13623 RD2, RX, RKR, RKM);
13624
13625 call __cast6_enc_blk8;
13626
13627- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13628+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13629
13630- popq %r12;
13631+ popq %r14;
13632
13633+ pax_force_retaddr
13634 ret;
13635 ENDPROC(cast6_ctr_8way)
13636
13637@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13638 /* dst <= regs xor IVs(in dst) */
13639 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13640
13641+ pax_force_retaddr
13642 ret;
13643 ENDPROC(cast6_xts_enc_8way)
13644
13645@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13646 /* dst <= regs xor IVs(in dst) */
13647 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13648
13649+ pax_force_retaddr
13650 ret;
13651 ENDPROC(cast6_xts_dec_8way)
13652diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13653index dbc4339..de6e120 100644
13654--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13655+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13656@@ -45,6 +45,7 @@
13657
13658 #include <asm/inst.h>
13659 #include <linux/linkage.h>
13660+#include <asm/alternative-asm.h>
13661
13662 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13663
13664@@ -312,6 +313,7 @@ do_return:
13665 popq %rsi
13666 popq %rdi
13667 popq %rbx
13668+ pax_force_retaddr
13669 ret
13670
13671 ################################################################
13672diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13673index 586f41a..d02851e 100644
13674--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13675+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13676@@ -18,6 +18,7 @@
13677
13678 #include <linux/linkage.h>
13679 #include <asm/inst.h>
13680+#include <asm/alternative-asm.h>
13681
13682 .data
13683
13684@@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
13685 psrlq $1, T2
13686 pxor T2, T1
13687 pxor T1, DATA
13688+ pax_force_retaddr
13689 ret
13690 ENDPROC(__clmul_gf128mul_ble)
13691
13692@@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
13693 call __clmul_gf128mul_ble
13694 PSHUFB_XMM BSWAP DATA
13695 movups DATA, (%rdi)
13696+ pax_force_retaddr
13697 ret
13698 ENDPROC(clmul_ghash_mul)
13699
13700@@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
13701 PSHUFB_XMM BSWAP DATA
13702 movups DATA, (%rdi)
13703 .Lupdate_just_ret:
13704+ pax_force_retaddr
13705 ret
13706 ENDPROC(clmul_ghash_update)
13707
13708@@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
13709 pand .Lpoly, %xmm1
13710 pxor %xmm1, %xmm0
13711 movups %xmm0, (%rdi)
13712+ pax_force_retaddr
13713 ret
13714 ENDPROC(clmul_ghash_setkey)
13715diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13716index 9279e0b..c4b3d2c 100644
13717--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
13718+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13719@@ -1,4 +1,5 @@
13720 #include <linux/linkage.h>
13721+#include <asm/alternative-asm.h>
13722
13723 # enter salsa20_encrypt_bytes
13724 ENTRY(salsa20_encrypt_bytes)
13725@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
13726 add %r11,%rsp
13727 mov %rdi,%rax
13728 mov %rsi,%rdx
13729+ pax_force_retaddr
13730 ret
13731 # bytesatleast65:
13732 ._bytesatleast65:
13733@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
13734 add %r11,%rsp
13735 mov %rdi,%rax
13736 mov %rsi,%rdx
13737+ pax_force_retaddr
13738 ret
13739 ENDPROC(salsa20_keysetup)
13740
13741@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
13742 add %r11,%rsp
13743 mov %rdi,%rax
13744 mov %rsi,%rdx
13745+ pax_force_retaddr
13746 ret
13747 ENDPROC(salsa20_ivsetup)
13748diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13749index 2f202f4..d9164d6 100644
13750--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13751+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13752@@ -24,6 +24,7 @@
13753 */
13754
13755 #include <linux/linkage.h>
13756+#include <asm/alternative-asm.h>
13757 #include "glue_helper-asm-avx.S"
13758
13759 .file "serpent-avx-x86_64-asm_64.S"
13760@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
13761 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13762 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13763
13764+ pax_force_retaddr
13765 ret;
13766 ENDPROC(__serpent_enc_blk8_avx)
13767
13768@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
13769 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13770 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13771
13772+ pax_force_retaddr
13773 ret;
13774 ENDPROC(__serpent_dec_blk8_avx)
13775
13776@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
13777
13778 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13779
13780+ pax_force_retaddr
13781 ret;
13782 ENDPROC(serpent_ecb_enc_8way_avx)
13783
13784@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
13785
13786 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13787
13788+ pax_force_retaddr
13789 ret;
13790 ENDPROC(serpent_ecb_dec_8way_avx)
13791
13792@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
13793
13794 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13795
13796+ pax_force_retaddr
13797 ret;
13798 ENDPROC(serpent_cbc_dec_8way_avx)
13799
13800@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
13801
13802 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13803
13804+ pax_force_retaddr
13805 ret;
13806 ENDPROC(serpent_ctr_8way_avx)
13807
13808@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
13809 /* dst <= regs xor IVs(in dst) */
13810 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13811
13812+ pax_force_retaddr
13813 ret;
13814 ENDPROC(serpent_xts_enc_8way_avx)
13815
13816@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
13817 /* dst <= regs xor IVs(in dst) */
13818 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13819
13820+ pax_force_retaddr
13821 ret;
13822 ENDPROC(serpent_xts_dec_8way_avx)
13823diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
13824index b222085..abd483c 100644
13825--- a/arch/x86/crypto/serpent-avx2-asm_64.S
13826+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
13827@@ -15,6 +15,7 @@
13828 */
13829
13830 #include <linux/linkage.h>
13831+#include <asm/alternative-asm.h>
13832 #include "glue_helper-asm-avx2.S"
13833
13834 .file "serpent-avx2-asm_64.S"
13835@@ -610,6 +611,7 @@ __serpent_enc_blk16:
13836 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13837 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13838
13839+ pax_force_retaddr
13840 ret;
13841 ENDPROC(__serpent_enc_blk16)
13842
13843@@ -664,6 +666,7 @@ __serpent_dec_blk16:
13844 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13845 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13846
13847+ pax_force_retaddr
13848 ret;
13849 ENDPROC(__serpent_dec_blk16)
13850
13851@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
13852
13853 vzeroupper;
13854
13855+ pax_force_retaddr
13856 ret;
13857 ENDPROC(serpent_ecb_enc_16way)
13858
13859@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
13860
13861 vzeroupper;
13862
13863+ pax_force_retaddr
13864 ret;
13865 ENDPROC(serpent_ecb_dec_16way)
13866
13867@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
13868
13869 vzeroupper;
13870
13871+ pax_force_retaddr
13872 ret;
13873 ENDPROC(serpent_cbc_dec_16way)
13874
13875@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
13876
13877 vzeroupper;
13878
13879+ pax_force_retaddr
13880 ret;
13881 ENDPROC(serpent_ctr_16way)
13882
13883@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
13884
13885 vzeroupper;
13886
13887+ pax_force_retaddr
13888 ret;
13889 ENDPROC(serpent_xts_enc_16way)
13890
13891@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
13892
13893 vzeroupper;
13894
13895+ pax_force_retaddr
13896 ret;
13897 ENDPROC(serpent_xts_dec_16way)
13898diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13899index acc066c..1559cc4 100644
13900--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13901+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13902@@ -25,6 +25,7 @@
13903 */
13904
13905 #include <linux/linkage.h>
13906+#include <asm/alternative-asm.h>
13907
13908 .file "serpent-sse2-x86_64-asm_64.S"
13909 .text
13910@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
13911 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13912 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13913
13914+ pax_force_retaddr
13915 ret;
13916
13917 .L__enc_xor8:
13918 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13919 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13920
13921+ pax_force_retaddr
13922 ret;
13923 ENDPROC(__serpent_enc_blk_8way)
13924
13925@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
13926 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13927 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13928
13929+ pax_force_retaddr
13930 ret;
13931 ENDPROC(serpent_dec_blk_8way)
13932diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
13933index a410950..9dfe7ad 100644
13934--- a/arch/x86/crypto/sha1_ssse3_asm.S
13935+++ b/arch/x86/crypto/sha1_ssse3_asm.S
13936@@ -29,6 +29,7 @@
13937 */
13938
13939 #include <linux/linkage.h>
13940+#include <asm/alternative-asm.h>
13941
13942 #define CTX %rdi // arg1
13943 #define BUF %rsi // arg2
13944@@ -75,9 +76,9 @@
13945
13946 push %rbx
13947 push %rbp
13948- push %r12
13949+ push %r14
13950
13951- mov %rsp, %r12
13952+ mov %rsp, %r14
13953 sub $64, %rsp # allocate workspace
13954 and $~15, %rsp # align stack
13955
13956@@ -99,11 +100,12 @@
13957 xor %rax, %rax
13958 rep stosq
13959
13960- mov %r12, %rsp # deallocate workspace
13961+ mov %r14, %rsp # deallocate workspace
13962
13963- pop %r12
13964+ pop %r14
13965 pop %rbp
13966 pop %rbx
13967+ pax_force_retaddr
13968 ret
13969
13970 ENDPROC(\name)
13971diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
13972index 642f156..51a513c 100644
13973--- a/arch/x86/crypto/sha256-avx-asm.S
13974+++ b/arch/x86/crypto/sha256-avx-asm.S
13975@@ -49,6 +49,7 @@
13976
13977 #ifdef CONFIG_AS_AVX
13978 #include <linux/linkage.h>
13979+#include <asm/alternative-asm.h>
13980
13981 ## assume buffers not aligned
13982 #define VMOVDQ vmovdqu
13983@@ -460,6 +461,7 @@ done_hash:
13984 popq %r13
13985 popq %rbp
13986 popq %rbx
13987+ pax_force_retaddr
13988 ret
13989 ENDPROC(sha256_transform_avx)
13990
13991diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
13992index 9e86944..3795e6a 100644
13993--- a/arch/x86/crypto/sha256-avx2-asm.S
13994+++ b/arch/x86/crypto/sha256-avx2-asm.S
13995@@ -50,6 +50,7 @@
13996
13997 #ifdef CONFIG_AS_AVX2
13998 #include <linux/linkage.h>
13999+#include <asm/alternative-asm.h>
14000
14001 ## assume buffers not aligned
14002 #define VMOVDQ vmovdqu
14003@@ -720,6 +721,7 @@ done_hash:
14004 popq %r12
14005 popq %rbp
14006 popq %rbx
14007+ pax_force_retaddr
14008 ret
14009 ENDPROC(sha256_transform_rorx)
14010
14011diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14012index f833b74..8c62a9e 100644
14013--- a/arch/x86/crypto/sha256-ssse3-asm.S
14014+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14015@@ -47,6 +47,7 @@
14016 ########################################################################
14017
14018 #include <linux/linkage.h>
14019+#include <asm/alternative-asm.h>
14020
14021 ## assume buffers not aligned
14022 #define MOVDQ movdqu
14023@@ -471,6 +472,7 @@ done_hash:
14024 popq %rbp
14025 popq %rbx
14026
14027+ pax_force_retaddr
14028 ret
14029 ENDPROC(sha256_transform_ssse3)
14030
14031diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14032index 974dde9..a823ff9 100644
14033--- a/arch/x86/crypto/sha512-avx-asm.S
14034+++ b/arch/x86/crypto/sha512-avx-asm.S
14035@@ -49,6 +49,7 @@
14036
14037 #ifdef CONFIG_AS_AVX
14038 #include <linux/linkage.h>
14039+#include <asm/alternative-asm.h>
14040
14041 .text
14042
14043@@ -364,6 +365,7 @@ updateblock:
14044 mov frame_RSPSAVE(%rsp), %rsp
14045
14046 nowork:
14047+ pax_force_retaddr
14048 ret
14049 ENDPROC(sha512_transform_avx)
14050
14051diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14052index 568b961..ed20c37 100644
14053--- a/arch/x86/crypto/sha512-avx2-asm.S
14054+++ b/arch/x86/crypto/sha512-avx2-asm.S
14055@@ -51,6 +51,7 @@
14056
14057 #ifdef CONFIG_AS_AVX2
14058 #include <linux/linkage.h>
14059+#include <asm/alternative-asm.h>
14060
14061 .text
14062
14063@@ -678,6 +679,7 @@ done_hash:
14064
14065 # Restore Stack Pointer
14066 mov frame_RSPSAVE(%rsp), %rsp
14067+ pax_force_retaddr
14068 ret
14069 ENDPROC(sha512_transform_rorx)
14070
14071diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14072index fb56855..6edd768 100644
14073--- a/arch/x86/crypto/sha512-ssse3-asm.S
14074+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14075@@ -48,6 +48,7 @@
14076 ########################################################################
14077
14078 #include <linux/linkage.h>
14079+#include <asm/alternative-asm.h>
14080
14081 .text
14082
14083@@ -363,6 +364,7 @@ updateblock:
14084 mov frame_RSPSAVE(%rsp), %rsp
14085
14086 nowork:
14087+ pax_force_retaddr
14088 ret
14089 ENDPROC(sha512_transform_ssse3)
14090
14091diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14092index 0505813..b067311 100644
14093--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14094+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14095@@ -24,6 +24,7 @@
14096 */
14097
14098 #include <linux/linkage.h>
14099+#include <asm/alternative-asm.h>
14100 #include "glue_helper-asm-avx.S"
14101
14102 .file "twofish-avx-x86_64-asm_64.S"
14103@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14104 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14105 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14106
14107+ pax_force_retaddr
14108 ret;
14109 ENDPROC(__twofish_enc_blk8)
14110
14111@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14112 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14113 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14114
14115+ pax_force_retaddr
14116 ret;
14117 ENDPROC(__twofish_dec_blk8)
14118
14119@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14120
14121 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14122
14123+ pax_force_retaddr
14124 ret;
14125 ENDPROC(twofish_ecb_enc_8way)
14126
14127@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14128
14129 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14130
14131+ pax_force_retaddr
14132 ret;
14133 ENDPROC(twofish_ecb_dec_8way)
14134
14135@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14136 * %rdx: src
14137 */
14138
14139- pushq %r12;
14140+ pushq %r14;
14141
14142 movq %rsi, %r11;
14143- movq %rdx, %r12;
14144+ movq %rdx, %r14;
14145
14146 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14147
14148 call __twofish_dec_blk8;
14149
14150- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14151+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14152
14153- popq %r12;
14154+ popq %r14;
14155
14156+ pax_force_retaddr
14157 ret;
14158 ENDPROC(twofish_cbc_dec_8way)
14159
14160@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14161 * %rcx: iv (little endian, 128bit)
14162 */
14163
14164- pushq %r12;
14165+ pushq %r14;
14166
14167 movq %rsi, %r11;
14168- movq %rdx, %r12;
14169+ movq %rdx, %r14;
14170
14171 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14172 RD2, RX0, RX1, RY0);
14173
14174 call __twofish_enc_blk8;
14175
14176- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14177+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14178
14179- popq %r12;
14180+ popq %r14;
14181
14182+ pax_force_retaddr
14183 ret;
14184 ENDPROC(twofish_ctr_8way)
14185
14186@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14187 /* dst <= regs xor IVs(in dst) */
14188 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14189
14190+ pax_force_retaddr
14191 ret;
14192 ENDPROC(twofish_xts_enc_8way)
14193
14194@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14195 /* dst <= regs xor IVs(in dst) */
14196 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14197
14198+ pax_force_retaddr
14199 ret;
14200 ENDPROC(twofish_xts_dec_8way)
14201diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14202index 1c3b7ce..02f578d 100644
14203--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14204+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14205@@ -21,6 +21,7 @@
14206 */
14207
14208 #include <linux/linkage.h>
14209+#include <asm/alternative-asm.h>
14210
14211 .file "twofish-x86_64-asm-3way.S"
14212 .text
14213@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14214 popq %r13;
14215 popq %r14;
14216 popq %r15;
14217+ pax_force_retaddr
14218 ret;
14219
14220 .L__enc_xor3:
14221@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14222 popq %r13;
14223 popq %r14;
14224 popq %r15;
14225+ pax_force_retaddr
14226 ret;
14227 ENDPROC(__twofish_enc_blk_3way)
14228
14229@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14230 popq %r13;
14231 popq %r14;
14232 popq %r15;
14233+ pax_force_retaddr
14234 ret;
14235 ENDPROC(twofish_dec_blk_3way)
14236diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14237index a039d21..524b8b2 100644
14238--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14239+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14240@@ -22,6 +22,7 @@
14241
14242 #include <linux/linkage.h>
14243 #include <asm/asm-offsets.h>
14244+#include <asm/alternative-asm.h>
14245
14246 #define a_offset 0
14247 #define b_offset 4
14248@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14249
14250 popq R1
14251 movq $1,%rax
14252+ pax_force_retaddr
14253 ret
14254 ENDPROC(twofish_enc_blk)
14255
14256@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14257
14258 popq R1
14259 movq $1,%rax
14260+ pax_force_retaddr
14261 ret
14262 ENDPROC(twofish_dec_blk)
14263diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14264index d21ff89..6da8e6e 100644
14265--- a/arch/x86/ia32/ia32_aout.c
14266+++ b/arch/x86/ia32/ia32_aout.c
14267@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14268 unsigned long dump_start, dump_size;
14269 struct user32 dump;
14270
14271+ memset(&dump, 0, sizeof(dump));
14272+
14273 fs = get_fs();
14274 set_fs(KERNEL_DS);
14275 has_dumped = 1;
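The memset added to aout_core_dump closes an information leak: struct user32 is built up on the kernel stack and then written into the core file, so struct padding, and any field the dump path never assigns, would otherwise carry stale kernel stack bytes out to a user-readable file. Zeroing the structure before filling it in is the standard fix for this bug class. A user-space model, with an illustrative stand-in struct:

    #include <stdio.h>
    #include <string.h>

    struct dump_hdr {       /* stand-in for struct user32 */
        char magic;         /* 7 bytes of padding follow on LP64 */
        long start;
    };

    int main(void)
    {
        struct dump_hdr d;
        memset(&d, 0, sizeof(d));  /* without this, padding holds stale stack data */
        d.magic = 'C';
        d.start = 0x1000;
        fwrite(&d, sizeof(d), 1, stdout);  /* padding is now deterministically zero */
        return 0;
    }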
14276diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14277index 2206757..85cbcfa 100644
14278--- a/arch/x86/ia32/ia32_signal.c
14279+++ b/arch/x86/ia32/ia32_signal.c
14280@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14281 if (__get_user(set.sig[0], &frame->sc.oldmask)
14282 || (_COMPAT_NSIG_WORDS > 1
14283 && __copy_from_user((((char *) &set.sig) + 4),
14284- &frame->extramask,
14285+ frame->extramask,
14286 sizeof(frame->extramask))))
14287 goto badframe;
14288
14289@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14290 sp -= frame_size;
14291 /* Align the stack pointer according to the i386 ABI,
14292 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14293- sp = ((sp + 4) & -16ul) - 4;
14294+ sp = ((sp - 12) & -16ul) - 4;
14295 return (void __user *) sp;
14296 }
14297
14298@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14299 restorer = VDSO32_SYMBOL(current->mm->context.vdso,
14300 sigreturn);
14301 else
14302- restorer = &frame->retcode;
14303+ restorer = frame->retcode;
14304 }
14305
14306 put_user_try {
14307@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14308 * These are actually not used anymore, but left because some
14309 * gdb versions depend on them as a marker.
14310 */
14311- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14312+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14313 } put_user_catch(err);
14314
14315 if (err)
14316@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14317 0xb8,
14318 __NR_ia32_rt_sigreturn,
14319 0x80cd,
14320- 0,
14321+ 0
14322 };
14323
14324 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14325@@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14326
14327 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14328 restorer = ksig->ka.sa.sa_restorer;
14329+ else if (current->mm->context.vdso)
14330+ /* Return stub is in 32bit vsyscall page */
14331+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14332 else
14333- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
14334- rt_sigreturn);
14335+ restorer = frame->retcode;
14336 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14337
14338 /*
14339 * Not actually used anymore, but left because some gdb
14340 * versions need it.
14341 */
14342- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14343+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14344 } put_user_catch(err);
14345
14346 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
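Three things happen in the ia32_signal.c hunks. First, &frame->extramask and &frame->retcode become frame->extramask and frame->retcode: the members are arrays, so both spellings denote the same address, but letting the array decay yields a pointer of the element type rather than a pointer-to-array (a type cleanup, not a behavior change). Second, the rt frame now falls back to frame->retcode as the sigreturn trampoline when no vdso is mapped, which can happen under PaX once the vdso is randomized or disabled. Third, get_sigframe switches from ((sp + 4) & -16ul) - 4 to ((sp - 12) & -16ul) - 4; both satisfy the i386 ABI invariant that (sp + 4) be 16-byte aligned at function entry, and since subtracting 16 commutes with the masking, the new formula is exactly the old result minus 16, i.e. it leaves one extra aligned 16-byte slot of headroom below the frame. A quick check of both claims:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned long sp = 4096; sp < 4112; sp++) {
            unsigned long old_sp = ((sp + 4) & -16ul) - 4;
            unsigned long new_sp = ((sp - 12) & -16ul) - 4;
            assert(((old_sp + 4) & 15) == 0);   /* ABI alignment holds */
            assert(((new_sp + 4) & 15) == 0);   /* ... for both formulas */
            assert(new_sp == old_sp - 16);      /* new frame sits 16 bytes lower */
        }
        puts("alignment invariant verified for all residues mod 16");
        return 0;
    }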
14347diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14348index 4299eb0..c0687a7 100644
14349--- a/arch/x86/ia32/ia32entry.S
14350+++ b/arch/x86/ia32/ia32entry.S
14351@@ -15,8 +15,10 @@
14352 #include <asm/irqflags.h>
14353 #include <asm/asm.h>
14354 #include <asm/smap.h>
14355+#include <asm/pgtable.h>
14356 #include <linux/linkage.h>
14357 #include <linux/err.h>
14358+#include <asm/alternative-asm.h>
14359
14360 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14361 #include <linux/elf-em.h>
14362@@ -62,12 +64,12 @@
14363 */
14364 .macro LOAD_ARGS32 offset, _r9=0
14365 .if \_r9
14366- movl \offset+16(%rsp),%r9d
14367+ movl \offset+R9(%rsp),%r9d
14368 .endif
14369- movl \offset+40(%rsp),%ecx
14370- movl \offset+48(%rsp),%edx
14371- movl \offset+56(%rsp),%esi
14372- movl \offset+64(%rsp),%edi
14373+ movl \offset+RCX(%rsp),%ecx
14374+ movl \offset+RDX(%rsp),%edx
14375+ movl \offset+RSI(%rsp),%esi
14376+ movl \offset+RDI(%rsp),%edi
14377 movl %eax,%eax /* zero extension */
14378 .endm
14379
14380@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14381 ENDPROC(native_irq_enable_sysexit)
14382 #endif
14383
14384+ .macro pax_enter_kernel_user
14385+ pax_set_fptr_mask
14386+#ifdef CONFIG_PAX_MEMORY_UDEREF
14387+ call pax_enter_kernel_user
14388+#endif
14389+ .endm
14390+
14391+ .macro pax_exit_kernel_user
14392+#ifdef CONFIG_PAX_MEMORY_UDEREF
14393+ call pax_exit_kernel_user
14394+#endif
14395+#ifdef CONFIG_PAX_RANDKSTACK
14396+ pushq %rax
14397+ pushq %r11
14398+ call pax_randomize_kstack
14399+ popq %r11
14400+ popq %rax
14401+#endif
14402+ .endm
14403+
14404+ .macro pax_erase_kstack
14405+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14406+ call pax_erase_kstack
14407+#endif
14408+ .endm
14409+
14410 /*
14411 * 32bit SYSENTER instruction entry.
14412 *
14413@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14414 CFI_REGISTER rsp,rbp
14415 SWAPGS_UNSAFE_STACK
14416 movq PER_CPU_VAR(kernel_stack), %rsp
14417- addq $(KERNEL_STACK_OFFSET),%rsp
14418- /*
14419- * No need to follow this irqs on/off section: the syscall
14420- * disabled irqs, here we enable it straight after entry:
14421- */
14422- ENABLE_INTERRUPTS(CLBR_NONE)
14423 movl %ebp,%ebp /* zero extension */
14424 pushq_cfi $__USER32_DS
14425 /*CFI_REL_OFFSET ss,0*/
14426@@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
14427 CFI_REL_OFFSET rsp,0
14428 pushfq_cfi
14429 /*CFI_REL_OFFSET rflags,0*/
14430- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14431- CFI_REGISTER rip,r10
14432+ orl $X86_EFLAGS_IF,(%rsp)
14433+ GET_THREAD_INFO(%r11)
14434+ movl TI_sysenter_return(%r11), %r11d
14435+ CFI_REGISTER rip,r11
14436 pushq_cfi $__USER32_CS
14437 /*CFI_REL_OFFSET cs,0*/
14438 movl %eax, %eax
14439- pushq_cfi %r10
14440+ pushq_cfi %r11
14441 CFI_REL_OFFSET rip,0
14442 pushq_cfi %rax
14443 cld
14444 SAVE_ARGS 0,1,0
14445+ pax_enter_kernel_user
14446+
14447+#ifdef CONFIG_PAX_RANDKSTACK
14448+ pax_erase_kstack
14449+#endif
14450+
14451+ /*
14452+ * No need to follow this irqs on/off section: the syscall
14453+ * disabled irqs, here we enable it straight after entry:
14454+ */
14455+ ENABLE_INTERRUPTS(CLBR_NONE)
14456 /* no need to do an access_ok check here because rbp has been
14457 32bit zero extended */
14458+
14459+#ifdef CONFIG_PAX_MEMORY_UDEREF
14460+ addq pax_user_shadow_base,%rbp
14461+ ASM_PAX_OPEN_USERLAND
14462+#endif
14463+
14464 ASM_STAC
14465 1: movl (%rbp),%ebp
14466 _ASM_EXTABLE(1b,ia32_badarg)
14467 ASM_CLAC
14468- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14469- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14470+
14471+#ifdef CONFIG_PAX_MEMORY_UDEREF
14472+ ASM_PAX_CLOSE_USERLAND
14473+#endif
14474+
14475+ GET_THREAD_INFO(%r11)
14476+ orl $TS_COMPAT,TI_status(%r11)
14477+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14478 CFI_REMEMBER_STATE
14479 jnz sysenter_tracesys
14480 cmpq $(IA32_NR_syscalls-1),%rax
14481@@ -162,15 +209,18 @@ sysenter_do_call:
14482 sysenter_dispatch:
14483 call *ia32_sys_call_table(,%rax,8)
14484 movq %rax,RAX-ARGOFFSET(%rsp)
14485+ GET_THREAD_INFO(%r11)
14486 DISABLE_INTERRUPTS(CLBR_NONE)
14487 TRACE_IRQS_OFF
14488- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14489+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14490 jnz sysexit_audit
14491 sysexit_from_sys_call:
14492- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14493+ pax_exit_kernel_user
14494+ pax_erase_kstack
14495+ andl $~TS_COMPAT,TI_status(%r11)
14496 /* clear IF, that popfq doesn't enable interrupts early */
14497- andl $~0x200,EFLAGS-R11(%rsp)
14498- movl RIP-R11(%rsp),%edx /* User %eip */
14499+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14500+ movl RIP(%rsp),%edx /* User %eip */
14501 CFI_REGISTER rip,rdx
14502 RESTORE_ARGS 0,24,0,0,0,0
14503 xorq %r8,%r8
14504@@ -193,6 +243,9 @@ sysexit_from_sys_call:
14505 movl %eax,%esi /* 2nd arg: syscall number */
14506 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
14507 call __audit_syscall_entry
14508+
14509+ pax_erase_kstack
14510+
14511 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14512 cmpq $(IA32_NR_syscalls-1),%rax
14513 ja ia32_badsys
14514@@ -204,7 +257,7 @@ sysexit_from_sys_call:
14515 .endm
14516
14517 .macro auditsys_exit exit
14518- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14519+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14520 jnz ia32_ret_from_sys_call
14521 TRACE_IRQS_ON
14522 ENABLE_INTERRUPTS(CLBR_NONE)
14523@@ -215,11 +268,12 @@ sysexit_from_sys_call:
14524 1: setbe %al /* 1 if error, 0 if not */
14525 movzbl %al,%edi /* zero-extend that into %edi */
14526 call __audit_syscall_exit
14527+ GET_THREAD_INFO(%r11)
14528 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14529 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14530 DISABLE_INTERRUPTS(CLBR_NONE)
14531 TRACE_IRQS_OFF
14532- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14533+ testl %edi,TI_flags(%r11)
14534 jz \exit
14535 CLEAR_RREGS -ARGOFFSET
14536 jmp int_with_check
14537@@ -237,7 +291,7 @@ sysexit_audit:
14538
14539 sysenter_tracesys:
14540 #ifdef CONFIG_AUDITSYSCALL
14541- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14542+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14543 jz sysenter_auditsys
14544 #endif
14545 SAVE_REST
14546@@ -249,6 +303,9 @@ sysenter_tracesys:
14547 RESTORE_REST
14548 cmpq $(IA32_NR_syscalls-1),%rax
14549 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14550+
14551+ pax_erase_kstack
14552+
14553 jmp sysenter_do_call
14554 CFI_ENDPROC
14555 ENDPROC(ia32_sysenter_target)
14556@@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
14557 ENTRY(ia32_cstar_target)
14558 CFI_STARTPROC32 simple
14559 CFI_SIGNAL_FRAME
14560- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14561+ CFI_DEF_CFA rsp,0
14562 CFI_REGISTER rip,rcx
14563 /*CFI_REGISTER rflags,r11*/
14564 SWAPGS_UNSAFE_STACK
14565 movl %esp,%r8d
14566 CFI_REGISTER rsp,r8
14567 movq PER_CPU_VAR(kernel_stack),%rsp
14568+ SAVE_ARGS 8*6,0,0
14569+ pax_enter_kernel_user
14570+
14571+#ifdef CONFIG_PAX_RANDKSTACK
14572+ pax_erase_kstack
14573+#endif
14574+
14575 /*
14576 * No need to follow this irqs on/off section: the syscall
14577 * disabled irqs and here we enable it straight after entry:
14578 */
14579 ENABLE_INTERRUPTS(CLBR_NONE)
14580- SAVE_ARGS 8,0,0
14581 movl %eax,%eax /* zero extension */
14582 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14583 movq %rcx,RIP-ARGOFFSET(%rsp)
14584@@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
14585 /* no need to do an access_ok check here because r8 has been
14586 32bit zero extended */
14587 /* hardware stack frame is complete now */
14588+
14589+#ifdef CONFIG_PAX_MEMORY_UDEREF
14590+ ASM_PAX_OPEN_USERLAND
14591+ movq pax_user_shadow_base,%r8
14592+ addq RSP-ARGOFFSET(%rsp),%r8
14593+#endif
14594+
14595 ASM_STAC
14596 1: movl (%r8),%r9d
14597 _ASM_EXTABLE(1b,ia32_badarg)
14598 ASM_CLAC
14599- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14600- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14601+
14602+#ifdef CONFIG_PAX_MEMORY_UDEREF
14603+ ASM_PAX_CLOSE_USERLAND
14604+#endif
14605+
14606+ GET_THREAD_INFO(%r11)
14607+ orl $TS_COMPAT,TI_status(%r11)
14608+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14609 CFI_REMEMBER_STATE
14610 jnz cstar_tracesys
14611 cmpq $IA32_NR_syscalls-1,%rax
14612@@ -319,13 +395,16 @@ cstar_do_call:
14613 cstar_dispatch:
14614 call *ia32_sys_call_table(,%rax,8)
14615 movq %rax,RAX-ARGOFFSET(%rsp)
14616+ GET_THREAD_INFO(%r11)
14617 DISABLE_INTERRUPTS(CLBR_NONE)
14618 TRACE_IRQS_OFF
14619- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14620+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14621 jnz sysretl_audit
14622 sysretl_from_sys_call:
14623- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14624- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14625+ pax_exit_kernel_user
14626+ pax_erase_kstack
14627+ andl $~TS_COMPAT,TI_status(%r11)
14628+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14629 movl RIP-ARGOFFSET(%rsp),%ecx
14630 CFI_REGISTER rip,rcx
14631 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14632@@ -352,7 +431,7 @@ sysretl_audit:
14633
14634 cstar_tracesys:
14635 #ifdef CONFIG_AUDITSYSCALL
14636- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14637+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14638 jz cstar_auditsys
14639 #endif
14640 xchgl %r9d,%ebp
14641@@ -366,11 +445,19 @@ cstar_tracesys:
14642 xchgl %ebp,%r9d
14643 cmpq $(IA32_NR_syscalls-1),%rax
14644 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14645+
14646+ pax_erase_kstack
14647+
14648 jmp cstar_do_call
14649 END(ia32_cstar_target)
14650
14651 ia32_badarg:
14652 ASM_CLAC
14653+
14654+#ifdef CONFIG_PAX_MEMORY_UDEREF
14655+ ASM_PAX_CLOSE_USERLAND
14656+#endif
14657+
14658 movq $-EFAULT,%rax
14659 jmp ia32_sysret
14660 CFI_ENDPROC
14661@@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
14662 CFI_REL_OFFSET rip,RIP-RIP
14663 PARAVIRT_ADJUST_EXCEPTION_FRAME
14664 SWAPGS
14665- /*
14666- * No need to follow this irqs on/off section: the syscall
14667- * disabled irqs and here we enable it straight after entry:
14668- */
14669- ENABLE_INTERRUPTS(CLBR_NONE)
14670 movl %eax,%eax
14671 pushq_cfi %rax
14672 cld
14673 /* note the registers are not zero extended to the sf.
14674 this could be a problem. */
14675 SAVE_ARGS 0,1,0
14676- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14677- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14678+ pax_enter_kernel_user
14679+
14680+#ifdef CONFIG_PAX_RANDKSTACK
14681+ pax_erase_kstack
14682+#endif
14683+
14684+ /*
14685+ * No need to follow this irqs on/off section: the syscall
14686+ * disabled irqs and here we enable it straight after entry:
14687+ */
14688+ ENABLE_INTERRUPTS(CLBR_NONE)
14689+ GET_THREAD_INFO(%r11)
14690+ orl $TS_COMPAT,TI_status(%r11)
14691+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14692 jnz ia32_tracesys
14693 cmpq $(IA32_NR_syscalls-1),%rax
14694 ja ia32_badsys
14695@@ -442,6 +536,9 @@ ia32_tracesys:
14696 RESTORE_REST
14697 cmpq $(IA32_NR_syscalls-1),%rax
14698 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
14699+
14700+ pax_erase_kstack
14701+
14702 jmp ia32_do_call
14703 END(ia32_syscall)
14704
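The ia32entry.S changes apply one scheme to all three 32-bit entry paths (sysenter, the cstar/syscall path, and int 0x80): registers are saved and pax_enter_kernel_user runs before interrupts are re-enabled; thread_info is reached through GET_THREAD_INFO(%r11) instead of a fixed offset from %rsp, since CONFIG_PAX_RANDKSTACK randomizes the kernel stack pointer and breaks fixed-offset arithmetic; the single user-space dereference in each path is bracketed by ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND for UDEREF (ia32_badarg also closes userland, because it is reached from inside such a bracket); and pax_erase_kstack wipes the kernel stack on the tracing and audit paths so stale syscall arguments do not survive. A conceptual, user-space sketch of the erase step; the poison value and names here are illustrative, not what the kernel uses:

    #include <stdio.h>
    #include <string.h>

    /* Wipe the no-longer-live region below the "current" stack pointer
     * so a later infoleak cannot read stale syscall data. */
    static void erase_region(unsigned char *lo, unsigned char *hi)
    {
        memset(lo, 0xdf, (size_t)(hi - lo));    /* 0xdf: sketch poison */
    }

    int main(void)
    {
        unsigned char stack[64];
        memset(stack, 0xaa, sizeof(stack));     /* pretend: stale data */
        erase_region(stack, stack + 48);        /* dead area below sp */
        printf("stack[0]=%#x stack[60]=%#x\n", stack[0], stack[60]);
        return 0;
    }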
14705diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
14706index 8e0ceec..af13504 100644
14707--- a/arch/x86/ia32/sys_ia32.c
14708+++ b/arch/x86/ia32/sys_ia32.c
14709@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
14710 */
14711 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
14712 {
14713- typeof(ubuf->st_uid) uid = 0;
14714- typeof(ubuf->st_gid) gid = 0;
14715+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
14716+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
14717 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
14718 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
14719 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
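The cp_stat64 change replaces typeof(ubuf->st_uid) with typeof(((struct stat64 *)0)->st_uid). Both name the same member type and neither evaluates its operand, but the second form never mentions the __user-qualified parameter, so static checkers (including the GCC plugins this patch introduces elsewhere) cannot mistake it for a dereference of a user pointer. The null-pointer typeof idiom is well-defined for exactly this reason and works even where no object of the type is in scope:

    #include <stdio.h>

    struct stat_like { unsigned int st_uid; };

    int main(void)
    {
        /* typeof through a null pointer is never evaluated; only the
         * member's type is extracted (GCC extension, as in kernel C). */
        typeof(((struct stat_like *)0)->st_uid) uid = 0;
        printf("sizeof(uid) = %zu\n", sizeof(uid));
        return 0;
    }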
14720diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
14721index 372231c..51b537d 100644
14722--- a/arch/x86/include/asm/alternative-asm.h
14723+++ b/arch/x86/include/asm/alternative-asm.h
14724@@ -18,6 +18,45 @@
14725 .endm
14726 #endif
14727
14728+#ifdef KERNEXEC_PLUGIN
14729+ .macro pax_force_retaddr_bts rip=0
14730+ btsq $63,\rip(%rsp)
14731+ .endm
14732+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14733+ .macro pax_force_retaddr rip=0, reload=0
14734+ btsq $63,\rip(%rsp)
14735+ .endm
14736+ .macro pax_force_fptr ptr
14737+ btsq $63,\ptr
14738+ .endm
14739+ .macro pax_set_fptr_mask
14740+ .endm
14741+#endif
14742+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
14743+ .macro pax_force_retaddr rip=0, reload=0
14744+ .if \reload
14745+ pax_set_fptr_mask
14746+ .endif
14747+ orq %r12,\rip(%rsp)
14748+ .endm
14749+ .macro pax_force_fptr ptr
14750+ orq %r12,\ptr
14751+ .endm
14752+ .macro pax_set_fptr_mask
14753+ movabs $0x8000000000000000,%r12
14754+ .endm
14755+#endif
14756+#else
14757+ .macro pax_force_retaddr rip=0, reload=0
14758+ .endm
14759+ .macro pax_force_fptr ptr
14760+ .endm
14761+ .macro pax_force_retaddr_bts rip=0
14762+ .endm
14763+ .macro pax_set_fptr_mask
14764+ .endm
14765+#endif
14766+
14767 .macro altinstruction_entry orig alt feature orig_len alt_len
14768 .long \orig - .
14769 .long \alt - .
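The hunk above supplies the macros used throughout the crypto and entry code. With the KERNEXEC plugin active, pax_force_retaddr has two expansions: the BTS method issues btsq $63,\rip(%rsp) directly, while the OR method issues orq %r12,\rip(%rsp) against the mask that pax_set_fptr_mask preloads into %r12 (which is why %r12 had to be vacated in the .S files earlier). Without the plugin, all four macros expand to nothing, so the instrumentation costs nothing on non-PaX builds. Both expansions set the same bit; a user-space demonstration on plain memory:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t a = 0x1234, b = 0x1234;
        asm volatile("btsq $63, %0" : "+m" (a) : : "cc");        /* BTS method */
        asm volatile("orq %1, %0"                                /* OR method */
                     : "+m" (b) : "r" (0x8000000000000000ULL) : "cc");
        printf("bts: %#llx\nor:  %#llx\n",
               (unsigned long long)a, (unsigned long long)b);
        return 0;
    }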
14770diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
14771index 0a3f9c9..c9d081d 100644
14772--- a/arch/x86/include/asm/alternative.h
14773+++ b/arch/x86/include/asm/alternative.h
14774@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14775 ".pushsection .discard,\"aw\",@progbits\n" \
14776 DISCARD_ENTRY(1) \
14777 ".popsection\n" \
14778- ".pushsection .altinstr_replacement, \"ax\"\n" \
14779+ ".pushsection .altinstr_replacement, \"a\"\n" \
14780 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
14781 ".popsection"
14782
14783@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14784 DISCARD_ENTRY(1) \
14785 DISCARD_ENTRY(2) \
14786 ".popsection\n" \
14787- ".pushsection .altinstr_replacement, \"ax\"\n" \
14788+ ".pushsection .altinstr_replacement, \"a\"\n" \
14789 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
14790 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
14791 ".popsection"
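Changing .altinstr_replacement from "ax" to "a" strips the executable flag from the section that holds alternative-instruction templates. The templates are only ever copied over the original instructions by apply_alternatives() at boot; they are never executed where they sit, so under KERNEXEC there is no reason to map them executable. The same data-not-code distinction can be expressed from C with a section attribute (the section name below is illustrative):

    #include <stdio.h>

    /* A replacement template kept as plain data: it can be read and
     * copied over code by a patching step, but is not itself mapped
     * as executable text. */
    static const unsigned char repl_nop5[]
        __attribute__((used, section(".altinstr_demo"))) =
        { 0x0f, 0x1f, 0x44, 0x00, 0x00 };   /* 5-byte x86 NOP */

    int main(void)
    {
        printf("template at %p, first byte %#x\n",
               (const void *)repl_nop5, repl_nop5[0]);
        return 0;
    }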
14792diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
14793index 1d2091a..f5074c1 100644
14794--- a/arch/x86/include/asm/apic.h
14795+++ b/arch/x86/include/asm/apic.h
14796@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
14797
14798 #ifdef CONFIG_X86_LOCAL_APIC
14799
14800-extern unsigned int apic_verbosity;
14801+extern int apic_verbosity;
14802 extern int local_apic_timer_c2_ok;
14803
14804 extern int disable_apic;
14805diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
14806index 20370c6..a2eb9b0 100644
14807--- a/arch/x86/include/asm/apm.h
14808+++ b/arch/x86/include/asm/apm.h
14809@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
14810 __asm__ __volatile__(APM_DO_ZERO_SEGS
14811 "pushl %%edi\n\t"
14812 "pushl %%ebp\n\t"
14813- "lcall *%%cs:apm_bios_entry\n\t"
14814+ "lcall *%%ss:apm_bios_entry\n\t"
14815 "setc %%al\n\t"
14816 "popl %%ebp\n\t"
14817 "popl %%edi\n\t"
14818@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
14819 __asm__ __volatile__(APM_DO_ZERO_SEGS
14820 "pushl %%edi\n\t"
14821 "pushl %%ebp\n\t"
14822- "lcall *%%cs:apm_bios_entry\n\t"
14823+ "lcall *%%ss:apm_bios_entry\n\t"
14824 "setc %%bl\n\t"
14825 "popl %%ebp\n\t"
14826 "popl %%edi\n\t"
14827diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
14828index b17f4f4..9620151 100644
14829--- a/arch/x86/include/asm/atomic.h
14830+++ b/arch/x86/include/asm/atomic.h
14831@@ -23,7 +23,18 @@
14832 */
14833 static inline int atomic_read(const atomic_t *v)
14834 {
14835- return (*(volatile int *)&(v)->counter);
14836+ return (*(volatile const int *)&(v)->counter);
14837+}
14838+
14839+/**
14840+ * atomic_read_unchecked - read atomic variable
14841+ * @v: pointer of type atomic_unchecked_t
14842+ *
14843+ * Atomically reads the value of @v.
14844+ */
14845+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
14846+{
14847+ return (*(volatile const int *)&(v)->counter);
14848 }
14849
14850 /**
14851@@ -39,6 +50,18 @@ static inline void atomic_set(atomic_t *v, int i)
14852 }
14853
14854 /**
14855+ * atomic_set_unchecked - set atomic variable
14856+ * @v: pointer of type atomic_unchecked_t
14857+ * @i: required value
14858+ *
14859+ * Atomically sets the value of @v to @i.
14860+ */
14861+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
14862+{
14863+ v->counter = i;
14864+}
14865+
14866+/**
14867 * atomic_add - add integer to atomic variable
14868 * @i: integer value to add
14869 * @v: pointer of type atomic_t
14870@@ -47,7 +70,29 @@ static inline void atomic_set(atomic_t *v, int i)
14871 */
14872 static inline void atomic_add(int i, atomic_t *v)
14873 {
14874- asm volatile(LOCK_PREFIX "addl %1,%0"
14875+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
14876+
14877+#ifdef CONFIG_PAX_REFCOUNT
14878+ "jno 0f\n"
14879+ LOCK_PREFIX "subl %1,%0\n"
14880+ "int $4\n0:\n"
14881+ _ASM_EXTABLE(0b, 0b)
14882+#endif
14883+
14884+ : "+m" (v->counter)
14885+ : "ir" (i));
14886+}
14887+
14888+/**
14889+ * atomic_add_unchecked - add integer to atomic variable
14890+ * @i: integer value to add
14891+ * @v: pointer of type atomic_unchecked_t
14892+ *
14893+ * Atomically adds @i to @v.
14894+ */
14895+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
14896+{
14897+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
14898 : "+m" (v->counter)
14899 : "ir" (i));
14900 }
14901@@ -61,7 +106,29 @@ static inline void atomic_add(int i, atomic_t *v)
14902 */
14903 static inline void atomic_sub(int i, atomic_t *v)
14904 {
14905- asm volatile(LOCK_PREFIX "subl %1,%0"
14906+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
14907+
14908+#ifdef CONFIG_PAX_REFCOUNT
14909+ "jno 0f\n"
14910+ LOCK_PREFIX "addl %1,%0\n"
14911+ "int $4\n0:\n"
14912+ _ASM_EXTABLE(0b, 0b)
14913+#endif
14914+
14915+ : "+m" (v->counter)
14916+ : "ir" (i));
14917+}
14918+
14919+/**
14920+ * atomic_sub_unchecked - subtract integer from atomic variable
14921+ * @i: integer value to subtract
14922+ * @v: pointer of type atomic_unchecked_t
14923+ *
14924+ * Atomically subtracts @i from @v.
14925+ */
14926+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
14927+{
14928+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
14929 : "+m" (v->counter)
14930 : "ir" (i));
14931 }
14932@@ -77,7 +144,7 @@ static inline void atomic_sub(int i, atomic_t *v)
14933 */
14934 static inline int atomic_sub_and_test(int i, atomic_t *v)
14935 {
14936- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
14937+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
14938 }
14939
14940 /**
14941@@ -88,7 +155,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
14942 */
14943 static inline void atomic_inc(atomic_t *v)
14944 {
14945- asm volatile(LOCK_PREFIX "incl %0"
14946+ asm volatile(LOCK_PREFIX "incl %0\n"
14947+
14948+#ifdef CONFIG_PAX_REFCOUNT
14949+ "jno 0f\n"
14950+ LOCK_PREFIX "decl %0\n"
14951+ "int $4\n0:\n"
14952+ _ASM_EXTABLE(0b, 0b)
14953+#endif
14954+
14955+ : "+m" (v->counter));
14956+}
14957+
14958+/**
14959+ * atomic_inc_unchecked - increment atomic variable
14960+ * @v: pointer of type atomic_unchecked_t
14961+ *
14962+ * Atomically increments @v by 1.
14963+ */
14964+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
14965+{
14966+ asm volatile(LOCK_PREFIX "incl %0\n"
14967 : "+m" (v->counter));
14968 }
14969
14970@@ -100,7 +187,27 @@ static inline void atomic_inc(atomic_t *v)
14971 */
14972 static inline void atomic_dec(atomic_t *v)
14973 {
14974- asm volatile(LOCK_PREFIX "decl %0"
14975+ asm volatile(LOCK_PREFIX "decl %0\n"
14976+
14977+#ifdef CONFIG_PAX_REFCOUNT
14978+ "jno 0f\n"
14979+ LOCK_PREFIX "incl %0\n"
14980+ "int $4\n0:\n"
14981+ _ASM_EXTABLE(0b, 0b)
14982+#endif
14983+
14984+ : "+m" (v->counter));
14985+}
14986+
14987+/**
14988+ * atomic_dec_unchecked - decrement atomic variable
14989+ * @v: pointer of type atomic_unchecked_t
14990+ *
14991+ * Atomically decrements @v by 1.
14992+ */
14993+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
14994+{
14995+ asm volatile(LOCK_PREFIX "decl %0\n"
14996 : "+m" (v->counter));
14997 }
14998
14999@@ -114,7 +221,7 @@ static inline void atomic_dec(atomic_t *v)
15000 */
15001 static inline int atomic_dec_and_test(atomic_t *v)
15002 {
15003- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15004+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15005 }
15006
15007 /**
15008@@ -127,7 +234,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15009 */
15010 static inline int atomic_inc_and_test(atomic_t *v)
15011 {
15012- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15013+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15014+}
15015+
15016+/**
15017+ * atomic_inc_and_test_unchecked - increment and test
15018+ * @v: pointer of type atomic_unchecked_t
15019+ *
15020+ * Atomically increments @v by 1
15021+ * and returns true if the result is zero, or false for all
15022+ * other cases.
15023+ */
15024+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15025+{
15026+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15027 }
15028
15029 /**
15030@@ -141,7 +261,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15031 */
15032 static inline int atomic_add_negative(int i, atomic_t *v)
15033 {
15034- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15035+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15036 }
15037
15038 /**
15039@@ -153,6 +273,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15040 */
15041 static inline int atomic_add_return(int i, atomic_t *v)
15042 {
15043+ return i + xadd_check_overflow(&v->counter, i);
15044+}
15045+
15046+/**
15047+ * atomic_add_return_unchecked - add integer and return
15048+ * @i: integer value to add
15049+ * @v: pointer of type atomic_unchecked_t
15050+ *
15051+ * Atomically adds @i to @v and returns @i + @v
15052+ */
15053+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15054+{
15055 return i + xadd(&v->counter, i);
15056 }
15057
15058@@ -169,9 +301,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
15059 }
15060
15061 #define atomic_inc_return(v) (atomic_add_return(1, v))
15062+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15063+{
15064+ return atomic_add_return_unchecked(1, v);
15065+}
15066 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15067
15068-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15069+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15070+{
15071+ return cmpxchg(&v->counter, old, new);
15072+}
15073+
15074+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15075 {
15076 return cmpxchg(&v->counter, old, new);
15077 }
15078@@ -181,6 +322,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15079 return xchg(&v->counter, new);
15080 }
15081
15082+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15083+{
15084+ return xchg(&v->counter, new);
15085+}
15086+
15087 /**
15088 * __atomic_add_unless - add unless the number is already a given value
15089 * @v: pointer of type atomic_t
15090@@ -190,14 +336,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
15091 * Atomically adds @a to @v, so long as @v was not already @u.
15092 * Returns the old value of @v.
15093 */
15094-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15095+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
15096 {
15097- int c, old;
15098+ int c, old, new;
15099 c = atomic_read(v);
15100 for (;;) {
15101- if (unlikely(c == (u)))
15102+ if (unlikely(c == u))
15103 break;
15104- old = atomic_cmpxchg((v), c, c + (a));
15105+
15106+ asm volatile("addl %2,%0\n"
15107+
15108+#ifdef CONFIG_PAX_REFCOUNT
15109+ "jno 0f\n"
15110+ "subl %2,%0\n"
15111+ "int $4\n0:\n"
15112+ _ASM_EXTABLE(0b, 0b)
15113+#endif
15114+
15115+ : "=r" (new)
15116+ : "0" (c), "ir" (a));
15117+
15118+ old = atomic_cmpxchg(v, c, new);
15119 if (likely(old == c))
15120 break;
15121 c = old;
15122@@ -206,6 +365,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15123 }
15124
15125 /**
15126+ * atomic_inc_not_zero_hint - increment if not null
15127+ * @v: pointer of type atomic_t
15128+ * @hint: probable value of the atomic before the increment
15129+ *
15130+ * This version of atomic_inc_not_zero() gives a hint of probable
15131+ * value of the atomic. This helps processor to not read the memory
15132+ * before doing the atomic read/modify/write cycle, lowering
15133+ * number of bus transactions on some arches.
15134+ *
15135+ * Returns: 0 if increment was not done, 1 otherwise.
15136+ */
15137+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15138+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15139+{
15140+ int val, c = hint, new;
15141+
15142+ /* sanity test, should be removed by compiler if hint is a constant */
15143+ if (!hint)
15144+ return __atomic_add_unless(v, 1, 0);
15145+
15146+ do {
15147+ asm volatile("incl %0\n"
15148+
15149+#ifdef CONFIG_PAX_REFCOUNT
15150+ "jno 0f\n"
15151+ "decl %0\n"
15152+ "int $4\n0:\n"
15153+ _ASM_EXTABLE(0b, 0b)
15154+#endif
15155+
15156+ : "=r" (new)
15157+ : "0" (c));
15158+
15159+ val = atomic_cmpxchg(v, c, new);
15160+ if (val == c)
15161+ return 1;
15162+ c = val;
15163+ } while (c);
15164+
15165+ return 0;
15166+}
15167+
15168+/**
15169 * atomic_inc_short - increment of a short integer
15170 * @v: pointer to type int
15171 *
15172@@ -234,14 +436,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
15173 #endif
15174
15175 /* These are x86-specific, used by some header files */
15176-#define atomic_clear_mask(mask, addr) \
15177- asm volatile(LOCK_PREFIX "andl %0,%1" \
15178- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15179+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15180+{
15181+ asm volatile(LOCK_PREFIX "andl %1,%0"
15182+ : "+m" (v->counter)
15183+ : "r" (~(mask))
15184+ : "memory");
15185+}
15186
15187-#define atomic_set_mask(mask, addr) \
15188- asm volatile(LOCK_PREFIX "orl %0,%1" \
15189- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15190- : "memory")
15191+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15192+{
15193+ asm volatile(LOCK_PREFIX "andl %1,%0"
15194+ : "+m" (v->counter)
15195+ : "r" (~(mask))
15196+ : "memory");
15197+}
15198+
15199+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15200+{
15201+ asm volatile(LOCK_PREFIX "orl %1,%0"
15202+ : "+m" (v->counter)
15203+ : "r" (mask)
15204+ : "memory");
15205+}
15206+
15207+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15208+{
15209+ asm volatile(LOCK_PREFIX "orl %1,%0"
15210+ : "+m" (v->counter)
15211+ : "r" (mask)
15212+ : "memory");
15213+}
15214
15215 /* Atomic operations are already serializing on x86 */
15216 #define smp_mb__before_atomic_dec() barrier()
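The atomic.h rework implements CONFIG_PAX_REFCOUNT. Every plain atomic operation gains an overflow check: perform the op, jno past the fixup when the overflow flag is clear, otherwise undo the op and raise int $4, with an exception-table entry so the fault handler can attribute the event. Counters that are allowed to wrap (statistics and the like) move to the new atomic_unchecked_t type and its *_unchecked helpers, which keep the original unchecked code. Along the way the GEN_*_RMWcc macros grow a second opcode argument naming the undo instruction, and atomic_clear_mask/atomic_set_mask become typed inline functions instead of untyped macros. A minimal user-space model of the check-and-undo control flow, leaving out the LOCK prefix and the exception table:

    #include <stdio.h>

    /* Add i to *counter; on signed overflow, undo the add and flag it. */
    static int checked_add(int *counter, int i, int *overflowed)
    {
        asm volatile("addl %2,%0\n"
                     "jno 1f\n"
                     "subl %2,%0\n"     /* undo on overflow */
                     "movl $1,%1\n"     /* record the event  */
                     "1:\n"
                     : "+m" (*counter), "+r" (*overflowed)
                     : "ir" (i) : "cc");
        return *counter;
    }

    int main(void)
    {
        int c = 0x7fffffff, of = 0;
        checked_add(&c, 1, &of);
        printf("counter=%d overflowed=%d\n", c, of);   /* unchanged, flagged */
        return 0;
    }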
15217diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15218index b154de7..bf18a5a 100644
15219--- a/arch/x86/include/asm/atomic64_32.h
15220+++ b/arch/x86/include/asm/atomic64_32.h
15221@@ -12,6 +12,14 @@ typedef struct {
15222 u64 __aligned(8) counter;
15223 } atomic64_t;
15224
15225+#ifdef CONFIG_PAX_REFCOUNT
15226+typedef struct {
15227+ u64 __aligned(8) counter;
15228+} atomic64_unchecked_t;
15229+#else
15230+typedef atomic64_t atomic64_unchecked_t;
15231+#endif
15232+
15233 #define ATOMIC64_INIT(val) { (val) }
15234
15235 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15236@@ -37,21 +45,31 @@ typedef struct {
15237 ATOMIC64_DECL_ONE(sym##_386)
15238
15239 ATOMIC64_DECL_ONE(add_386);
15240+ATOMIC64_DECL_ONE(add_unchecked_386);
15241 ATOMIC64_DECL_ONE(sub_386);
15242+ATOMIC64_DECL_ONE(sub_unchecked_386);
15243 ATOMIC64_DECL_ONE(inc_386);
15244+ATOMIC64_DECL_ONE(inc_unchecked_386);
15245 ATOMIC64_DECL_ONE(dec_386);
15246+ATOMIC64_DECL_ONE(dec_unchecked_386);
15247 #endif
15248
15249 #define alternative_atomic64(f, out, in...) \
15250 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15251
15252 ATOMIC64_DECL(read);
15253+ATOMIC64_DECL(read_unchecked);
15254 ATOMIC64_DECL(set);
15255+ATOMIC64_DECL(set_unchecked);
15256 ATOMIC64_DECL(xchg);
15257 ATOMIC64_DECL(add_return);
15258+ATOMIC64_DECL(add_return_unchecked);
15259 ATOMIC64_DECL(sub_return);
15260+ATOMIC64_DECL(sub_return_unchecked);
15261 ATOMIC64_DECL(inc_return);
15262+ATOMIC64_DECL(inc_return_unchecked);
15263 ATOMIC64_DECL(dec_return);
15264+ATOMIC64_DECL(dec_return_unchecked);
15265 ATOMIC64_DECL(dec_if_positive);
15266 ATOMIC64_DECL(inc_not_zero);
15267 ATOMIC64_DECL(add_unless);
15268@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15269 }
15270
15271 /**
15272+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15273+ * @v: pointer to type atomic64_unchecked_t
15274+ * @o: expected value
15275+ * @n: new value
15276+ *
15277+ * Atomically sets @v to @n if it was equal to @o and returns
15278+ * the old value.
15279+ */
15280+
15281+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15282+{
15283+ return cmpxchg64(&v->counter, o, n);
15284+}
15285+
15286+/**
15287 * atomic64_xchg - xchg atomic64 variable
15288 * @v: pointer to type atomic64_t
15289 * @n: value to assign
15290@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15291 }
15292
15293 /**
15294+ * atomic64_set_unchecked - set atomic64 variable
15295+ * @v: pointer to type atomic64_unchecked_t
15296+ * @n: value to assign
15297+ *
15298+ * Atomically sets the value of @v to @n.
15299+ */
15300+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15301+{
15302+ unsigned high = (unsigned)(i >> 32);
15303+ unsigned low = (unsigned)i;
15304+ alternative_atomic64(set, /* no output */,
15305+ "S" (v), "b" (low), "c" (high)
15306+ : "eax", "edx", "memory");
15307+}
15308+
15309+/**
15310 * atomic64_read - read atomic64 variable
15311 * @v: pointer to type atomic64_t
15312 *
15313@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15314 }
15315
15316 /**
15317+ * atomic64_read_unchecked - read atomic64 variable
15318+ * @v: pointer to type atomic64_unchecked_t
15319+ *
15320+ * Atomically reads the value of @v and returns it.
15321+ */
15322+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15323+{
15324+ long long r;
15325+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15326+ return r;
15327+}
15328+
15329+/**
15330 * atomic64_add_return - add and return
15331 * @i: integer value to add
15332 * @v: pointer to type atomic64_t
15333@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15334 return i;
15335 }
15336
15337+/**
15338+ * atomic64_add_return_unchecked - add and return
15339+ * @i: integer value to add
15340+ * @v: pointer to type atomic64_unchecked_t
15341+ *
15342+ * Atomically adds @i to @v and returns @i + *@v
15343+ */
15344+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15345+{
15346+ alternative_atomic64(add_return_unchecked,
15347+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15348+ ASM_NO_INPUT_CLOBBER("memory"));
15349+ return i;
15350+}
15351+
15352 /*
15353 * Other variants with different arithmetic operators:
15354 */
15355@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15356 return a;
15357 }
15358
15359+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15360+{
15361+ long long a;
15362+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15363+ "S" (v) : "memory", "ecx");
15364+ return a;
15365+}
15366+
15367 static inline long long atomic64_dec_return(atomic64_t *v)
15368 {
15369 long long a;
15370@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15371 }
15372
15373 /**
15374+ * atomic64_add_unchecked - add integer to atomic64 variable
15375+ * @i: integer value to add
15376+ * @v: pointer to type atomic64_unchecked_t
15377+ *
15378+ * Atomically adds @i to @v.
15379+ */
15380+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15381+{
15382+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15383+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15384+ ASM_NO_INPUT_CLOBBER("memory"));
15385+ return i;
15386+}
15387+
15388+/**
15389 * atomic64_sub - subtract the atomic64 variable
15390 * @i: integer value to subtract
15391 * @v: pointer to type atomic64_t
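[Editor's sketch] On 32-bit x86 there is no single instruction that does 64-bit arithmetic on memory, so every atomic64 operation above is an out-of-line helper selected by alternative_atomic64() — cmpxchg8b-based, with irq-disabling fallbacks on CPUs without cmpxchg8b — and each helper gains an _unchecked twin. A minimal sketch of the compare-and-swap loop those helpers implement, with the __atomic builtins standing in for the hand-written cmpxchg8b assembly:

	#include <stdio.h>

	static long long atomic64_add_return_sketch(long long i, long long *v)
	{
		long long old = __atomic_load_n(v, __ATOMIC_RELAXED);

		/* publish old + i only if nobody raced us; on failure
		 * "old" is refreshed with the current value and we retry */
		while (!__atomic_compare_exchange_n(v, &old, old + i, 0,
						    __ATOMIC_SEQ_CST,
						    __ATOMIC_SEQ_CST))
			;
		return old + i;
	}

	int main(void)
	{
		long long v = 1;

		printf("%lld\n", atomic64_add_return_sketch(41, &v));	/* 42 */
		return 0;
	}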
15392diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15393index 46e9052..ae45136 100644
15394--- a/arch/x86/include/asm/atomic64_64.h
15395+++ b/arch/x86/include/asm/atomic64_64.h
15396@@ -18,7 +18,19 @@
15397 */
15398 static inline long atomic64_read(const atomic64_t *v)
15399 {
15400- return (*(volatile long *)&(v)->counter);
15401+ return (*(volatile const long *)&(v)->counter);
15402+}
15403+
15404+/**
15405+ * atomic64_read_unchecked - read atomic64 variable
15406+ * @v: pointer of type atomic64_unchecked_t
15407+ *
15408+ * Atomically reads the value of @v.
15409+ * Doesn't imply a read memory barrier.
15410+ */
15411+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15412+{
15413+ return (*(volatile const long *)&(v)->counter);
15414 }
15415
15416 /**
15417@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15418 }
15419
15420 /**
15421+ * atomic64_set_unchecked - set atomic64 variable
15422+ * @v: pointer to type atomic64_unchecked_t
15423+ * @i: required value
15424+ *
15425+ * Atomically sets the value of @v to @i.
15426+ */
15427+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15428+{
15429+ v->counter = i;
15430+}
15431+
15432+/**
15433 * atomic64_add - add integer to atomic64 variable
15434 * @i: integer value to add
15435 * @v: pointer to type atomic64_t
15436@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15437 */
15438 static inline void atomic64_add(long i, atomic64_t *v)
15439 {
15440+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15441+
15442+#ifdef CONFIG_PAX_REFCOUNT
15443+ "jno 0f\n"
15444+ LOCK_PREFIX "subq %1,%0\n"
15445+ "int $4\n0:\n"
15446+ _ASM_EXTABLE(0b, 0b)
15447+#endif
15448+
15449+ : "=m" (v->counter)
15450+ : "er" (i), "m" (v->counter));
15451+}
15452+
15453+/**
15454+ * atomic64_add_unchecked - add integer to atomic64 variable
15455+ * @i: integer value to add
15456+ * @v: pointer to type atomic64_unchecked_t
15457+ *
15458+ * Atomically adds @i to @v.
15459+ */
15460+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15461+{
15462 asm volatile(LOCK_PREFIX "addq %1,%0"
15463 : "=m" (v->counter)
15464 : "er" (i), "m" (v->counter));
15465@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15466 */
15467 static inline void atomic64_sub(long i, atomic64_t *v)
15468 {
15469- asm volatile(LOCK_PREFIX "subq %1,%0"
15470+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15471+
15472+#ifdef CONFIG_PAX_REFCOUNT
15473+ "jno 0f\n"
15474+ LOCK_PREFIX "addq %1,%0\n"
15475+ "int $4\n0:\n"
15476+ _ASM_EXTABLE(0b, 0b)
15477+#endif
15478+
15479+ : "=m" (v->counter)
15480+ : "er" (i), "m" (v->counter));
15481+}
15482+
15483+/**
15484+ * atomic64_sub_unchecked - subtract the atomic64 variable
15485+ * @i: integer value to subtract
15486+ * @v: pointer to type atomic64_unchecked_t
15487+ *
15488+ * Atomically subtracts @i from @v.
15489+ */
15490+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15491+{
15492+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15493 : "=m" (v->counter)
15494 : "er" (i), "m" (v->counter));
15495 }
15496@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15497 */
15498 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15499 {
15500- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15501+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15502 }
15503
15504 /**
15505@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15506 */
15507 static inline void atomic64_inc(atomic64_t *v)
15508 {
15509+ asm volatile(LOCK_PREFIX "incq %0\n"
15510+
15511+#ifdef CONFIG_PAX_REFCOUNT
15512+ "jno 0f\n"
15513+ LOCK_PREFIX "decq %0\n"
15514+ "int $4\n0:\n"
15515+ _ASM_EXTABLE(0b, 0b)
15516+#endif
15517+
15518+ : "=m" (v->counter)
15519+ : "m" (v->counter));
15520+}
15521+
15522+/**
15523+ * atomic64_inc_unchecked - increment atomic64 variable
15524+ * @v: pointer to type atomic64_unchecked_t
15525+ *
15526+ * Atomically increments @v by 1.
15527+ */
15528+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15529+{
15530 asm volatile(LOCK_PREFIX "incq %0"
15531 : "=m" (v->counter)
15532 : "m" (v->counter));
15533@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15534 */
15535 static inline void atomic64_dec(atomic64_t *v)
15536 {
15537- asm volatile(LOCK_PREFIX "decq %0"
15538+ asm volatile(LOCK_PREFIX "decq %0\n"
15539+
15540+#ifdef CONFIG_PAX_REFCOUNT
15541+ "jno 0f\n"
15542+ LOCK_PREFIX "incq %0\n"
15543+ "int $4\n0:\n"
15544+ _ASM_EXTABLE(0b, 0b)
15545+#endif
15546+
15547+ : "=m" (v->counter)
15548+ : "m" (v->counter));
15549+}
15550+
15551+/**
15552+ * atomic64_dec_unchecked - decrement atomic64 variable
15553+ * @v: pointer to type atomic64_unchecked_t
15554+ *
15555+ * Atomically decrements @v by 1.
15556+ */
15557+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15558+{
15559+ asm volatile(LOCK_PREFIX "decq %0\n"
15560 : "=m" (v->counter)
15561 : "m" (v->counter));
15562 }
15563@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15564 */
15565 static inline int atomic64_dec_and_test(atomic64_t *v)
15566 {
15567- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15568+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15569 }
15570
15571 /**
15572@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15573 */
15574 static inline int atomic64_inc_and_test(atomic64_t *v)
15575 {
15576- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15577+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15578 }
15579
15580 /**
15581@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15582 */
15583 static inline int atomic64_add_negative(long i, atomic64_t *v)
15584 {
15585- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15586+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15587 }
15588
15589 /**
15590@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15591 */
15592 static inline long atomic64_add_return(long i, atomic64_t *v)
15593 {
15594+ return i + xadd_check_overflow(&v->counter, i);
15595+}
15596+
15597+/**
15598+ * atomic64_add_return_unchecked - add and return
15599+ * @i: integer value to add
15600+ * @v: pointer to type atomic64_unchecked_t
15601+ *
15602+ * Atomically adds @i to @v and returns @i + @v
15603+ */
15604+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15605+{
15606 return i + xadd(&v->counter, i);
15607 }
15608
15609@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15610 }
15611
15612 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15613+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15614+{
15615+ return atomic64_add_return_unchecked(1, v);
15616+}
15617 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15618
15619 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15620@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15621 return cmpxchg(&v->counter, old, new);
15622 }
15623
15624+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15625+{
15626+ return cmpxchg(&v->counter, old, new);
15627+}
15628+
15629 static inline long atomic64_xchg(atomic64_t *v, long new)
15630 {
15631 return xchg(&v->counter, new);
15632@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15633 */
15634 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15635 {
15636- long c, old;
15637+ long c, old, new;
15638 c = atomic64_read(v);
15639 for (;;) {
15640- if (unlikely(c == (u)))
15641+ if (unlikely(c == u))
15642 break;
15643- old = atomic64_cmpxchg((v), c, c + (a));
15644+
15645+ asm volatile("add %2,%0\n"
15646+
15647+#ifdef CONFIG_PAX_REFCOUNT
15648+ "jno 0f\n"
15649+ "sub %2,%0\n"
15650+ "int $4\n0:\n"
15651+ _ASM_EXTABLE(0b, 0b)
15652+#endif
15653+
15654+ : "=r" (new)
15655+ : "0" (c), "ir" (a));
15656+
15657+ old = atomic64_cmpxchg(v, c, new);
15658 if (likely(old == c))
15659 break;
15660 c = old;
15661 }
15662- return c != (u);
15663+ return c != u;
15664 }
15665
15666 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
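[Editor's sketch] What happens when the jno branch is not taken: "int $4" raises vector 4, the x86 overflow trap, whose gate is reachable from user mode; under PAX_REFCOUNT the kernel-side handler additionally logs the event and saturates the counter. A runnable user-space sketch — the SIGSEGV disposition is an assumption about the stock trap-4 handling, not something the patch states:

	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void on_overflow(int sig)
	{
		printf("caught signal %d from int $4\n", sig);
		exit(0);
	}

	int main(void)
	{
		signal(SIGSEGV, on_overflow);	/* trap 4 is delivered as SIGSEGV */
		asm volatile("int $4");		/* what the jno path skips */
		return 1;			/* not reached */
	}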
15667diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
15668index 9fc1af7..fc71228 100644
15669--- a/arch/x86/include/asm/bitops.h
15670+++ b/arch/x86/include/asm/bitops.h
15671@@ -49,7 +49,7 @@
15672 * a mask operation on a byte.
15673 */
15674 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
15675-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
15676+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
15677 #define CONST_MASK(nr) (1 << ((nr) & 7))
15678
15679 /**
15680@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
15681 */
15682 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
15683 {
15684- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15685+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
15686 }
15687
15688 /**
15689@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
15690 */
15691 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
15692 {
15693- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15694+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
15695 }
15696
15697 /**
15698@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
15699 */
15700 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
15701 {
15702- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15703+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
15704 }
15705
15706 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
15707@@ -345,7 +345,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
15708 *
15709 * Undefined if no bit exists, so code should check against 0 first.
15710 */
15711-static inline unsigned long __ffs(unsigned long word)
15712+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
15713 {
15714 asm("rep; bsf %1,%0"
15715 : "=r" (word)
15716@@ -359,7 +359,7 @@ static inline unsigned long __ffs(unsigned long word)
15717 *
15718 * Undefined if no zero exists, so code should check against ~0UL first.
15719 */
15720-static inline unsigned long ffz(unsigned long word)
15721+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
15722 {
15723 asm("rep; bsf %1,%0"
15724 : "=r" (word)
15725@@ -373,7 +373,7 @@ static inline unsigned long ffz(unsigned long word)
15726 *
15727 * Undefined if no set bit exists, so code should check against 0 first.
15728 */
15729-static inline unsigned long __fls(unsigned long word)
15730+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
15731 {
15732 asm("bsr %1,%0"
15733 : "=r" (word)
15734@@ -436,7 +436,7 @@ static inline int ffs(int x)
15735 * set bit if value is nonzero. The last (most significant) bit is
15736 * at position 32.
15737 */
15738-static inline int fls(int x)
15739+static inline int __intentional_overflow(-1) fls(int x)
15740 {
15741 int r;
15742
15743@@ -478,7 +478,7 @@ static inline int fls(int x)
15744 * at position 64.
15745 */
15746 #ifdef CONFIG_X86_64
15747-static __always_inline int fls64(__u64 x)
15748+static __always_inline long fls64(__u64 x)
15749 {
15750 int bitpos = -1;
15751 /*
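[Editor's sketch] The __intentional_overflow(-1) annotations above whitelist these helpers for the size_overflow gcc plugin; they do not change behavior. For reference, the contracts of the annotated bit scanners, sketched with compiler builtins in place of the bsf/bsr assembly:

	#include <stdio.h>

	static unsigned long ffs_sketch(unsigned long w)	/* __ffs: lowest set bit, 0-based */
	{
		return __builtin_ctzl(w);	/* undefined for w == 0, like __ffs */
	}

	static int fls_sketch(int x)	/* fls: highest set bit, 1-based, 0 for x == 0 */
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		printf("__ffs(0x18)=%lu fls(0x18)=%d\n",
		       ffs_sketch(0x18), fls_sketch(0x18));
		/* prints __ffs(0x18)=3 fls(0x18)=5 */
		return 0;
	}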
15752diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
15753index 4fa687a..60f2d39 100644
15754--- a/arch/x86/include/asm/boot.h
15755+++ b/arch/x86/include/asm/boot.h
15756@@ -6,10 +6,15 @@
15757 #include <uapi/asm/boot.h>
15758
15759 /* Physical address where kernel should be loaded. */
15760-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15761+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15762 + (CONFIG_PHYSICAL_ALIGN - 1)) \
15763 & ~(CONFIG_PHYSICAL_ALIGN - 1))
15764
15765+#ifndef __ASSEMBLY__
15766+extern unsigned char __LOAD_PHYSICAL_ADDR[];
15767+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
15768+#endif
15769+
15770 /* Minimum kernel alignment, as a power of two */
15771 #ifdef CONFIG_X86_64
15772 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
15773diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
15774index 48f99f1..d78ebf9 100644
15775--- a/arch/x86/include/asm/cache.h
15776+++ b/arch/x86/include/asm/cache.h
15777@@ -5,12 +5,13 @@
15778
15779 /* L1 cache line size */
15780 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
15781-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15782+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
15783
15784 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
15785+#define __read_only __attribute__((__section__(".data..read_only")))
15786
15787 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
15788-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
15789+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
15790
15791 #ifdef CONFIG_X86_VSMP
15792 #ifdef CONFIG_SMP
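[Editor's sketch] __read_only complements __read_mostly: instead of merely grouping rarely-written data, it moves an object into .data..read_only, which other parts of the patch map without write permission after boot. A sketch of the mechanism with a made-up section name — the attribute only chooses the section; the protection comes from how that section is mapped:

	#include <stdio.h>

	#define __demo_read_only __attribute__((__section__(".data.demo_ro")))

	static int demo_table[4] __demo_read_only = { 1, 2, 3, 4 };

	int main(void)
	{
		printf("%d\n", demo_table[2]);
		return 0;
	}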
15793diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
15794index 9863ee3..4a1f8e1 100644
15795--- a/arch/x86/include/asm/cacheflush.h
15796+++ b/arch/x86/include/asm/cacheflush.h
15797@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
15798 unsigned long pg_flags = pg->flags & _PGMT_MASK;
15799
15800 if (pg_flags == _PGMT_DEFAULT)
15801- return -1;
15802+ return ~0UL;
15803 else if (pg_flags == _PGMT_WC)
15804 return _PAGE_CACHE_WC;
15805 else if (pg_flags == _PGMT_UC_MINUS)
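[Editor's sketch] get_page_memtype() returns unsigned long, so the error sentinel is now spelled as the unsigned all-ones value rather than the signed constant -1; the bit pattern is identical, the spelling simply matches the return type (and the overflow plugin's signedness analysis). A two-line check of the equivalence:

	#include <stdio.h>

	int main(void)
	{
		unsigned long sentinel = ~0UL;

		printf("%d\n", sentinel == (unsigned long)-1);	/* 1: same bits */
		return 0;
	}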
15806diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
15807index cb4c73b..c473c29 100644
15808--- a/arch/x86/include/asm/calling.h
15809+++ b/arch/x86/include/asm/calling.h
15810@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
15811 #define RSP 152
15812 #define SS 160
15813
15814-#define ARGOFFSET R11
15815-#define SWFRAME ORIG_RAX
15816+#define ARGOFFSET R15
15817
15818 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
15819- subq $9*8+\addskip, %rsp
15820- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
15821- movq_cfi rdi, 8*8
15822- movq_cfi rsi, 7*8
15823- movq_cfi rdx, 6*8
15824+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
15825+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
15826+ movq_cfi rdi, RDI
15827+ movq_cfi rsi, RSI
15828+ movq_cfi rdx, RDX
15829
15830 .if \save_rcx
15831- movq_cfi rcx, 5*8
15832+ movq_cfi rcx, RCX
15833 .endif
15834
15835- movq_cfi rax, 4*8
15836+ movq_cfi rax, RAX
15837
15838 .if \save_r891011
15839- movq_cfi r8, 3*8
15840- movq_cfi r9, 2*8
15841- movq_cfi r10, 1*8
15842- movq_cfi r11, 0*8
15843+ movq_cfi r8, R8
15844+ movq_cfi r9, R9
15845+ movq_cfi r10, R10
15846+ movq_cfi r11, R11
15847 .endif
15848
15849+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15850+ movq_cfi r12, R12
15851+#endif
15852+
15853 .endm
15854
15855-#define ARG_SKIP (9*8)
15856+#define ARG_SKIP ORIG_RAX
15857
15858 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
15859 rstor_r8910=1, rstor_rdx=1
15860+
15861+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15862+ movq_cfi_restore R12, r12
15863+#endif
15864+
15865 .if \rstor_r11
15866- movq_cfi_restore 0*8, r11
15867+ movq_cfi_restore R11, r11
15868 .endif
15869
15870 .if \rstor_r8910
15871- movq_cfi_restore 1*8, r10
15872- movq_cfi_restore 2*8, r9
15873- movq_cfi_restore 3*8, r8
15874+ movq_cfi_restore R10, r10
15875+ movq_cfi_restore R9, r9
15876+ movq_cfi_restore R8, r8
15877 .endif
15878
15879 .if \rstor_rax
15880- movq_cfi_restore 4*8, rax
15881+ movq_cfi_restore RAX, rax
15882 .endif
15883
15884 .if \rstor_rcx
15885- movq_cfi_restore 5*8, rcx
15886+ movq_cfi_restore RCX, rcx
15887 .endif
15888
15889 .if \rstor_rdx
15890- movq_cfi_restore 6*8, rdx
15891+ movq_cfi_restore RDX, rdx
15892 .endif
15893
15894- movq_cfi_restore 7*8, rsi
15895- movq_cfi_restore 8*8, rdi
15896+ movq_cfi_restore RSI, rsi
15897+ movq_cfi_restore RDI, rdi
15898
15899- .if ARG_SKIP+\addskip > 0
15900- addq $ARG_SKIP+\addskip, %rsp
15901- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
15902+ .if ORIG_RAX+\addskip > 0
15903+ addq $ORIG_RAX+\addskip, %rsp
15904+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
15905 .endif
15906 .endm
15907
15908- .macro LOAD_ARGS offset, skiprax=0
15909- movq \offset(%rsp), %r11
15910- movq \offset+8(%rsp), %r10
15911- movq \offset+16(%rsp), %r9
15912- movq \offset+24(%rsp), %r8
15913- movq \offset+40(%rsp), %rcx
15914- movq \offset+48(%rsp), %rdx
15915- movq \offset+56(%rsp), %rsi
15916- movq \offset+64(%rsp), %rdi
15917+ .macro LOAD_ARGS skiprax=0
15918+ movq R11(%rsp), %r11
15919+ movq R10(%rsp), %r10
15920+ movq R9(%rsp), %r9
15921+ movq R8(%rsp), %r8
15922+ movq RCX(%rsp), %rcx
15923+ movq RDX(%rsp), %rdx
15924+ movq RSI(%rsp), %rsi
15925+ movq RDI(%rsp), %rdi
15926 .if \skiprax
15927 .else
15928- movq \offset+72(%rsp), %rax
15929+ movq RAX(%rsp), %rax
15930 .endif
15931 .endm
15932
15933-#define REST_SKIP (6*8)
15934-
15935 .macro SAVE_REST
15936- subq $REST_SKIP, %rsp
15937- CFI_ADJUST_CFA_OFFSET REST_SKIP
15938- movq_cfi rbx, 5*8
15939- movq_cfi rbp, 4*8
15940- movq_cfi r12, 3*8
15941- movq_cfi r13, 2*8
15942- movq_cfi r14, 1*8
15943- movq_cfi r15, 0*8
15944+ movq_cfi rbx, RBX
15945+ movq_cfi rbp, RBP
15946+
15947+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15948+ movq_cfi r12, R12
15949+#endif
15950+
15951+ movq_cfi r13, R13
15952+ movq_cfi r14, R14
15953+ movq_cfi r15, R15
15954 .endm
15955
15956 .macro RESTORE_REST
15957- movq_cfi_restore 0*8, r15
15958- movq_cfi_restore 1*8, r14
15959- movq_cfi_restore 2*8, r13
15960- movq_cfi_restore 3*8, r12
15961- movq_cfi_restore 4*8, rbp
15962- movq_cfi_restore 5*8, rbx
15963- addq $REST_SKIP, %rsp
15964- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
15965+ movq_cfi_restore R15, r15
15966+ movq_cfi_restore R14, r14
15967+ movq_cfi_restore R13, r13
15968+
15969+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15970+ movq_cfi_restore R12, r12
15971+#endif
15972+
15973+ movq_cfi_restore RBP, rbp
15974+ movq_cfi_restore RBX, rbx
15975 .endm
15976
15977 .macro SAVE_ALL
15978diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
15979index f50de69..2b0a458 100644
15980--- a/arch/x86/include/asm/checksum_32.h
15981+++ b/arch/x86/include/asm/checksum_32.h
15982@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
15983 int len, __wsum sum,
15984 int *src_err_ptr, int *dst_err_ptr);
15985
15986+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
15987+ int len, __wsum sum,
15988+ int *src_err_ptr, int *dst_err_ptr);
15989+
15990+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
15991+ int len, __wsum sum,
15992+ int *src_err_ptr, int *dst_err_ptr);
15993+
15994 /*
15995 * Note: when you get a NULL pointer exception here this means someone
15996 * passed in an incorrect kernel address to one of these functions.
15997@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
15998
15999 might_sleep();
16000 stac();
16001- ret = csum_partial_copy_generic((__force void *)src, dst,
16002+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16003 len, sum, err_ptr, NULL);
16004 clac();
16005
16006@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16007 might_sleep();
16008 if (access_ok(VERIFY_WRITE, dst, len)) {
16009 stac();
16010- ret = csum_partial_copy_generic(src, (__force void *)dst,
16011+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16012 len, sum, NULL, err_ptr);
16013 clac();
16014 return ret;
16015diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16016index d47786a..ce1b05d 100644
16017--- a/arch/x86/include/asm/cmpxchg.h
16018+++ b/arch/x86/include/asm/cmpxchg.h
16019@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
16020 __compiletime_error("Bad argument size for cmpxchg");
16021 extern void __xadd_wrong_size(void)
16022 __compiletime_error("Bad argument size for xadd");
16023+extern void __xadd_check_overflow_wrong_size(void)
16024+ __compiletime_error("Bad argument size for xadd_check_overflow");
16025 extern void __add_wrong_size(void)
16026 __compiletime_error("Bad argument size for add");
16027+extern void __add_check_overflow_wrong_size(void)
16028+ __compiletime_error("Bad argument size for add_check_overflow");
16029
16030 /*
16031 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16032@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
16033 __ret; \
16034 })
16035
16036+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16037+ ({ \
16038+ __typeof__ (*(ptr)) __ret = (arg); \
16039+ switch (sizeof(*(ptr))) { \
16040+ case __X86_CASE_L: \
16041+ asm volatile (lock #op "l %0, %1\n" \
16042+ "jno 0f\n" \
16043+ "mov %0,%1\n" \
16044+ "int $4\n0:\n" \
16045+ _ASM_EXTABLE(0b, 0b) \
16046+ : "+r" (__ret), "+m" (*(ptr)) \
16047+ : : "memory", "cc"); \
16048+ break; \
16049+ case __X86_CASE_Q: \
16050+ asm volatile (lock #op "q %q0, %1\n" \
16051+ "jno 0f\n" \
16052+ "mov %0,%1\n" \
16053+ "int $4\n0:\n" \
16054+ _ASM_EXTABLE(0b, 0b) \
16055+ : "+r" (__ret), "+m" (*(ptr)) \
16056+ : : "memory", "cc"); \
16057+ break; \
16058+ default: \
16059+ __ ## op ## _check_overflow_wrong_size(); \
16060+ } \
16061+ __ret; \
16062+ })
16063+
16064 /*
16065 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16066 * Since this is generally used to protect other memory information, we
16067@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
16068 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16069 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16070
16071+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16072+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16073+
16074 #define __add(ptr, inc, lock) \
16075 ({ \
16076 __typeof__ (*(ptr)) __ret = (inc); \
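[Editor's sketch] xadd_check_overflow() is the building block the atomic *_return paths switch to: a lock xadd whose result is jno-tested, with the pre-add value written back to memory and "int $4" raised on signed overflow. The unchecked shape of that building block, sketched with a builtin standing in for lock xadd:

	#include <stdio.h>

	static long add_return_sketch(long *counter, long i)
	{
		/* fetch_add returns the old value; the new value is old + i.
		 * The checked variant would trap instead of returning when
		 * the add overflows a signed long. */
		return __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST) + i;
	}

	int main(void)
	{
		long c = 40;

		printf("ret=%ld counter=%ld\n", add_return_sketch(&c, 2), c);	/* 42 42 */
		return 0;
	}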
16077diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16078index 59c6c40..5e0b22c 100644
16079--- a/arch/x86/include/asm/compat.h
16080+++ b/arch/x86/include/asm/compat.h
16081@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16082 typedef u32 compat_uint_t;
16083 typedef u32 compat_ulong_t;
16084 typedef u64 __attribute__((aligned(4))) compat_u64;
16085-typedef u32 compat_uptr_t;
16086+typedef u32 __user compat_uptr_t;
16087
16088 struct compat_timespec {
16089 compat_time_t tv_sec;
16090diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16091index 89270b4..f0abf8e 100644
16092--- a/arch/x86/include/asm/cpufeature.h
16093+++ b/arch/x86/include/asm/cpufeature.h
16094@@ -203,7 +203,7 @@
16095 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
16096 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
16097 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
16098-
16099+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16100
16101 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16102 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16103@@ -211,7 +211,7 @@
16104 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
16105 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
16106 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
16107-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
16108+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
16109 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
16110 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16111 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
16112@@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
16113 #undef cpu_has_centaur_mcr
16114 #define cpu_has_centaur_mcr 0
16115
16116+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16117 #endif /* CONFIG_X86_64 */
16118
16119 #if __GNUC__ >= 4
16120@@ -405,7 +406,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16121
16122 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16123 t_warn:
16124- warn_pre_alternatives();
16125+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16126+ warn_pre_alternatives();
16127 return false;
16128 #endif
16129
16130@@ -425,7 +427,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16131 ".section .discard,\"aw\",@progbits\n"
16132 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16133 ".previous\n"
16134- ".section .altinstr_replacement,\"ax\"\n"
16135+ ".section .altinstr_replacement,\"a\"\n"
16136 "3: movb $1,%0\n"
16137 "4:\n"
16138 ".previous\n"
16139@@ -462,7 +464,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16140 " .byte 2b - 1b\n" /* src len */
16141 " .byte 4f - 3f\n" /* repl len */
16142 ".previous\n"
16143- ".section .altinstr_replacement,\"ax\"\n"
16144+ ".section .altinstr_replacement,\"a\"\n"
16145 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16146 "4:\n"
16147 ".previous\n"
16148@@ -495,7 +497,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16149 ".section .discard,\"aw\",@progbits\n"
16150 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16151 ".previous\n"
16152- ".section .altinstr_replacement,\"ax\"\n"
16153+ ".section .altinstr_replacement,\"a\"\n"
16154 "3: movb $0,%0\n"
16155 "4:\n"
16156 ".previous\n"
16157@@ -509,7 +511,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16158 ".section .discard,\"aw\",@progbits\n"
16159 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16160 ".previous\n"
16161- ".section .altinstr_replacement,\"ax\"\n"
16162+ ".section .altinstr_replacement,\"a\"\n"
16163 "5: movb $1,%0\n"
16164 "6:\n"
16165 ".previous\n"
16166diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16167index 50d033a..37deb26 100644
16168--- a/arch/x86/include/asm/desc.h
16169+++ b/arch/x86/include/asm/desc.h
16170@@ -4,6 +4,7 @@
16171 #include <asm/desc_defs.h>
16172 #include <asm/ldt.h>
16173 #include <asm/mmu.h>
16174+#include <asm/pgtable.h>
16175
16176 #include <linux/smp.h>
16177 #include <linux/percpu.h>
16178@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16179
16180 desc->type = (info->read_exec_only ^ 1) << 1;
16181 desc->type |= info->contents << 2;
16182+ desc->type |= info->seg_not_present ^ 1;
16183
16184 desc->s = 1;
16185 desc->dpl = 0x3;
16186@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16187 }
16188
16189 extern struct desc_ptr idt_descr;
16190-extern gate_desc idt_table[];
16191-extern struct desc_ptr debug_idt_descr;
16192-extern gate_desc debug_idt_table[];
16193-
16194-struct gdt_page {
16195- struct desc_struct gdt[GDT_ENTRIES];
16196-} __attribute__((aligned(PAGE_SIZE)));
16197-
16198-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16199+extern gate_desc idt_table[IDT_ENTRIES];
16200+extern const struct desc_ptr debug_idt_descr;
16201+extern gate_desc debug_idt_table[IDT_ENTRIES];
16202
16203+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16204 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16205 {
16206- return per_cpu(gdt_page, cpu).gdt;
16207+ return cpu_gdt_table[cpu];
16208 }
16209
16210 #ifdef CONFIG_X86_64
16211@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16212 unsigned long base, unsigned dpl, unsigned flags,
16213 unsigned short seg)
16214 {
16215- gate->a = (seg << 16) | (base & 0xffff);
16216- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16217+ gate->gate.offset_low = base;
16218+ gate->gate.seg = seg;
16219+ gate->gate.reserved = 0;
16220+ gate->gate.type = type;
16221+ gate->gate.s = 0;
16222+ gate->gate.dpl = dpl;
16223+ gate->gate.p = 1;
16224+ gate->gate.offset_high = base >> 16;
16225 }
16226
16227 #endif
16228@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16229
16230 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16231 {
16232+ pax_open_kernel();
16233 memcpy(&idt[entry], gate, sizeof(*gate));
16234+ pax_close_kernel();
16235 }
16236
16237 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16238 {
16239+ pax_open_kernel();
16240 memcpy(&ldt[entry], desc, 8);
16241+ pax_close_kernel();
16242 }
16243
16244 static inline void
16245@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16246 default: size = sizeof(*gdt); break;
16247 }
16248
16249+ pax_open_kernel();
16250 memcpy(&gdt[entry], desc, size);
16251+ pax_close_kernel();
16252 }
16253
16254 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16255@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16256
16257 static inline void native_load_tr_desc(void)
16258 {
16259+ pax_open_kernel();
16260 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16261+ pax_close_kernel();
16262 }
16263
16264 static inline void native_load_gdt(const struct desc_ptr *dtr)
16265@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16266 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16267 unsigned int i;
16268
16269+ pax_open_kernel();
16270 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16271 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16272+ pax_close_kernel();
16273 }
16274
16275 #define _LDT_empty(info) \
16276@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
16277 preempt_enable();
16278 }
16279
16280-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16281+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16282 {
16283 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16284 }
16285@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16286 }
16287
16288 #ifdef CONFIG_X86_64
16289-static inline void set_nmi_gate(int gate, void *addr)
16290+static inline void set_nmi_gate(int gate, const void *addr)
16291 {
16292 gate_desc s;
16293
16294@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16295 #endif
16296
16297 #ifdef CONFIG_TRACING
16298-extern struct desc_ptr trace_idt_descr;
16299-extern gate_desc trace_idt_table[];
16300+extern const struct desc_ptr trace_idt_descr;
16301+extern gate_desc trace_idt_table[IDT_ENTRIES];
16302 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16303 {
16304 write_idt_entry(trace_idt_table, entry, gate);
16305 }
16306
16307-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16308+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16309 unsigned dpl, unsigned ist, unsigned seg)
16310 {
16311 gate_desc s;
16312@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16313 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16314 #endif
16315
16316-static inline void _set_gate(int gate, unsigned type, void *addr,
16317+static inline void _set_gate(int gate, unsigned type, const void *addr,
16318 unsigned dpl, unsigned ist, unsigned seg)
16319 {
16320 gate_desc s;
16321@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16322 #define set_intr_gate(n, addr) \
16323 do { \
16324 BUG_ON((unsigned)n > 0xFF); \
16325- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16326+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16327 __KERNEL_CS); \
16328- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16329+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16330 0, 0, __KERNEL_CS); \
16331 } while (0)
16332
16333@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
16334 /*
16335 * This routine sets up an interrupt gate at directory privilege level 3.
16336 */
16337-static inline void set_system_intr_gate(unsigned int n, void *addr)
16338+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16339 {
16340 BUG_ON((unsigned)n > 0xFF);
16341 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16342 }
16343
16344-static inline void set_system_trap_gate(unsigned int n, void *addr)
16345+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16346 {
16347 BUG_ON((unsigned)n > 0xFF);
16348 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16349 }
16350
16351-static inline void set_trap_gate(unsigned int n, void *addr)
16352+static inline void set_trap_gate(unsigned int n, const void *addr)
16353 {
16354 BUG_ON((unsigned)n > 0xFF);
16355 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16356@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16357 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16358 {
16359 BUG_ON((unsigned)n > 0xFF);
16360- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16361+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16362 }
16363
16364-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16365+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16366 {
16367 BUG_ON((unsigned)n > 0xFF);
16368 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16369 }
16370
16371-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16372+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16373 {
16374 BUG_ON((unsigned)n > 0xFF);
16375 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16376@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
16377 else
16378 load_idt((const struct desc_ptr *)&idt_descr);
16379 }
16380+
16381+#ifdef CONFIG_X86_32
16382+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16383+{
16384+ struct desc_struct d;
16385+
16386+ if (likely(limit))
16387+ limit = (limit - 1UL) >> PAGE_SHIFT;
16388+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16389+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16390+}
16391+#endif
16392+
16393 #endif /* _ASM_X86_DESC_H */
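[Editor's sketch] Every write to a descriptor table above is now bracketed by pax_open_kernel()/pax_close_kernel(), because the patch keeps the GDT/IDT/LDT read-only under KERNEXEC. A user-space analogue of that discipline, using mprotect() where the kernel toggles CR0.WP or the page tables:

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 4096;
		unsigned char *tbl = mmap(NULL, len, PROT_READ,
					  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (tbl == MAP_FAILED)
			return 1;

		mprotect(tbl, len, PROT_READ | PROT_WRITE);	/* "pax_open_kernel" */
		memset(tbl, 0x90, 16);				/* the descriptor write */
		mprotect(tbl, len, PROT_READ);			/* "pax_close_kernel" */

		printf("tbl[0]=%#x\n", tbl[0]);
		return 0;
	}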
16394diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16395index 278441f..b95a174 100644
16396--- a/arch/x86/include/asm/desc_defs.h
16397+++ b/arch/x86/include/asm/desc_defs.h
16398@@ -31,6 +31,12 @@ struct desc_struct {
16399 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16400 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16401 };
16402+ struct {
16403+ u16 offset_low;
16404+ u16 seg;
16405+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16406+ unsigned offset_high: 16;
16407+ } gate;
16408 };
16409 } __attribute__((packed));
16410
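[Editor's sketch] The anonymous "gate" view added above overlays the same eight bytes as the existing a/b word pair, so pack_gate() can assign fields by name instead of shifting and masking. A standalone check that the bit-field layout really packs to the size of a 32-bit gate descriptor:

	#include <stdint.h>
	#include <stdio.h>

	struct gate_sketch {
		uint16_t offset_low;
		uint16_t seg;
		unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
		unsigned offset_high : 16;
	} __attribute__((packed));

	int main(void)
	{
		printf("sizeof(gate)=%zu\n", sizeof(struct gate_sketch));	/* 8 */
		return 0;
	}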
16411diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16412index ced283a..ffe04cc 100644
16413--- a/arch/x86/include/asm/div64.h
16414+++ b/arch/x86/include/asm/div64.h
16415@@ -39,7 +39,7 @@
16416 __mod; \
16417 })
16418
16419-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16420+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16421 {
16422 union {
16423 u64 v64;
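[Editor's sketch] div_u64_rem() keeps its contract — 64-bit dividend, 32-bit divisor, quotient returned and remainder stored through the pointer — and the annotation only silences the overflow plugin. The contract, sketched portably:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t div_u64_rem_sketch(uint64_t dividend, uint32_t divisor,
					   uint32_t *remainder)
	{
		*remainder = (uint32_t)(dividend % divisor);
		return dividend / divisor;
	}

	int main(void)
	{
		uint32_t rem;
		uint64_t q = div_u64_rem_sketch(1000000007ULL, 3600, &rem);

		printf("q=%llu rem=%u\n", (unsigned long long)q, rem);	/* q=277777 rem=2807 */
		return 0;
	}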
16424diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16425index 9c999c1..3860cb8 100644
16426--- a/arch/x86/include/asm/elf.h
16427+++ b/arch/x86/include/asm/elf.h
16428@@ -243,7 +243,25 @@ extern int force_personality32;
16429 the loader. We need to make sure that it is out of the way of the program
16430 that it will "exec", and that there is sufficient room for the brk. */
16431
16432+#ifdef CONFIG_PAX_SEGMEXEC
16433+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16434+#else
16435 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16436+#endif
16437+
16438+#ifdef CONFIG_PAX_ASLR
16439+#ifdef CONFIG_X86_32
16440+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16441+
16442+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16443+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16444+#else
16445+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16446+
16447+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16448+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16449+#endif
16450+#endif
16451
16452 /* This yields a mask that user programs can use to figure out what
16453 instruction set this CPU supports. This could be done in user space,
16454@@ -296,16 +314,12 @@ do { \
16455
16456 #define ARCH_DLINFO \
16457 do { \
16458- if (vdso_enabled) \
16459- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16460- (unsigned long)current->mm->context.vdso); \
16461+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16462 } while (0)
16463
16464 #define ARCH_DLINFO_X32 \
16465 do { \
16466- if (vdso_enabled) \
16467- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16468- (unsigned long)current->mm->context.vdso); \
16469+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16470 } while (0)
16471
16472 #define AT_SYSINFO 32
16473@@ -320,7 +334,7 @@ else \
16474
16475 #endif /* !CONFIG_X86_32 */
16476
16477-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16478+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16479
16480 #define VDSO_ENTRY \
16481 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
16482@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
16483 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
16484 #define compat_arch_setup_additional_pages syscall32_setup_pages
16485
16486-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16487-#define arch_randomize_brk arch_randomize_brk
16488-
16489 /*
16490 * True on X86_32 or when emulating IA32 on X86_64
16491 */
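[Editor's sketch] The PAX_DELTA_* macros size the ASLR deltas in bits of page-granular entropy. On amd64 outside of 32-bit emulation that works out as below — the shift values are assumptions pulled from the usual x86_64 constants, not stated in this hunk:

	#include <stdio.h>

	int main(void)
	{
		int task_size_max_shift = 47, page_shift = 12;	/* assumed values */

		/* PAX_DELTA_MMAP_LEN = TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 */
		printf("mmap/stack delta: %d bits\n",
		       task_size_max_shift - page_shift - 3);
		/* 32 bits of entropy, applied at page granularity */
		return 0;
	}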
16492diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16493index 77a99ac..39ff7f5 100644
16494--- a/arch/x86/include/asm/emergency-restart.h
16495+++ b/arch/x86/include/asm/emergency-restart.h
16496@@ -1,6 +1,6 @@
16497 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16498 #define _ASM_X86_EMERGENCY_RESTART_H
16499
16500-extern void machine_emergency_restart(void);
16501+extern void machine_emergency_restart(void) __noreturn;
16502
16503 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16504diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16505index d3d7469..677ef72 100644
16506--- a/arch/x86/include/asm/floppy.h
16507+++ b/arch/x86/include/asm/floppy.h
16508@@ -229,18 +229,18 @@ static struct fd_routine_l {
16509 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16510 } fd_routine[] = {
16511 {
16512- request_dma,
16513- free_dma,
16514- get_dma_residue,
16515- dma_mem_alloc,
16516- hard_dma_setup
16517+ ._request_dma = request_dma,
16518+ ._free_dma = free_dma,
16519+ ._get_dma_residue = get_dma_residue,
16520+ ._dma_mem_alloc = dma_mem_alloc,
16521+ ._dma_setup = hard_dma_setup
16522 },
16523 {
16524- vdma_request_dma,
16525- vdma_nop,
16526- vdma_get_dma_residue,
16527- vdma_mem_alloc,
16528- vdma_dma_setup
16529+ ._request_dma = vdma_request_dma,
16530+ ._free_dma = vdma_nop,
16531+ ._get_dma_residue = vdma_get_dma_residue,
16532+ ._dma_mem_alloc = vdma_mem_alloc,
16533+ ._dma_setup = vdma_dma_setup
16534 }
16535 };
16536
16537diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16538index cea1c76..6c0d79b 100644
16539--- a/arch/x86/include/asm/fpu-internal.h
16540+++ b/arch/x86/include/asm/fpu-internal.h
16541@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16542 #define user_insn(insn, output, input...) \
16543 ({ \
16544 int err; \
16545+ pax_open_userland(); \
16546 asm volatile(ASM_STAC "\n" \
16547- "1:" #insn "\n\t" \
16548+ "1:" \
16549+ __copyuser_seg \
16550+ #insn "\n\t" \
16551 "2: " ASM_CLAC "\n" \
16552 ".section .fixup,\"ax\"\n" \
16553 "3: movl $-1,%[err]\n" \
16554@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16555 _ASM_EXTABLE(1b, 3b) \
16556 : [err] "=r" (err), output \
16557 : "0"(0), input); \
16558+ pax_close_userland(); \
16559 err; \
16560 })
16561
16562@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16563 "fnclex\n\t"
16564 "emms\n\t"
16565 "fildl %P[addr]" /* set F?P to defined value */
16566- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16567+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16568 }
16569
16570 return fpu_restore_checking(&tsk->thread.fpu);
16571diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16572index be27ba1..04a8801 100644
16573--- a/arch/x86/include/asm/futex.h
16574+++ b/arch/x86/include/asm/futex.h
16575@@ -12,6 +12,7 @@
16576 #include <asm/smap.h>
16577
16578 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16579+ typecheck(u32 __user *, uaddr); \
16580 asm volatile("\t" ASM_STAC "\n" \
16581 "1:\t" insn "\n" \
16582 "2:\t" ASM_CLAC "\n" \
16583@@ -20,15 +21,16 @@
16584 "\tjmp\t2b\n" \
16585 "\t.previous\n" \
16586 _ASM_EXTABLE(1b, 3b) \
16587- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16588+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16589 : "i" (-EFAULT), "0" (oparg), "1" (0))
16590
16591 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16592+ typecheck(u32 __user *, uaddr); \
16593 asm volatile("\t" ASM_STAC "\n" \
16594 "1:\tmovl %2, %0\n" \
16595 "\tmovl\t%0, %3\n" \
16596 "\t" insn "\n" \
16597- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16598+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16599 "\tjnz\t1b\n" \
16600 "3:\t" ASM_CLAC "\n" \
16601 "\t.section .fixup,\"ax\"\n" \
16602@@ -38,7 +40,7 @@
16603 _ASM_EXTABLE(1b, 4b) \
16604 _ASM_EXTABLE(2b, 4b) \
16605 : "=&a" (oldval), "=&r" (ret), \
16606- "+m" (*uaddr), "=&r" (tem) \
16607+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16608 : "r" (oparg), "i" (-EFAULT), "1" (0))
16609
16610 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16611@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16612
16613 pagefault_disable();
16614
16615+ pax_open_userland();
16616 switch (op) {
16617 case FUTEX_OP_SET:
16618- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16619+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16620 break;
16621 case FUTEX_OP_ADD:
16622- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16623+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16624 uaddr, oparg);
16625 break;
16626 case FUTEX_OP_OR:
16627@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16628 default:
16629 ret = -ENOSYS;
16630 }
16631+ pax_close_userland();
16632
16633 pagefault_enable();
16634
16635@@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
16636 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
16637 return -EFAULT;
16638
16639+ pax_open_userland();
16640 asm volatile("\t" ASM_STAC "\n"
16641- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
16642+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
16643 "2:\t" ASM_CLAC "\n"
16644 "\t.section .fixup, \"ax\"\n"
16645 "3:\tmov %3, %0\n"
16646 "\tjmp 2b\n"
16647 "\t.previous\n"
16648 _ASM_EXTABLE(1b, 3b)
16649- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
16650+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
16651 : "i" (-EFAULT), "r" (newval), "1" (oldval)
16652 : "memory"
16653 );
16654+ pax_close_userland();
16655
16656 *uval = oldval;
16657 return ret;
16658diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16659index cba45d9..86344ba 100644
16660--- a/arch/x86/include/asm/hw_irq.h
16661+++ b/arch/x86/include/asm/hw_irq.h
16662@@ -165,8 +165,8 @@ extern void setup_ioapic_dest(void);
16663 extern void enable_IO_APIC(void);
16664
16665 /* Statistics */
16666-extern atomic_t irq_err_count;
16667-extern atomic_t irq_mis_count;
16668+extern atomic_unchecked_t irq_err_count;
16669+extern atomic_unchecked_t irq_mis_count;
16670
16671 /* EISA */
16672 extern void eisa_set_level_irq(unsigned int irq);
16673diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
16674index a203659..9889f1c 100644
16675--- a/arch/x86/include/asm/i8259.h
16676+++ b/arch/x86/include/asm/i8259.h
16677@@ -62,7 +62,7 @@ struct legacy_pic {
16678 void (*init)(int auto_eoi);
16679 int (*irq_pending)(unsigned int irq);
16680 void (*make_irq)(unsigned int irq);
16681-};
16682+} __do_const;
16683
16684 extern struct legacy_pic *legacy_pic;
16685 extern struct legacy_pic null_legacy_pic;
16686diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
16687index 34f69cb..6d95446 100644
16688--- a/arch/x86/include/asm/io.h
16689+++ b/arch/x86/include/asm/io.h
16690@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
16691 "m" (*(volatile type __force *)addr) barrier); }
16692
16693 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
16694-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
16695-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
16696+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
16697+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
16698
16699 build_mmio_read(__readb, "b", unsigned char, "=q", )
16700-build_mmio_read(__readw, "w", unsigned short, "=r", )
16701-build_mmio_read(__readl, "l", unsigned int, "=r", )
16702+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
16703+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
16704
16705 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
16706 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
16707@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
16708 return ioremap_nocache(offset, size);
16709 }
16710
16711-extern void iounmap(volatile void __iomem *addr);
16712+extern void iounmap(const volatile void __iomem *addr);
16713
16714 extern void set_iounmap_nonlazy(void);
16715
16716@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
16717
16718 #include <linux/vmalloc.h>
16719
16720+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
16721+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
16722+{
16723+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16724+}
16725+
16726+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
16727+{
16728+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16729+}
16730+
16731 /*
16732 * Convert a virtual cached pointer to an uncached pointer
16733 */
16734diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
16735index bba3cf8..06bc8da 100644
16736--- a/arch/x86/include/asm/irqflags.h
16737+++ b/arch/x86/include/asm/irqflags.h
16738@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
16739 sti; \
16740 sysexit
16741
16742+#define GET_CR0_INTO_RDI mov %cr0, %rdi
16743+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
16744+#define GET_CR3_INTO_RDI mov %cr3, %rdi
16745+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
16746+
16747 #else
16748 #define INTERRUPT_RETURN iret
16749 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
16750diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
16751index 9454c16..e4100e3 100644
16752--- a/arch/x86/include/asm/kprobes.h
16753+++ b/arch/x86/include/asm/kprobes.h
16754@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
16755 #define RELATIVEJUMP_SIZE 5
16756 #define RELATIVECALL_OPCODE 0xe8
16757 #define RELATIVE_ADDR_SIZE 4
16758-#define MAX_STACK_SIZE 64
16759-#define MIN_STACK_SIZE(ADDR) \
16760- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
16761- THREAD_SIZE - (unsigned long)(ADDR))) \
16762- ? (MAX_STACK_SIZE) \
16763- : (((unsigned long)current_thread_info()) + \
16764- THREAD_SIZE - (unsigned long)(ADDR)))
16765+#define MAX_STACK_SIZE 64UL
16766+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
16767
16768 #define flush_insn_slot(p) do { } while (0)
16769
16770diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
16771index 4ad6560..75c7bdd 100644
16772--- a/arch/x86/include/asm/local.h
16773+++ b/arch/x86/include/asm/local.h
16774@@ -10,33 +10,97 @@ typedef struct {
16775 atomic_long_t a;
16776 } local_t;
16777
16778+typedef struct {
16779+ atomic_long_unchecked_t a;
16780+} local_unchecked_t;
16781+
16782 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
16783
16784 #define local_read(l) atomic_long_read(&(l)->a)
16785+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
16786 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
16787+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
16788
16789 static inline void local_inc(local_t *l)
16790 {
16791- asm volatile(_ASM_INC "%0"
16792+ asm volatile(_ASM_INC "%0\n"
16793+
16794+#ifdef CONFIG_PAX_REFCOUNT
16795+ "jno 0f\n"
16796+ _ASM_DEC "%0\n"
16797+ "int $4\n0:\n"
16798+ _ASM_EXTABLE(0b, 0b)
16799+#endif
16800+
16801+ : "+m" (l->a.counter));
16802+}
16803+
16804+static inline void local_inc_unchecked(local_unchecked_t *l)
16805+{
16806+ asm volatile(_ASM_INC "%0\n"
16807 : "+m" (l->a.counter));
16808 }
16809
16810 static inline void local_dec(local_t *l)
16811 {
16812- asm volatile(_ASM_DEC "%0"
16813+ asm volatile(_ASM_DEC "%0\n"
16814+
16815+#ifdef CONFIG_PAX_REFCOUNT
16816+ "jno 0f\n"
16817+ _ASM_INC "%0\n"
16818+ "int $4\n0:\n"
16819+ _ASM_EXTABLE(0b, 0b)
16820+#endif
16821+
16822+ : "+m" (l->a.counter));
16823+}
16824+
16825+static inline void local_dec_unchecked(local_unchecked_t *l)
16826+{
16827+ asm volatile(_ASM_DEC "%0\n"
16828 : "+m" (l->a.counter));
16829 }
16830
16831 static inline void local_add(long i, local_t *l)
16832 {
16833- asm volatile(_ASM_ADD "%1,%0"
16834+ asm volatile(_ASM_ADD "%1,%0\n"
16835+
16836+#ifdef CONFIG_PAX_REFCOUNT
16837+ "jno 0f\n"
16838+ _ASM_SUB "%1,%0\n"
16839+ "int $4\n0:\n"
16840+ _ASM_EXTABLE(0b, 0b)
16841+#endif
16842+
16843+ : "+m" (l->a.counter)
16844+ : "ir" (i));
16845+}
16846+
16847+static inline void local_add_unchecked(long i, local_unchecked_t *l)
16848+{
16849+ asm volatile(_ASM_ADD "%1,%0\n"
16850 : "+m" (l->a.counter)
16851 : "ir" (i));
16852 }
16853
16854 static inline void local_sub(long i, local_t *l)
16855 {
16856- asm volatile(_ASM_SUB "%1,%0"
16857+ asm volatile(_ASM_SUB "%1,%0\n"
16858+
16859+#ifdef CONFIG_PAX_REFCOUNT
16860+ "jno 0f\n"
16861+ _ASM_ADD "%1,%0\n"
16862+ "int $4\n0:\n"
16863+ _ASM_EXTABLE(0b, 0b)
16864+#endif
16865+
16866+ : "+m" (l->a.counter)
16867+ : "ir" (i));
16868+}
16869+
16870+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
16871+{
16872+ asm volatile(_ASM_SUB "%1,%0\n"
16873 : "+m" (l->a.counter)
16874 : "ir" (i));
16875 }
16876@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
16877 */
16878 static inline int local_sub_and_test(long i, local_t *l)
16879 {
16880- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
16881+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
16882 }
16883
16884 /**
16885@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
16886 */
16887 static inline int local_dec_and_test(local_t *l)
16888 {
16889- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
16890+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
16891 }
16892
16893 /**
16894@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
16895 */
16896 static inline int local_inc_and_test(local_t *l)
16897 {
16898- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
16899+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
16900 }
16901
16902 /**
16903@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
16904 */
16905 static inline int local_add_negative(long i, local_t *l)
16906 {
16907- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
16908+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
16909 }
16910
16911 /**
16912@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
16913 static inline long local_add_return(long i, local_t *l)
16914 {
16915 long __i = i;
16916+ asm volatile(_ASM_XADD "%0, %1\n"
16917+
16918+#ifdef CONFIG_PAX_REFCOUNT
16919+ "jno 0f\n"
16920+ _ASM_MOV "%0,%1\n"
16921+ "int $4\n0:\n"
16922+ _ASM_EXTABLE(0b, 0b)
16923+#endif
16924+
16925+ : "+r" (i), "+m" (l->a.counter)
16926+ : : "memory");
16927+ return i + __i;
16928+}
16929+
16930+/**
16931+ * local_add_return_unchecked - add and return
16932+ * @i: integer value to add
16933+ * @l: pointer to type local_unchecked_t
16934+ *
16935+ * Atomically adds @i to @l and returns @i + @l
16936+ */
16937+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
16938+{
16939+ long __i = i;
16940 asm volatile(_ASM_XADD "%0, %1;"
16941 : "+r" (i), "+m" (l->a.counter)
16942 : : "memory");
16943@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
16944
16945 #define local_cmpxchg(l, o, n) \
16946 (cmpxchg_local(&((l)->a.counter), (o), (n)))
16947+#define local_cmpxchg_unchecked(l, o, n) \
16948+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
16949 /* Always has a lock prefix */
16950 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
16951
16952diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
16953new file mode 100644
16954index 0000000..2bfd3ba
16955--- /dev/null
16956+++ b/arch/x86/include/asm/mman.h
16957@@ -0,0 +1,15 @@
16958+#ifndef _X86_MMAN_H
16959+#define _X86_MMAN_H
16960+
16961+#include <uapi/asm/mman.h>
16962+
16963+#ifdef __KERNEL__
16964+#ifndef __ASSEMBLY__
16965+#ifdef CONFIG_X86_32
16966+#define arch_mmap_check i386_mmap_check
16967+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
16968+#endif
16969+#endif
16970+#endif
16971+
16972+#endif /* X86_MMAN_H */
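
arch_mmap_check is a generic hook: when an architecture defines it, the mmap path calls it before establishing a mapping and aborts on a non-zero return. This new header wires the 32-bit hook to i386_mmap_check, defined elsewhere in the patch, so PAGEEXEC/SEGMEXEC tasks can reject mappings that fall outside their reduced address space. A hedged stand-in for that check; the limit is an assumption here, though the real SEGMEXEC split appears later in processor.h as TASK_SIZE / 2:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL /* assumed split for the demo */

static int demo_mmap_check(unsigned long addr, unsigned long len,
                           unsigned long flags)
{
    (void)flags;
    if (addr + len > SEGMEXEC_TASK_SIZE)
        return -1; /* the kernel would return an errno here */
    return 0;
}

int main(void)
{
    printf("%d\n", demo_mmap_check(0x10000000UL, 0x1000UL, 0)); /*  0 */
    printf("%d\n", demo_mmap_check(0x5FFFF000UL, 0x2000UL, 0)); /* -1 */
    return 0;
}
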
16973diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
16974index 5f55e69..e20bfb1 100644
16975--- a/arch/x86/include/asm/mmu.h
16976+++ b/arch/x86/include/asm/mmu.h
16977@@ -9,7 +9,7 @@
16978 * we put the segment information here.
16979 */
16980 typedef struct {
16981- void *ldt;
16982+ struct desc_struct *ldt;
16983 int size;
16984
16985 #ifdef CONFIG_X86_64
16986@@ -18,7 +18,19 @@ typedef struct {
16987 #endif
16988
16989 struct mutex lock;
16990- void *vdso;
16991+ unsigned long vdso;
16992+
16993+#ifdef CONFIG_X86_32
16994+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16995+ unsigned long user_cs_base;
16996+ unsigned long user_cs_limit;
16997+
16998+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16999+ cpumask_t cpu_user_cs_mask;
17000+#endif
17001+
17002+#endif
17003+#endif
17004 } mm_context_t;
17005
17006 #ifdef CONFIG_SMP
17007diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17008index be12c53..4d24039 100644
17009--- a/arch/x86/include/asm/mmu_context.h
17010+++ b/arch/x86/include/asm/mmu_context.h
17011@@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
17012
17013 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17014 {
17015+
17016+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17017+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17018+ unsigned int i;
17019+ pgd_t *pgd;
17020+
17021+ pax_open_kernel();
17022+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17023+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17024+ set_pgd_batched(pgd+i, native_make_pgd(0));
17025+ pax_close_kernel();
17026+ }
17027+#endif
17028+
17029 #ifdef CONFIG_SMP
17030 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17031 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17032@@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17033 struct task_struct *tsk)
17034 {
17035 unsigned cpu = smp_processor_id();
17036+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17037+ int tlbstate = TLBSTATE_OK;
17038+#endif
17039
17040 if (likely(prev != next)) {
17041 #ifdef CONFIG_SMP
17042+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17043+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17044+#endif
17045 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17046 this_cpu_write(cpu_tlbstate.active_mm, next);
17047 #endif
17048 cpumask_set_cpu(cpu, mm_cpumask(next));
17049
17050 /* Re-load page tables */
17051+#ifdef CONFIG_PAX_PER_CPU_PGD
17052+ pax_open_kernel();
17053+
17054+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17055+ if (static_cpu_has(X86_FEATURE_PCID))
17056+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17057+ else
17058+#endif
17059+
17060+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17061+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17062+ pax_close_kernel();
17063+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17064+
17065+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17066+ if (static_cpu_has(X86_FEATURE_PCID)) {
17067+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17068+ u64 descriptor[2];
17069+ descriptor[0] = PCID_USER;
17070+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17071+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17072+ descriptor[0] = PCID_KERNEL;
17073+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17074+ }
17075+ } else {
17076+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17077+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17078+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17079+ else
17080+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17081+ }
17082+ } else
17083+#endif
17084+
17085+ load_cr3(get_cpu_pgd(cpu, kernel));
17086+#else
17087 load_cr3(next->pgd);
17088+#endif
17089
17090 /* Stop flush ipis for the previous mm */
17091 cpumask_clear_cpu(cpu, mm_cpumask(prev));
17092@@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17093 /* Load the LDT, if the LDT is different: */
17094 if (unlikely(prev->context.ldt != next->context.ldt))
17095 load_LDT_nolock(&next->context);
17096+
17097+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17098+ if (!(__supported_pte_mask & _PAGE_NX)) {
17099+ smp_mb__before_clear_bit();
17100+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17101+ smp_mb__after_clear_bit();
17102+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17103+ }
17104+#endif
17105+
17106+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17107+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17108+ prev->context.user_cs_limit != next->context.user_cs_limit))
17109+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17110+#ifdef CONFIG_SMP
17111+ else if (unlikely(tlbstate != TLBSTATE_OK))
17112+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17113+#endif
17114+#endif
17115+
17116 }
17117+ else {
17118+
17119+#ifdef CONFIG_PAX_PER_CPU_PGD
17120+ pax_open_kernel();
17121+
17122+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17123+ if (static_cpu_has(X86_FEATURE_PCID))
17124+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17125+ else
17126+#endif
17127+
17128+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17129+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17130+ pax_close_kernel();
17131+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17132+
17133+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17134+ if (static_cpu_has(X86_FEATURE_PCID)) {
17135+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17136+ u64 descriptor[2];
17137+ descriptor[0] = PCID_USER;
17138+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17139+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17140+ descriptor[0] = PCID_KERNEL;
17141+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17142+ }
17143+ } else {
17144+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17145+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17146+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17147+ else
17148+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17149+ }
17150+ } else
17151+#endif
17152+
17153+ load_cr3(get_cpu_pgd(cpu, kernel));
17154+#endif
17155+
17156 #ifdef CONFIG_SMP
17157- else {
17158 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17159 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17160
17161@@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17162 * tlb flush IPI delivery. We must reload CR3
17163 * to make sure to use no freed page tables.
17164 */
17165+
17166+#ifndef CONFIG_PAX_PER_CPU_PGD
17167 load_cr3(next->pgd);
17168+#endif
17169+
17170 load_LDT_nolock(&next->context);
17171+
17172+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17173+ if (!(__supported_pte_mask & _PAGE_NX))
17174+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17175+#endif
17176+
17177+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17178+#ifdef CONFIG_PAX_PAGEEXEC
17179+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17180+#endif
17181+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17182+#endif
17183+
17184 }
17185+#endif
17186 }
17187-#endif
17188 }
17189
17190 #define activate_mm(prev, next) \
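
Both arms of switch_mm now maintain the per-CPU kernel/user PGD pair and, on PCID-capable hardware, invalidate the user address-space tag explicitly. The hard-coded __ASM_INVPCID bytes (defined later in processor.h) encode invpcid (%rdx),%rax: %rax selects the invalidation type and %rdx points at a 16-byte descriptor whose low 12 bits carry the PCID; the second quadword is a linear address that single-context invalidation (type 1) ignores, which is why the code above only fills descriptor[0]. A sketch of the descriptor layout:

#include <stdint.h>
#include <stdio.h>

#define PCID_KERNEL 0ULL
#define PCID_USER   1ULL

/* INVPCID memory operand: 128 bits. Bits 0-11 hold the PCID; bits
 * 64-127 hold a linear address that only type 0 (single-address)
 * invalidations consult. */
struct invpcid_desc {
    uint64_t pcid;  /* low 12 bits significant */
    uint64_t addr;  /* ignored by single-context (type 1) */
};

int main(void)
{
    struct invpcid_desc d = { .pcid = PCID_USER, .addr = 0 };
    /* The patched switch_mm would now execute, in effect:
     *   invpcid (&d), $INVPCID_SINGLE_CONTEXT
     * dropping every TLB entry tagged with PCID 1 (the user PGD). */
    printf("single-context flush, pcid=%llu\n",
           (unsigned long long)d.pcid);
    return 0;
}
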
17191diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17192index e3b7819..b257c64 100644
17193--- a/arch/x86/include/asm/module.h
17194+++ b/arch/x86/include/asm/module.h
17195@@ -5,6 +5,7 @@
17196
17197 #ifdef CONFIG_X86_64
17198 /* X86_64 does not define MODULE_PROC_FAMILY */
17199+#define MODULE_PROC_FAMILY ""
17200 #elif defined CONFIG_M486
17201 #define MODULE_PROC_FAMILY "486 "
17202 #elif defined CONFIG_M586
17203@@ -57,8 +58,20 @@
17204 #error unknown processor family
17205 #endif
17206
17207-#ifdef CONFIG_X86_32
17208-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17209+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17210+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17211+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17212+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17213+#else
17214+#define MODULE_PAX_KERNEXEC ""
17215 #endif
17216
17217+#ifdef CONFIG_PAX_MEMORY_UDEREF
17218+#define MODULE_PAX_UDEREF "UDEREF "
17219+#else
17220+#define MODULE_PAX_UDEREF ""
17221+#endif
17222+
17223+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17224+
17225 #endif /* _ASM_X86_MODULE_H */
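
The module.h changes fold the KERNEXEC plugin method and the UDEREF state into every module's vermagic string, so a module built with mismatched PaX settings is rejected at load time. The concatenation is plain preprocessor string pasting; for one assumed configuration (CONFIG_M686, BTS-method KERNEXEC, UDEREF enabled):

#include <stdio.h>

/* Reproduce the concatenation for the assumed configuration; the
 * fragment strings come straight from the #defines above. */
#define MODULE_PROC_FAMILY  "686 "
#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_ARCH_VERMAGIC \
        MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
    puts(MODULE_ARCH_VERMAGIC); /* "686 KERNEXEC_BTS UDEREF " */
    return 0;
}
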
17226diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17227index 86f9301..b365cda 100644
17228--- a/arch/x86/include/asm/nmi.h
17229+++ b/arch/x86/include/asm/nmi.h
17230@@ -40,11 +40,11 @@ struct nmiaction {
17231 nmi_handler_t handler;
17232 unsigned long flags;
17233 const char *name;
17234-};
17235+} __do_const;
17236
17237 #define register_nmi_handler(t, fn, fg, n, init...) \
17238 ({ \
17239- static struct nmiaction init fn##_na = { \
17240+ static const struct nmiaction init fn##_na = { \
17241 .handler = (fn), \
17242 .name = (n), \
17243 .flags = (fg), \
17244@@ -52,7 +52,7 @@ struct nmiaction {
17245 __register_nmi_handler((t), &fn##_na); \
17246 })
17247
17248-int __register_nmi_handler(unsigned int, struct nmiaction *);
17249+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17250
17251 void unregister_nmi_handler(unsigned int, const char *);
17252
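
Marking struct nmiaction __do_const (a grsecurity attribute that forces instances into read-only memory) and constifying the static descriptor inside register_nmi_handler means registered NMI actions can no longer be tampered with at runtime; __register_nmi_handler's prototype changes to match, and call sites are unchanged. A sketch of typical usage against this 3.13-era API; NMI_LOCAL, NMI_DONE and the handler signature are assumed to come from the same header:

/* Kernel-module-style sketch; not user-space runnable. */
#include <linux/init.h>
#include <asm/nmi.h>

static int demo_nmi(unsigned int cmd, struct pt_regs *regs)
{
    /* Inspect regs; return NMI_HANDLED if this NMI belonged to us. */
    return NMI_DONE;
}

static int __init demo_init(void)
{
    /* The macro now emits a const struct nmiaction behind the scenes. */
    return register_nmi_handler(NMI_LOCAL, demo_nmi, 0, "demo");
}
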
17253diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17254index c878924..21f4889 100644
17255--- a/arch/x86/include/asm/page.h
17256+++ b/arch/x86/include/asm/page.h
17257@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17258 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17259
17260 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17261+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17262
17263 #define __boot_va(x) __va(x)
17264 #define __boot_pa(x) __pa(x)
17265diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17266index 0f1ddee..e2fc3d1 100644
17267--- a/arch/x86/include/asm/page_64.h
17268+++ b/arch/x86/include/asm/page_64.h
17269@@ -7,9 +7,9 @@
17270
17271 /* duplicated to the one in bootmem.h */
17272 extern unsigned long max_pfn;
17273-extern unsigned long phys_base;
17274+extern const unsigned long phys_base;
17275
17276-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17277+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17278 {
17279 unsigned long y = x - __START_KERNEL_map;
17280
17281diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17282index 401f350..dee5d13 100644
17283--- a/arch/x86/include/asm/paravirt.h
17284+++ b/arch/x86/include/asm/paravirt.h
17285@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17286 return (pmd_t) { ret };
17287 }
17288
17289-static inline pmdval_t pmd_val(pmd_t pmd)
17290+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17291 {
17292 pmdval_t ret;
17293
17294@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17295 val);
17296 }
17297
17298+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17299+{
17300+ pgdval_t val = native_pgd_val(pgd);
17301+
17302+ if (sizeof(pgdval_t) > sizeof(long))
17303+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17304+ val, (u64)val >> 32);
17305+ else
17306+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17307+ val);
17308+}
17309+
17310 static inline void pgd_clear(pgd_t *pgdp)
17311 {
17312 set_pgd(pgdp, __pgd(0));
17313@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17314 pv_mmu_ops.set_fixmap(idx, phys, flags);
17315 }
17316
17317+#ifdef CONFIG_PAX_KERNEXEC
17318+static inline unsigned long pax_open_kernel(void)
17319+{
17320+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17321+}
17322+
17323+static inline unsigned long pax_close_kernel(void)
17324+{
17325+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17326+}
17327+#else
17328+static inline unsigned long pax_open_kernel(void) { return 0; }
17329+static inline unsigned long pax_close_kernel(void) { return 0; }
17330+#endif
17331+
17332 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17333
17334 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17335@@ -906,7 +933,7 @@ extern void default_banner(void);
17336
17337 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17338 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17339-#define PARA_INDIRECT(addr) *%cs:addr
17340+#define PARA_INDIRECT(addr) *%ss:addr
17341 #endif
17342
17343 #define INTERRUPT_RETURN \
17344@@ -981,6 +1008,21 @@ extern void default_banner(void);
17345 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17346 CLBR_NONE, \
17347 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17348+
17349+#define GET_CR0_INTO_RDI \
17350+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17351+ mov %rax,%rdi
17352+
17353+#define SET_RDI_INTO_CR0 \
17354+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17355+
17356+#define GET_CR3_INTO_RDI \
17357+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17358+ mov %rax,%rdi
17359+
17360+#define SET_RDI_INTO_CR3 \
17361+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17362+
17363 #endif /* CONFIG_X86_32 */
17364
17365 #endif /* __ASSEMBLY__ */
17366diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17367index aab8f67..0fb0ee4 100644
17368--- a/arch/x86/include/asm/paravirt_types.h
17369+++ b/arch/x86/include/asm/paravirt_types.h
17370@@ -84,7 +84,7 @@ struct pv_init_ops {
17371 */
17372 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17373 unsigned long addr, unsigned len);
17374-};
17375+} __no_const __no_randomize_layout;
17376
17377
17378 struct pv_lazy_ops {
17379@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17380 void (*enter)(void);
17381 void (*leave)(void);
17382 void (*flush)(void);
17383-};
17384+} __no_randomize_layout;
17385
17386 struct pv_time_ops {
17387 unsigned long long (*sched_clock)(void);
17388 unsigned long long (*steal_clock)(int cpu);
17389 unsigned long (*get_tsc_khz)(void);
17390-};
17391+} __no_const __no_randomize_layout;
17392
17393 struct pv_cpu_ops {
17394 /* hooks for various privileged instructions */
17395@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17396
17397 void (*start_context_switch)(struct task_struct *prev);
17398 void (*end_context_switch)(struct task_struct *next);
17399-};
17400+} __no_const __no_randomize_layout;
17401
17402 struct pv_irq_ops {
17403 /*
17404@@ -215,7 +215,7 @@ struct pv_irq_ops {
17405 #ifdef CONFIG_X86_64
17406 void (*adjust_exception_frame)(void);
17407 #endif
17408-};
17409+} __no_randomize_layout;
17410
17411 struct pv_apic_ops {
17412 #ifdef CONFIG_X86_LOCAL_APIC
17413@@ -223,7 +223,7 @@ struct pv_apic_ops {
17414 unsigned long start_eip,
17415 unsigned long start_esp);
17416 #endif
17417-};
17418+} __no_const __no_randomize_layout;
17419
17420 struct pv_mmu_ops {
17421 unsigned long (*read_cr2)(void);
17422@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17423 struct paravirt_callee_save make_pud;
17424
17425 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17426+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17427 #endif /* PAGETABLE_LEVELS == 4 */
17428 #endif /* PAGETABLE_LEVELS >= 3 */
17429
17430@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17431 an mfn. We can tell which is which from the index. */
17432 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17433 phys_addr_t phys, pgprot_t flags);
17434-};
17435+
17436+#ifdef CONFIG_PAX_KERNEXEC
17437+ unsigned long (*pax_open_kernel)(void);
17438+ unsigned long (*pax_close_kernel)(void);
17439+#endif
17440+
17441+} __no_randomize_layout;
17442
17443 struct arch_spinlock;
17444 #ifdef CONFIG_SMP
17445@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17446 struct pv_lock_ops {
17447 struct paravirt_callee_save lock_spinning;
17448 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17449-};
17450+} __no_randomize_layout;
17451
17452 /* This contains all the paravirt structures: we get a convenient
17453 * number for each function using the offset which we use to indicate
17454- * what to patch. */
17455+ * what to patch.
17456+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17457+ */
17458+
17459 struct paravirt_patch_template {
17460 struct pv_init_ops pv_init_ops;
17461 struct pv_time_ops pv_time_ops;
17462@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17463 struct pv_apic_ops pv_apic_ops;
17464 struct pv_mmu_ops pv_mmu_ops;
17465 struct pv_lock_ops pv_lock_ops;
17466-};
17467+} __no_randomize_layout;
17468
17469 extern struct pv_info pv_info;
17470 extern struct pv_init_ops pv_init_ops;
17471diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17472index c4412e9..90e88c5 100644
17473--- a/arch/x86/include/asm/pgalloc.h
17474+++ b/arch/x86/include/asm/pgalloc.h
17475@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17476 pmd_t *pmd, pte_t *pte)
17477 {
17478 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17479+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17480+}
17481+
17482+static inline void pmd_populate_user(struct mm_struct *mm,
17483+ pmd_t *pmd, pte_t *pte)
17484+{
17485+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17486 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17487 }
17488
17489@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17490
17491 #ifdef CONFIG_X86_PAE
17492 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17493+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17494+{
17495+ pud_populate(mm, pudp, pmd);
17496+}
17497 #else /* !CONFIG_X86_PAE */
17498 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17499 {
17500 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17501 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17502 }
17503+
17504+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17505+{
17506+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17507+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17508+}
17509 #endif /* CONFIG_X86_PAE */
17510
17511 #if PAGETABLE_LEVELS > 3
17512@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17513 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17514 }
17515
17516+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17517+{
17518+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17519+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17520+}
17521+
17522 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17523 {
17524 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
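
Each new *_populate_kernel variant differs from its user-facing counterpart only in the flag set written into the upper-level entry: _KERNPG_TABLE drops _PAGE_USER, so kernel-only page-table hierarchies never carry the user bit that the UDEREF/per-CPU PGD scheme depends on. With the standard x86 flag values from pgtable_types.h:

#include <stdio.h>

/* Standard x86 pte flag bits (values from pgtable_types.h). */
#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL

#define _PAGE_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                       _PAGE_ACCESSED | _PAGE_DIRTY)          /* 0x067 */
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | \
                       _PAGE_ACCESSED | _PAGE_DIRTY)          /* 0x063 */

int main(void)
{
    printf("_PAGE_TABLE   = 0x%03lx\n", _PAGE_TABLE);
    printf("_KERNPG_TABLE = 0x%03lx\n", _KERNPG_TABLE);
    return 0;
}

The same 0x067-versus-0x063 distinction shows up further down where PTE_IDENT_ATTR and PDE_IDENT_ATTR are retightened.
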
17525diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17526index 3bf2dd0..23d2a9f 100644
17527--- a/arch/x86/include/asm/pgtable-2level.h
17528+++ b/arch/x86/include/asm/pgtable-2level.h
17529@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17530
17531 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17532 {
17533+ pax_open_kernel();
17534 *pmdp = pmd;
17535+ pax_close_kernel();
17536 }
17537
17538 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17539diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17540index 81bb91b..9392125 100644
17541--- a/arch/x86/include/asm/pgtable-3level.h
17542+++ b/arch/x86/include/asm/pgtable-3level.h
17543@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17544
17545 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17546 {
17547+ pax_open_kernel();
17548 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17549+ pax_close_kernel();
17550 }
17551
17552 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17553 {
17554+ pax_open_kernel();
17555 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17556+ pax_close_kernel();
17557 }
17558
17559 /*
17560diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17561index 5ad38ad..71db3f2 100644
17562--- a/arch/x86/include/asm/pgtable.h
17563+++ b/arch/x86/include/asm/pgtable.h
17564@@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17565
17566 #ifndef __PAGETABLE_PUD_FOLDED
17567 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17568+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17569 #define pgd_clear(pgd) native_pgd_clear(pgd)
17570 #endif
17571
17572@@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17573
17574 #define arch_end_context_switch(prev) do {} while(0)
17575
17576+#define pax_open_kernel() native_pax_open_kernel()
17577+#define pax_close_kernel() native_pax_close_kernel()
17578 #endif /* CONFIG_PARAVIRT */
17579
17580+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17581+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17582+
17583+#ifdef CONFIG_PAX_KERNEXEC
17584+static inline unsigned long native_pax_open_kernel(void)
17585+{
17586+ unsigned long cr0;
17587+
17588+ preempt_disable();
17589+ barrier();
17590+ cr0 = read_cr0() ^ X86_CR0_WP;
17591+ BUG_ON(cr0 & X86_CR0_WP);
17592+ write_cr0(cr0);
17593+ return cr0 ^ X86_CR0_WP;
17594+}
17595+
17596+static inline unsigned long native_pax_close_kernel(void)
17597+{
17598+ unsigned long cr0;
17599+
17600+ cr0 = read_cr0() ^ X86_CR0_WP;
17601+ BUG_ON(!(cr0 & X86_CR0_WP));
17602+ write_cr0(cr0);
17603+ barrier();
17604+ preempt_enable_no_resched();
17605+ return cr0 ^ X86_CR0_WP;
17606+}
17607+#else
17608+static inline unsigned long native_pax_open_kernel(void) { return 0; }
17609+static inline unsigned long native_pax_close_kernel(void) { return 0; }
17610+#endif
17611+
17612 /*
17613 * The following only work if pte_present() is true.
17614 * Undefined behaviour if not..
17615 */
17616+static inline int pte_user(pte_t pte)
17617+{
17618+ return pte_val(pte) & _PAGE_USER;
17619+}
17620+
17621 static inline int pte_dirty(pte_t pte)
17622 {
17623 return pte_flags(pte) & _PAGE_DIRTY;
17624@@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_t pud)
17625 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
17626 }
17627
17628+static inline unsigned long pgd_pfn(pgd_t pgd)
17629+{
17630+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
17631+}
17632+
17633 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
17634
17635 static inline int pmd_large(pmd_t pte)
17636@@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
17637 return pte_clear_flags(pte, _PAGE_RW);
17638 }
17639
17640+static inline pte_t pte_mkread(pte_t pte)
17641+{
17642+ return __pte(pte_val(pte) | _PAGE_USER);
17643+}
17644+
17645 static inline pte_t pte_mkexec(pte_t pte)
17646 {
17647- return pte_clear_flags(pte, _PAGE_NX);
17648+#ifdef CONFIG_X86_PAE
17649+ if (__supported_pte_mask & _PAGE_NX)
17650+ return pte_clear_flags(pte, _PAGE_NX);
17651+ else
17652+#endif
17653+ return pte_set_flags(pte, _PAGE_USER);
17654+}
17655+
17656+static inline pte_t pte_exprotect(pte_t pte)
17657+{
17658+#ifdef CONFIG_X86_PAE
17659+ if (__supported_pte_mask & _PAGE_NX)
17660+ return pte_set_flags(pte, _PAGE_NX);
17661+ else
17662+#endif
17663+ return pte_clear_flags(pte, _PAGE_USER);
17664 }
17665
17666 static inline pte_t pte_mkdirty(pte_t pte)
17667@@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
17668 #endif
17669
17670 #ifndef __ASSEMBLY__
17671+
17672+#ifdef CONFIG_PAX_PER_CPU_PGD
17673+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
17674+enum cpu_pgd_type {kernel = 0, user = 1};
17675+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
17676+{
17677+ return cpu_pgd[cpu][type];
17678+}
17679+#endif
17680+
17681 #include <linux/mm_types.h>
17682 #include <linux/mmdebug.h>
17683 #include <linux/log2.h>
17684@@ -580,7 +655,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
17685 * Currently stuck as a macro due to indirect forward reference to
17686 * linux/mmzone.h's __section_mem_map_addr() definition:
17687 */
17688-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
17689+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
17690
17691 /* Find an entry in the second-level page table.. */
17692 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
17693@@ -620,7 +695,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
17694 * Currently stuck as a macro due to indirect forward reference to
17695 * linux/mmzone.h's __section_mem_map_addr() definition:
17696 */
17697-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
17698+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
17699
17700 /* to find an entry in a page-table-directory. */
17701 static inline unsigned long pud_index(unsigned long address)
17702@@ -635,7 +710,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
17703
17704 static inline int pgd_bad(pgd_t pgd)
17705 {
17706- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
17707+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
17708 }
17709
17710 static inline int pgd_none(pgd_t pgd)
17711@@ -658,7 +733,12 @@ static inline int pgd_none(pgd_t pgd)
17712 * pgd_offset() returns a (pgd_t *)
17713 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
17714 */
17715-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
17716+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
17717+
17718+#ifdef CONFIG_PAX_PER_CPU_PGD
17719+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
17720+#endif
17721+
17722 /*
17723 * a shortcut which implies the use of the kernel's pgd, instead
17724 * of a process's
17725@@ -669,6 +749,23 @@ static inline int pgd_none(pgd_t pgd)
17726 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
17727 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
17728
17729+#ifdef CONFIG_X86_32
17730+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
17731+#else
17732+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
17733+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
17734+
17735+#ifdef CONFIG_PAX_MEMORY_UDEREF
17736+#ifdef __ASSEMBLY__
17737+#define pax_user_shadow_base pax_user_shadow_base(%rip)
17738+#else
17739+extern unsigned long pax_user_shadow_base;
17740+extern pgdval_t clone_pgd_mask;
17741+#endif
17742+#endif
17743+
17744+#endif
17745+
17746 #ifndef __ASSEMBLY__
17747
17748 extern int direct_gbpages;
17749@@ -835,11 +932,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
17750 * dst and src can be on the same page, but the range must not overlap,
17751 * and must not cross a page boundary.
17752 */
17753-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
17754+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
17755 {
17756- memcpy(dst, src, count * sizeof(pgd_t));
17757+ pax_open_kernel();
17758+ while (count--)
17759+ *dst++ = *src++;
17760+ pax_close_kernel();
17761 }
17762
17763+#ifdef CONFIG_PAX_PER_CPU_PGD
17764+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
17765+#endif
17766+
17767+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17768+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
17769+#else
17770+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
17771+#endif
17772+
17773 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
17774 static inline int page_level_shift(enum pg_level level)
17775 {
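
native_pax_open_kernel above toggles CR0.WP off via XOR, with preemption disabled and a BUG_ON guarding against imbalance, so ring-0 code can write through read-only mappings; native_pax_close_kernel restores the bit. The rewritten clone_pgd_range shows the canonical usage: bracket writes to protected page tables with an open/close pair. A sketch of that pattern; this is kernel-side pseudocode and the helper name is made up:

/* Kernel-side pattern sketch (not user-space runnable). Assumes the
 * pax_open_kernel()/pax_close_kernel() pair defined above. */
static void update_protected_pgds(pgd_t *dst, const pgd_t *src, int count)
{
    pax_open_kernel();      /* preempt off, CR0.WP cleared */
    while (count--)
        *dst++ = *src++;    /* stores land despite read-only mappings */
    pax_close_kernel();     /* CR0.WP restored, preempt re-enabled */
}
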
17776diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
17777index 9ee3221..b979c6b 100644
17778--- a/arch/x86/include/asm/pgtable_32.h
17779+++ b/arch/x86/include/asm/pgtable_32.h
17780@@ -25,9 +25,6 @@
17781 struct mm_struct;
17782 struct vm_area_struct;
17783
17784-extern pgd_t swapper_pg_dir[1024];
17785-extern pgd_t initial_page_table[1024];
17786-
17787 static inline void pgtable_cache_init(void) { }
17788 static inline void check_pgt_cache(void) { }
17789 void paging_init(void);
17790@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17791 # include <asm/pgtable-2level.h>
17792 #endif
17793
17794+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
17795+extern pgd_t initial_page_table[PTRS_PER_PGD];
17796+#ifdef CONFIG_X86_PAE
17797+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
17798+#endif
17799+
17800 #if defined(CONFIG_HIGHPTE)
17801 #define pte_offset_map(dir, address) \
17802 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
17803@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17804 /* Clear a kernel PTE and flush it from the TLB */
17805 #define kpte_clear_flush(ptep, vaddr) \
17806 do { \
17807+ pax_open_kernel(); \
17808 pte_clear(&init_mm, (vaddr), (ptep)); \
17809+ pax_close_kernel(); \
17810 __flush_tlb_one((vaddr)); \
17811 } while (0)
17812
17813 #endif /* !__ASSEMBLY__ */
17814
17815+#define HAVE_ARCH_UNMAPPED_AREA
17816+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
17817+
17818 /*
17819 * kern_addr_valid() is (1) for FLATMEM and (0) for
17820 * SPARSEMEM and DISCONTIGMEM
17821diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
17822index ed5903b..c7fe163 100644
17823--- a/arch/x86/include/asm/pgtable_32_types.h
17824+++ b/arch/x86/include/asm/pgtable_32_types.h
17825@@ -8,7 +8,7 @@
17826 */
17827 #ifdef CONFIG_X86_PAE
17828 # include <asm/pgtable-3level_types.h>
17829-# define PMD_SIZE (1UL << PMD_SHIFT)
17830+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
17831 # define PMD_MASK (~(PMD_SIZE - 1))
17832 #else
17833 # include <asm/pgtable-2level_types.h>
17834@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
17835 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
17836 #endif
17837
17838+#ifdef CONFIG_PAX_KERNEXEC
17839+#ifndef __ASSEMBLY__
17840+extern unsigned char MODULES_EXEC_VADDR[];
17841+extern unsigned char MODULES_EXEC_END[];
17842+#endif
17843+#include <asm/boot.h>
17844+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
17845+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
17846+#else
17847+#define ktla_ktva(addr) (addr)
17848+#define ktva_ktla(addr) (addr)
17849+#endif
17850+
17851 #define MODULES_VADDR VMALLOC_START
17852 #define MODULES_END VMALLOC_END
17853 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
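
Under 32-bit KERNEXEC the kernel text gains a second mapping, and ktla_ktva/ktva_ktla translate between the kernel text linear address and its virtual alias; the fixed offset is LOAD_PHYSICAL_ADDR + PAGE_OFFSET and the two macros are exact inverses (and the identity when KERNEXEC is off, or on x86_64 as the next header shows). A round-trip check with assumed demo values; the real ones come from asm/boot.h and the kernel configuration:

#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL  /* assumed for the demo */
#define PAGE_OFFSET        0xC0000000UL /* assumed for the demo */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
    unsigned long ktla = 0x00100000UL;
    unsigned long ktva = ktla_ktva(ktla);
    printf("ktva = 0x%08lx\n", ktva);                        /* 0xc1100000 */
    printf("round trip ok: %d\n", ktva_ktla(ktva) == ktla);  /* 1 */
    return 0;
}
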
17854diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
17855index e22c1db..23a625a 100644
17856--- a/arch/x86/include/asm/pgtable_64.h
17857+++ b/arch/x86/include/asm/pgtable_64.h
17858@@ -16,10 +16,14 @@
17859
17860 extern pud_t level3_kernel_pgt[512];
17861 extern pud_t level3_ident_pgt[512];
17862+extern pud_t level3_vmalloc_start_pgt[512];
17863+extern pud_t level3_vmalloc_end_pgt[512];
17864+extern pud_t level3_vmemmap_pgt[512];
17865+extern pud_t level2_vmemmap_pgt[512];
17866 extern pmd_t level2_kernel_pgt[512];
17867 extern pmd_t level2_fixmap_pgt[512];
17868-extern pmd_t level2_ident_pgt[512];
17869-extern pgd_t init_level4_pgt[];
17870+extern pmd_t level2_ident_pgt[512*2];
17871+extern pgd_t init_level4_pgt[512];
17872
17873 #define swapper_pg_dir init_level4_pgt
17874
17875@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17876
17877 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17878 {
17879+ pax_open_kernel();
17880 *pmdp = pmd;
17881+ pax_close_kernel();
17882 }
17883
17884 static inline void native_pmd_clear(pmd_t *pmd)
17885@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
17886
17887 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17888 {
17889+ pax_open_kernel();
17890 *pudp = pud;
17891+ pax_close_kernel();
17892 }
17893
17894 static inline void native_pud_clear(pud_t *pud)
17895@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
17896
17897 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
17898 {
17899+ pax_open_kernel();
17900+ *pgdp = pgd;
17901+ pax_close_kernel();
17902+}
17903+
17904+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17905+{
17906 *pgdp = pgd;
17907 }
17908
17909diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
17910index 2d88344..4679fc3 100644
17911--- a/arch/x86/include/asm/pgtable_64_types.h
17912+++ b/arch/x86/include/asm/pgtable_64_types.h
17913@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
17914 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
17915 #define MODULES_END _AC(0xffffffffff000000, UL)
17916 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
17917+#define MODULES_EXEC_VADDR MODULES_VADDR
17918+#define MODULES_EXEC_END MODULES_END
17919+
17920+#define ktla_ktva(addr) (addr)
17921+#define ktva_ktla(addr) (addr)
17922
17923 #define EARLY_DYNAMIC_PAGE_TABLES 64
17924
17925diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
17926index 840c127..a8f297b 100644
17927--- a/arch/x86/include/asm/pgtable_types.h
17928+++ b/arch/x86/include/asm/pgtable_types.h
17929@@ -16,13 +16,12 @@
17930 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
17931 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
17932 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
17933-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
17934+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
17935 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
17936 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
17937 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
17938-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
17939-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
17940-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
17941+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
17942+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
17943 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
17944
17945 /* If _PAGE_BIT_PRESENT is clear, we use these: */
17946@@ -40,7 +39,6 @@
17947 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
17948 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
17949 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
17950-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
17951 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
17952 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
17953 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
17954@@ -87,8 +85,10 @@
17955
17956 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
17957 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
17958-#else
17959+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
17960 #define _PAGE_NX (_AT(pteval_t, 0))
17961+#else
17962+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
17963 #endif
17964
17965 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
17966@@ -147,6 +147,9 @@
17967 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
17968 _PAGE_ACCESSED)
17969
17970+#define PAGE_READONLY_NOEXEC PAGE_READONLY
17971+#define PAGE_SHARED_NOEXEC PAGE_SHARED
17972+
17973 #define __PAGE_KERNEL_EXEC \
17974 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
17975 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
17976@@ -157,7 +160,7 @@
17977 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
17978 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
17979 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
17980-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
17981+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
17982 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
17983 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
17984 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
17985@@ -219,8 +222,8 @@
17986 * bits are combined, this will alow user to access the high address mapped
17987 * VDSO in the presence of CONFIG_COMPAT_VDSO
17988 */
17989-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
17990-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
17991+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17992+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17993 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
17994 #endif
17995
17996@@ -258,7 +261,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
17997 {
17998 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
17999 }
18000+#endif
18001
18002+#if PAGETABLE_LEVELS == 3
18003+#include <asm-generic/pgtable-nopud.h>
18004+#endif
18005+
18006+#if PAGETABLE_LEVELS == 2
18007+#include <asm-generic/pgtable-nopmd.h>
18008+#endif
18009+
18010+#ifndef __ASSEMBLY__
18011 #if PAGETABLE_LEVELS > 3
18012 typedef struct { pudval_t pud; } pud_t;
18013
18014@@ -272,8 +285,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18015 return pud.pud;
18016 }
18017 #else
18018-#include <asm-generic/pgtable-nopud.h>
18019-
18020 static inline pudval_t native_pud_val(pud_t pud)
18021 {
18022 return native_pgd_val(pud.pgd);
18023@@ -293,8 +304,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18024 return pmd.pmd;
18025 }
18026 #else
18027-#include <asm-generic/pgtable-nopmd.h>
18028-
18029 static inline pmdval_t native_pmd_val(pmd_t pmd)
18030 {
18031 return native_pgd_val(pmd.pud.pgd);
18032@@ -334,7 +343,6 @@ typedef struct page *pgtable_t;
18033
18034 extern pteval_t __supported_pte_mask;
18035 extern void set_nx(void);
18036-extern int nx_enabled;
18037
18038 #define pgprot_writecombine pgprot_writecombine
18039 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18040diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18041index c8b0519..fd29e73 100644
18042--- a/arch/x86/include/asm/preempt.h
18043+++ b/arch/x86/include/asm/preempt.h
18044@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
18045 */
18046 static __always_inline bool __preempt_count_dec_and_test(void)
18047 {
18048- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18049+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18050 }
18051
18052 /*
18053diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18054index 7b034a4..4fe3e3f 100644
18055--- a/arch/x86/include/asm/processor.h
18056+++ b/arch/x86/include/asm/processor.h
18057@@ -128,7 +128,7 @@ struct cpuinfo_x86 {
18058 /* Index into per_cpu list: */
18059 u16 cpu_index;
18060 u32 microcode;
18061-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
18062+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
18063
18064 #define X86_VENDOR_INTEL 0
18065 #define X86_VENDOR_CYRIX 1
18066@@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18067 : "memory");
18068 }
18069
18070+/* invpcid (%rdx),%rax */
18071+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18072+
18073+#define INVPCID_SINGLE_ADDRESS 0UL
18074+#define INVPCID_SINGLE_CONTEXT 1UL
18075+#define INVPCID_ALL_GLOBAL 2UL
18076+#define INVPCID_ALL_MONGLOBAL 3UL
18077+
18078+#define PCID_KERNEL 0UL
18079+#define PCID_USER 1UL
18080+#define PCID_NOFLUSH (1UL << 63)
18081+
18082 static inline void load_cr3(pgd_t *pgdir)
18083 {
18084- write_cr3(__pa(pgdir));
18085+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18086 }
18087
18088 #ifdef CONFIG_X86_32
18089@@ -283,7 +295,7 @@ struct tss_struct {
18090
18091 } ____cacheline_aligned;
18092
18093-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18094+extern struct tss_struct init_tss[NR_CPUS];
18095
18096 /*
18097 * Save the original ist values for checking stack pointers during debugging
18098@@ -453,6 +465,7 @@ struct thread_struct {
18099 unsigned short ds;
18100 unsigned short fsindex;
18101 unsigned short gsindex;
18102+ unsigned short ss;
18103 #endif
18104 #ifdef CONFIG_X86_32
18105 unsigned long ip;
18106@@ -562,29 +575,8 @@ static inline void load_sp0(struct tss_struct *tss,
18107 extern unsigned long mmu_cr4_features;
18108 extern u32 *trampoline_cr4_features;
18109
18110-static inline void set_in_cr4(unsigned long mask)
18111-{
18112- unsigned long cr4;
18113-
18114- mmu_cr4_features |= mask;
18115- if (trampoline_cr4_features)
18116- *trampoline_cr4_features = mmu_cr4_features;
18117- cr4 = read_cr4();
18118- cr4 |= mask;
18119- write_cr4(cr4);
18120-}
18121-
18122-static inline void clear_in_cr4(unsigned long mask)
18123-{
18124- unsigned long cr4;
18125-
18126- mmu_cr4_features &= ~mask;
18127- if (trampoline_cr4_features)
18128- *trampoline_cr4_features = mmu_cr4_features;
18129- cr4 = read_cr4();
18130- cr4 &= ~mask;
18131- write_cr4(cr4);
18132-}
18133+extern void set_in_cr4(unsigned long mask);
18134+extern void clear_in_cr4(unsigned long mask);
18135
18136 typedef struct {
18137 unsigned long seg;
18138@@ -833,11 +825,18 @@ static inline void spin_lock_prefetch(const void *x)
18139 */
18140 #define TASK_SIZE PAGE_OFFSET
18141 #define TASK_SIZE_MAX TASK_SIZE
18142+
18143+#ifdef CONFIG_PAX_SEGMEXEC
18144+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18145+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18146+#else
18147 #define STACK_TOP TASK_SIZE
18148-#define STACK_TOP_MAX STACK_TOP
18149+#endif
18150+
18151+#define STACK_TOP_MAX TASK_SIZE
18152
18153 #define INIT_THREAD { \
18154- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18155+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18156 .vm86_info = NULL, \
18157 .sysenter_cs = __KERNEL_CS, \
18158 .io_bitmap_ptr = NULL, \
18159@@ -851,7 +850,7 @@ static inline void spin_lock_prefetch(const void *x)
18160 */
18161 #define INIT_TSS { \
18162 .x86_tss = { \
18163- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18164+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18165 .ss0 = __KERNEL_DS, \
18166 .ss1 = __KERNEL_CS, \
18167 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18168@@ -862,11 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
18169 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18170
18171 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18172-#define KSTK_TOP(info) \
18173-({ \
18174- unsigned long *__ptr = (unsigned long *)(info); \
18175- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18176-})
18177+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18178
18179 /*
18180 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18181@@ -881,7 +876,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18182 #define task_pt_regs(task) \
18183 ({ \
18184 struct pt_regs *__regs__; \
18185- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18186+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18187 __regs__ - 1; \
18188 })
18189
18190@@ -891,13 +886,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18191 /*
18192 * User space process size. 47bits minus one guard page.
18193 */
18194-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18195+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18196
18197 /* This decides where the kernel will search for a free chunk of vm
18198 * space during mmap's.
18199 */
18200 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18201- 0xc0000000 : 0xFFFFe000)
18202+ 0xc0000000 : 0xFFFFf000)
18203
18204 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18205 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18206@@ -908,11 +903,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18207 #define STACK_TOP_MAX TASK_SIZE_MAX
18208
18209 #define INIT_THREAD { \
18210- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18211+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18212 }
18213
18214 #define INIT_TSS { \
18215- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18216+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18217 }
18218
18219 /*
18220@@ -940,6 +935,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18221 */
18222 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18223
18224+#ifdef CONFIG_PAX_SEGMEXEC
18225+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18226+#endif
18227+
18228 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18229
18230 /* Get/set a process' ability to use the timestamp counter instruction */
18231@@ -966,7 +965,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18232 return 0;
18233 }
18234
18235-extern unsigned long arch_align_stack(unsigned long sp);
18236+#define arch_align_stack(x) ((x) & ~0xfUL)
18237 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18238
18239 void default_idle(void);
18240@@ -976,6 +975,6 @@ bool xen_set_default_idle(void);
18241 #define xen_set_default_idle 0
18242 #endif
18243
18244-void stop_this_cpu(void *dummy);
18245+void stop_this_cpu(void *dummy) __noreturn;
18246 void df_debug(struct pt_regs *regs, long error_code);
18247 #endif /* _ASM_X86_PROCESSOR_H */
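
The new PCID constants give the kernel and user halves of the per-CPU PGD distinct TLB tags: with CR4.PCIDE enabled, CR3 bits 0-11 select the PCID of the address space being loaded and bit 63 (PCID_NOFLUSH) asks the CPU to retain that PCID's cached translations, which is why load_cr3 now ORs PCID_KERNEL into every kernel PGD load. The bit packing, sketched:

#include <stdint.h>
#include <stdio.h>

#define PCID_KERNEL  0UL
#define PCID_USER    1UL
#define PCID_NOFLUSH (1ULL << 63)

/* Compose a CR3 value: page-aligned PGD physical address, PCID in
 * bits 0-11, no-flush request in bit 63. */
static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid, int noflush)
{
    return pgd_pa | (pcid & 0xfff) | (noflush ? PCID_NOFLUSH : 0);
}

int main(void)
{
    uint64_t pgd_pa = 0x1234000ULL; /* assumed physical address */
    printf("kernel cr3: %#llx\n",
           (unsigned long long)make_cr3(pgd_pa, PCID_KERNEL, 1));
    printf("user   cr3: %#llx\n",
           (unsigned long long)make_cr3(pgd_pa, PCID_USER, 0));
    return 0;
}
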
18248diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18249index 942a086..6c26446 100644
18250--- a/arch/x86/include/asm/ptrace.h
18251+++ b/arch/x86/include/asm/ptrace.h
18252@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18253 }
18254
18255 /*
18256- * user_mode_vm(regs) determines whether a register set came from user mode.
18257+ * user_mode(regs) determines whether a register set came from user mode.
18258 * This is true if V8086 mode was enabled OR if the register set was from
18259 * protected mode with RPL-3 CS value. This tricky test checks that with
18260 * one comparison. Many places in the kernel can bypass this full check
18261- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18262+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18263+ * be used.
18264 */
18265-static inline int user_mode(struct pt_regs *regs)
18266+static inline int user_mode_novm(struct pt_regs *regs)
18267 {
18268 #ifdef CONFIG_X86_32
18269 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18270 #else
18271- return !!(regs->cs & 3);
18272+ return !!(regs->cs & SEGMENT_RPL_MASK);
18273 #endif
18274 }
18275
18276-static inline int user_mode_vm(struct pt_regs *regs)
18277+static inline int user_mode(struct pt_regs *regs)
18278 {
18279 #ifdef CONFIG_X86_32
18280 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18281 USER_RPL;
18282 #else
18283- return user_mode(regs);
18284+ return user_mode_novm(regs);
18285 #endif
18286 }
18287
18288@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18289 #ifdef CONFIG_X86_64
18290 static inline bool user_64bit_mode(struct pt_regs *regs)
18291 {
18292+ unsigned long cs = regs->cs & 0xffff;
18293 #ifndef CONFIG_PARAVIRT
18294 /*
18295 * On non-paravirt systems, this is the only long mode CPL 3
18296 * selector. We do not allow long mode selectors in the LDT.
18297 */
18298- return regs->cs == __USER_CS;
18299+ return cs == __USER_CS;
18300 #else
18301 /* Headers are too twisted for this to go in paravirt.h. */
18302- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18303+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18304 #endif
18305 }
18306
18307@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18308 * Traps from the kernel do not save sp and ss.
18309 * Use the helper function to retrieve sp.
18310 */
18311- if (offset == offsetof(struct pt_regs, sp) &&
18312- regs->cs == __KERNEL_CS)
18313- return kernel_stack_pointer(regs);
18314+ if (offset == offsetof(struct pt_regs, sp)) {
18315+ unsigned long cs = regs->cs & 0xffff;
18316+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18317+ return kernel_stack_pointer(regs);
18318+ }
18319 #endif
18320 return *(unsigned long *)((unsigned long)regs + offset);
18321 }
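
The ptrace.h hunk swaps the names so that user_mode() performs the full check by default: ORing the CS RPL with the EFLAGS VM bit lets one comparison cover both RPL-3 protected mode and v8086 mode, while callers that have already excluded v8086 move to user_mode_novm(). The combined test with the real constant values:

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      0x00020000UL /* EFLAGS.VM, bit 17 */

/* One comparison covers both cases: RPL == 3, or VM set (which makes
 * the OR at least 0x20000, far above USER_RPL). */
static int demo_user_mode(unsigned long cs, unsigned long flags)
{
    return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
    printf("%d\n", demo_user_mode(0x73, 0));           /* 1: user CS */
    printf("%d\n", demo_user_mode(0x10, 0));           /* 0: kernel CS */
    printf("%d\n", demo_user_mode(0x00, X86_VM_MASK)); /* 1: v8086 */
    return 0;
}
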
18322diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18323index 9c6b890..5305f53 100644
18324--- a/arch/x86/include/asm/realmode.h
18325+++ b/arch/x86/include/asm/realmode.h
18326@@ -22,16 +22,14 @@ struct real_mode_header {
18327 #endif
18328 /* APM/BIOS reboot */
18329 u32 machine_real_restart_asm;
18330-#ifdef CONFIG_X86_64
18331 u32 machine_real_restart_seg;
18332-#endif
18333 };
18334
18335 /* This must match data at trampoline_32/64.S */
18336 struct trampoline_header {
18337 #ifdef CONFIG_X86_32
18338 u32 start;
18339- u16 gdt_pad;
18340+ u16 boot_cs;
18341 u16 gdt_limit;
18342 u32 gdt_base;
18343 #else
18344diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18345index a82c4f1..ac45053 100644
18346--- a/arch/x86/include/asm/reboot.h
18347+++ b/arch/x86/include/asm/reboot.h
18348@@ -6,13 +6,13 @@
18349 struct pt_regs;
18350
18351 struct machine_ops {
18352- void (*restart)(char *cmd);
18353- void (*halt)(void);
18354- void (*power_off)(void);
18355+ void (* __noreturn restart)(char *cmd);
18356+ void (* __noreturn halt)(void);
18357+ void (* __noreturn power_off)(void);
18358 void (*shutdown)(void);
18359 void (*crash_shutdown)(struct pt_regs *);
18360- void (*emergency_restart)(void);
18361-};
18362+ void (* __noreturn emergency_restart)(void);
18363+} __no_const;
18364
18365 extern struct machine_ops machine_ops;
18366
18367diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18368index 8f7866a..e442f20 100644
18369--- a/arch/x86/include/asm/rmwcc.h
18370+++ b/arch/x86/include/asm/rmwcc.h
18371@@ -3,7 +3,34 @@
18372
18373 #ifdef CC_HAVE_ASM_GOTO
18374
18375-#define __GEN_RMWcc(fullop, var, cc, ...) \
18376+#ifdef CONFIG_PAX_REFCOUNT
18377+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18378+do { \
18379+ asm_volatile_goto (fullop \
18380+ ";jno 0f\n" \
18381+ fullantiop \
18382+ ";int $4\n0:\n" \
18383+ _ASM_EXTABLE(0b, 0b) \
18384+ ";j" cc " %l[cc_label]" \
18385+ : : "m" (var), ## __VA_ARGS__ \
18386+ : "memory" : cc_label); \
18387+ return 0; \
18388+cc_label: \
18389+ return 1; \
18390+} while (0)
18391+#else
18392+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18393+do { \
18394+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18395+ : : "m" (var), ## __VA_ARGS__ \
18396+ : "memory" : cc_label); \
18397+ return 0; \
18398+cc_label: \
18399+ return 1; \
18400+} while (0)
18401+#endif
18402+
18403+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18404 do { \
18405 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18406 : : "m" (var), ## __VA_ARGS__ \
18407@@ -13,15 +40,46 @@ cc_label: \
18408 return 1; \
18409 } while (0)
18410
18411-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18412- __GEN_RMWcc(op " " arg0, var, cc)
18413+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18414+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18415
18416-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18417- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18418+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18419+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18420+
18421+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18422+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18423+
18424+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18425+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18426
18427 #else /* !CC_HAVE_ASM_GOTO */
18428
18429-#define __GEN_RMWcc(fullop, var, cc, ...) \
18430+#ifdef CONFIG_PAX_REFCOUNT
18431+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18432+do { \
18433+ char c; \
18434+ asm volatile (fullop \
18435+ ";jno 0f\n" \
18436+ fullantiop \
18437+ ";int $4\n0:\n" \
18438+ _ASM_EXTABLE(0b, 0b) \
18439+ "; set" cc " %1" \
18440+ : "+m" (var), "=qm" (c) \
18441+ : __VA_ARGS__ : "memory"); \
18442+ return c != 0; \
18443+} while (0)
18444+#else
18445+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18446+do { \
18447+ char c; \
18448+ asm volatile (fullop "; set" cc " %1" \
18449+ : "+m" (var), "=qm" (c) \
18450+ : __VA_ARGS__ : "memory"); \
18451+ return c != 0; \
18452+} while (0)
18453+#endif
18454+
18455+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18456 do { \
18457 char c; \
18458 asm volatile (fullop "; set" cc " %1" \
18459@@ -30,11 +88,17 @@ do { \
18460 return c != 0; \
18461 } while (0)
18462
18463-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18464- __GEN_RMWcc(op " " arg0, var, cc)
18465+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18466+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18467+
18468+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18469+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18470+
18471+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18472+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18473
18474-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18475- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18476+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18477+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18478
18479 #endif /* CC_HAVE_ASM_GOTO */
18480
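
[Both the asm-goto and setcc variants above follow the same PAX_REFCOUNT shape: run the operation, `jno` over the recovery path when the overflow flag is clear, otherwise run the anti-op to undo the update and raise `int $4`, with `_ASM_EXTABLE(0b, 0b)` resuming execution right after the trap. A minimal standalone sketch of a checked increment in that style -- not the kernel macro itself:]

	static inline void refcount_inc_checked(int *v)
	{
		asm volatile("lock incl %0\n\t"	/* op */
			     "jno 0f\n\t"	/* no signed overflow: done */
			     "lock decl %0\n\t"	/* anti-op: pin the counter */
			     "int $4\n"		/* report via overflow handler */
			     "0:"
			     : "+m" (*v));
	}
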
18481diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18482index cad82c9..2e5c5c1 100644
18483--- a/arch/x86/include/asm/rwsem.h
18484+++ b/arch/x86/include/asm/rwsem.h
18485@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18486 {
18487 asm volatile("# beginning down_read\n\t"
18488 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18489+
18490+#ifdef CONFIG_PAX_REFCOUNT
18491+ "jno 0f\n"
18492+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18493+ "int $4\n0:\n"
18494+ _ASM_EXTABLE(0b, 0b)
18495+#endif
18496+
18497 /* adds 0x00000001 */
18498 " jns 1f\n"
18499 " call call_rwsem_down_read_failed\n"
18500@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18501 "1:\n\t"
18502 " mov %1,%2\n\t"
18503 " add %3,%2\n\t"
18504+
18505+#ifdef CONFIG_PAX_REFCOUNT
18506+ "jno 0f\n"
18507+ "sub %3,%2\n"
18508+ "int $4\n0:\n"
18509+ _ASM_EXTABLE(0b, 0b)
18510+#endif
18511+
18512 " jle 2f\n\t"
18513 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18514 " jnz 1b\n\t"
18515@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18516 long tmp;
18517 asm volatile("# beginning down_write\n\t"
18518 LOCK_PREFIX " xadd %1,(%2)\n\t"
18519+
18520+#ifdef CONFIG_PAX_REFCOUNT
18521+ "jno 0f\n"
18522+ "mov %1,(%2)\n"
18523+ "int $4\n0:\n"
18524+ _ASM_EXTABLE(0b, 0b)
18525+#endif
18526+
18527 /* adds 0xffff0001, returns the old value */
18528 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18529 /* was the active mask 0 before? */
18530@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18531 long tmp;
18532 asm volatile("# beginning __up_read\n\t"
18533 LOCK_PREFIX " xadd %1,(%2)\n\t"
18534+
18535+#ifdef CONFIG_PAX_REFCOUNT
18536+ "jno 0f\n"
18537+ "mov %1,(%2)\n"
18538+ "int $4\n0:\n"
18539+ _ASM_EXTABLE(0b, 0b)
18540+#endif
18541+
18542 /* subtracts 1, returns the old value */
18543 " jns 1f\n\t"
18544 " call call_rwsem_wake\n" /* expects old value in %edx */
18545@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18546 long tmp;
18547 asm volatile("# beginning __up_write\n\t"
18548 LOCK_PREFIX " xadd %1,(%2)\n\t"
18549+
18550+#ifdef CONFIG_PAX_REFCOUNT
18551+ "jno 0f\n"
18552+ "mov %1,(%2)\n"
18553+ "int $4\n0:\n"
18554+ _ASM_EXTABLE(0b, 0b)
18555+#endif
18556+
18557 /* subtracts 0xffff0001, returns the old value */
18558 " jns 1f\n\t"
18559 " call call_rwsem_wake\n" /* expects old value in %edx */
18560@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18561 {
18562 asm volatile("# beginning __downgrade_write\n\t"
18563 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18564+
18565+#ifdef CONFIG_PAX_REFCOUNT
18566+ "jno 0f\n"
18567+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18568+ "int $4\n0:\n"
18569+ _ASM_EXTABLE(0b, 0b)
18570+#endif
18571+
18572 /*
18573 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18574 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18575@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18576 */
18577 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18578 {
18579- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18580+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18581+
18582+#ifdef CONFIG_PAX_REFCOUNT
18583+ "jno 0f\n"
18584+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18585+ "int $4\n0:\n"
18586+ _ASM_EXTABLE(0b, 0b)
18587+#endif
18588+
18589 : "+m" (sem->count)
18590 : "er" (delta));
18591 }
18592@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18593 */
18594 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18595 {
18596- return delta + xadd(&sem->count, delta);
18597+ return delta + xadd_check_overflow(&sem->count, delta);
18598 }
18599
18600 #endif /* __KERNEL__ */
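
[For the xadd-based paths above, the recovery is a plain `mov` rather than a compensating arithmetic op: `xadd` leaves the pre-add value in the source register, so writing it back restores the count before trapping (a best-effort rollback; a racing update in that window is accepted). `xadd_check_overflow()` in rwsem_atomic_update() is the same idea packaged as a C-level xadd replacement. A sketch of the shape, assuming the kernel's RWSEM_ACTIVE_WRITE_BIAS constant:]

	static void down_write_sketch(long *count)
	{
		long tmp = RWSEM_ACTIVE_WRITE_BIAS;

		asm volatile("lock xadd %0,%1\n\t" /* tmp <- old; *count += bias */
			     "jno 0f\n\t"
			     "mov %0,%1\n\t"	   /* overflow: restore old value */
			     "int $4\n"
			     "0:"
			     : "+r" (tmp), "+m" (*count));
	}
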
18601diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18602index 6f1c3a8..7744f19 100644
18603--- a/arch/x86/include/asm/segment.h
18604+++ b/arch/x86/include/asm/segment.h
18605@@ -64,10 +64,15 @@
18606 * 26 - ESPFIX small SS
18607 * 27 - per-cpu [ offset to per-cpu data area ]
18608 * 28 - stack_canary-20 [ for stack protector ]
18609- * 29 - unused
18610- * 30 - unused
18611+ * 29 - PCI BIOS CS
18612+ * 30 - PCI BIOS DS
18613 * 31 - TSS for double fault handler
18614 */
18615+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18616+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18617+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18618+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18619+
18620 #define GDT_ENTRY_TLS_MIN 6
18621 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18622
18623@@ -79,6 +84,8 @@
18624
18625 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18626
18627+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18628+
18629 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18630
18631 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18632@@ -104,6 +111,12 @@
18633 #define __KERNEL_STACK_CANARY 0
18634 #endif
18635
18636+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
18637+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
18638+
18639+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
18640+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
18641+
18642 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
18643
18644 /*
18645@@ -141,7 +154,7 @@
18646 */
18647
18648 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
18649-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
18650+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
18651
18652
18653 #else
18654@@ -165,6 +178,8 @@
18655 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
18656 #define __USER32_DS __USER_DS
18657
18658+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
18659+
18660 #define GDT_ENTRY_TSS 8 /* needs two entries */
18661 #define GDT_ENTRY_LDT 10 /* needs two entries */
18662 #define GDT_ENTRY_TLS_MIN 12
18663@@ -173,6 +188,8 @@
18664 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
18665 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
18666
18667+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
18668+
18669 /* TLS indexes for 64bit - hardcoded in arch_prctl */
18670 #define FS_TLS 0
18671 #define GS_TLS 1
18672@@ -180,12 +197,14 @@
18673 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
18674 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
18675
18676-#define GDT_ENTRIES 16
18677+#define GDT_ENTRIES 17
18678
18679 #endif
18680
18681 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
18682+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
18683 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
18684+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
18685 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
18686 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
18687 #ifndef CONFIG_PARAVIRT
18688@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
18689 {
18690 unsigned long __limit;
18691 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
18692- return __limit + 1;
18693+ return __limit;
18694 }
18695
18696 #endif /* !__ASSEMBLY__ */
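
[The SEGMENT_IS_PNP_CODE rewrite above replaces a mask that also threw away selector index bits with a comparison that strips only the two RPL bits. For reference, the x86 selector layout and the masking it implies (a sketch assuming the kernel's PNP_CS32/PNP_CS16 macros):

	 15            3   2    1 0
	+----------------+----+-----+
	|     index      | TI | RPL |
	+----------------+----+-----+
]

	#define SEL_SANS_RPL(x)	((x) & 0xFFFCU)	/* keep index+TI, drop RPL */

	static int is_pnp_code_seg(u16 sel)
	{
		return SEL_SANS_RPL(sel) == PNP_CS32 ||
		       SEL_SANS_RPL(sel) == PNP_CS16;
	}
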
18697diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
18698index 8d3120f..352b440 100644
18699--- a/arch/x86/include/asm/smap.h
18700+++ b/arch/x86/include/asm/smap.h
18701@@ -25,11 +25,40 @@
18702
18703 #include <asm/alternative-asm.h>
18704
18705+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18706+#define ASM_PAX_OPEN_USERLAND \
18707+ 661: jmp 663f; \
18708+ .pushsection .altinstr_replacement, "a" ; \
18709+ 662: pushq %rax; nop; \
18710+ .popsection ; \
18711+ .pushsection .altinstructions, "a" ; \
18712+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18713+ .popsection ; \
18714+ call __pax_open_userland; \
18715+ popq %rax; \
18716+ 663:
18717+
18718+#define ASM_PAX_CLOSE_USERLAND \
18719+ 661: jmp 663f; \
18720+ .pushsection .altinstr_replacement, "a" ; \
18721+ 662: pushq %rax; nop; \
18722+ .popsection; \
18723+ .pushsection .altinstructions, "a" ; \
18724+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18725+ .popsection; \
18726+ call __pax_close_userland; \
18727+ popq %rax; \
18728+ 663:
18729+#else
18730+#define ASM_PAX_OPEN_USERLAND
18731+#define ASM_PAX_CLOSE_USERLAND
18732+#endif
18733+
18734 #ifdef CONFIG_X86_SMAP
18735
18736 #define ASM_CLAC \
18737 661: ASM_NOP3 ; \
18738- .pushsection .altinstr_replacement, "ax" ; \
18739+ .pushsection .altinstr_replacement, "a" ; \
18740 662: __ASM_CLAC ; \
18741 .popsection ; \
18742 .pushsection .altinstructions, "a" ; \
18743@@ -38,7 +67,7 @@
18744
18745 #define ASM_STAC \
18746 661: ASM_NOP3 ; \
18747- .pushsection .altinstr_replacement, "ax" ; \
18748+ .pushsection .altinstr_replacement, "a" ; \
18749 662: __ASM_STAC ; \
18750 .popsection ; \
18751 .pushsection .altinstructions, "a" ; \
18752@@ -56,6 +85,37 @@
18753
18754 #include <asm/alternative.h>
18755
18756+#define __HAVE_ARCH_PAX_OPEN_USERLAND
18757+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
18758+
18759+extern void __pax_open_userland(void);
18760+static __always_inline unsigned long pax_open_userland(void)
18761+{
18762+
18763+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18764+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
18765+ :
18766+ : [open] "i" (__pax_open_userland)
18767+ : "memory", "rax");
18768+#endif
18769+
18770+ return 0;
18771+}
18772+
18773+extern void __pax_close_userland(void);
18774+static __always_inline unsigned long pax_close_userland(void)
18775+{
18776+
18777+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18778+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
18779+ :
18780+ : [close] "i" (__pax_close_userland)
18781+ : "memory", "rax");
18782+#endif
18783+
18784+ return 0;
18785+}
18786+
18787 #ifdef CONFIG_X86_SMAP
18788
18789 static __always_inline void clac(void)
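
[pax_open_userland()/pax_close_userland() above lean on the kernel's alternatives machinery: the instruction stream carries a 5-byte NOP by default, and on CPUs with X86_FEATURE_STRONGUDEREF boot-time patching rewrites it into a call, so unaffected machines pay nothing at runtime. The C-side pattern, reduced to its skeleton under those assumptions:]

	static __always_inline void call_if_feature(void)
	{
		asm volatile(ALTERNATIVE(ASM_NOP5,	  /* default bytes */
					 "call %P[fn]",	  /* patched in at boot */
					 X86_FEATURE_STRONGUDEREF)
			     : : [fn] "i" (__pax_open_userland)
			     : "memory", "rax");	  /* callee clobbers rax */
	}
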
18790diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
18791index 4137890..03fa172 100644
18792--- a/arch/x86/include/asm/smp.h
18793+++ b/arch/x86/include/asm/smp.h
18794@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
18795 /* cpus sharing the last level cache: */
18796 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
18797 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
18798-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
18799+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
18800
18801 static inline struct cpumask *cpu_sibling_mask(int cpu)
18802 {
18803@@ -79,7 +79,7 @@ struct smp_ops {
18804
18805 void (*send_call_func_ipi)(const struct cpumask *mask);
18806 void (*send_call_func_single_ipi)(int cpu);
18807-};
18808+} __no_const;
18809
18810 /* Globals due to paravirt */
18811 extern void set_cpu_sibling_map(int cpu);
18812@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
18813 extern int safe_smp_processor_id(void);
18814
18815 #elif defined(CONFIG_X86_64_SMP)
18816-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18817-
18818-#define stack_smp_processor_id() \
18819-({ \
18820- struct thread_info *ti; \
18821- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
18822- ti->cpu; \
18823-})
18824+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18825+#define stack_smp_processor_id() raw_smp_processor_id()
18826 #define safe_smp_processor_id() smp_processor_id()
18827
18828 #endif
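
[stack_smp_processor_id() could previously be derived by masking %rsp because thread_info lived at the base of the THREAD_SIZE-aligned stack; with thread_info moved off the stack (see the thread_info.h hunks below) the mask no longer lands on anything useful, so the macro collapses into raw_smp_processor_id(). The deleted derivation, for reference, assuming CURRENT_MASK is ~(THREAD_SIZE - 1):]

	struct thread_info *ti;
	int cpu;

	/* round %rsp down to the stack base, where thread_info used to sit */
	asm("andq %%rsp,%0" : "=r" (ti) : "0" (CURRENT_MASK));
	cpu = ti->cpu;	/* meaningless once thread_info leaves the stack */
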
18829diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
18830index bf156de..1a782ab 100644
18831--- a/arch/x86/include/asm/spinlock.h
18832+++ b/arch/x86/include/asm/spinlock.h
18833@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
18834 static inline void arch_read_lock(arch_rwlock_t *rw)
18835 {
18836 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
18837+
18838+#ifdef CONFIG_PAX_REFCOUNT
18839+ "jno 0f\n"
18840+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
18841+ "int $4\n0:\n"
18842+ _ASM_EXTABLE(0b, 0b)
18843+#endif
18844+
18845 "jns 1f\n"
18846 "call __read_lock_failed\n\t"
18847 "1:\n"
18848@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
18849 static inline void arch_write_lock(arch_rwlock_t *rw)
18850 {
18851 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
18852+
18853+#ifdef CONFIG_PAX_REFCOUNT
18854+ "jno 0f\n"
18855+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
18856+ "int $4\n0:\n"
18857+ _ASM_EXTABLE(0b, 0b)
18858+#endif
18859+
18860 "jz 1f\n"
18861 "call __write_lock_failed\n\t"
18862 "1:\n"
18863@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
18864
18865 static inline void arch_read_unlock(arch_rwlock_t *rw)
18866 {
18867- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
18868+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
18869+
18870+#ifdef CONFIG_PAX_REFCOUNT
18871+ "jno 0f\n"
18872+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
18873+ "int $4\n0:\n"
18874+ _ASM_EXTABLE(0b, 0b)
18875+#endif
18876+
18877 :"+m" (rw->lock) : : "memory");
18878 }
18879
18880 static inline void arch_write_unlock(arch_rwlock_t *rw)
18881 {
18882- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
18883+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
18884+
18885+#ifdef CONFIG_PAX_REFCOUNT
18886+ "jno 0f\n"
18887+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
18888+ "int $4\n0:\n"
18889+ _ASM_EXTABLE(0b, 0b)
18890+#endif
18891+
18892 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
18893 }
18894
18895diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
18896index 6a99859..03cb807 100644
18897--- a/arch/x86/include/asm/stackprotector.h
18898+++ b/arch/x86/include/asm/stackprotector.h
18899@@ -47,7 +47,7 @@
18900 * head_32 for boot CPU and setup_per_cpu_areas() for others.
18901 */
18902 #define GDT_STACK_CANARY_INIT \
18903- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
18904+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
18905
18906 /*
18907 * Initialize the stackprotector canary value.
18908@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
18909
18910 static inline void load_stack_canary_segment(void)
18911 {
18912-#ifdef CONFIG_X86_32
18913+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18914 asm volatile ("mov %0, %%gs" : : "r" (0));
18915 #endif
18916 }
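
[The canary segment's limit drops from 0x18 to 0x17 because x86 segment limits are inclusive: the limit field names the last valid offset, so a window of 0x18 bytes is described by limit 0x17, and the old value quietly admitted one extra byte. In miniature:]

	#define WINDOW_BYTES	0x18
	#define SEG_LIMIT	(WINDOW_BYTES - 1)	/* inclusive: 0x17 */
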
18917diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
18918index 70bbe39..4ae2bd4 100644
18919--- a/arch/x86/include/asm/stacktrace.h
18920+++ b/arch/x86/include/asm/stacktrace.h
18921@@ -11,28 +11,20 @@
18922
18923 extern int kstack_depth_to_print;
18924
18925-struct thread_info;
18926+struct task_struct;
18927 struct stacktrace_ops;
18928
18929-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
18930- unsigned long *stack,
18931- unsigned long bp,
18932- const struct stacktrace_ops *ops,
18933- void *data,
18934- unsigned long *end,
18935- int *graph);
18936+typedef unsigned long walk_stack_t(struct task_struct *task,
18937+ void *stack_start,
18938+ unsigned long *stack,
18939+ unsigned long bp,
18940+ const struct stacktrace_ops *ops,
18941+ void *data,
18942+ unsigned long *end,
18943+ int *graph);
18944
18945-extern unsigned long
18946-print_context_stack(struct thread_info *tinfo,
18947- unsigned long *stack, unsigned long bp,
18948- const struct stacktrace_ops *ops, void *data,
18949- unsigned long *end, int *graph);
18950-
18951-extern unsigned long
18952-print_context_stack_bp(struct thread_info *tinfo,
18953- unsigned long *stack, unsigned long bp,
18954- const struct stacktrace_ops *ops, void *data,
18955- unsigned long *end, int *graph);
18956+extern walk_stack_t print_context_stack;
18957+extern walk_stack_t print_context_stack_bp;
18958
18959 /* Generic stack tracer with callbacks */
18960
18961@@ -40,7 +32,7 @@ struct stacktrace_ops {
18962 void (*address)(void *data, unsigned long address, int reliable);
18963 /* On negative return stop dumping */
18964 int (*stack)(void *data, char *name);
18965- walk_stack_t walk_stack;
18966+ walk_stack_t *walk_stack;
18967 };
18968
18969 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
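
[Turning walk_stack_t into a function type (rather than a pointer-to-function type) is a small but useful C idiom: `extern walk_stack_t print_context_stack;` declares the function with its full prototype checked against the typedef, and struct members spell the pointer out explicitly as `walk_stack_t *`. A sketch with hypothetical names:]

	typedef unsigned long walker_fn(unsigned long *stack, void *data);

	extern walker_fn my_walker;	/* declares a function, prototype-checked */

	struct trace_ops {
		walker_fn *walk;	/* the pointer type is explicit here */
	};

	unsigned long my_walker(unsigned long *stack, void *data)
	{
		(void)stack; (void)data;	/* must match walker_fn exactly */
		return 0;
	}
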
18970diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
18971index d7f3b3b..3cc39f1 100644
18972--- a/arch/x86/include/asm/switch_to.h
18973+++ b/arch/x86/include/asm/switch_to.h
18974@@ -108,7 +108,7 @@ do { \
18975 "call __switch_to\n\t" \
18976 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
18977 __switch_canary \
18978- "movq %P[thread_info](%%rsi),%%r8\n\t" \
18979+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
18980 "movq %%rax,%%rdi\n\t" \
18981 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
18982 "jnz ret_from_fork\n\t" \
18983@@ -119,7 +119,7 @@ do { \
18984 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
18985 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
18986 [_tif_fork] "i" (_TIF_FORK), \
18987- [thread_info] "i" (offsetof(struct task_struct, stack)), \
18988+ [thread_info] "m" (current_tinfo), \
18989 [current_task] "m" (current_task) \
18990 __switch_canary_iparam \
18991 : "memory", "cc" __EXTRA_CLOBBER)
18992diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
18993index 3ba3de4..6c113b2 100644
18994--- a/arch/x86/include/asm/thread_info.h
18995+++ b/arch/x86/include/asm/thread_info.h
18996@@ -10,6 +10,7 @@
18997 #include <linux/compiler.h>
18998 #include <asm/page.h>
18999 #include <asm/types.h>
19000+#include <asm/percpu.h>
19001
19002 /*
19003 * low level task data that entry.S needs immediate access to
19004@@ -23,7 +24,6 @@ struct exec_domain;
19005 #include <linux/atomic.h>
19006
19007 struct thread_info {
19008- struct task_struct *task; /* main task structure */
19009 struct exec_domain *exec_domain; /* execution domain */
19010 __u32 flags; /* low level flags */
19011 __u32 status; /* thread synchronous flags */
19012@@ -32,19 +32,13 @@ struct thread_info {
19013 mm_segment_t addr_limit;
19014 struct restart_block restart_block;
19015 void __user *sysenter_return;
19016-#ifdef CONFIG_X86_32
19017- unsigned long previous_esp; /* ESP of the previous stack in
19018- case of nested (IRQ) stacks
19019- */
19020- __u8 supervisor_stack[0];
19021-#endif
19022+ unsigned long lowest_stack;
19023 unsigned int sig_on_uaccess_error:1;
19024 unsigned int uaccess_err:1; /* uaccess failed */
19025 };
19026
19027-#define INIT_THREAD_INFO(tsk) \
19028+#define INIT_THREAD_INFO \
19029 { \
19030- .task = &tsk, \
19031 .exec_domain = &default_exec_domain, \
19032 .flags = 0, \
19033 .cpu = 0, \
19034@@ -55,7 +49,7 @@ struct thread_info {
19035 }, \
19036 }
19037
19038-#define init_thread_info (init_thread_union.thread_info)
19039+#define init_thread_info (init_thread_union.stack)
19040 #define init_stack (init_thread_union.stack)
19041
19042 #else /* !__ASSEMBLY__ */
19043@@ -95,6 +89,7 @@ struct thread_info {
19044 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19045 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19046 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19047+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19048
19049 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19050 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19051@@ -118,17 +113,18 @@ struct thread_info {
19052 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19053 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19054 #define _TIF_X32 (1 << TIF_X32)
19055+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19056
19057 /* work to do in syscall_trace_enter() */
19058 #define _TIF_WORK_SYSCALL_ENTRY \
19059 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19060 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19061- _TIF_NOHZ)
19062+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19063
19064 /* work to do in syscall_trace_leave() */
19065 #define _TIF_WORK_SYSCALL_EXIT \
19066 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19067- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19068+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19069
19070 /* work to do on interrupt/exception return */
19071 #define _TIF_WORK_MASK \
19072@@ -139,7 +135,7 @@ struct thread_info {
19073 /* work to do on any return to user space */
19074 #define _TIF_ALLWORK_MASK \
19075 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19076- _TIF_NOHZ)
19077+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19078
19079 /* Only used for 64 bit */
19080 #define _TIF_DO_NOTIFY_MASK \
19081@@ -153,45 +149,40 @@ struct thread_info {
19082 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
19083 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19084
19085-#ifdef CONFIG_X86_32
19086-
19087-#define STACK_WARN (THREAD_SIZE/8)
19088-/*
19089- * macros/functions for gaining access to the thread information structure
19090- *
19091- * preempt_count needs to be 1 initially, until the scheduler is functional.
19092- */
19093-#ifndef __ASSEMBLY__
19094-
19095-
19096-/* how to get the current stack pointer from C */
19097-register unsigned long current_stack_pointer asm("esp") __used;
19098-
19099-/* how to get the thread information struct from C */
19100-static inline struct thread_info *current_thread_info(void)
19101-{
19102- return (struct thread_info *)
19103- (current_stack_pointer & ~(THREAD_SIZE - 1));
19104-}
19105-
19106-#else /* !__ASSEMBLY__ */
19107-
19108+#ifdef __ASSEMBLY__
19109 /* how to get the thread information struct from ASM */
19110 #define GET_THREAD_INFO(reg) \
19111- movl $-THREAD_SIZE, reg; \
19112- andl %esp, reg
19113+ mov PER_CPU_VAR(current_tinfo), reg
19114
19115 /* use this one if reg already contains %esp */
19116-#define GET_THREAD_INFO_WITH_ESP(reg) \
19117- andl $-THREAD_SIZE, reg
19118+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
19119+#else
19120+/* how to get the thread information struct from C */
19121+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19122+
19123+static __always_inline struct thread_info *current_thread_info(void)
19124+{
19125+ return this_cpu_read_stable(current_tinfo);
19126+}
19127+#endif
19128+
19129+#ifdef CONFIG_X86_32
19130+
19131+#define STACK_WARN (THREAD_SIZE/8)
19132+/*
19133+ * macros/functions for gaining access to the thread information structure
19134+ *
19135+ * preempt_count needs to be 1 initially, until the scheduler is functional.
19136+ */
19137+#ifndef __ASSEMBLY__
19138+
19139+/* how to get the current stack pointer from C */
19140+register unsigned long current_stack_pointer asm("esp") __used;
19141
19142 #endif
19143
19144 #else /* X86_32 */
19145
19146-#include <asm/percpu.h>
19147-#define KERNEL_STACK_OFFSET (5*8)
19148-
19149 /*
19150 * macros/functions for gaining access to the thread information structure
19151 * preempt_count needs to be 1 initially, until the scheduler is functional.
19152@@ -199,27 +190,8 @@ static inline struct thread_info *current_thread_info(void)
19153 #ifndef __ASSEMBLY__
19154 DECLARE_PER_CPU(unsigned long, kernel_stack);
19155
19156-static inline struct thread_info *current_thread_info(void)
19157-{
19158- struct thread_info *ti;
19159- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19160- KERNEL_STACK_OFFSET - THREAD_SIZE);
19161- return ti;
19162-}
19163-
19164-#else /* !__ASSEMBLY__ */
19165-
19166-/* how to get the thread information struct from ASM */
19167-#define GET_THREAD_INFO(reg) \
19168- movq PER_CPU_VAR(kernel_stack),reg ; \
19169- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
19170-
19171-/*
19172- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19173- * a certain register (to be used in assembler memory operands).
19174- */
19175-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19176-
19177+/* how to get the current stack pointer from C */
19178+register unsigned long current_stack_pointer asm("rsp") __used;
19179 #endif
19180
19181 #endif /* !X86_32 */
19182@@ -278,5 +250,12 @@ static inline bool is_ia32_task(void)
19183 extern void arch_task_cache_init(void);
19184 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19185 extern void arch_release_task_struct(struct task_struct *tsk);
19186+
19187+#define __HAVE_THREAD_FUNCTIONS
19188+#define task_thread_info(task) (&(task)->tinfo)
19189+#define task_stack_page(task) ((task)->stack)
19190+#define setup_thread_stack(p, org) do {} while (0)
19191+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19192+
19193 #endif
19194 #endif /* _ASM_X86_THREAD_INFO_H */
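
[The net effect of this hunk set: thread_info no longer lives at the bottom of the kernel stack but travels with the task (task_thread_info(task) == &(task)->tinfo), with the current pointer cached per-CPU. Besides reducing GET_THREAD_INFO to one %gs-relative load, this removes a classic overflow target; a sketch of the old exposure, layout assumed:]

	/*
	 *  old layout (one THREAD_SIZE allocation):
	 *
	 *    low | thread_info (.addr_limit, .flags, ...) |
	 *        | kernel stack, growing down toward it   | high
	 *
	 * A linear stack overflow could run into thread_info and, e.g.,
	 * overwrite .addr_limit with -1UL -- the effect of set_fs(KERNEL_DS)
	 * -- disabling access_ok() range checks for the task. Off-stack
	 * thread_info is simply not reachable this way.
	 */
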
19195diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19196index e6d90ba..f81f114 100644
19197--- a/arch/x86/include/asm/tlbflush.h
19198+++ b/arch/x86/include/asm/tlbflush.h
19199@@ -17,18 +17,44 @@
19200
19201 static inline void __native_flush_tlb(void)
19202 {
19203+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19204+ u64 descriptor[2];
19205+
19206+ descriptor[0] = PCID_KERNEL;
19207+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
19208+ return;
19209+ }
19210+
19211+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19212+ if (static_cpu_has(X86_FEATURE_PCID)) {
19213+ unsigned int cpu = raw_get_cpu();
19214+
19215+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19216+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19217+ raw_put_cpu_no_resched();
19218+ return;
19219+ }
19220+#endif
19221+
19222 native_write_cr3(native_read_cr3());
19223 }
19224
19225 static inline void __native_flush_tlb_global_irq_disabled(void)
19226 {
19227- unsigned long cr4;
19228+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19229+ u64 descriptor[2];
19230
19231- cr4 = native_read_cr4();
19232- /* clear PGE */
19233- native_write_cr4(cr4 & ~X86_CR4_PGE);
19234- /* write old PGE again and flush TLBs */
19235- native_write_cr4(cr4);
19236+ descriptor[0] = PCID_KERNEL;
19237+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19238+ } else {
19239+ unsigned long cr4;
19240+
19241+ cr4 = native_read_cr4();
19242+ /* clear PGE */
19243+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19244+ /* write old PGE again and flush TLBs */
19245+ native_write_cr4(cr4);
19246+ }
19247 }
19248
19249 static inline void __native_flush_tlb_global(void)
19250@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19251
19252 static inline void __native_flush_tlb_single(unsigned long addr)
19253 {
19254+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19255+ u64 descriptor[2];
19256+
19257+ descriptor[0] = PCID_KERNEL;
19258+ descriptor[1] = addr;
19259+
19260+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19261+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19262+ if (addr < TASK_SIZE_MAX)
19263+ descriptor[1] += pax_user_shadow_base;
19264+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19265+ }
19266+
19267+ descriptor[0] = PCID_USER;
19268+ descriptor[1] = addr;
19269+#endif
19270+
19271+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19272+ return;
19273+ }
19274+
19275+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19276+ if (static_cpu_has(X86_FEATURE_PCID)) {
19277+ unsigned int cpu = raw_get_cpu();
19278+
19279+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19280+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19281+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19282+ raw_put_cpu_no_resched();
19283+
19284+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19285+ addr += pax_user_shadow_base;
19286+ }
19287+#endif
19288+
19289 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19290 }
19291
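
[INVPCID takes a register operand selecting the invalidation type (0 = individual address, 1 = single PCID, 2 = all contexts including globals, 3 = all contexts excluding globals) and a 16-byte in-memory descriptor of { PCID, linear address } -- exactly what the `u64 descriptor[2]` above builds. A standalone sketch, assuming rdx/rax operands as the patch's __ASM_INVPCID byte sequence does:]

	static inline void invpcid_addr(unsigned long pcid, unsigned long addr)
	{
		u64 desc[2] = { pcid, addr };

		/* invpcid (%rdx),%rax -- .byte form for older assemblers */
		asm volatile(".byte 0x66,0x0f,0x38,0x82,0x02"
			     : : "d" (desc), "a" (0UL)	/* type 0: one address */
			     : "memory");
	}
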
19292diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19293index 8ec57c0..451bcfc 100644
19294--- a/arch/x86/include/asm/uaccess.h
19295+++ b/arch/x86/include/asm/uaccess.h
19296@@ -7,6 +7,7 @@
19297 #include <linux/compiler.h>
19298 #include <linux/thread_info.h>
19299 #include <linux/string.h>
19300+#include <linux/spinlock.h>
19301 #include <asm/asm.h>
19302 #include <asm/page.h>
19303 #include <asm/smap.h>
19304@@ -29,7 +30,12 @@
19305
19306 #define get_ds() (KERNEL_DS)
19307 #define get_fs() (current_thread_info()->addr_limit)
19308+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19309+void __set_fs(mm_segment_t x);
19310+void set_fs(mm_segment_t x);
19311+#else
19312 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19313+#endif
19314
19315 #define segment_eq(a, b) ((a).seg == (b).seg)
19316
19317@@ -77,8 +83,34 @@
19318 * checks that the pointer is in the user space range - after calling
19319 * this function, memory access functions may still return -EFAULT.
19320 */
19321-#define access_ok(type, addr, size) \
19322- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
19323+extern int _cond_resched(void);
19324+#define access_ok_noprefault(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
19325+#define access_ok(type, addr, size) \
19326+({ \
19327+ long __size = size; \
19328+ unsigned long __addr = (unsigned long)addr; \
19329+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19330+ unsigned long __end_ao = __addr + __size - 1; \
19331+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19332+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19333+ while(__addr_ao <= __end_ao) { \
19334+ char __c_ao; \
19335+ __addr_ao += PAGE_SIZE; \
19336+ if (__size > PAGE_SIZE) \
19337+ _cond_resched(); \
19338+ if (__get_user(__c_ao, (char __user *)__addr)) \
19339+ break; \
19340+ if (type != VERIFY_WRITE) { \
19341+ __addr = __addr_ao; \
19342+ continue; \
19343+ } \
19344+ if (__put_user(__c_ao, (char __user *)__addr)) \
19345+ break; \
19346+ __addr = __addr_ao; \
19347+ } \
19348+ } \
19349+ __ret_ao; \
19350+})
19351
19352 /*
19353 * The exception table consists of pairs of addresses relative to the
19354@@ -168,10 +200,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19355 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19356 __chk_user_ptr(ptr); \
19357 might_fault(); \
19358+ pax_open_userland(); \
19359 asm volatile("call __get_user_%P3" \
19360 : "=a" (__ret_gu), "=r" (__val_gu) \
19361 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19362 (x) = (__typeof__(*(ptr))) __val_gu; \
19363+ pax_close_userland(); \
19364 __ret_gu; \
19365 })
19366
19367@@ -179,13 +213,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19368 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19369 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19370
19371-
19372+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19373+#define __copyuser_seg "gs;"
19374+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19375+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19376+#else
19377+#define __copyuser_seg
19378+#define __COPYUSER_SET_ES
19379+#define __COPYUSER_RESTORE_ES
19380+#endif
19381
19382 #ifdef CONFIG_X86_32
19383 #define __put_user_asm_u64(x, addr, err, errret) \
19384 asm volatile(ASM_STAC "\n" \
19385- "1: movl %%eax,0(%2)\n" \
19386- "2: movl %%edx,4(%2)\n" \
19387+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19388+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19389 "3: " ASM_CLAC "\n" \
19390 ".section .fixup,\"ax\"\n" \
19391 "4: movl %3,%0\n" \
19392@@ -198,8 +240,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19393
19394 #define __put_user_asm_ex_u64(x, addr) \
19395 asm volatile(ASM_STAC "\n" \
19396- "1: movl %%eax,0(%1)\n" \
19397- "2: movl %%edx,4(%1)\n" \
19398+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19399+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19400 "3: " ASM_CLAC "\n" \
19401 _ASM_EXTABLE_EX(1b, 2b) \
19402 _ASM_EXTABLE_EX(2b, 3b) \
19403@@ -249,7 +291,8 @@ extern void __put_user_8(void);
19404 __typeof__(*(ptr)) __pu_val; \
19405 __chk_user_ptr(ptr); \
19406 might_fault(); \
19407- __pu_val = x; \
19408+ __pu_val = (x); \
19409+ pax_open_userland(); \
19410 switch (sizeof(*(ptr))) { \
19411 case 1: \
19412 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19413@@ -267,6 +310,7 @@ extern void __put_user_8(void);
19414 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19415 break; \
19416 } \
19417+ pax_close_userland(); \
19418 __ret_pu; \
19419 })
19420
19421@@ -347,8 +391,10 @@ do { \
19422 } while (0)
19423
19424 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19425+do { \
19426+ pax_open_userland(); \
19427 asm volatile(ASM_STAC "\n" \
19428- "1: mov"itype" %2,%"rtype"1\n" \
19429+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19430 "2: " ASM_CLAC "\n" \
19431 ".section .fixup,\"ax\"\n" \
19432 "3: mov %3,%0\n" \
19433@@ -356,8 +402,10 @@ do { \
19434 " jmp 2b\n" \
19435 ".previous\n" \
19436 _ASM_EXTABLE(1b, 3b) \
19437- : "=r" (err), ltype(x) \
19438- : "m" (__m(addr)), "i" (errret), "0" (err))
19439+ : "=r" (err), ltype (x) \
19440+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19441+ pax_close_userland(); \
19442+} while (0)
19443
19444 #define __get_user_size_ex(x, ptr, size) \
19445 do { \
19446@@ -381,7 +429,7 @@ do { \
19447 } while (0)
19448
19449 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19450- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19451+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19452 "2:\n" \
19453 _ASM_EXTABLE_EX(1b, 2b) \
19454 : ltype(x) : "m" (__m(addr)))
19455@@ -398,13 +446,24 @@ do { \
19456 int __gu_err; \
19457 unsigned long __gu_val; \
19458 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19459- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19460+ (x) = (__typeof__(*(ptr)))__gu_val; \
19461 __gu_err; \
19462 })
19463
19464 /* FIXME: this hack is definitely wrong -AK */
19465 struct __large_struct { unsigned long buf[100]; };
19466-#define __m(x) (*(struct __large_struct __user *)(x))
19467+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19468+#define ____m(x) \
19469+({ \
19470+ unsigned long ____x = (unsigned long)(x); \
19471+ if (____x < pax_user_shadow_base) \
19472+ ____x += pax_user_shadow_base; \
19473+ (typeof(x))____x; \
19474+})
19475+#else
19476+#define ____m(x) (x)
19477+#endif
19478+#define __m(x) (*(struct __large_struct __user *)____m(x))
19479
19480 /*
19481 * Tell gcc we read from memory instead of writing: this is because
19482@@ -412,8 +471,10 @@ struct __large_struct { unsigned long buf[100]; };
19483 * aliasing issues.
19484 */
19485 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19486+do { \
19487+ pax_open_userland(); \
19488 asm volatile(ASM_STAC "\n" \
19489- "1: mov"itype" %"rtype"1,%2\n" \
19490+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19491 "2: " ASM_CLAC "\n" \
19492 ".section .fixup,\"ax\"\n" \
19493 "3: mov %3,%0\n" \
19494@@ -421,10 +482,12 @@ struct __large_struct { unsigned long buf[100]; };
19495 ".previous\n" \
19496 _ASM_EXTABLE(1b, 3b) \
19497 : "=r"(err) \
19498- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19499+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19500+ pax_close_userland(); \
19501+} while (0)
19502
19503 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19504- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19505+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19506 "2:\n" \
19507 _ASM_EXTABLE_EX(1b, 2b) \
19508 : : ltype(x), "m" (__m(addr)))
19509@@ -434,11 +497,13 @@ struct __large_struct { unsigned long buf[100]; };
19510 */
19511 #define uaccess_try do { \
19512 current_thread_info()->uaccess_err = 0; \
19513+ pax_open_userland(); \
19514 stac(); \
19515 barrier();
19516
19517 #define uaccess_catch(err) \
19518 clac(); \
19519+ pax_close_userland(); \
19520 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19521 } while (0)
19522
19523@@ -463,8 +528,12 @@ struct __large_struct { unsigned long buf[100]; };
19524 * On error, the variable @x is set to zero.
19525 */
19526
19527+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19528+#define __get_user(x, ptr) get_user((x), (ptr))
19529+#else
19530 #define __get_user(x, ptr) \
19531 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19532+#endif
19533
19534 /**
19535 * __put_user: - Write a simple value into user space, with less checking.
19536@@ -486,8 +555,12 @@ struct __large_struct { unsigned long buf[100]; };
19537 * Returns zero on success, or -EFAULT on error.
19538 */
19539
19540+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19541+#define __put_user(x, ptr) put_user((x), (ptr))
19542+#else
19543 #define __put_user(x, ptr) \
19544 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19545+#endif
19546
19547 #define __get_user_unaligned __get_user
19548 #define __put_user_unaligned __put_user
19549@@ -505,7 +578,7 @@ struct __large_struct { unsigned long buf[100]; };
19550 #define get_user_ex(x, ptr) do { \
19551 unsigned long __gue_val; \
19552 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19553- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19554+ (x) = (__typeof__(*(ptr)))__gue_val; \
19555 } while (0)
19556
19557 #define put_user_try uaccess_try
19558@@ -536,17 +609,6 @@ extern struct movsl_mask {
19559
19560 #define ARCH_HAS_NOCACHE_UACCESS 1
19561
19562-#ifdef CONFIG_X86_32
19563-# include <asm/uaccess_32.h>
19564-#else
19565-# include <asm/uaccess_64.h>
19566-#endif
19567-
19568-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19569- unsigned n);
19570-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19571- unsigned n);
19572-
19573 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19574 # define copy_user_diag __compiletime_error
19575 #else
19576@@ -556,7 +618,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19577 extern void copy_user_diag("copy_from_user() buffer size is too small")
19578 copy_from_user_overflow(void);
19579 extern void copy_user_diag("copy_to_user() buffer size is too small")
19580-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19581+copy_to_user_overflow(void);
19582
19583 #undef copy_user_diag
19584
19585@@ -569,7 +631,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19586
19587 extern void
19588 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19589-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19590+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19591 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19592
19593 #else
19594@@ -584,10 +646,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19595
19596 #endif
19597
19598+#ifdef CONFIG_X86_32
19599+# include <asm/uaccess_32.h>
19600+#else
19601+# include <asm/uaccess_64.h>
19602+#endif
19603+
19604 static inline unsigned long __must_check
19605 copy_from_user(void *to, const void __user *from, unsigned long n)
19606 {
19607- int sz = __compiletime_object_size(to);
19608+ size_t sz = __compiletime_object_size(to);
19609
19610 might_fault();
19611
19612@@ -609,12 +677,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19613 * case, and do only runtime checking for non-constant sizes.
19614 */
19615
19616- if (likely(sz < 0 || sz >= n))
19617- n = _copy_from_user(to, from, n);
19618- else if(__builtin_constant_p(n))
19619- copy_from_user_overflow();
19620- else
19621- __copy_from_user_overflow(sz, n);
19622+ if (likely(sz != (size_t)-1 && sz < n)) {
19623+ if(__builtin_constant_p(n))
19624+ copy_from_user_overflow();
19625+ else
19626+ __copy_from_user_overflow(sz, n);
19627+	} else if (access_ok(VERIFY_READ, from, n))
19628+ n = __copy_from_user(to, from, n);
19629+ else if ((long)n > 0)
19630+ memset(to, 0, n);
19631
19632 return n;
19633 }
19634@@ -622,17 +693,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19635 static inline unsigned long __must_check
19636 copy_to_user(void __user *to, const void *from, unsigned long n)
19637 {
19638- int sz = __compiletime_object_size(from);
19639+ size_t sz = __compiletime_object_size(from);
19640
19641 might_fault();
19642
19643 /* See the comment in copy_from_user() above. */
19644- if (likely(sz < 0 || sz >= n))
19645- n = _copy_to_user(to, from, n);
19646- else if(__builtin_constant_p(n))
19647- copy_to_user_overflow();
19648- else
19649- __copy_to_user_overflow(sz, n);
19650+ if (likely(sz != (size_t)-1 && sz < n)) {
19651+ if(__builtin_constant_p(n))
19652+ copy_to_user_overflow();
19653+ else
19654+ __copy_to_user_overflow(sz, n);
19655+ } else if (access_ok(VERIFY_WRITE, to, n))
19656+ n = __copy_to_user(to, from, n);
19657
19658 return n;
19659 }
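
[The reworked access_ok() above does more than range-check: when the range crosses a page boundary it walks the pages, reading (and for VERIFY_WRITE, rewriting) one byte per page so any fault is taken eagerly here rather than deep in a fast path, and calling _cond_resched() on large ranges to stay preemptible. A simplified sketch of the read-side prefault, assuming n > 0 and no address-space wraparound:]

	static int prefault_readable(const char __user *p, unsigned long n)
	{
		unsigned long addr = (unsigned long)p;
		unsigned long end = (unsigned long)p + n - 1;
		char c;

		do {
			if (__get_user(c, (const char __user *)addr))
				return 0;			/* unreadable page */
			addr = (addr & PAGE_MASK) + PAGE_SIZE;	/* next page */
		} while (addr <= end);

		return 1;
	}
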
19660diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19661index 3c03a5d..1071638 100644
19662--- a/arch/x86/include/asm/uaccess_32.h
19663+++ b/arch/x86/include/asm/uaccess_32.h
19664@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19665 static __always_inline unsigned long __must_check
19666 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
19667 {
19668+ if ((long)n < 0)
19669+ return n;
19670+
19671+ check_object_size(from, n, true);
19672+
19673 if (__builtin_constant_p(n)) {
19674 unsigned long ret;
19675
19676@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
19677 __copy_to_user(void __user *to, const void *from, unsigned long n)
19678 {
19679 might_fault();
19680+
19681 return __copy_to_user_inatomic(to, from, n);
19682 }
19683
19684 static __always_inline unsigned long
19685 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
19686 {
19687+ if ((long)n < 0)
19688+ return n;
19689+
19690 /* Avoid zeroing the tail if the copy fails..
19691 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
19692 * but as the zeroing behaviour is only significant when n is not
19693@@ -137,6 +146,12 @@ static __always_inline unsigned long
19694 __copy_from_user(void *to, const void __user *from, unsigned long n)
19695 {
19696 might_fault();
19697+
19698+ if ((long)n < 0)
19699+ return n;
19700+
19701+ check_object_size(to, n, false);
19702+
19703 if (__builtin_constant_p(n)) {
19704 unsigned long ret;
19705
19706@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
19707 const void __user *from, unsigned long n)
19708 {
19709 might_fault();
19710+
19711+ if ((long)n < 0)
19712+ return n;
19713+
19714 if (__builtin_constant_p(n)) {
19715 unsigned long ret;
19716
19717@@ -181,7 +200,10 @@ static __always_inline unsigned long
19718 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
19719 unsigned long n)
19720 {
19721- return __copy_from_user_ll_nocache_nozero(to, from, n);
19722+ if ((long)n < 0)
19723+ return n;
19724+
19725+ return __copy_from_user_ll_nocache_nozero(to, from, n);
19726 }
19727
19728 #endif /* _ASM_X86_UACCESS_32_H */
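
[The recurring `(long)n < 0` guard above is cheap insurance against signedness bugs upstream: a negative length arriving as unsigned long is an enormous copy, and since these primitives return the number of bytes *not* copied, returning n unchanged reports total failure without touching memory. In miniature:]

	static unsigned long copy_guarded(void *to, const void __user *from,
					  unsigned long n)
	{
		if ((long)n < 0)	/* length >= 2^63: certainly a bug */
			return n;	/* "n bytes left uncopied": nothing done */

		return __copy_from_user(to, from, n);
	}
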
19729diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
19730index 190413d..206c200 100644
19731--- a/arch/x86/include/asm/uaccess_64.h
19732+++ b/arch/x86/include/asm/uaccess_64.h
19733@@ -10,6 +10,9 @@
19734 #include <asm/alternative.h>
19735 #include <asm/cpufeature.h>
19736 #include <asm/page.h>
19737+#include <asm/pgtable.h>
19738+
19739+#define set_fs(x) (current_thread_info()->addr_limit = (x))
19740
19741 /*
19742 * Copy To/From Userspace
19743@@ -17,14 +20,14 @@
19744
19745 /* Handles exceptions in both to and from, but doesn't do access_ok */
19746 __must_check unsigned long
19747-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
19748+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
19749 __must_check unsigned long
19750-copy_user_generic_string(void *to, const void *from, unsigned len);
19751+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
19752 __must_check unsigned long
19753-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
19754+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
19755
19756 static __always_inline __must_check unsigned long
19757-copy_user_generic(void *to, const void *from, unsigned len)
19758+copy_user_generic(void *to, const void *from, unsigned long len)
19759 {
19760 unsigned ret;
19761
19762@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
19763 }
19764
19765 __must_check unsigned long
19766-copy_in_user(void __user *to, const void __user *from, unsigned len);
19767+copy_in_user(void __user *to, const void __user *from, unsigned long len);
19768
19769 static __always_inline __must_check
19770-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
19771+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
19772 {
19773- int ret = 0;
19774+ size_t sz = __compiletime_object_size(dst);
19775+ unsigned ret = 0;
19776+
19777+ if (size > INT_MAX)
19778+ return size;
19779+
19780+ check_object_size(dst, size, false);
19781+
19782+#ifdef CONFIG_PAX_MEMORY_UDEREF
19783+ if (!access_ok_noprefault(VERIFY_READ, src, size))
19784+ return size;
19785+#endif
19786+
19787+ if (unlikely(sz != (size_t)-1 && sz < size)) {
19788+ if(__builtin_constant_p(size))
19789+ copy_from_user_overflow();
19790+ else
19791+ __copy_from_user_overflow(sz, size);
19792+ return size;
19793+ }
19794
19795 if (!__builtin_constant_p(size))
19796- return copy_user_generic(dst, (__force void *)src, size);
19797+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19798 switch (size) {
19799- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
19800+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
19801 ret, "b", "b", "=q", 1);
19802 return ret;
19803- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
19804+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
19805 ret, "w", "w", "=r", 2);
19806 return ret;
19807- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
19808+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
19809 ret, "l", "k", "=r", 4);
19810 return ret;
19811- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
19812+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19813 ret, "q", "", "=r", 8);
19814 return ret;
19815 case 10:
19816- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19817+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19818 ret, "q", "", "=r", 10);
19819 if (unlikely(ret))
19820 return ret;
19821 __get_user_asm(*(u16 *)(8 + (char *)dst),
19822- (u16 __user *)(8 + (char __user *)src),
19823+ (const u16 __user *)(8 + (const char __user *)src),
19824 ret, "w", "w", "=r", 2);
19825 return ret;
19826 case 16:
19827- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19828+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19829 ret, "q", "", "=r", 16);
19830 if (unlikely(ret))
19831 return ret;
19832 __get_user_asm(*(u64 *)(8 + (char *)dst),
19833- (u64 __user *)(8 + (char __user *)src),
19834+ (const u64 __user *)(8 + (const char __user *)src),
19835 ret, "q", "", "=r", 8);
19836 return ret;
19837 default:
19838- return copy_user_generic(dst, (__force void *)src, size);
19839+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19840 }
19841 }
19842
19843 static __always_inline __must_check
19844-int __copy_from_user(void *dst, const void __user *src, unsigned size)
19845+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
19846 {
19847 might_fault();
19848 return __copy_from_user_nocheck(dst, src, size);
19849 }
19850
19851 static __always_inline __must_check
19852-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
19853+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
19854 {
19855- int ret = 0;
19856+ size_t sz = __compiletime_object_size(src);
19857+ unsigned ret = 0;
19858+
19859+ if (size > INT_MAX)
19860+ return size;
19861+
19862+ check_object_size(src, size, true);
19863+
19864+#ifdef CONFIG_PAX_MEMORY_UDEREF
19865+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
19866+ return size;
19867+#endif
19868+
19869+ if (unlikely(sz != (size_t)-1 && sz < size)) {
19870+ if(__builtin_constant_p(size))
19871+ copy_to_user_overflow();
19872+ else
19873+ __copy_to_user_overflow(sz, size);
19874+ return size;
19875+ }
19876
19877 if (!__builtin_constant_p(size))
19878- return copy_user_generic((__force void *)dst, src, size);
19879+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19880 switch (size) {
19881- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
19882+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
19883 ret, "b", "b", "iq", 1);
19884 return ret;
19885- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
19886+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
19887 ret, "w", "w", "ir", 2);
19888 return ret;
19889- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
19890+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
19891 ret, "l", "k", "ir", 4);
19892 return ret;
19893- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
19894+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19895 ret, "q", "", "er", 8);
19896 return ret;
19897 case 10:
19898- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19899+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19900 ret, "q", "", "er", 10);
19901 if (unlikely(ret))
19902 return ret;
19903 asm("":::"memory");
19904- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
19905+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
19906 ret, "w", "w", "ir", 2);
19907 return ret;
19908 case 16:
19909- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19910+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19911 ret, "q", "", "er", 16);
19912 if (unlikely(ret))
19913 return ret;
19914 asm("":::"memory");
19915- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
19916+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
19917 ret, "q", "", "er", 8);
19918 return ret;
19919 default:
19920- return copy_user_generic((__force void *)dst, src, size);
19921+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19922 }
19923 }
19924
19925 static __always_inline __must_check
19926-int __copy_to_user(void __user *dst, const void *src, unsigned size)
19927+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
19928 {
19929 might_fault();
19930 return __copy_to_user_nocheck(dst, src, size);
19931 }
19932
19933 static __always_inline __must_check
19934-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19935+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19936 {
19937- int ret = 0;
19938+ unsigned ret = 0;
19939
19940 might_fault();
19941+
19942+ if (size > INT_MAX)
19943+ return size;
19944+
19945+#ifdef CONFIG_PAX_MEMORY_UDEREF
19946+ if (!access_ok_noprefault(VERIFY_READ, src, size))
19947+ return size;
19948+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
19949+ return size;
19950+#endif
19951+
19952 if (!__builtin_constant_p(size))
19953- return copy_user_generic((__force void *)dst,
19954- (__force void *)src, size);
19955+ return copy_user_generic((__force_kernel void *)____m(dst),
19956+ (__force_kernel const void *)____m(src), size);
19957 switch (size) {
19958 case 1: {
19959 u8 tmp;
19960- __get_user_asm(tmp, (u8 __user *)src,
19961+ __get_user_asm(tmp, (const u8 __user *)src,
19962 ret, "b", "b", "=q", 1);
19963 if (likely(!ret))
19964 __put_user_asm(tmp, (u8 __user *)dst,
19965@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19966 }
19967 case 2: {
19968 u16 tmp;
19969- __get_user_asm(tmp, (u16 __user *)src,
19970+ __get_user_asm(tmp, (const u16 __user *)src,
19971 ret, "w", "w", "=r", 2);
19972 if (likely(!ret))
19973 __put_user_asm(tmp, (u16 __user *)dst,
19974@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19975
19976 case 4: {
19977 u32 tmp;
19978- __get_user_asm(tmp, (u32 __user *)src,
19979+ __get_user_asm(tmp, (const u32 __user *)src,
19980 ret, "l", "k", "=r", 4);
19981 if (likely(!ret))
19982 __put_user_asm(tmp, (u32 __user *)dst,
19983@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19984 }
19985 case 8: {
19986 u64 tmp;
19987- __get_user_asm(tmp, (u64 __user *)src,
19988+ __get_user_asm(tmp, (const u64 __user *)src,
19989 ret, "q", "", "=r", 8);
19990 if (likely(!ret))
19991 __put_user_asm(tmp, (u64 __user *)dst,
19992@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19993 return ret;
19994 }
19995 default:
19996- return copy_user_generic((__force void *)dst,
19997- (__force void *)src, size);
19998+ return copy_user_generic((__force_kernel void *)____m(dst),
19999+ (__force_kernel const void *)____m(src), size);
20000 }
20001 }
20002
20003-static __must_check __always_inline int
20004-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20005+static __must_check __always_inline unsigned long
20006+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20007 {
20008- return __copy_from_user_nocheck(dst, (__force const void *)src, size);
20009+ return __copy_from_user_nocheck(dst, src, size);
20010 }
20011
20012-static __must_check __always_inline int
20013-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20014+static __must_check __always_inline unsigned long
20015+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20016 {
20017- return __copy_to_user_nocheck((__force void *)dst, src, size);
20018+ return __copy_to_user_nocheck(dst, src, size);
20019 }
20020
20021-extern long __copy_user_nocache(void *dst, const void __user *src,
20022- unsigned size, int zerorest);
20023+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20024+ unsigned long size, int zerorest);
20025
20026-static inline int
20027-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20028+static inline unsigned long
20029+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20030 {
20031 might_fault();
20032+
20033+ if (size > INT_MAX)
20034+ return size;
20035+
20036+#ifdef CONFIG_PAX_MEMORY_UDEREF
20037+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20038+ return size;
20039+#endif
20040+
20041 return __copy_user_nocache(dst, src, size, 1);
20042 }
20043
20044-static inline int
20045+static inline unsigned long
20046 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20047- unsigned size)
20048+ unsigned long size)
20049 {
20050+ if (size > INT_MAX)
20051+ return size;
20052+
20053+#ifdef CONFIG_PAX_MEMORY_UDEREF
20054+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20055+ return size;
20056+#endif
20057+
20058 return __copy_user_nocache(dst, src, size, 0);
20059 }
20060
20061 unsigned long
20062-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20063+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20064
20065 #endif /* _ASM_X86_UACCESS_64_H */
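
[Two gatekeepers recur through these 64-bit helpers: a hard INT_MAX cap on lengths (anything larger is reported back as entirely uncopied) and, under UDEREF, rebasing of user pointers into the shadow mapping via ____m() before the kernel touches them. The rebase in isolation -- a sketch; pax_user_shadow_base is the patch's own variable:]

	static inline const void *shadow_rebase(const void __user *p)
	{
		unsigned long x = (unsigned long)p;

		if (x < pax_user_shadow_base)	/* still a raw user address */
			x += pax_user_shadow_base;
		return (const void *)x;
	}
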
20066diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20067index 5b238981..77fdd78 100644
20068--- a/arch/x86/include/asm/word-at-a-time.h
20069+++ b/arch/x86/include/asm/word-at-a-time.h
20070@@ -11,7 +11,7 @@
20071 * and shift, for example.
20072 */
20073 struct word_at_a_time {
20074- const unsigned long one_bits, high_bits;
20075+ unsigned long one_bits, high_bits;
20076 };
20077
20078 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20079diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20080index 0f1be11..f7542bf 100644
20081--- a/arch/x86/include/asm/x86_init.h
20082+++ b/arch/x86/include/asm/x86_init.h
20083@@ -129,7 +129,7 @@ struct x86_init_ops {
20084 struct x86_init_timers timers;
20085 struct x86_init_iommu iommu;
20086 struct x86_init_pci pci;
20087-};
20088+} __no_const;
20089
20090 /**
20091 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20092@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20093 void (*setup_percpu_clockev)(void);
20094 void (*early_percpu_clock_init)(void);
20095 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20096-};
20097+} __no_const;
20098
20099 struct timespec;
20100
20101@@ -168,7 +168,7 @@ struct x86_platform_ops {
20102 void (*save_sched_clock_state)(void);
20103 void (*restore_sched_clock_state)(void);
20104 void (*apic_post_init)(void);
20105-};
20106+} __no_const;
20107
20108 struct pci_dev;
20109 struct msi_msg;
20110@@ -185,7 +185,7 @@ struct x86_msi_ops {
20111 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20112 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
20113 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
20114-};
20115+} __no_const;
20116
20117 struct IO_APIC_route_entry;
20118 struct io_apic_irq_attr;
20119@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
20120 unsigned int destination, int vector,
20121 struct io_apic_irq_attr *attr);
20122 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20123-};
20124+} __no_const;
20125
20126 extern struct x86_init_ops x86_init;
20127 extern struct x86_cpuinit_ops x86_cpuinit;
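The __no_const tag appearing on the ops structures above is the opt-out side of PaX's constify gcc plugin, which otherwise treats structures composed purely of function pointers as const. Roughly how the pair of annotations is wired up elsewhere in this patch (paraphrased sketch, not verbatim):

    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))   /* leave struct writable */
    #define __do_const __attribute__((do_const))   /* force-constify struct */
    #else
    #define __no_const
    #define __do_const
    #endif

The x86_init/x86_cpuinit/x86_platform tables must stay writable because platform code overrides individual hooks at boot, so they are annotated __no_const rather than moved to read-only memory.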
20128diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20129index b913915..4f5a581 100644
20130--- a/arch/x86/include/asm/xen/page.h
20131+++ b/arch/x86/include/asm/xen/page.h
20132@@ -56,7 +56,7 @@ extern int m2p_remove_override(struct page *page,
20133 extern struct page *m2p_find_override(unsigned long mfn);
20134 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
20135
20136-static inline unsigned long pfn_to_mfn(unsigned long pfn)
20137+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
20138 {
20139 unsigned long mfn;
20140
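__intentional_overflow(-1), added to pfn_to_mfn() above, talks to the size_overflow gcc plugin rather than to the compiler proper: it marks a function whose arithmetic is allowed to wrap, so the plugin does not instrument it and report a false positive (-1 denotes the return value). A paraphrased sketch of how such a marker is typically defined with and without the plugin:

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif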
20141diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20142index 0415cda..3b22adc 100644
20143--- a/arch/x86/include/asm/xsave.h
20144+++ b/arch/x86/include/asm/xsave.h
20145@@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20146 if (unlikely(err))
20147 return -EFAULT;
20148
20149+ pax_open_userland();
20150 __asm__ __volatile__(ASM_STAC "\n"
20151- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
20152+ "1:"
20153+ __copyuser_seg
20154+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
20155 "2: " ASM_CLAC "\n"
20156 ".section .fixup,\"ax\"\n"
20157 "3: movl $-1,%[err]\n"
20158@@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20159 : [err] "=r" (err)
20160 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20161 : "memory");
20162+ pax_close_userland();
20163 return err;
20164 }
20165
20166 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20167 {
20168 int err;
20169- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20170+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20171 u32 lmask = mask;
20172 u32 hmask = mask >> 32;
20173
20174+ pax_open_userland();
20175 __asm__ __volatile__(ASM_STAC "\n"
20176- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20177+ "1:"
20178+ __copyuser_seg
20179+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
20180 "2: " ASM_CLAC "\n"
20181 ".section .fixup,\"ax\"\n"
20182 "3: movl $-1,%[err]\n"
20183@@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20184 : [err] "=r" (err)
20185 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20186 : "memory"); /* memory required? */
20187+ pax_close_userland();
20188 return err;
20189 }
20190
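Both xsave helpers above gain the same bracketing: under PAX_MEMORY_UDEREF, userland is not addressable from kernel mode except between pax_open_userland() and pax_close_userland(), and __copyuser_seg prefixes the access with the dedicated user segment. A hedged sketch of the bracketing pattern in plain C (read_user_byte is illustrative; on non-UDEREF configurations the open/close calls compile away):

    /* Only inside the bracket is a user pointer dereferenceable; a stray
     * access anywhere else faults instead of silently reading user memory. */
    static inline int read_user_byte(const unsigned char __user *p,
                                     unsigned char *out)
    {
            int err;

            pax_open_userland();
            err = __get_user(*out, p);
            pax_close_userland();

            return err;
    }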
20191diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20192index bbae024..e1528f9 100644
20193--- a/arch/x86/include/uapi/asm/e820.h
20194+++ b/arch/x86/include/uapi/asm/e820.h
20195@@ -63,7 +63,7 @@ struct e820map {
20196 #define ISA_START_ADDRESS 0xa0000
20197 #define ISA_END_ADDRESS 0x100000
20198
20199-#define BIOS_BEGIN 0x000a0000
20200+#define BIOS_BEGIN 0x000c0000
20201 #define BIOS_END 0x00100000
20202
20203 #define BIOS_ROM_BASE 0xffe00000
20204diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20205index 7b0a55a..ad115bf 100644
20206--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20207+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20208@@ -49,7 +49,6 @@
20209 #define EFLAGS 144
20210 #define RSP 152
20211 #define SS 160
20212-#define ARGOFFSET R11
20213 #endif /* __ASSEMBLY__ */
20214
20215 /* top of stack page */
20216diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20217index 9b0a34e..fc7e553 100644
20218--- a/arch/x86/kernel/Makefile
20219+++ b/arch/x86/kernel/Makefile
20220@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20221 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20222 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20223 obj-y += probe_roms.o
20224-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20225+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20226 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20227 obj-y += syscall_$(BITS).o
20228 obj-$(CONFIG_X86_64) += vsyscall_64.o
20229diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20230index 6c0b43b..e67bb31 100644
20231--- a/arch/x86/kernel/acpi/boot.c
20232+++ b/arch/x86/kernel/acpi/boot.c
20233@@ -1315,7 +1315,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20234 * If your system is blacklisted here, but you find that acpi=force
20235 * works for you, please contact linux-acpi@vger.kernel.org
20236 */
20237-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20238+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20239 /*
20240 * Boxes that need ACPI disabled
20241 */
20242@@ -1390,7 +1390,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20243 };
20244
20245 /* second table for DMI checks that should run after early-quirks */
20246-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20247+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20248 /*
20249 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20250 * which includes some code which overrides all temperature
20251diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20252index 3a2ae4c..9db31d6 100644
20253--- a/arch/x86/kernel/acpi/sleep.c
20254+++ b/arch/x86/kernel/acpi/sleep.c
20255@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20256 #else /* CONFIG_64BIT */
20257 #ifdef CONFIG_SMP
20258 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20259+
20260+ pax_open_kernel();
20261 early_gdt_descr.address =
20262 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20263+ pax_close_kernel();
20264+
20265 initial_gs = per_cpu_offset(smp_processor_id());
20266 #endif
20267 initial_code = (unsigned long)wakeup_long64;
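The pax_open_kernel()/pax_close_kernel() pair bracketing the early_gdt_descr update is the write-side idiom for KERNEXEC, and it recurs throughout the rest of this patch (the APM GDT fixups below use it too): data that was made read-only after boot is briefly made writable for one deliberate, audited store. An illustrative helper showing the shape of the pattern, not the PaX implementation:

    /* Typical KERNEXEC write bracket: lift write protection (e.g. by
     * toggling CR0.WP), perform the one intended store, re-protect. */
    static void update_protected_word(unsigned long *ro_obj, unsigned long val)
    {
            pax_open_kernel();
            *ro_obj = val;          /* would fault outside the bracket */
            pax_close_kernel();
    }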
20268diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20269index 665c6b7..eae4d56 100644
20270--- a/arch/x86/kernel/acpi/wakeup_32.S
20271+++ b/arch/x86/kernel/acpi/wakeup_32.S
20272@@ -29,13 +29,11 @@ wakeup_pmode_return:
20273 # and restore the stack ... but you need gdt for this to work
20274 movl saved_context_esp, %esp
20275
20276- movl %cs:saved_magic, %eax
20277- cmpl $0x12345678, %eax
20278+ cmpl $0x12345678, saved_magic
20279 jne bogus_magic
20280
20281 # jump to place where we left off
20282- movl saved_eip, %eax
20283- jmp *%eax
20284+ jmp *(saved_eip)
20285
20286 bogus_magic:
20287 jmp bogus_magic
20288diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20289index df94598..f3b29bf 100644
20290--- a/arch/x86/kernel/alternative.c
20291+++ b/arch/x86/kernel/alternative.c
20292@@ -269,6 +269,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20293 */
20294 for (a = start; a < end; a++) {
20295 instr = (u8 *)&a->instr_offset + a->instr_offset;
20296+
20297+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20298+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20299+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20300+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20301+#endif
20302+
20303 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20304 BUG_ON(a->replacementlen > a->instrlen);
20305 BUG_ON(a->instrlen > sizeof(insnbuf));
20306@@ -300,10 +307,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20307 for (poff = start; poff < end; poff++) {
20308 u8 *ptr = (u8 *)poff + *poff;
20309
20310+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20311+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20312+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20313+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20314+#endif
20315+
20316 if (!*poff || ptr < text || ptr >= text_end)
20317 continue;
20318 /* turn DS segment override prefix into lock prefix */
20319- if (*ptr == 0x3e)
20320+ if (*ktla_ktva(ptr) == 0x3e)
20321 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20322 }
20323 mutex_unlock(&text_mutex);
20324@@ -318,10 +331,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20325 for (poff = start; poff < end; poff++) {
20326 u8 *ptr = (u8 *)poff + *poff;
20327
20328+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20329+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20330+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20331+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20332+#endif
20333+
20334 if (!*poff || ptr < text || ptr >= text_end)
20335 continue;
20336 /* turn lock prefix into DS segment override prefix */
20337- if (*ptr == 0xf0)
20338+ if (*ktla_ktva(ptr) == 0xf0)
20339 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20340 }
20341 mutex_unlock(&text_mutex);
20342@@ -458,7 +477,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20343
20344 BUG_ON(p->len > MAX_PATCH_LEN);
20345 /* prep the buffer with the original instructions */
20346- memcpy(insnbuf, p->instr, p->len);
20347+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20348 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20349 (unsigned long)p->instr, p->len);
20350
20351@@ -505,7 +524,7 @@ void __init alternative_instructions(void)
20352 if (!uniproc_patched || num_possible_cpus() == 1)
20353 free_init_pages("SMP alternatives",
20354 (unsigned long)__smp_locks,
20355- (unsigned long)__smp_locks_end);
20356+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20357 #endif
20358
20359 apply_paravirt(__parainstructions, __parainstructions_end);
20360@@ -525,13 +544,17 @@ void __init alternative_instructions(void)
20361 * instructions. And on the local CPU you need to be protected against NMI or MCE
20362 * handlers seeing an inconsistent instruction while you patch.
20363 */
20364-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20365+void *__kprobes text_poke_early(void *addr, const void *opcode,
20366 size_t len)
20367 {
20368 unsigned long flags;
20369 local_irq_save(flags);
20370- memcpy(addr, opcode, len);
20371+
20372+ pax_open_kernel();
20373+ memcpy(ktla_ktva(addr), opcode, len);
20374 sync_core();
20375+ pax_close_kernel();
20376+
20377 local_irq_restore(flags);
20378 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20379 that causes hangs on some VIA CPUs. */
20380@@ -553,36 +576,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20381 */
20382 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
20383 {
20384- unsigned long flags;
20385- char *vaddr;
20386+ unsigned char *vaddr = ktla_ktva(addr);
20387 struct page *pages[2];
20388- int i;
20389+ size_t i;
20390
20391 if (!core_kernel_text((unsigned long)addr)) {
20392- pages[0] = vmalloc_to_page(addr);
20393- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20394+ pages[0] = vmalloc_to_page(vaddr);
20395+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20396 } else {
20397- pages[0] = virt_to_page(addr);
20398+ pages[0] = virt_to_page(vaddr);
20399 WARN_ON(!PageReserved(pages[0]));
20400- pages[1] = virt_to_page(addr + PAGE_SIZE);
20401+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20402 }
20403 BUG_ON(!pages[0]);
20404- local_irq_save(flags);
20405- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20406- if (pages[1])
20407- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20408- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20409- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20410- clear_fixmap(FIX_TEXT_POKE0);
20411- if (pages[1])
20412- clear_fixmap(FIX_TEXT_POKE1);
20413- local_flush_tlb();
20414- sync_core();
20415- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20416- that causes hangs on some VIA CPUs. */
20417+ text_poke_early(addr, opcode, len);
20418 for (i = 0; i < len; i++)
20419- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20420- local_irq_restore(flags);
20421+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20422 return addr;
20423 }
20424
20425@@ -602,7 +611,7 @@ int poke_int3_handler(struct pt_regs *regs)
20426 if (likely(!bp_patching_in_progress))
20427 return 0;
20428
20429- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20430+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20431 return 0;
20432
20433 /* set up the specified breakpoint handler */
20434@@ -636,7 +645,7 @@ int poke_int3_handler(struct pt_regs *regs)
20435 */
20436 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20437 {
20438- unsigned char int3 = 0xcc;
20439+ const unsigned char int3 = 0xcc;
20440
20441 bp_int3_handler = handler;
20442 bp_int3_addr = (u8 *)addr + sizeof(int3);
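Most of the churn in alternative.c comes from one fact: with 32-bit KERNEXEC the kernel image is reachable through two mappings, an executable one and a writable alias, and ktla_ktva() converts a text address into its writable counterpart before any read-for-compare or patch. A conceptual sketch of the translation (paraphrased; the exact arithmetic lives elsewhere in this patch):

    #ifdef CONFIG_PAX_KERNEXEC
    /* text executes at one linear address, is written through another */
    #define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
    #define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
    #else
    #define ktla_ktva(addr) (addr)  /* single mapping: identity */
    #define ktva_ktla(addr) (addr)
    #endif

This is also why text_poke() above collapses into text_poke_early(): once writes go through the alias under pax_open_kernel(), the fixmap-based double mapping the stock kernel used becomes redundant.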
20443diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20444index d278736..0b4af9a8 100644
20445--- a/arch/x86/kernel/apic/apic.c
20446+++ b/arch/x86/kernel/apic/apic.c
20447@@ -191,7 +191,7 @@ int first_system_vector = 0xfe;
20448 /*
20449 * Debug level, exported for io_apic.c
20450 */
20451-unsigned int apic_verbosity;
20452+int apic_verbosity;
20453
20454 int pic_mode;
20455
20456@@ -1986,7 +1986,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20457 apic_write(APIC_ESR, 0);
20458 v1 = apic_read(APIC_ESR);
20459 ack_APIC_irq();
20460- atomic_inc(&irq_err_count);
20461+ atomic_inc_unchecked(&irq_err_count);
20462
20463 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
20464 smp_processor_id(), v0 , v1);
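irq_err_count is the first of many counters this patch migrates to atomic_unchecked_t. Under PAX_REFCOUNT, plain atomic_inc() traps on signed overflow to stop refcount-overflow exploits; pure statistics counters, where wrapping is harmless, opt out via the unchecked variants. An illustrative definition of the unchecked type (the real one is per-arch assembly):

    typedef struct {
            int counter;
    } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            /* same increment, minus the overflow trap of the checked form */
            __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }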
20465diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20466index 00c77cf..2dc6a2d 100644
20467--- a/arch/x86/kernel/apic/apic_flat_64.c
20468+++ b/arch/x86/kernel/apic/apic_flat_64.c
20469@@ -157,7 +157,7 @@ static int flat_probe(void)
20470 return 1;
20471 }
20472
20473-static struct apic apic_flat = {
20474+static struct apic apic_flat __read_only = {
20475 .name = "flat",
20476 .probe = flat_probe,
20477 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20478@@ -271,7 +271,7 @@ static int physflat_probe(void)
20479 return 0;
20480 }
20481
20482-static struct apic apic_physflat = {
20483+static struct apic apic_physflat __read_only = {
20484
20485 .name = "physical flat",
20486 .probe = physflat_probe,
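apic_flat and apic_physflat are the first of roughly a dozen apic driver templates this patch tags __read_only (note the matching removal of __refdata from the es7000 and numaq variants further down, which existed only to silence section-mismatch warnings). These tables are selected once during boot and never legitimately written afterwards, making them natural targets for read-only-after-init treatment. Roughly how the annotation is introduced elsewhere in this patch (paraphrased):

    /* placed in a section the boot code write-protects once init is done */
    #define __read_only __attribute__((__section__(".data..read_only")))

    static int boot_chosen_mode __read_only = 1;  /* written only before protection */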
20487diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20488index e145f28..2752888 100644
20489--- a/arch/x86/kernel/apic/apic_noop.c
20490+++ b/arch/x86/kernel/apic/apic_noop.c
20491@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
20492 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20493 }
20494
20495-struct apic apic_noop = {
20496+struct apic apic_noop __read_only = {
20497 .name = "noop",
20498 .probe = noop_probe,
20499 .acpi_madt_oem_check = NULL,
20500diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20501index d50e364..543bee3 100644
20502--- a/arch/x86/kernel/apic/bigsmp_32.c
20503+++ b/arch/x86/kernel/apic/bigsmp_32.c
20504@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
20505 return dmi_bigsmp;
20506 }
20507
20508-static struct apic apic_bigsmp = {
20509+static struct apic apic_bigsmp __read_only = {
20510
20511 .name = "bigsmp",
20512 .probe = probe_bigsmp,
20513diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
20514index c552247..587a316 100644
20515--- a/arch/x86/kernel/apic/es7000_32.c
20516+++ b/arch/x86/kernel/apic/es7000_32.c
20517@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
20518 return ret && es7000_apic_is_cluster();
20519 }
20520
20521-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
20522-static struct apic __refdata apic_es7000_cluster = {
20523+static struct apic apic_es7000_cluster __read_only = {
20524
20525 .name = "es7000",
20526 .probe = probe_es7000,
20527@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
20528 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
20529 };
20530
20531-static struct apic __refdata apic_es7000 = {
20532+static struct apic apic_es7000 __read_only = {
20533
20534 .name = "es7000",
20535 .probe = probe_es7000,
20536diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20537index e63a5bd..c0babf8 100644
20538--- a/arch/x86/kernel/apic/io_apic.c
20539+++ b/arch/x86/kernel/apic/io_apic.c
20540@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
20541 }
20542 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
20543
20544-void lock_vector_lock(void)
20545+void lock_vector_lock(void) __acquires(vector_lock)
20546 {
20547 /* Used to ensure the online set of cpus does not change
20548 * during assign_irq_vector.
20549@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
20550 raw_spin_lock(&vector_lock);
20551 }
20552
20553-void unlock_vector_lock(void)
20554+void unlock_vector_lock(void) __releases(vector_lock)
20555 {
20556 raw_spin_unlock(&vector_lock);
20557 }
20558@@ -2367,7 +2367,7 @@ static void ack_apic_edge(struct irq_data *data)
20559 ack_APIC_irq();
20560 }
20561
20562-atomic_t irq_mis_count;
20563+atomic_unchecked_t irq_mis_count;
20564
20565 #ifdef CONFIG_GENERIC_PENDING_IRQ
20566 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20567@@ -2508,7 +2508,7 @@ static void ack_apic_level(struct irq_data *data)
20568 * at the cpu.
20569 */
20570 if (!(v & (1 << (i & 0x1f)))) {
20571- atomic_inc(&irq_mis_count);
20572+ atomic_inc_unchecked(&irq_mis_count);
20573
20574 eoi_ioapic_irq(irq, cfg);
20575 }
20576diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
20577index 1e42e8f..daacf44 100644
20578--- a/arch/x86/kernel/apic/numaq_32.c
20579+++ b/arch/x86/kernel/apic/numaq_32.c
20580@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
20581 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
20582 }
20583
20584-/* Use __refdata to keep false positive warning calm. */
20585-static struct apic __refdata apic_numaq = {
20586+static struct apic apic_numaq __read_only = {
20587
20588 .name = "NUMAQ",
20589 .probe = probe_numaq,
20590diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20591index eb35ef9..f184a21 100644
20592--- a/arch/x86/kernel/apic/probe_32.c
20593+++ b/arch/x86/kernel/apic/probe_32.c
20594@@ -72,7 +72,7 @@ static int probe_default(void)
20595 return 1;
20596 }
20597
20598-static struct apic apic_default = {
20599+static struct apic apic_default __read_only = {
20600
20601 .name = "default",
20602 .probe = probe_default,
20603diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
20604index 77c95c0..434f8a4 100644
20605--- a/arch/x86/kernel/apic/summit_32.c
20606+++ b/arch/x86/kernel/apic/summit_32.c
20607@@ -486,7 +486,7 @@ void setup_summit(void)
20608 }
20609 #endif
20610
20611-static struct apic apic_summit = {
20612+static struct apic apic_summit __read_only = {
20613
20614 .name = "summit",
20615 .probe = probe_summit,
20616diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20617index 140e29d..d88bc95 100644
20618--- a/arch/x86/kernel/apic/x2apic_cluster.c
20619+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20620@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20621 return notifier_from_errno(err);
20622 }
20623
20624-static struct notifier_block __refdata x2apic_cpu_notifier = {
20625+static struct notifier_block x2apic_cpu_notifier = {
20626 .notifier_call = update_clusterinfo,
20627 };
20628
20629@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20630 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20631 }
20632
20633-static struct apic apic_x2apic_cluster = {
20634+static struct apic apic_x2apic_cluster __read_only = {
20635
20636 .name = "cluster x2apic",
20637 .probe = x2apic_cluster_probe,
20638diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20639index 562a76d..a003c0f 100644
20640--- a/arch/x86/kernel/apic/x2apic_phys.c
20641+++ b/arch/x86/kernel/apic/x2apic_phys.c
20642@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
20643 return apic == &apic_x2apic_phys;
20644 }
20645
20646-static struct apic apic_x2apic_phys = {
20647+static struct apic apic_x2apic_phys __read_only = {
20648
20649 .name = "physical x2apic",
20650 .probe = x2apic_phys_probe,
20651diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20652index ad0dc04..0d9cc56 100644
20653--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20654+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20655@@ -350,7 +350,7 @@ static int uv_probe(void)
20656 return apic == &apic_x2apic_uv_x;
20657 }
20658
20659-static struct apic __refdata apic_x2apic_uv_x = {
20660+static struct apic apic_x2apic_uv_x __read_only = {
20661
20662 .name = "UV large system",
20663 .probe = uv_probe,
20664diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20665index 3ab0343..814c4787 100644
20666--- a/arch/x86/kernel/apm_32.c
20667+++ b/arch/x86/kernel/apm_32.c
20668@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
20669 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20670 * even though they are called in protected mode.
20671 */
20672-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20673+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20674 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20675
20676 static const char driver_version[] = "1.16ac"; /* no spaces */
20677@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
20678 BUG_ON(cpu != 0);
20679 gdt = get_cpu_gdt_table(cpu);
20680 save_desc_40 = gdt[0x40 / 8];
20681+
20682+ pax_open_kernel();
20683 gdt[0x40 / 8] = bad_bios_desc;
20684+ pax_close_kernel();
20685
20686 apm_irq_save(flags);
20687 APM_DO_SAVE_SEGS;
20688@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
20689 &call->esi);
20690 APM_DO_RESTORE_SEGS;
20691 apm_irq_restore(flags);
20692+
20693+ pax_open_kernel();
20694 gdt[0x40 / 8] = save_desc_40;
20695+ pax_close_kernel();
20696+
20697 put_cpu();
20698
20699 return call->eax & 0xff;
20700@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
20701 BUG_ON(cpu != 0);
20702 gdt = get_cpu_gdt_table(cpu);
20703 save_desc_40 = gdt[0x40 / 8];
20704+
20705+ pax_open_kernel();
20706 gdt[0x40 / 8] = bad_bios_desc;
20707+ pax_close_kernel();
20708
20709 apm_irq_save(flags);
20710 APM_DO_SAVE_SEGS;
20711@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
20712 &call->eax);
20713 APM_DO_RESTORE_SEGS;
20714 apm_irq_restore(flags);
20715+
20716+ pax_open_kernel();
20717 gdt[0x40 / 8] = save_desc_40;
20718+ pax_close_kernel();
20719+
20720 put_cpu();
20721 return error;
20722 }
20723@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
20724 * code to that CPU.
20725 */
20726 gdt = get_cpu_gdt_table(0);
20727+
20728+ pax_open_kernel();
20729 set_desc_base(&gdt[APM_CS >> 3],
20730 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
20731 set_desc_base(&gdt[APM_CS_16 >> 3],
20732 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
20733 set_desc_base(&gdt[APM_DS >> 3],
20734 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
20735+ pax_close_kernel();
20736
20737 proc_create("apm", 0, NULL, &apm_file_ops);
20738
20739diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
20740index 9f6b934..cf5ffb3 100644
20741--- a/arch/x86/kernel/asm-offsets.c
20742+++ b/arch/x86/kernel/asm-offsets.c
20743@@ -32,6 +32,8 @@ void common(void) {
20744 OFFSET(TI_flags, thread_info, flags);
20745 OFFSET(TI_status, thread_info, status);
20746 OFFSET(TI_addr_limit, thread_info, addr_limit);
20747+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
20748+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
20749
20750 BLANK();
20751 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
20752@@ -52,8 +54,26 @@ void common(void) {
20753 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
20754 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
20755 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
20756+
20757+#ifdef CONFIG_PAX_KERNEXEC
20758+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
20759 #endif
20760
20761+#ifdef CONFIG_PAX_MEMORY_UDEREF
20762+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
20763+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
20764+#ifdef CONFIG_X86_64
20765+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
20766+#endif
20767+#endif
20768+
20769+#endif
20770+
20771+ BLANK();
20772+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
20773+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
20774+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
20775+
20776 #ifdef CONFIG_XEN
20777 BLANK();
20778 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
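The new OFFSET()/DEFINE() entries above feed the usual asm-offsets machinery: this file is compiled to assembly, the markers are scraped out, and the results land in generated/asm-offsets.h, where entry code (.S files) can use TI_lowest_stack, PAGE_SIZE_asm and friends as plain constants. The markers themselves are roughly (paraphrasing include/linux/kbuild.h):

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))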
20779diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
20780index e7c798b..2b2019b 100644
20781--- a/arch/x86/kernel/asm-offsets_64.c
20782+++ b/arch/x86/kernel/asm-offsets_64.c
20783@@ -77,6 +77,7 @@ int main(void)
20784 BLANK();
20785 #undef ENTRY
20786
20787+ DEFINE(TSS_size, sizeof(struct tss_struct));
20788 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
20789 BLANK();
20790
20791diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
20792index 47b56a7..efc2bc6 100644
20793--- a/arch/x86/kernel/cpu/Makefile
20794+++ b/arch/x86/kernel/cpu/Makefile
20795@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
20796 CFLAGS_REMOVE_perf_event.o = -pg
20797 endif
20798
20799-# Make sure load_percpu_segment has no stackprotector
20800-nostackp := $(call cc-option, -fno-stack-protector)
20801-CFLAGS_common.o := $(nostackp)
20802-
20803 obj-y := intel_cacheinfo.o scattered.o topology.o
20804 obj-y += proc.o capflags.o powerflags.o common.o
20805 obj-y += rdrand.o
20806diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
20807index 59bfebc..d8f27bd 100644
20808--- a/arch/x86/kernel/cpu/amd.c
20809+++ b/arch/x86/kernel/cpu/amd.c
20810@@ -753,7 +753,7 @@ static void init_amd(struct cpuinfo_x86 *c)
20811 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
20812 {
20813 /* AMD errata T13 (order #21922) */
20814- if ((c->x86 == 6)) {
20815+ if (c->x86 == 6) {
20816 /* Duron Rev A0 */
20817 if (c->x86_model == 3 && c->x86_mask == 0)
20818 size = 64;
20819diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
20820index fe2bdd0..77b0d1b 100644
20821--- a/arch/x86/kernel/cpu/common.c
20822+++ b/arch/x86/kernel/cpu/common.c
20823@@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
20824
20825 static const struct cpu_dev *this_cpu = &default_cpu;
20826
20827-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
20828-#ifdef CONFIG_X86_64
20829- /*
20830- * We need valid kernel segments for data and code in long mode too
20831- * IRET will check the segment types kkeil 2000/10/28
20832- * Also sysret mandates a special GDT layout
20833- *
20834- * TLS descriptors are currently at a different place compared to i386.
20835- * Hopefully nobody expects them at a fixed place (Wine?)
20836- */
20837- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
20838- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
20839- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
20840- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
20841- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
20842- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
20843-#else
20844- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
20845- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20846- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
20847- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
20848- /*
20849- * Segments used for calling PnP BIOS have byte granularity.
20850- * They code segments and data segments have fixed 64k limits,
20851- * the transfer segment sizes are set at run time.
20852- */
20853- /* 32-bit code */
20854- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20855- /* 16-bit code */
20856- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20857- /* 16-bit data */
20858- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
20859- /* 16-bit data */
20860- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
20861- /* 16-bit data */
20862- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
20863- /*
20864- * The APM segments have byte granularity and their bases
20865- * are set at run time. All have 64k limits.
20866- */
20867- /* 32-bit code */
20868- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20869- /* 16-bit code */
20870- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20871- /* data */
20872- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
20873-
20874- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20875- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20876- GDT_STACK_CANARY_INIT
20877-#endif
20878-} };
20879-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
20880-
20881 static int __init x86_xsave_setup(char *s)
20882 {
20883 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
20884@@ -293,6 +239,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
20885 }
20886 }
20887
20888+#ifdef CONFIG_X86_64
20889+static __init int setup_disable_pcid(char *arg)
20890+{
20891+ setup_clear_cpu_cap(X86_FEATURE_PCID);
20892+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
20893+
20894+#ifdef CONFIG_PAX_MEMORY_UDEREF
20895+ if (clone_pgd_mask != ~(pgdval_t)0UL)
20896+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20897+#endif
20898+
20899+ return 1;
20900+}
20901+__setup("nopcid", setup_disable_pcid);
20902+
20903+static void setup_pcid(struct cpuinfo_x86 *c)
20904+{
20905+ if (!cpu_has(c, X86_FEATURE_PCID)) {
20906+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
20907+
20908+#ifdef CONFIG_PAX_MEMORY_UDEREF
20909+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
20910+ pax_open_kernel();
20911+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20912+ pax_close_kernel();
20913+ printk("PAX: slow and weak UDEREF enabled\n");
20914+ } else
20915+ printk("PAX: UDEREF disabled\n");
20916+#endif
20917+
20918+ return;
20919+ }
20920+
20921+ printk("PAX: PCID detected\n");
20922+ set_in_cr4(X86_CR4_PCIDE);
20923+
20924+#ifdef CONFIG_PAX_MEMORY_UDEREF
20925+ pax_open_kernel();
20926+ clone_pgd_mask = ~(pgdval_t)0UL;
20927+ pax_close_kernel();
20928+ if (pax_user_shadow_base)
20929+ printk("PAX: weak UDEREF enabled\n");
20930+ else {
20931+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
20932+ printk("PAX: strong UDEREF enabled\n");
20933+ }
20934+#endif
20935+
20936+ if (cpu_has(c, X86_FEATURE_INVPCID))
20937+ printk("PAX: INVPCID detected\n");
20938+}
20939+#endif
20940+
20941 /*
20942 * Some CPU features depend on higher CPUID levels, which may not always
20943 * be available due to CPUID level capping or broken virtualization
20944@@ -393,7 +392,7 @@ void switch_to_new_gdt(int cpu)
20945 {
20946 struct desc_ptr gdt_descr;
20947
20948- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
20949+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20950 gdt_descr.size = GDT_SIZE - 1;
20951 load_gdt(&gdt_descr);
20952 /* Reload the per-cpu base */
20953@@ -882,6 +881,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20954 setup_smep(c);
20955 setup_smap(c);
20956
20957+#ifdef CONFIG_X86_64
20958+ setup_pcid(c);
20959+#endif
20960+
20961 /*
20962 * The vendor-specific functions might have changed features.
20963 * Now we do "generic changes."
20964@@ -890,6 +893,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20965 /* Filter out anything that depends on CPUID levels we don't have */
20966 filter_cpuid_features(c, true);
20967
20968+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
20969+ setup_clear_cpu_cap(X86_FEATURE_SEP);
20970+#endif
20971+
20972 /* If the model name is still unset, do table lookup. */
20973 if (!c->x86_model_id[0]) {
20974 const char *p;
20975@@ -1077,10 +1084,12 @@ static __init int setup_disablecpuid(char *arg)
20976 }
20977 __setup("clearcpuid=", setup_disablecpuid);
20978
20979+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
20980+EXPORT_PER_CPU_SYMBOL(current_tinfo);
20981+
20982 #ifdef CONFIG_X86_64
20983-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20984-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
20985- (unsigned long) debug_idt_table };
20986+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20987+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
20988
20989 DEFINE_PER_CPU_FIRST(union irq_stack_union,
20990 irq_stack_union) __aligned(PAGE_SIZE) __visible;
20991@@ -1094,7 +1103,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
20992 EXPORT_PER_CPU_SYMBOL(current_task);
20993
20994 DEFINE_PER_CPU(unsigned long, kernel_stack) =
20995- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
20996+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
20997 EXPORT_PER_CPU_SYMBOL(kernel_stack);
20998
20999 DEFINE_PER_CPU(char *, irq_stack_ptr) =
21000@@ -1244,7 +1253,7 @@ void cpu_init(void)
21001 load_ucode_ap();
21002
21003 cpu = stack_smp_processor_id();
21004- t = &per_cpu(init_tss, cpu);
21005+ t = init_tss + cpu;
21006 oist = &per_cpu(orig_ist, cpu);
21007
21008 #ifdef CONFIG_NUMA
21009@@ -1279,7 +1288,6 @@ void cpu_init(void)
21010 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21011 barrier();
21012
21013- x86_configure_nx();
21014 enable_x2apic();
21015
21016 /*
21017@@ -1331,7 +1339,7 @@ void cpu_init(void)
21018 {
21019 int cpu = smp_processor_id();
21020 struct task_struct *curr = current;
21021- struct tss_struct *t = &per_cpu(init_tss, cpu);
21022+ struct tss_struct *t = init_tss + cpu;
21023 struct thread_struct *thread = &curr->thread;
21024
21025 show_ucode_info_early();
21026diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21027index 0641113..06f5ba4 100644
21028--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21029+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21030@@ -1014,6 +1014,22 @@ static struct attribute *default_attrs[] = {
21031 };
21032
21033 #ifdef CONFIG_AMD_NB
21034+static struct attribute *default_attrs_amd_nb[] = {
21035+ &type.attr,
21036+ &level.attr,
21037+ &coherency_line_size.attr,
21038+ &physical_line_partition.attr,
21039+ &ways_of_associativity.attr,
21040+ &number_of_sets.attr,
21041+ &size.attr,
21042+ &shared_cpu_map.attr,
21043+ &shared_cpu_list.attr,
21044+ NULL,
21045+ NULL,
21046+ NULL,
21047+ NULL
21048+};
21049+
21050 static struct attribute **amd_l3_attrs(void)
21051 {
21052 static struct attribute **attrs;
21053@@ -1024,18 +1040,7 @@ static struct attribute **amd_l3_attrs(void)
21054
21055 n = ARRAY_SIZE(default_attrs);
21056
21057- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21058- n += 2;
21059-
21060- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21061- n += 1;
21062-
21063- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21064- if (attrs == NULL)
21065- return attrs = default_attrs;
21066-
21067- for (n = 0; default_attrs[n]; n++)
21068- attrs[n] = default_attrs[n];
21069+ attrs = default_attrs_amd_nb;
21070
21071 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21072 attrs[n++] = &cache_disable_0.attr;
21073@@ -1086,6 +1091,13 @@ static struct kobj_type ktype_cache = {
21074 .default_attrs = default_attrs,
21075 };
21076
21077+#ifdef CONFIG_AMD_NB
21078+static struct kobj_type ktype_cache_amd_nb = {
21079+ .sysfs_ops = &sysfs_ops,
21080+ .default_attrs = default_attrs_amd_nb,
21081+};
21082+#endif
21083+
21084 static struct kobj_type ktype_percpu_entry = {
21085 .sysfs_ops = &sysfs_ops,
21086 };
21087@@ -1151,20 +1163,26 @@ static int cache_add_dev(struct device *dev)
21088 return retval;
21089 }
21090
21091+#ifdef CONFIG_AMD_NB
21092+ amd_l3_attrs();
21093+#endif
21094+
21095 for (i = 0; i < num_cache_leaves; i++) {
21096+ struct kobj_type *ktype;
21097+
21098 this_object = INDEX_KOBJECT_PTR(cpu, i);
21099 this_object->cpu = cpu;
21100 this_object->index = i;
21101
21102 this_leaf = CPUID4_INFO_IDX(cpu, i);
21103
21104- ktype_cache.default_attrs = default_attrs;
21105+ ktype = &ktype_cache;
21106 #ifdef CONFIG_AMD_NB
21107 if (this_leaf->base.nb)
21108- ktype_cache.default_attrs = amd_l3_attrs();
21109+ ktype = &ktype_cache_amd_nb;
21110 #endif
21111 retval = kobject_init_and_add(&(this_object->kobj),
21112- &ktype_cache,
21113+ ktype,
21114 per_cpu(ici_cache_kobject, cpu),
21115 "index%1lu", i);
21116 if (unlikely(retval)) {
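The intel_cacheinfo change swaps a runtime kzalloc()'d attribute array for a static one carrying spare slots: the three extra NULLs in default_attrs_amd_nb reserve room for cache_disable_0, cache_disable_1 and subcaches, appended at init only when the L3 features exist, while the final NULL remains the terminator sysfs expects. The companion ktype_cache_amd_nb exists for the same reason: rather than rewriting ktype_cache.default_attrs at runtime (a store into now-constified data), the code picks between two static kobj_types. A stripped-down sketch of the fixed-slack idiom (types simplified, names illustrative):

    struct attribute { const char *name; };

    static struct attribute always_present = { "size" };

    static struct attribute *example_attrs[] = {
            &always_present,
            NULL,   /* slot for an optional attribute, filled at init */
            NULL,   /* slot for another optional attribute */
            NULL    /* permanent NULL terminator */
    };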
21117diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21118index b3218cd..99a75de 100644
21119--- a/arch/x86/kernel/cpu/mcheck/mce.c
21120+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21121@@ -45,6 +45,7 @@
21122 #include <asm/processor.h>
21123 #include <asm/mce.h>
21124 #include <asm/msr.h>
21125+#include <asm/local.h>
21126
21127 #include "mce-internal.h"
21128
21129@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21130 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21131 m->cs, m->ip);
21132
21133- if (m->cs == __KERNEL_CS)
21134+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21135 print_symbol("{%s}", m->ip);
21136 pr_cont("\n");
21137 }
21138@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21139
21140 #define PANIC_TIMEOUT 5 /* 5 seconds */
21141
21142-static atomic_t mce_paniced;
21143+static atomic_unchecked_t mce_paniced;
21144
21145 static int fake_panic;
21146-static atomic_t mce_fake_paniced;
21147+static atomic_unchecked_t mce_fake_paniced;
21148
21149 /* Panic in progress. Enable interrupts and wait for final IPI */
21150 static void wait_for_panic(void)
21151@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21152 /*
21153 * Make sure only one CPU runs in machine check panic
21154 */
21155- if (atomic_inc_return(&mce_paniced) > 1)
21156+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
21157 wait_for_panic();
21158 barrier();
21159
21160@@ -326,7 +327,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21161 console_verbose();
21162 } else {
21163 /* Don't log too much for fake panic */
21164- if (atomic_inc_return(&mce_fake_paniced) > 1)
21165+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
21166 return;
21167 }
21168 /* First print corrected ones that are still unlogged */
21169@@ -365,7 +366,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21170 if (!fake_panic) {
21171 if (panic_timeout == 0)
21172 panic_timeout = mca_cfg.panic_timeout;
21173- panic(msg);
21174+ panic("%s", msg);
21175 } else
21176 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21177 }
21178@@ -695,7 +696,7 @@ static int mce_timed_out(u64 *t)
21179 * might have been modified by someone else.
21180 */
21181 rmb();
21182- if (atomic_read(&mce_paniced))
21183+ if (atomic_read_unchecked(&mce_paniced))
21184 wait_for_panic();
21185 if (!mca_cfg.monarch_timeout)
21186 goto out;
21187@@ -1666,7 +1667,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21188 }
21189
21190 /* Call the installed machine check handler for this CPU setup. */
21191-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21192+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21193 unexpected_machine_check;
21194
21195 /*
21196@@ -1689,7 +1690,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21197 return;
21198 }
21199
21200+ pax_open_kernel();
21201 machine_check_vector = do_machine_check;
21202+ pax_close_kernel();
21203
21204 __mcheck_cpu_init_generic();
21205 __mcheck_cpu_init_vendor(c);
21206@@ -1703,7 +1706,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21207 */
21208
21209 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21210-static int mce_chrdev_open_count; /* #times opened */
21211+static local_t mce_chrdev_open_count; /* #times opened */
21212 static int mce_chrdev_open_exclu; /* already open exclusive? */
21213
21214 static int mce_chrdev_open(struct inode *inode, struct file *file)
21215@@ -1711,7 +1714,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21216 spin_lock(&mce_chrdev_state_lock);
21217
21218 if (mce_chrdev_open_exclu ||
21219- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21220+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21221 spin_unlock(&mce_chrdev_state_lock);
21222
21223 return -EBUSY;
21224@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21225
21226 if (file->f_flags & O_EXCL)
21227 mce_chrdev_open_exclu = 1;
21228- mce_chrdev_open_count++;
21229+ local_inc(&mce_chrdev_open_count);
21230
21231 spin_unlock(&mce_chrdev_state_lock);
21232
21233@@ -1730,7 +1733,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21234 {
21235 spin_lock(&mce_chrdev_state_lock);
21236
21237- mce_chrdev_open_count--;
21238+ local_dec(&mce_chrdev_open_count);
21239 mce_chrdev_open_exclu = 0;
21240
21241 spin_unlock(&mce_chrdev_state_lock);
21242@@ -2404,7 +2407,7 @@ static __init void mce_init_banks(void)
21243
21244 for (i = 0; i < mca_cfg.banks; i++) {
21245 struct mce_bank *b = &mce_banks[i];
21246- struct device_attribute *a = &b->attr;
21247+ device_attribute_no_const *a = &b->attr;
21248
21249 sysfs_attr_init(&a->attr);
21250 a->attr.name = b->attrname;
21251@@ -2472,7 +2475,7 @@ struct dentry *mce_get_debugfs_dir(void)
21252 static void mce_reset(void)
21253 {
21254 cpu_missing = 0;
21255- atomic_set(&mce_fake_paniced, 0);
21256+ atomic_set_unchecked(&mce_fake_paniced, 0);
21257 atomic_set(&mce_executing, 0);
21258 atomic_set(&mce_callin, 0);
21259 atomic_set(&global_nwo, 0);
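Two counter conversions happen in mce.c: the panic counters move to atomic_unchecked_t (statistics, as with irq_err_count earlier), while mce_chrdev_open_count becomes a local_t, so the open/release accounting goes through an opaque atomic type instead of a bare int. Illustrative use of the local_t API from <asm/local.h> (the helper names here are hypothetical):

    #include <asm/local.h>

    static local_t open_count;

    static void track_open(void)  { local_inc(&open_count); }
    static void track_close(void) { local_dec(&open_count); }
    static long opens(void)       { return local_read(&open_count); }

Also worth noting above: panic(msg) becomes panic("%s", msg), the standard fix for passing attacker-influenced text as a format string.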
21260diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21261index 1c044b1..37a2a43 100644
21262--- a/arch/x86/kernel/cpu/mcheck/p5.c
21263+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21264@@ -11,6 +11,7 @@
21265 #include <asm/processor.h>
21266 #include <asm/mce.h>
21267 #include <asm/msr.h>
21268+#include <asm/pgtable.h>
21269
21270 /* By default disabled */
21271 int mce_p5_enabled __read_mostly;
21272@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21273 if (!cpu_has(c, X86_FEATURE_MCE))
21274 return;
21275
21276+ pax_open_kernel();
21277 machine_check_vector = pentium_machine_check;
21278+ pax_close_kernel();
21279 /* Make sure the vector pointer is visible before we enable MCEs: */
21280 wmb();
21281
21282diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21283index e9a701a..35317d6 100644
21284--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21285+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21286@@ -10,6 +10,7 @@
21287 #include <asm/processor.h>
21288 #include <asm/mce.h>
21289 #include <asm/msr.h>
21290+#include <asm/pgtable.h>
21291
21292 /* Machine check handler for WinChip C6: */
21293 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21294@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21295 {
21296 u32 lo, hi;
21297
21298+ pax_open_kernel();
21299 machine_check_vector = winchip_machine_check;
21300+ pax_close_kernel();
21301 /* Make sure the vector pointer is visible before we enable MCEs: */
21302 wmb();
21303
21304diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21305index f961de9..8a9d332 100644
21306--- a/arch/x86/kernel/cpu/mtrr/main.c
21307+++ b/arch/x86/kernel/cpu/mtrr/main.c
21308@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21309 u64 size_or_mask, size_and_mask;
21310 static bool mtrr_aps_delayed_init;
21311
21312-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21313+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21314
21315 const struct mtrr_ops *mtrr_if;
21316
21317diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21318index df5e41f..816c719 100644
21319--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21320+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21321@@ -25,7 +25,7 @@ struct mtrr_ops {
21322 int (*validate_add_page)(unsigned long base, unsigned long size,
21323 unsigned int type);
21324 int (*have_wrcomb)(void);
21325-};
21326+} __do_const;
21327
21328 extern int generic_get_free_region(unsigned long base, unsigned long size,
21329 int replace_reg);
21330diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21331index db6cdbe..faaf834 100644
21332--- a/arch/x86/kernel/cpu/perf_event.c
21333+++ b/arch/x86/kernel/cpu/perf_event.c
21334@@ -1351,7 +1351,7 @@ static void __init pmu_check_apic(void)
21335 pr_info("no hardware sampling interrupt available.\n");
21336 }
21337
21338-static struct attribute_group x86_pmu_format_group = {
21339+static attribute_group_no_const x86_pmu_format_group = {
21340 .name = "format",
21341 .attrs = NULL,
21342 };
21343@@ -1450,7 +1450,7 @@ static struct attribute *events_attr[] = {
21344 NULL,
21345 };
21346
21347-static struct attribute_group x86_pmu_events_group = {
21348+static attribute_group_no_const x86_pmu_events_group = {
21349 .name = "events",
21350 .attrs = events_attr,
21351 };
21352@@ -1961,7 +1961,7 @@ static unsigned long get_segment_base(unsigned int segment)
21353 if (idx > GDT_ENTRIES)
21354 return 0;
21355
21356- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
21357+ desc = get_cpu_gdt_table(smp_processor_id());
21358 }
21359
21360 return get_desc_base(desc + idx);
21361@@ -2051,7 +2051,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21362 break;
21363
21364 perf_callchain_store(entry, frame.return_address);
21365- fp = frame.next_frame;
21366+ fp = (const void __force_user *)frame.next_frame;
21367 }
21368 }
21369
21370diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21371index 639d128..e92d7e5 100644
21372--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21373+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21374@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21375 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21376 {
21377 struct attribute **attrs;
21378- struct attribute_group *attr_group;
21379+ attribute_group_no_const *attr_group;
21380 int i = 0, j;
21381
21382 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21383diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21384index 0fa4f24..17990ed 100644
21385--- a/arch/x86/kernel/cpu/perf_event_intel.c
21386+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21387@@ -2314,10 +2314,10 @@ __init int intel_pmu_init(void)
21388 * v2 and above have a perf capabilities MSR
21389 */
21390 if (version > 1) {
21391- u64 capabilities;
21392+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21393
21394- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21395- x86_pmu.intel_cap.capabilities = capabilities;
21396+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21397+ x86_pmu.intel_cap.capabilities = capabilities;
21398 }
21399
21400 intel_ds_init();
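The rdmsrl() → rdmsrl_safe() switch above hardens the capability probe: a plain rdmsrl() of an unimplemented MSR takes an unhandled #GP, whereas rdmsrl_safe() routes the fault through an exception fixup and returns nonzero, letting the code fall back to the previously initialized capabilities value. Generic shape of the pattern:

    u64 val;

    if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &val))
            val = 0;        /* MSR absent (e.g. some hypervisors): use a default */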
21401diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21402index 29c2487..a5606fa 100644
21403--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21404+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21405@@ -3318,7 +3318,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21406 static int __init uncore_type_init(struct intel_uncore_type *type)
21407 {
21408 struct intel_uncore_pmu *pmus;
21409- struct attribute_group *attr_group;
21410+ attribute_group_no_const *attr_group;
21411 struct attribute **attrs;
21412 int i, j;
21413
21414diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21415index a80ab71..4089da5 100644
21416--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21417+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21418@@ -498,7 +498,7 @@ struct intel_uncore_box {
21419 struct uncore_event_desc {
21420 struct kobj_attribute attr;
21421 const char *config;
21422-};
21423+} __do_const;
21424
21425 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
21426 { \
21427diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21428index 7d9481c..99c7e4b 100644
21429--- a/arch/x86/kernel/cpuid.c
21430+++ b/arch/x86/kernel/cpuid.c
21431@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21432 return notifier_from_errno(err);
21433 }
21434
21435-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21436+static struct notifier_block cpuid_class_cpu_notifier =
21437 {
21438 .notifier_call = cpuid_class_cpu_callback,
21439 };
21440diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21441index 18677a9..f67c45b 100644
21442--- a/arch/x86/kernel/crash.c
21443+++ b/arch/x86/kernel/crash.c
21444@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21445 {
21446 #ifdef CONFIG_X86_32
21447 struct pt_regs fixed_regs;
21448-#endif
21449
21450-#ifdef CONFIG_X86_32
21451- if (!user_mode_vm(regs)) {
21452+ if (!user_mode(regs)) {
21453 crash_fixup_ss_esp(&fixed_regs, regs);
21454 regs = &fixed_regs;
21455 }
21456diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21457index afa64ad..dce67dd 100644
21458--- a/arch/x86/kernel/crash_dump_64.c
21459+++ b/arch/x86/kernel/crash_dump_64.c
21460@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21461 return -ENOMEM;
21462
21463 if (userbuf) {
21464- if (copy_to_user(buf, vaddr + offset, csize)) {
21465+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21466 iounmap(vaddr);
21467 return -EFAULT;
21468 }
21469diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21470index 5d3fe8d..02e1429 100644
21471--- a/arch/x86/kernel/doublefault.c
21472+++ b/arch/x86/kernel/doublefault.c
21473@@ -13,7 +13,7 @@
21474
21475 #define DOUBLEFAULT_STACKSIZE (1024)
21476 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21477-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21478+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21479
21480 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21481
21482@@ -23,7 +23,7 @@ static void doublefault_fn(void)
21483 unsigned long gdt, tss;
21484
21485 native_store_gdt(&gdt_desc);
21486- gdt = gdt_desc.address;
21487+ gdt = (unsigned long)gdt_desc.address;
21488
21489 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21490
21491@@ -60,10 +60,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21492 /* 0x2 bit is always set */
21493 .flags = X86_EFLAGS_SF | 0x2,
21494 .sp = STACK_START,
21495- .es = __USER_DS,
21496+ .es = __KERNEL_DS,
21497 .cs = __KERNEL_CS,
21498 .ss = __KERNEL_DS,
21499- .ds = __USER_DS,
21500+ .ds = __KERNEL_DS,
21501 .fs = __KERNEL_PERCPU,
21502
21503 .__cr3 = __pa_nodebug(swapper_pg_dir),
21504diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21505index d9c12d3..7858b62 100644
21506--- a/arch/x86/kernel/dumpstack.c
21507+++ b/arch/x86/kernel/dumpstack.c
21508@@ -2,6 +2,9 @@
21509 * Copyright (C) 1991, 1992 Linus Torvalds
21510 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21511 */
21512+#ifdef CONFIG_GRKERNSEC_HIDESYM
21513+#define __INCLUDED_BY_HIDESYM 1
21514+#endif
21515 #include <linux/kallsyms.h>
21516 #include <linux/kprobes.h>
21517 #include <linux/uaccess.h>
21518@@ -40,16 +43,14 @@ void printk_address(unsigned long address)
21519 static void
21520 print_ftrace_graph_addr(unsigned long addr, void *data,
21521 const struct stacktrace_ops *ops,
21522- struct thread_info *tinfo, int *graph)
21523+ struct task_struct *task, int *graph)
21524 {
21525- struct task_struct *task;
21526 unsigned long ret_addr;
21527 int index;
21528
21529 if (addr != (unsigned long)return_to_handler)
21530 return;
21531
21532- task = tinfo->task;
21533 index = task->curr_ret_stack;
21534
21535 if (!task->ret_stack || index < *graph)
21536@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21537 static inline void
21538 print_ftrace_graph_addr(unsigned long addr, void *data,
21539 const struct stacktrace_ops *ops,
21540- struct thread_info *tinfo, int *graph)
21541+ struct task_struct *task, int *graph)
21542 { }
21543 #endif
21544
21545@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21546 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21547 */
21548
21549-static inline int valid_stack_ptr(struct thread_info *tinfo,
21550- void *p, unsigned int size, void *end)
21551+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21552 {
21553- void *t = tinfo;
21554 if (end) {
21555 if (p < end && p >= (end-THREAD_SIZE))
21556 return 1;
21557@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21558 }
21559
21560 unsigned long
21561-print_context_stack(struct thread_info *tinfo,
21562+print_context_stack(struct task_struct *task, void *stack_start,
21563 unsigned long *stack, unsigned long bp,
21564 const struct stacktrace_ops *ops, void *data,
21565 unsigned long *end, int *graph)
21566 {
21567 struct stack_frame *frame = (struct stack_frame *)bp;
21568
21569- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21570+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21571 unsigned long addr;
21572
21573 addr = *stack;
21574@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21575 } else {
21576 ops->address(data, addr, 0);
21577 }
21578- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21579+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21580 }
21581 stack++;
21582 }
21583@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21584 EXPORT_SYMBOL_GPL(print_context_stack);
21585
21586 unsigned long
21587-print_context_stack_bp(struct thread_info *tinfo,
21588+print_context_stack_bp(struct task_struct *task, void *stack_start,
21589 unsigned long *stack, unsigned long bp,
21590 const struct stacktrace_ops *ops, void *data,
21591 unsigned long *end, int *graph)
21592@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21593 struct stack_frame *frame = (struct stack_frame *)bp;
21594 unsigned long *ret_addr = &frame->return_address;
21595
21596- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21597+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21598 unsigned long addr = *ret_addr;
21599
21600 if (!__kernel_text_address(addr))
21601@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21602 ops->address(data, addr, 1);
21603 frame = frame->next_frame;
21604 ret_addr = &frame->return_address;
21605- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21606+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21607 }
21608
21609 return (unsigned long)frame;
21610@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21611 static void print_trace_address(void *data, unsigned long addr, int reliable)
21612 {
21613 touch_nmi_watchdog();
21614- printk(data);
21615+ printk("%s", (char *)data);
21616 printk_stack_address(addr, reliable);
21617 }
21618
21619@@ -224,6 +223,8 @@ unsigned __kprobes long oops_begin(void)
21620 }
21621 EXPORT_SYMBOL_GPL(oops_begin);
21622
21623+extern void gr_handle_kernel_exploit(void);
21624+
21625 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21626 {
21627 if (regs && kexec_should_crash(current))
21628@@ -245,7 +246,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21629 panic("Fatal exception in interrupt");
21630 if (panic_on_oops)
21631 panic("Fatal exception");
21632- do_exit(signr);
21633+
21634+ gr_handle_kernel_exploit();
21635+
21636+ do_group_exit(signr);
21637 }
21638
21639 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
21640@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
21641 print_modules();
21642 show_regs(regs);
21643 #ifdef CONFIG_X86_32
21644- if (user_mode_vm(regs)) {
21645+ if (user_mode(regs)) {
21646 sp = regs->sp;
21647 ss = regs->ss & 0xffff;
21648 } else {
21649@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
21650 unsigned long flags = oops_begin();
21651 int sig = SIGSEGV;
21652
21653- if (!user_mode_vm(regs))
21654+ if (!user_mode(regs))
21655 report_bug(regs->ip, regs);
21656
21657 if (__die(str, regs, err))
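
The dumpstack.c changes above do two independent things: the stack walkers (valid_stack_ptr, print_context_stack, print_context_stack_bp) now take the owning task_struct plus an explicit stack_start pointer instead of deriving both from a thread_info, and print_trace_address stops feeding its data argument to printk() as the format string. The same format-string hardening reappears in the e820.c hunk further down, where early_panic() gains the identical "%s" treatment. A minimal user-space sketch of the hazard that change closes — the function names here are illustrative, not kernel code:

#include <stdio.h>

static void log_prefix_bad(const char *prefix)
{
        /* If prefix ever contains "%s" or "%n", printf will read
         * (or write) through garbage varargs - a classic format-
         * string bug, the same hazard as printk(data) above. */
        printf(prefix);
}

static void log_prefix_good(const char *prefix)
{
        /* Passing the untrusted string as an argument to a fixed
         * "%s" format is always safe. */
        printf("%s", prefix);
}

int main(void)
{
        log_prefix_bad("plain prefix: ");            /* works by luck */
        log_prefix_good("%n is inert here: ");       /* always safe   */
        return 0;
}

Compilers flag the first variant under -Wformat-security; the kernel hunks apply the same mechanical transformation to printk(), early_printk() and panic().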
21658diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
21659index f2a1770..540657f 100644
21660--- a/arch/x86/kernel/dumpstack_32.c
21661+++ b/arch/x86/kernel/dumpstack_32.c
21662@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21663 bp = stack_frame(task, regs);
21664
21665 for (;;) {
21666- struct thread_info *context;
21667+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21668
21669- context = (struct thread_info *)
21670- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
21671- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
21672+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21673
21674- stack = (unsigned long *)context->previous_esp;
21675- if (!stack)
21676+ if (stack_start == task_stack_page(task))
21677 break;
21678+ stack = *(unsigned long **)stack_start;
21679 if (ops->stack(data, "IRQ") < 0)
21680 break;
21681 touch_nmi_watchdog();
21682@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
21683 int i;
21684
21685 show_regs_print_info(KERN_EMERG);
21686- __show_regs(regs, !user_mode_vm(regs));
21687+ __show_regs(regs, !user_mode(regs));
21688
21689 /*
21690 * When in-kernel, we also print out the stack and code at the
21691 * time of the fault..
21692 */
21693- if (!user_mode_vm(regs)) {
21694+ if (!user_mode(regs)) {
21695 unsigned int code_prologue = code_bytes * 43 / 64;
21696 unsigned int code_len = code_bytes;
21697 unsigned char c;
21698 u8 *ip;
21699+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
21700
21701 pr_emerg("Stack:\n");
21702 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
21703
21704 pr_emerg("Code:");
21705
21706- ip = (u8 *)regs->ip - code_prologue;
21707+ ip = (u8 *)regs->ip - code_prologue + cs_base;
21708 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
21709 /* try starting at IP */
21710- ip = (u8 *)regs->ip;
21711+ ip = (u8 *)regs->ip + cs_base;
21712 code_len = code_len - code_prologue + 1;
21713 }
21714 for (i = 0; i < code_len; i++, ip++) {
21715@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
21716 pr_cont(" Bad EIP value.");
21717 break;
21718 }
21719- if (ip == (u8 *)regs->ip)
21720+ if (ip == (u8 *)regs->ip + cs_base)
21721 pr_cont(" <%02x>", c);
21722 else
21723 pr_cont(" %02x", c);
21724@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
21725 {
21726 unsigned short ud2;
21727
21728+ ip = ktla_ktva(ip);
21729 if (ip < PAGE_OFFSET)
21730 return 0;
21731 if (probe_kernel_address((unsigned short *)ip, ud2))
21732@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
21733
21734 return ud2 == 0x0b0f;
21735 }
21736+
21737+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21738+void pax_check_alloca(unsigned long size)
21739+{
21740+ unsigned long sp = (unsigned long)&sp, stack_left;
21741+
21742+ /* all kernel stacks are of the same size */
21743+ stack_left = sp & (THREAD_SIZE - 1);
21744+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21745+}
21746+EXPORT_SYMBOL(pax_check_alloca);
21747+#endif
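
The 32-bit pax_check_alloca() added above leans on the fact that kernel stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned, so masking any in-stack address with THREAD_SIZE - 1 yields the number of bytes remaining below it. A standalone sketch of that arithmetic, with THREAD_SIZE assumed to be 8 KiB purely for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL  /* assumed: 2-page kernel stacks on x86-32 */

/* Mirror of the check in the hunk above: because the stack is
 * THREAD_SIZE-aligned, sp & (THREAD_SIZE - 1) is the distance from
 * sp down to the bottom of the stack. */
static void check_alloca(uintptr_t sp, unsigned long size)
{
        unsigned long stack_left = sp & (THREAD_SIZE - 1);

        /* keep a 256-byte guard below any alloca()ed region,
         * i.e. the negation of the BUG_ON() condition above */
        assert(stack_left >= 256 && size < stack_left - 256);
}

int main(void)
{
        uintptr_t stack_bottom = 0x10000;            /* aligned base */
        check_alloca(stack_bottom + 0x1f00, 512);    /* plenty left: ok */
        printf("alloca of 512 bytes accepted\n");
        return 0;
}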
21748diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
21749index addb207..99635fa 100644
21750--- a/arch/x86/kernel/dumpstack_64.c
21751+++ b/arch/x86/kernel/dumpstack_64.c
21752@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21753 unsigned long *irq_stack_end =
21754 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
21755 unsigned used = 0;
21756- struct thread_info *tinfo;
21757 int graph = 0;
21758 unsigned long dummy;
21759+ void *stack_start;
21760
21761 if (!task)
21762 task = current;
21763@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21764 * current stack address. If the stacks consist of nested
21765 * exceptions
21766 */
21767- tinfo = task_thread_info(task);
21768 for (;;) {
21769 char *id;
21770 unsigned long *estack_end;
21771+
21772 estack_end = in_exception_stack(cpu, (unsigned long)stack,
21773 &used, &id);
21774
21775@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21776 if (ops->stack(data, id) < 0)
21777 break;
21778
21779- bp = ops->walk_stack(tinfo, stack, bp, ops,
21780+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
21781 data, estack_end, &graph);
21782 ops->stack(data, "<EOE>");
21783 /*
21784@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21785 * second-to-last pointer (index -2 to end) in the
21786 * exception stack:
21787 */
21788+ if ((u16)estack_end[-1] != __KERNEL_DS)
21789+ goto out;
21790 stack = (unsigned long *) estack_end[-2];
21791 continue;
21792 }
21793@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21794 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
21795 if (ops->stack(data, "IRQ") < 0)
21796 break;
21797- bp = ops->walk_stack(tinfo, stack, bp,
21798+ bp = ops->walk_stack(task, irq_stack, stack, bp,
21799 ops, data, irq_stack_end, &graph);
21800 /*
21801 * We link to the next stack (which would be
21802@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21803 /*
21804 * This handles the process stack:
21805 */
21806- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
21807+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21808+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21809+out:
21810 put_cpu();
21811 }
21812 EXPORT_SYMBOL(dump_trace);
21813@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
21814
21815 return ud2 == 0x0b0f;
21816 }
21817+
21818+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21819+void pax_check_alloca(unsigned long size)
21820+{
21821+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
21822+ unsigned cpu, used;
21823+ char *id;
21824+
21825+ /* check the process stack first */
21826+ stack_start = (unsigned long)task_stack_page(current);
21827+ stack_end = stack_start + THREAD_SIZE;
21828+ if (likely(stack_start <= sp && sp < stack_end)) {
21829+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
21830+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21831+ return;
21832+ }
21833+
21834+ cpu = get_cpu();
21835+
21836+ /* check the irq stacks */
21837+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
21838+ stack_start = stack_end - IRQ_STACK_SIZE;
21839+ if (stack_start <= sp && sp < stack_end) {
21840+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
21841+ put_cpu();
21842+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21843+ return;
21844+ }
21845+
21846+ /* check the exception stacks */
21847+ used = 0;
21848+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
21849+ stack_start = stack_end - EXCEPTION_STKSZ;
21850+ if (stack_end && stack_start <= sp && sp < stack_end) {
21851+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
21852+ put_cpu();
21853+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
21854+ return;
21855+ }
21856+
21857+ put_cpu();
21858+
21859+ /* unknown stack */
21860+ BUG();
21861+}
21862+EXPORT_SYMBOL(pax_check_alloca);
21863+#endif
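
The 64-bit pax_check_alloca() cannot assume a single stack: the current sp may sit on the process stack, a per-cpu IRQ stack, or one of the exception stacks, so it classifies sp against each region before applying the same 256-byte-headroom rule. The dump_trace() hunk above also refuses to follow an exception-stack link unless the saved segment at estack_end[-1] is __KERNEL_DS, so a corrupted frame cannot steer the unwinder onto attacker-chosen memory. A condensed sketch of the classification, with stack sizes assumed for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE     16384UL  /* assumed sizes, for illustration */
#define IRQ_STACK_SIZE  16384UL

/* Mirror of the 64-bit logic above: find which stack sp is on,
 * then mask with that stack's size to get the remaining headroom. */
static bool classify(uintptr_t sp,
                     uintptr_t proc_lo, uintptr_t irq_hi,
                     unsigned long *left)
{
        if (proc_lo <= sp && sp < proc_lo + THREAD_SIZE) {
                *left = sp & (THREAD_SIZE - 1);      /* process stack */
                return true;
        }
        if (irq_hi - IRQ_STACK_SIZE <= sp && sp < irq_hi) {
                *left = sp & (IRQ_STACK_SIZE - 1);   /* IRQ stack */
                return true;
        }
        return false;  /* exception stacks are handled the same way */
}

int main(void)
{
        unsigned long left;

        if (classify(0x40000 + 0x3000, 0x40000, 0x80000, &left))
                printf("headroom: %lu bytes\n", left);
        return 0;
}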
21864diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
21865index 174da5f..5e55606 100644
21866--- a/arch/x86/kernel/e820.c
21867+++ b/arch/x86/kernel/e820.c
21868@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
21869
21870 static void early_panic(char *msg)
21871 {
21872- early_printk(msg);
21873- panic(msg);
21874+ early_printk("%s", msg);
21875+ panic("%s", msg);
21876 }
21877
21878 static int userdef __initdata;
21879diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
21880index 01d1c18..8073693 100644
21881--- a/arch/x86/kernel/early_printk.c
21882+++ b/arch/x86/kernel/early_printk.c
21883@@ -7,6 +7,7 @@
21884 #include <linux/pci_regs.h>
21885 #include <linux/pci_ids.h>
21886 #include <linux/errno.h>
21887+#include <linux/sched.h>
21888 #include <asm/io.h>
21889 #include <asm/processor.h>
21890 #include <asm/fcntl.h>
21891diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
21892index a2a4f46..6cab058 100644
21893--- a/arch/x86/kernel/entry_32.S
21894+++ b/arch/x86/kernel/entry_32.S
21895@@ -177,13 +177,153 @@
21896 /*CFI_REL_OFFSET gs, PT_GS*/
21897 .endm
21898 .macro SET_KERNEL_GS reg
21899+
21900+#ifdef CONFIG_CC_STACKPROTECTOR
21901 movl $(__KERNEL_STACK_CANARY), \reg
21902+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
21903+ movl $(__USER_DS), \reg
21904+#else
21905+ xorl \reg, \reg
21906+#endif
21907+
21908 movl \reg, %gs
21909 .endm
21910
21911 #endif /* CONFIG_X86_32_LAZY_GS */
21912
21913-.macro SAVE_ALL
21914+.macro pax_enter_kernel
21915+#ifdef CONFIG_PAX_KERNEXEC
21916+ call pax_enter_kernel
21917+#endif
21918+.endm
21919+
21920+.macro pax_exit_kernel
21921+#ifdef CONFIG_PAX_KERNEXEC
21922+ call pax_exit_kernel
21923+#endif
21924+.endm
21925+
21926+#ifdef CONFIG_PAX_KERNEXEC
21927+ENTRY(pax_enter_kernel)
21928+#ifdef CONFIG_PARAVIRT
21929+ pushl %eax
21930+ pushl %ecx
21931+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
21932+ mov %eax, %esi
21933+#else
21934+ mov %cr0, %esi
21935+#endif
21936+ bts $16, %esi
21937+ jnc 1f
21938+ mov %cs, %esi
21939+ cmp $__KERNEL_CS, %esi
21940+ jz 3f
21941+ ljmp $__KERNEL_CS, $3f
21942+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
21943+2:
21944+#ifdef CONFIG_PARAVIRT
21945+ mov %esi, %eax
21946+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
21947+#else
21948+ mov %esi, %cr0
21949+#endif
21950+3:
21951+#ifdef CONFIG_PARAVIRT
21952+ popl %ecx
21953+ popl %eax
21954+#endif
21955+ ret
21956+ENDPROC(pax_enter_kernel)
21957+
21958+ENTRY(pax_exit_kernel)
21959+#ifdef CONFIG_PARAVIRT
21960+ pushl %eax
21961+ pushl %ecx
21962+#endif
21963+ mov %cs, %esi
21964+ cmp $__KERNEXEC_KERNEL_CS, %esi
21965+ jnz 2f
21966+#ifdef CONFIG_PARAVIRT
21967+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
21968+ mov %eax, %esi
21969+#else
21970+ mov %cr0, %esi
21971+#endif
21972+ btr $16, %esi
21973+ ljmp $__KERNEL_CS, $1f
21974+1:
21975+#ifdef CONFIG_PARAVIRT
21976+ mov %esi, %eax
21977+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
21978+#else
21979+ mov %esi, %cr0
21980+#endif
21981+2:
21982+#ifdef CONFIG_PARAVIRT
21983+ popl %ecx
21984+ popl %eax
21985+#endif
21986+ ret
21987+ENDPROC(pax_exit_kernel)
21988+#endif
21989+
21990+ .macro pax_erase_kstack
21991+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21992+ call pax_erase_kstack
21993+#endif
21994+ .endm
21995+
21996+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21997+/*
21998+ * ebp: thread_info
21999+ */
22000+ENTRY(pax_erase_kstack)
22001+ pushl %edi
22002+ pushl %ecx
22003+ pushl %eax
22004+
22005+ mov TI_lowest_stack(%ebp), %edi
22006+ mov $-0xBEEF, %eax
22007+ std
22008+
22009+1: mov %edi, %ecx
22010+ and $THREAD_SIZE_asm - 1, %ecx
22011+ shr $2, %ecx
22012+ repne scasl
22013+ jecxz 2f
22014+
22015+ cmp $2*16, %ecx
22016+ jc 2f
22017+
22018+ mov $2*16, %ecx
22019+ repe scasl
22020+ jecxz 2f
22021+ jne 1b
22022+
22023+2: cld
22024+ mov %esp, %ecx
22025+ sub %edi, %ecx
22026+
22027+ cmp $THREAD_SIZE_asm, %ecx
22028+ jb 3f
22029+ ud2
22030+3:
22031+
22032+ shr $2, %ecx
22033+ rep stosl
22034+
22035+ mov TI_task_thread_sp0(%ebp), %edi
22036+ sub $128, %edi
22037+ mov %edi, TI_lowest_stack(%ebp)
22038+
22039+ popl %eax
22040+ popl %ecx
22041+ popl %edi
22042+ ret
22043+ENDPROC(pax_erase_kstack)
22044+#endif
22045+
22046+.macro __SAVE_ALL _DS
22047 cld
22048 PUSH_GS
22049 pushl_cfi %fs
22050@@ -206,7 +346,7 @@
22051 CFI_REL_OFFSET ecx, 0
22052 pushl_cfi %ebx
22053 CFI_REL_OFFSET ebx, 0
22054- movl $(__USER_DS), %edx
22055+ movl $\_DS, %edx
22056 movl %edx, %ds
22057 movl %edx, %es
22058 movl $(__KERNEL_PERCPU), %edx
22059@@ -214,6 +354,15 @@
22060 SET_KERNEL_GS %edx
22061 .endm
22062
22063+.macro SAVE_ALL
22064+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22065+ __SAVE_ALL __KERNEL_DS
22066+ pax_enter_kernel
22067+#else
22068+ __SAVE_ALL __USER_DS
22069+#endif
22070+.endm
22071+
22072 .macro RESTORE_INT_REGS
22073 popl_cfi %ebx
22074 CFI_RESTORE ebx
22075@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
22076 popfl_cfi
22077 jmp syscall_exit
22078 CFI_ENDPROC
22079-END(ret_from_fork)
22080+ENDPROC(ret_from_fork)
22081
22082 ENTRY(ret_from_kernel_thread)
22083 CFI_STARTPROC
22084@@ -344,7 +493,15 @@ ret_from_intr:
22085 andl $SEGMENT_RPL_MASK, %eax
22086 #endif
22087 cmpl $USER_RPL, %eax
22088+
22089+#ifdef CONFIG_PAX_KERNEXEC
22090+ jae resume_userspace
22091+
22092+ pax_exit_kernel
22093+ jmp resume_kernel
22094+#else
22095 jb resume_kernel # not returning to v8086 or userspace
22096+#endif
22097
22098 ENTRY(resume_userspace)
22099 LOCKDEP_SYS_EXIT
22100@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
22101 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22102 # int/exception return?
22103 jne work_pending
22104- jmp restore_all
22105-END(ret_from_exception)
22106+ jmp restore_all_pax
22107+ENDPROC(ret_from_exception)
22108
22109 #ifdef CONFIG_PREEMPT
22110 ENTRY(resume_kernel)
22111@@ -369,7 +526,7 @@ need_resched:
22112 jz restore_all
22113 call preempt_schedule_irq
22114 jmp need_resched
22115-END(resume_kernel)
22116+ENDPROC(resume_kernel)
22117 #endif
22118 CFI_ENDPROC
22119 /*
22120@@ -403,30 +560,45 @@ sysenter_past_esp:
22121 /*CFI_REL_OFFSET cs, 0*/
22122 /*
22123 * Push current_thread_info()->sysenter_return to the stack.
22124- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22125- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22126 */
22127- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22128+ pushl_cfi $0
22129 CFI_REL_OFFSET eip, 0
22130
22131 pushl_cfi %eax
22132 SAVE_ALL
22133+ GET_THREAD_INFO(%ebp)
22134+ movl TI_sysenter_return(%ebp),%ebp
22135+ movl %ebp,PT_EIP(%esp)
22136 ENABLE_INTERRUPTS(CLBR_NONE)
22137
22138 /*
22139 * Load the potential sixth argument from user stack.
22140 * Careful about security.
22141 */
22142+ movl PT_OLDESP(%esp),%ebp
22143+
22144+#ifdef CONFIG_PAX_MEMORY_UDEREF
22145+ mov PT_OLDSS(%esp),%ds
22146+1: movl %ds:(%ebp),%ebp
22147+ push %ss
22148+ pop %ds
22149+#else
22150 cmpl $__PAGE_OFFSET-3,%ebp
22151 jae syscall_fault
22152 ASM_STAC
22153 1: movl (%ebp),%ebp
22154 ASM_CLAC
22155+#endif
22156+
22157 movl %ebp,PT_EBP(%esp)
22158 _ASM_EXTABLE(1b,syscall_fault)
22159
22160 GET_THREAD_INFO(%ebp)
22161
22162+#ifdef CONFIG_PAX_RANDKSTACK
22163+ pax_erase_kstack
22164+#endif
22165+
22166 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22167 jnz sysenter_audit
22168 sysenter_do_call:
22169@@ -441,12 +613,24 @@ sysenter_do_call:
22170 testl $_TIF_ALLWORK_MASK, %ecx
22171 jne sysexit_audit
22172 sysenter_exit:
22173+
22174+#ifdef CONFIG_PAX_RANDKSTACK
22175+ pushl_cfi %eax
22176+ movl %esp, %eax
22177+ call pax_randomize_kstack
22178+ popl_cfi %eax
22179+#endif
22180+
22181+ pax_erase_kstack
22182+
22183 /* if something modifies registers it must also disable sysexit */
22184 movl PT_EIP(%esp), %edx
22185 movl PT_OLDESP(%esp), %ecx
22186 xorl %ebp,%ebp
22187 TRACE_IRQS_ON
22188 1: mov PT_FS(%esp), %fs
22189+2: mov PT_DS(%esp), %ds
22190+3: mov PT_ES(%esp), %es
22191 PTGS_TO_GS
22192 ENABLE_INTERRUPTS_SYSEXIT
22193
22194@@ -463,6 +647,9 @@ sysenter_audit:
22195 movl %eax,%edx /* 2nd arg: syscall number */
22196 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
22197 call __audit_syscall_entry
22198+
22199+ pax_erase_kstack
22200+
22201 pushl_cfi %ebx
22202 movl PT_EAX(%esp),%eax /* reload syscall number */
22203 jmp sysenter_do_call
22204@@ -488,10 +675,16 @@ sysexit_audit:
22205
22206 CFI_ENDPROC
22207 .pushsection .fixup,"ax"
22208-2: movl $0,PT_FS(%esp)
22209+4: movl $0,PT_FS(%esp)
22210+ jmp 1b
22211+5: movl $0,PT_DS(%esp)
22212+ jmp 1b
22213+6: movl $0,PT_ES(%esp)
22214 jmp 1b
22215 .popsection
22216- _ASM_EXTABLE(1b,2b)
22217+ _ASM_EXTABLE(1b,4b)
22218+ _ASM_EXTABLE(2b,5b)
22219+ _ASM_EXTABLE(3b,6b)
22220 PTGS_TO_GS_EX
22221 ENDPROC(ia32_sysenter_target)
22222
22223@@ -506,6 +699,11 @@ ENTRY(system_call)
22224 pushl_cfi %eax # save orig_eax
22225 SAVE_ALL
22226 GET_THREAD_INFO(%ebp)
22227+
22228+#ifdef CONFIG_PAX_RANDKSTACK
22229+ pax_erase_kstack
22230+#endif
22231+
22232 # system call tracing in operation / emulation
22233 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22234 jnz syscall_trace_entry
22235@@ -524,6 +722,15 @@ syscall_exit:
22236 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22237 jne syscall_exit_work
22238
22239+restore_all_pax:
22240+
22241+#ifdef CONFIG_PAX_RANDKSTACK
22242+ movl %esp, %eax
22243+ call pax_randomize_kstack
22244+#endif
22245+
22246+ pax_erase_kstack
22247+
22248 restore_all:
22249 TRACE_IRQS_IRET
22250 restore_all_notrace:
22251@@ -580,14 +787,34 @@ ldt_ss:
22252 * compensating for the offset by changing to the ESPFIX segment with
22253 * a base address that matches for the difference.
22254 */
22255-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22256+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22257 mov %esp, %edx /* load kernel esp */
22258 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22259 mov %dx, %ax /* eax: new kernel esp */
22260 sub %eax, %edx /* offset (low word is 0) */
22261+#ifdef CONFIG_SMP
22262+ movl PER_CPU_VAR(cpu_number), %ebx
22263+ shll $PAGE_SHIFT_asm, %ebx
22264+ addl $cpu_gdt_table, %ebx
22265+#else
22266+ movl $cpu_gdt_table, %ebx
22267+#endif
22268 shr $16, %edx
22269- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22270- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22271+
22272+#ifdef CONFIG_PAX_KERNEXEC
22273+ mov %cr0, %esi
22274+ btr $16, %esi
22275+ mov %esi, %cr0
22276+#endif
22277+
22278+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22279+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22280+
22281+#ifdef CONFIG_PAX_KERNEXEC
22282+ bts $16, %esi
22283+ mov %esi, %cr0
22284+#endif
22285+
22286 pushl_cfi $__ESPFIX_SS
22287 pushl_cfi %eax /* new kernel esp */
22288 /* Disable interrupts, but do not irqtrace this section: we
22289@@ -616,20 +843,18 @@ work_resched:
22290 movl TI_flags(%ebp), %ecx
22291 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22292 # than syscall tracing?
22293- jz restore_all
22294+ jz restore_all_pax
22295 testb $_TIF_NEED_RESCHED, %cl
22296 jnz work_resched
22297
22298 work_notifysig: # deal with pending signals and
22299 # notify-resume requests
22300+ movl %esp, %eax
22301 #ifdef CONFIG_VM86
22302 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22303- movl %esp, %eax
22304 jne work_notifysig_v86 # returning to kernel-space or
22305 # vm86-space
22306 1:
22307-#else
22308- movl %esp, %eax
22309 #endif
22310 TRACE_IRQS_ON
22311 ENABLE_INTERRUPTS(CLBR_NONE)
22312@@ -650,7 +875,7 @@ work_notifysig_v86:
22313 movl %eax, %esp
22314 jmp 1b
22315 #endif
22316-END(work_pending)
22317+ENDPROC(work_pending)
22318
22319 # perform syscall exit tracing
22320 ALIGN
22321@@ -658,11 +883,14 @@ syscall_trace_entry:
22322 movl $-ENOSYS,PT_EAX(%esp)
22323 movl %esp, %eax
22324 call syscall_trace_enter
22325+
22326+ pax_erase_kstack
22327+
22328 /* What it returned is what we'll actually use. */
22329 cmpl $(NR_syscalls), %eax
22330 jnae syscall_call
22331 jmp syscall_exit
22332-END(syscall_trace_entry)
22333+ENDPROC(syscall_trace_entry)
22334
22335 # perform syscall exit tracing
22336 ALIGN
22337@@ -675,21 +903,25 @@ syscall_exit_work:
22338 movl %esp, %eax
22339 call syscall_trace_leave
22340 jmp resume_userspace
22341-END(syscall_exit_work)
22342+ENDPROC(syscall_exit_work)
22343 CFI_ENDPROC
22344
22345 RING0_INT_FRAME # can't unwind into user space anyway
22346 syscall_fault:
22347+#ifdef CONFIG_PAX_MEMORY_UDEREF
22348+ push %ss
22349+ pop %ds
22350+#endif
22351 ASM_CLAC
22352 GET_THREAD_INFO(%ebp)
22353 movl $-EFAULT,PT_EAX(%esp)
22354 jmp resume_userspace
22355-END(syscall_fault)
22356+ENDPROC(syscall_fault)
22357
22358 syscall_badsys:
22359 movl $-ENOSYS,PT_EAX(%esp)
22360 jmp resume_userspace
22361-END(syscall_badsys)
22362+ENDPROC(syscall_badsys)
22363 CFI_ENDPROC
22364 /*
22365 * End of kprobes section
22366@@ -705,8 +937,15 @@ END(syscall_badsys)
22367 * normal stack and adjusts ESP with the matching offset.
22368 */
22369 /* fixup the stack */
22370- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22371- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22372+#ifdef CONFIG_SMP
22373+ movl PER_CPU_VAR(cpu_number), %ebx
22374+ shll $PAGE_SHIFT_asm, %ebx
22375+ addl $cpu_gdt_table, %ebx
22376+#else
22377+ movl $cpu_gdt_table, %ebx
22378+#endif
22379+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22380+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22381 shl $16, %eax
22382 addl %esp, %eax /* the adjusted stack pointer */
22383 pushl_cfi $__KERNEL_DS
22384@@ -759,7 +998,7 @@ vector=vector+1
22385 .endr
22386 2: jmp common_interrupt
22387 .endr
22388-END(irq_entries_start)
22389+ENDPROC(irq_entries_start)
22390
22391 .previous
22392 END(interrupt)
22393@@ -820,7 +1059,7 @@ ENTRY(coprocessor_error)
22394 pushl_cfi $do_coprocessor_error
22395 jmp error_code
22396 CFI_ENDPROC
22397-END(coprocessor_error)
22398+ENDPROC(coprocessor_error)
22399
22400 ENTRY(simd_coprocessor_error)
22401 RING0_INT_FRAME
22402@@ -833,7 +1072,7 @@ ENTRY(simd_coprocessor_error)
22403 .section .altinstructions,"a"
22404 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22405 .previous
22406-.section .altinstr_replacement,"ax"
22407+.section .altinstr_replacement,"a"
22408 663: pushl $do_simd_coprocessor_error
22409 664:
22410 .previous
22411@@ -842,7 +1081,7 @@ ENTRY(simd_coprocessor_error)
22412 #endif
22413 jmp error_code
22414 CFI_ENDPROC
22415-END(simd_coprocessor_error)
22416+ENDPROC(simd_coprocessor_error)
22417
22418 ENTRY(device_not_available)
22419 RING0_INT_FRAME
22420@@ -851,18 +1090,18 @@ ENTRY(device_not_available)
22421 pushl_cfi $do_device_not_available
22422 jmp error_code
22423 CFI_ENDPROC
22424-END(device_not_available)
22425+ENDPROC(device_not_available)
22426
22427 #ifdef CONFIG_PARAVIRT
22428 ENTRY(native_iret)
22429 iret
22430 _ASM_EXTABLE(native_iret, iret_exc)
22431-END(native_iret)
22432+ENDPROC(native_iret)
22433
22434 ENTRY(native_irq_enable_sysexit)
22435 sti
22436 sysexit
22437-END(native_irq_enable_sysexit)
22438+ENDPROC(native_irq_enable_sysexit)
22439 #endif
22440
22441 ENTRY(overflow)
22442@@ -872,7 +1111,7 @@ ENTRY(overflow)
22443 pushl_cfi $do_overflow
22444 jmp error_code
22445 CFI_ENDPROC
22446-END(overflow)
22447+ENDPROC(overflow)
22448
22449 ENTRY(bounds)
22450 RING0_INT_FRAME
22451@@ -881,7 +1120,7 @@ ENTRY(bounds)
22452 pushl_cfi $do_bounds
22453 jmp error_code
22454 CFI_ENDPROC
22455-END(bounds)
22456+ENDPROC(bounds)
22457
22458 ENTRY(invalid_op)
22459 RING0_INT_FRAME
22460@@ -890,7 +1129,7 @@ ENTRY(invalid_op)
22461 pushl_cfi $do_invalid_op
22462 jmp error_code
22463 CFI_ENDPROC
22464-END(invalid_op)
22465+ENDPROC(invalid_op)
22466
22467 ENTRY(coprocessor_segment_overrun)
22468 RING0_INT_FRAME
22469@@ -899,7 +1138,7 @@ ENTRY(coprocessor_segment_overrun)
22470 pushl_cfi $do_coprocessor_segment_overrun
22471 jmp error_code
22472 CFI_ENDPROC
22473-END(coprocessor_segment_overrun)
22474+ENDPROC(coprocessor_segment_overrun)
22475
22476 ENTRY(invalid_TSS)
22477 RING0_EC_FRAME
22478@@ -907,7 +1146,7 @@ ENTRY(invalid_TSS)
22479 pushl_cfi $do_invalid_TSS
22480 jmp error_code
22481 CFI_ENDPROC
22482-END(invalid_TSS)
22483+ENDPROC(invalid_TSS)
22484
22485 ENTRY(segment_not_present)
22486 RING0_EC_FRAME
22487@@ -915,7 +1154,7 @@ ENTRY(segment_not_present)
22488 pushl_cfi $do_segment_not_present
22489 jmp error_code
22490 CFI_ENDPROC
22491-END(segment_not_present)
22492+ENDPROC(segment_not_present)
22493
22494 ENTRY(stack_segment)
22495 RING0_EC_FRAME
22496@@ -923,7 +1162,7 @@ ENTRY(stack_segment)
22497 pushl_cfi $do_stack_segment
22498 jmp error_code
22499 CFI_ENDPROC
22500-END(stack_segment)
22501+ENDPROC(stack_segment)
22502
22503 ENTRY(alignment_check)
22504 RING0_EC_FRAME
22505@@ -931,7 +1170,7 @@ ENTRY(alignment_check)
22506 pushl_cfi $do_alignment_check
22507 jmp error_code
22508 CFI_ENDPROC
22509-END(alignment_check)
22510+ENDPROC(alignment_check)
22511
22512 ENTRY(divide_error)
22513 RING0_INT_FRAME
22514@@ -940,7 +1179,7 @@ ENTRY(divide_error)
22515 pushl_cfi $do_divide_error
22516 jmp error_code
22517 CFI_ENDPROC
22518-END(divide_error)
22519+ENDPROC(divide_error)
22520
22521 #ifdef CONFIG_X86_MCE
22522 ENTRY(machine_check)
22523@@ -950,7 +1189,7 @@ ENTRY(machine_check)
22524 pushl_cfi machine_check_vector
22525 jmp error_code
22526 CFI_ENDPROC
22527-END(machine_check)
22528+ENDPROC(machine_check)
22529 #endif
22530
22531 ENTRY(spurious_interrupt_bug)
22532@@ -960,7 +1199,7 @@ ENTRY(spurious_interrupt_bug)
22533 pushl_cfi $do_spurious_interrupt_bug
22534 jmp error_code
22535 CFI_ENDPROC
22536-END(spurious_interrupt_bug)
22537+ENDPROC(spurious_interrupt_bug)
22538 /*
22539 * End of kprobes section
22540 */
22541@@ -1070,7 +1309,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22542
22543 ENTRY(mcount)
22544 ret
22545-END(mcount)
22546+ENDPROC(mcount)
22547
22548 ENTRY(ftrace_caller)
22549 cmpl $0, function_trace_stop
22550@@ -1103,7 +1342,7 @@ ftrace_graph_call:
22551 .globl ftrace_stub
22552 ftrace_stub:
22553 ret
22554-END(ftrace_caller)
22555+ENDPROC(ftrace_caller)
22556
22557 ENTRY(ftrace_regs_caller)
22558 pushf /* push flags before compare (in cs location) */
22559@@ -1207,7 +1446,7 @@ trace:
22560 popl %ecx
22561 popl %eax
22562 jmp ftrace_stub
22563-END(mcount)
22564+ENDPROC(mcount)
22565 #endif /* CONFIG_DYNAMIC_FTRACE */
22566 #endif /* CONFIG_FUNCTION_TRACER */
22567
22568@@ -1225,7 +1464,7 @@ ENTRY(ftrace_graph_caller)
22569 popl %ecx
22570 popl %eax
22571 ret
22572-END(ftrace_graph_caller)
22573+ENDPROC(ftrace_graph_caller)
22574
22575 .globl return_to_handler
22576 return_to_handler:
22577@@ -1291,15 +1530,18 @@ error_code:
22578 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22579 REG_TO_PTGS %ecx
22580 SET_KERNEL_GS %ecx
22581- movl $(__USER_DS), %ecx
22582+ movl $(__KERNEL_DS), %ecx
22583 movl %ecx, %ds
22584 movl %ecx, %es
22585+
22586+ pax_enter_kernel
22587+
22588 TRACE_IRQS_OFF
22589 movl %esp,%eax # pt_regs pointer
22590 call *%edi
22591 jmp ret_from_exception
22592 CFI_ENDPROC
22593-END(page_fault)
22594+ENDPROC(page_fault)
22595
22596 /*
22597 * Debug traps and NMI can happen at the one SYSENTER instruction
22598@@ -1342,7 +1584,7 @@ debug_stack_correct:
22599 call do_debug
22600 jmp ret_from_exception
22601 CFI_ENDPROC
22602-END(debug)
22603+ENDPROC(debug)
22604
22605 /*
22606 * NMI is doubly nasty. It can happen _while_ we're handling
22607@@ -1380,6 +1622,9 @@ nmi_stack_correct:
22608 xorl %edx,%edx # zero error code
22609 movl %esp,%eax # pt_regs pointer
22610 call do_nmi
22611+
22612+ pax_exit_kernel
22613+
22614 jmp restore_all_notrace
22615 CFI_ENDPROC
22616
22617@@ -1416,12 +1661,15 @@ nmi_espfix_stack:
22618 FIXUP_ESPFIX_STACK # %eax == %esp
22619 xorl %edx,%edx # zero error code
22620 call do_nmi
22621+
22622+ pax_exit_kernel
22623+
22624 RESTORE_REGS
22625 lss 12+4(%esp), %esp # back to espfix stack
22626 CFI_ADJUST_CFA_OFFSET -24
22627 jmp irq_return
22628 CFI_ENDPROC
22629-END(nmi)
22630+ENDPROC(nmi)
22631
22632 ENTRY(int3)
22633 RING0_INT_FRAME
22634@@ -1434,14 +1682,14 @@ ENTRY(int3)
22635 call do_int3
22636 jmp ret_from_exception
22637 CFI_ENDPROC
22638-END(int3)
22639+ENDPROC(int3)
22640
22641 ENTRY(general_protection)
22642 RING0_EC_FRAME
22643 pushl_cfi $do_general_protection
22644 jmp error_code
22645 CFI_ENDPROC
22646-END(general_protection)
22647+ENDPROC(general_protection)
22648
22649 #ifdef CONFIG_KVM_GUEST
22650 ENTRY(async_page_fault)
22651@@ -1450,7 +1698,7 @@ ENTRY(async_page_fault)
22652 pushl_cfi $do_async_page_fault
22653 jmp error_code
22654 CFI_ENDPROC
22655-END(async_page_fault)
22656+ENDPROC(async_page_fault)
22657 #endif
22658
22659 /*
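
Most of the entry_32.S churn is mechanical (END becoming ENDPROC so the labels get proper function-type annotations), but two additions carry the weight. pax_enter_kernel/pax_exit_kernel toggle CR0.WP (bit 16) and far-jump between __KERNEL_CS and __KERNEXEC_KERNEL_CS so that, under KERNEXEC, kernel code executes from a read-only mapping except inside explicitly opened windows. pax_erase_kstack scans downward from the recorded lowest stack pointer for a run of untouched 0xffff4111 (-0xBEEF) poison longs, then re-fills everything up to the live stack pointer, so data left behind by deep call chains cannot leak through later uninitialized reads. A rough C model of that scan-and-fill pass — deliberately simplified, not a line-for-line translation of the assembly:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define POISON 0xffff4111u  /* 32-bit -0xBEEF, as in the asm above */
#define RUN    32           /* run length the asm requires (2*16)  */

/* Sketch of the erase pass: walk down from lowest_used toward the
 * stack bottom until RUN consecutive poison words are seen (memory
 * no call frame ever dirtied), then re-poison everything from that
 * point up to the current stack pointer. */
static void erase_kstack(uint32_t *bottom, uint32_t *lowest_used,
                         uint32_t *sp)
{
        uint32_t *p = lowest_used;
        size_t run = 0;

        while (p > bottom && run < RUN) {
                p--;
                run = (*p == POISON) ? run + 1 : 0;
        }
        while (p < sp)
                *p++ = POISON;
}

int main(void)
{
        uint32_t stack[64] = { 0 };

        erase_kstack(stack, stack + 48, stack + 56);
        printf("stack[40] = %#x\n", (unsigned)stack[40]);
        return 0;
}

The std/scasl pair in the real routine does the downward scan in two instructions; requiring a run of poison longs rather than a single hit keeps an accidental 0xffff4111 value on the stack from cutting the sweep short.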
22660diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
22661index 1e96c36..3ff710a 100644
22662--- a/arch/x86/kernel/entry_64.S
22663+++ b/arch/x86/kernel/entry_64.S
22664@@ -59,6 +59,8 @@
22665 #include <asm/context_tracking.h>
22666 #include <asm/smap.h>
22667 #include <linux/err.h>
22668+#include <asm/pgtable.h>
22669+#include <asm/alternative-asm.h>
22670
22671 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
22672 #include <linux/elf-em.h>
22673@@ -80,8 +82,9 @@
22674 #ifdef CONFIG_DYNAMIC_FTRACE
22675
22676 ENTRY(function_hook)
22677+ pax_force_retaddr
22678 retq
22679-END(function_hook)
22680+ENDPROC(function_hook)
22681
22682 /* skip is set if stack has been adjusted */
22683 .macro ftrace_caller_setup skip=0
22684@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
22685 #endif
22686
22687 GLOBAL(ftrace_stub)
22688+ pax_force_retaddr
22689 retq
22690-END(ftrace_caller)
22691+ENDPROC(ftrace_caller)
22692
22693 ENTRY(ftrace_regs_caller)
22694 /* Save the current flags before compare (in SS location)*/
22695@@ -191,7 +195,7 @@ ftrace_restore_flags:
22696 popfq
22697 jmp ftrace_stub
22698
22699-END(ftrace_regs_caller)
22700+ENDPROC(ftrace_regs_caller)
22701
22702
22703 #else /* ! CONFIG_DYNAMIC_FTRACE */
22704@@ -212,6 +216,7 @@ ENTRY(function_hook)
22705 #endif
22706
22707 GLOBAL(ftrace_stub)
22708+ pax_force_retaddr
22709 retq
22710
22711 trace:
22712@@ -225,12 +230,13 @@ trace:
22713 #endif
22714 subq $MCOUNT_INSN_SIZE, %rdi
22715
22716+ pax_force_fptr ftrace_trace_function
22717 call *ftrace_trace_function
22718
22719 MCOUNT_RESTORE_FRAME
22720
22721 jmp ftrace_stub
22722-END(function_hook)
22723+ENDPROC(function_hook)
22724 #endif /* CONFIG_DYNAMIC_FTRACE */
22725 #endif /* CONFIG_FUNCTION_TRACER */
22726
22727@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
22728
22729 MCOUNT_RESTORE_FRAME
22730
22731+ pax_force_retaddr
22732 retq
22733-END(ftrace_graph_caller)
22734+ENDPROC(ftrace_graph_caller)
22735
22736 GLOBAL(return_to_handler)
22737 subq $24, %rsp
22738@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
22739 movq 8(%rsp), %rdx
22740 movq (%rsp), %rax
22741 addq $24, %rsp
22742+ pax_force_fptr %rdi
22743 jmp *%rdi
22744+ENDPROC(return_to_handler)
22745 #endif
22746
22747
22748@@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
22749 ENDPROC(native_usergs_sysret64)
22750 #endif /* CONFIG_PARAVIRT */
22751
22752+ .macro ljmpq sel, off
22753+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
22754+ .byte 0x48; ljmp *1234f(%rip)
22755+ .pushsection .rodata
22756+ .align 16
22757+ 1234: .quad \off; .word \sel
22758+ .popsection
22759+#else
22760+ pushq $\sel
22761+ pushq $\off
22762+ lretq
22763+#endif
22764+ .endm
22765+
22766+ .macro pax_enter_kernel
22767+ pax_set_fptr_mask
22768+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22769+ call pax_enter_kernel
22770+#endif
22771+ .endm
22772+
22773+ .macro pax_exit_kernel
22774+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22775+ call pax_exit_kernel
22776+#endif
22777+
22778+ .endm
22779+
22780+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22781+ENTRY(pax_enter_kernel)
22782+ pushq %rdi
22783+
22784+#ifdef CONFIG_PARAVIRT
22785+ PV_SAVE_REGS(CLBR_RDI)
22786+#endif
22787+
22788+#ifdef CONFIG_PAX_KERNEXEC
22789+ GET_CR0_INTO_RDI
22790+ bts $16,%rdi
22791+ jnc 3f
22792+ mov %cs,%edi
22793+ cmp $__KERNEL_CS,%edi
22794+ jnz 2f
22795+1:
22796+#endif
22797+
22798+#ifdef CONFIG_PAX_MEMORY_UDEREF
22799+ 661: jmp 111f
22800+ .pushsection .altinstr_replacement, "a"
22801+ 662: ASM_NOP2
22802+ .popsection
22803+ .pushsection .altinstructions, "a"
22804+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22805+ .popsection
22806+ GET_CR3_INTO_RDI
22807+ cmp $0,%dil
22808+ jnz 112f
22809+ mov $__KERNEL_DS,%edi
22810+ mov %edi,%ss
22811+ jmp 111f
22812+112: cmp $1,%dil
22813+ jz 113f
22814+ ud2
22815+113: sub $4097,%rdi
22816+ bts $63,%rdi
22817+ SET_RDI_INTO_CR3
22818+ mov $__UDEREF_KERNEL_DS,%edi
22819+ mov %edi,%ss
22820+111:
22821+#endif
22822+
22823+#ifdef CONFIG_PARAVIRT
22824+ PV_RESTORE_REGS(CLBR_RDI)
22825+#endif
22826+
22827+ popq %rdi
22828+ pax_force_retaddr
22829+ retq
22830+
22831+#ifdef CONFIG_PAX_KERNEXEC
22832+2: ljmpq __KERNEL_CS,1b
22833+3: ljmpq __KERNEXEC_KERNEL_CS,4f
22834+4: SET_RDI_INTO_CR0
22835+ jmp 1b
22836+#endif
22837+ENDPROC(pax_enter_kernel)
22838+
22839+ENTRY(pax_exit_kernel)
22840+ pushq %rdi
22841+
22842+#ifdef CONFIG_PARAVIRT
22843+ PV_SAVE_REGS(CLBR_RDI)
22844+#endif
22845+
22846+#ifdef CONFIG_PAX_KERNEXEC
22847+ mov %cs,%rdi
22848+ cmp $__KERNEXEC_KERNEL_CS,%edi
22849+ jz 2f
22850+ GET_CR0_INTO_RDI
22851+ bts $16,%rdi
22852+ jnc 4f
22853+1:
22854+#endif
22855+
22856+#ifdef CONFIG_PAX_MEMORY_UDEREF
22857+ 661: jmp 111f
22858+ .pushsection .altinstr_replacement, "a"
22859+ 662: ASM_NOP2
22860+ .popsection
22861+ .pushsection .altinstructions, "a"
22862+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22863+ .popsection
22864+ mov %ss,%edi
22865+ cmp $__UDEREF_KERNEL_DS,%edi
22866+ jnz 111f
22867+ GET_CR3_INTO_RDI
22868+ cmp $0,%dil
22869+ jz 112f
22870+ ud2
22871+112: add $4097,%rdi
22872+ bts $63,%rdi
22873+ SET_RDI_INTO_CR3
22874+ mov $__KERNEL_DS,%edi
22875+ mov %edi,%ss
22876+111:
22877+#endif
22878+
22879+#ifdef CONFIG_PARAVIRT
22880+ PV_RESTORE_REGS(CLBR_RDI);
22881+#endif
22882+
22883+ popq %rdi
22884+ pax_force_retaddr
22885+ retq
22886+
22887+#ifdef CONFIG_PAX_KERNEXEC
22888+2: GET_CR0_INTO_RDI
22889+ btr $16,%rdi
22890+ jnc 4f
22891+ ljmpq __KERNEL_CS,3f
22892+3: SET_RDI_INTO_CR0
22893+ jmp 1b
22894+4: ud2
22895+ jmp 4b
22896+#endif
22897+ENDPROC(pax_exit_kernel)
22898+#endif
22899+
22900+ .macro pax_enter_kernel_user
22901+ pax_set_fptr_mask
22902+#ifdef CONFIG_PAX_MEMORY_UDEREF
22903+ call pax_enter_kernel_user
22904+#endif
22905+ .endm
22906+
22907+ .macro pax_exit_kernel_user
22908+#ifdef CONFIG_PAX_MEMORY_UDEREF
22909+ call pax_exit_kernel_user
22910+#endif
22911+#ifdef CONFIG_PAX_RANDKSTACK
22912+ pushq %rax
22913+ pushq %r11
22914+ call pax_randomize_kstack
22915+ popq %r11
22916+ popq %rax
22917+#endif
22918+ .endm
22919+
22920+#ifdef CONFIG_PAX_MEMORY_UDEREF
22921+ENTRY(pax_enter_kernel_user)
22922+ pushq %rdi
22923+ pushq %rbx
22924+
22925+#ifdef CONFIG_PARAVIRT
22926+ PV_SAVE_REGS(CLBR_RDI)
22927+#endif
22928+
22929+ 661: jmp 111f
22930+ .pushsection .altinstr_replacement, "a"
22931+ 662: ASM_NOP2
22932+ .popsection
22933+ .pushsection .altinstructions, "a"
22934+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22935+ .popsection
22936+ GET_CR3_INTO_RDI
22937+ cmp $1,%dil
22938+ jnz 4f
22939+ sub $4097,%rdi
22940+ bts $63,%rdi
22941+ SET_RDI_INTO_CR3
22942+ jmp 3f
22943+111:
22944+
22945+ GET_CR3_INTO_RDI
22946+ mov %rdi,%rbx
22947+ add $__START_KERNEL_map,%rbx
22948+ sub phys_base(%rip),%rbx
22949+
22950+#ifdef CONFIG_PARAVIRT
22951+ cmpl $0, pv_info+PARAVIRT_enabled
22952+ jz 1f
22953+ pushq %rdi
22954+ i = 0
22955+ .rept USER_PGD_PTRS
22956+ mov i*8(%rbx),%rsi
22957+ mov $0,%sil
22958+ lea i*8(%rbx),%rdi
22959+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22960+ i = i + 1
22961+ .endr
22962+ popq %rdi
22963+ jmp 2f
22964+1:
22965+#endif
22966+
22967+ i = 0
22968+ .rept USER_PGD_PTRS
22969+ movb $0,i*8(%rbx)
22970+ i = i + 1
22971+ .endr
22972+
22973+2: SET_RDI_INTO_CR3
22974+
22975+#ifdef CONFIG_PAX_KERNEXEC
22976+ GET_CR0_INTO_RDI
22977+ bts $16,%rdi
22978+ SET_RDI_INTO_CR0
22979+#endif
22980+
22981+3:
22982+
22983+#ifdef CONFIG_PARAVIRT
22984+ PV_RESTORE_REGS(CLBR_RDI)
22985+#endif
22986+
22987+ popq %rbx
22988+ popq %rdi
22989+ pax_force_retaddr
22990+ retq
22991+4: ud2
22992+ENDPROC(pax_enter_kernel_user)
22993+
22994+ENTRY(pax_exit_kernel_user)
22995+ pushq %rdi
22996+ pushq %rbx
22997+
22998+#ifdef CONFIG_PARAVIRT
22999+ PV_SAVE_REGS(CLBR_RDI)
23000+#endif
23001+
23002+ GET_CR3_INTO_RDI
23003+ 661: jmp 1f
23004+ .pushsection .altinstr_replacement, "a"
23005+ 662: ASM_NOP2
23006+ .popsection
23007+ .pushsection .altinstructions, "a"
23008+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23009+ .popsection
23010+ cmp $0,%dil
23011+ jnz 3f
23012+ add $4097,%rdi
23013+ bts $63,%rdi
23014+ SET_RDI_INTO_CR3
23015+ jmp 2f
23016+1:
23017+
23018+ mov %rdi,%rbx
23019+
23020+#ifdef CONFIG_PAX_KERNEXEC
23021+ GET_CR0_INTO_RDI
23022+ btr $16,%rdi
23023+ jnc 3f
23024+ SET_RDI_INTO_CR0
23025+#endif
23026+
23027+ add $__START_KERNEL_map,%rbx
23028+ sub phys_base(%rip),%rbx
23029+
23030+#ifdef CONFIG_PARAVIRT
23031+ cmpl $0, pv_info+PARAVIRT_enabled
23032+ jz 1f
23033+ i = 0
23034+ .rept USER_PGD_PTRS
23035+ mov i*8(%rbx),%rsi
23036+ mov $0x67,%sil
23037+ lea i*8(%rbx),%rdi
23038+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23039+ i = i + 1
23040+ .endr
23041+ jmp 2f
23042+1:
23043+#endif
23044+
23045+ i = 0
23046+ .rept USER_PGD_PTRS
23047+ movb $0x67,i*8(%rbx)
23048+ i = i + 1
23049+ .endr
23050+2:
23051+
23052+#ifdef CONFIG_PARAVIRT
23053+ PV_RESTORE_REGS(CLBR_RDI)
23054+#endif
23055+
23056+ popq %rbx
23057+ popq %rdi
23058+ pax_force_retaddr
23059+ retq
23060+3: ud2
23061+ENDPROC(pax_exit_kernel_user)
23062+#endif
23063+
23064+ .macro pax_enter_kernel_nmi
23065+ pax_set_fptr_mask
23066+
23067+#ifdef CONFIG_PAX_KERNEXEC
23068+ GET_CR0_INTO_RDI
23069+ bts $16,%rdi
23070+ jc 110f
23071+ SET_RDI_INTO_CR0
23072+ or $2,%ebx
23073+110:
23074+#endif
23075+
23076+#ifdef CONFIG_PAX_MEMORY_UDEREF
23077+ 661: jmp 111f
23078+ .pushsection .altinstr_replacement, "a"
23079+ 662: ASM_NOP2
23080+ .popsection
23081+ .pushsection .altinstructions, "a"
23082+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23083+ .popsection
23084+ GET_CR3_INTO_RDI
23085+ cmp $0,%dil
23086+ jz 111f
23087+ sub $4097,%rdi
23088+ or $4,%ebx
23089+ bts $63,%rdi
23090+ SET_RDI_INTO_CR3
23091+ mov $__UDEREF_KERNEL_DS,%edi
23092+ mov %edi,%ss
23093+111:
23094+#endif
23095+ .endm
23096+
23097+ .macro pax_exit_kernel_nmi
23098+#ifdef CONFIG_PAX_KERNEXEC
23099+ btr $1,%ebx
23100+ jnc 110f
23101+ GET_CR0_INTO_RDI
23102+ btr $16,%rdi
23103+ SET_RDI_INTO_CR0
23104+110:
23105+#endif
23106+
23107+#ifdef CONFIG_PAX_MEMORY_UDEREF
23108+ btr $2,%ebx
23109+ jnc 111f
23110+ GET_CR3_INTO_RDI
23111+ add $4097,%rdi
23112+ bts $63,%rdi
23113+ SET_RDI_INTO_CR3
23114+ mov $__KERNEL_DS,%edi
23115+ mov %edi,%ss
23116+111:
23117+#endif
23118+ .endm
23119+
23120+ .macro pax_erase_kstack
23121+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23122+ call pax_erase_kstack
23123+#endif
23124+ .endm
23125+
23126+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23127+ENTRY(pax_erase_kstack)
23128+ pushq %rdi
23129+ pushq %rcx
23130+ pushq %rax
23131+ pushq %r11
23132+
23133+ GET_THREAD_INFO(%r11)
23134+ mov TI_lowest_stack(%r11), %rdi
23135+ mov $-0xBEEF, %rax
23136+ std
23137+
23138+1: mov %edi, %ecx
23139+ and $THREAD_SIZE_asm - 1, %ecx
23140+ shr $3, %ecx
23141+ repne scasq
23142+ jecxz 2f
23143+
23144+ cmp $2*8, %ecx
23145+ jc 2f
23146+
23147+ mov $2*8, %ecx
23148+ repe scasq
23149+ jecxz 2f
23150+ jne 1b
23151+
23152+2: cld
23153+ mov %esp, %ecx
23154+ sub %edi, %ecx
23155+
23156+ cmp $THREAD_SIZE_asm, %rcx
23157+ jb 3f
23158+ ud2
23159+3:
23160+
23161+ shr $3, %ecx
23162+ rep stosq
23163+
23164+ mov TI_task_thread_sp0(%r11), %rdi
23165+ sub $256, %rdi
23166+ mov %rdi, TI_lowest_stack(%r11)
23167+
23168+ popq %r11
23169+ popq %rax
23170+ popq %rcx
23171+ popq %rdi
23172+ pax_force_retaddr
23173+ ret
23174+ENDPROC(pax_erase_kstack)
23175+#endif
23176
23177 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23178 #ifdef CONFIG_TRACE_IRQFLAGS
23179@@ -320,7 +753,7 @@ ENDPROC(native_usergs_sysret64)
23180 .endm
23181
23182 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23183- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23184+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23185 jnc 1f
23186 TRACE_IRQS_ON_DEBUG
23187 1:
23188@@ -358,27 +791,6 @@ ENDPROC(native_usergs_sysret64)
23189 movq \tmp,R11+\offset(%rsp)
23190 .endm
23191
23192- .macro FAKE_STACK_FRAME child_rip
23193- /* push in order ss, rsp, eflags, cs, rip */
23194- xorl %eax, %eax
23195- pushq_cfi $__KERNEL_DS /* ss */
23196- /*CFI_REL_OFFSET ss,0*/
23197- pushq_cfi %rax /* rsp */
23198- CFI_REL_OFFSET rsp,0
23199- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23200- /*CFI_REL_OFFSET rflags,0*/
23201- pushq_cfi $__KERNEL_CS /* cs */
23202- /*CFI_REL_OFFSET cs,0*/
23203- pushq_cfi \child_rip /* rip */
23204- CFI_REL_OFFSET rip,0
23205- pushq_cfi %rax /* orig rax */
23206- .endm
23207-
23208- .macro UNFAKE_STACK_FRAME
23209- addq $8*6, %rsp
23210- CFI_ADJUST_CFA_OFFSET -(6*8)
23211- .endm
23212-
23213 /*
23214 * initial frame state for interrupts (and exceptions without error code)
23215 */
23216@@ -445,25 +857,26 @@ ENDPROC(native_usergs_sysret64)
23217 /* save partial stack frame */
23218 .macro SAVE_ARGS_IRQ
23219 cld
23220- /* start from rbp in pt_regs and jump over */
23221- movq_cfi rdi, (RDI-RBP)
23222- movq_cfi rsi, (RSI-RBP)
23223- movq_cfi rdx, (RDX-RBP)
23224- movq_cfi rcx, (RCX-RBP)
23225- movq_cfi rax, (RAX-RBP)
23226- movq_cfi r8, (R8-RBP)
23227- movq_cfi r9, (R9-RBP)
23228- movq_cfi r10, (R10-RBP)
23229- movq_cfi r11, (R11-RBP)
23230+ /* start from r15 in pt_regs and jump over */
23231+ movq_cfi rdi, RDI
23232+ movq_cfi rsi, RSI
23233+ movq_cfi rdx, RDX
23234+ movq_cfi rcx, RCX
23235+ movq_cfi rax, RAX
23236+ movq_cfi r8, R8
23237+ movq_cfi r9, R9
23238+ movq_cfi r10, R10
23239+ movq_cfi r11, R11
23240+ movq_cfi r12, R12
23241
23242 /* Save rbp so that we can unwind from get_irq_regs() */
23243- movq_cfi rbp, 0
23244+ movq_cfi rbp, RBP
23245
23246 /* Save previous stack value */
23247 movq %rsp, %rsi
23248
23249- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23250- testl $3, CS-RBP(%rsi)
23251+ movq %rsp,%rdi /* arg1 for handler */
23252+ testb $3, CS(%rsi)
23253 je 1f
23254 SWAPGS
23255 /*
23256@@ -483,6 +896,18 @@ ENDPROC(native_usergs_sysret64)
23257 0x06 /* DW_OP_deref */, \
23258 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23259 0x22 /* DW_OP_plus */
23260+
23261+#ifdef CONFIG_PAX_MEMORY_UDEREF
23262+ testb $3, CS(%rdi)
23263+ jnz 1f
23264+ pax_enter_kernel
23265+ jmp 2f
23266+1: pax_enter_kernel_user
23267+2:
23268+#else
23269+ pax_enter_kernel
23270+#endif
23271+
23272 /* We entered an interrupt context - irqs are off: */
23273 TRACE_IRQS_OFF
23274 .endm
23275@@ -514,9 +939,52 @@ ENTRY(save_paranoid)
23276 js 1f /* negative -> in kernel */
23277 SWAPGS
23278 xorl %ebx,%ebx
23279-1: ret
23280+1:
23281+#ifdef CONFIG_PAX_MEMORY_UDEREF
23282+ testb $3, CS+8(%rsp)
23283+ jnz 1f
23284+ pax_enter_kernel
23285+ jmp 2f
23286+1: pax_enter_kernel_user
23287+2:
23288+#else
23289+ pax_enter_kernel
23290+#endif
23291+ pax_force_retaddr
23292+ ret
23293 CFI_ENDPROC
23294-END(save_paranoid)
23295+ENDPROC(save_paranoid)
23296+
23297+ENTRY(save_paranoid_nmi)
23298+ XCPT_FRAME 1 RDI+8
23299+ cld
23300+ movq_cfi rdi, RDI+8
23301+ movq_cfi rsi, RSI+8
23302+ movq_cfi rdx, RDX+8
23303+ movq_cfi rcx, RCX+8
23304+ movq_cfi rax, RAX+8
23305+ movq_cfi r8, R8+8
23306+ movq_cfi r9, R9+8
23307+ movq_cfi r10, R10+8
23308+ movq_cfi r11, R11+8
23309+ movq_cfi rbx, RBX+8
23310+ movq_cfi rbp, RBP+8
23311+ movq_cfi r12, R12+8
23312+ movq_cfi r13, R13+8
23313+ movq_cfi r14, R14+8
23314+ movq_cfi r15, R15+8
23315+ movl $1,%ebx
23316+ movl $MSR_GS_BASE,%ecx
23317+ rdmsr
23318+ testl %edx,%edx
23319+ js 1f /* negative -> in kernel */
23320+ SWAPGS
23321+ xorl %ebx,%ebx
23322+1: pax_enter_kernel_nmi
23323+ pax_force_retaddr
23324+ ret
23325+ CFI_ENDPROC
23326+ENDPROC(save_paranoid_nmi)
23327 .popsection
23328
23329 /*
23330@@ -538,7 +1006,7 @@ ENTRY(ret_from_fork)
23331
23332 RESTORE_REST
23333
23334- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23335+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23336 jz 1f
23337
23338 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
23339@@ -548,15 +1016,13 @@ ENTRY(ret_from_fork)
23340 jmp ret_from_sys_call # go to the SYSRET fastpath
23341
23342 1:
23343- subq $REST_SKIP, %rsp # leave space for volatiles
23344- CFI_ADJUST_CFA_OFFSET REST_SKIP
23345 movq %rbp, %rdi
23346 call *%rbx
23347 movl $0, RAX(%rsp)
23348 RESTORE_REST
23349 jmp int_ret_from_sys_call
23350 CFI_ENDPROC
23351-END(ret_from_fork)
23352+ENDPROC(ret_from_fork)
23353
23354 /*
23355 * System call entry. Up to 6 arguments in registers are supported.
23356@@ -593,7 +1059,7 @@ END(ret_from_fork)
23357 ENTRY(system_call)
23358 CFI_STARTPROC simple
23359 CFI_SIGNAL_FRAME
23360- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23361+ CFI_DEF_CFA rsp,0
23362 CFI_REGISTER rip,rcx
23363 /*CFI_REGISTER rflags,r11*/
23364 SWAPGS_UNSAFE_STACK
23365@@ -606,16 +1072,23 @@ GLOBAL(system_call_after_swapgs)
23366
23367 movq %rsp,PER_CPU_VAR(old_rsp)
23368 movq PER_CPU_VAR(kernel_stack),%rsp
23369+ SAVE_ARGS 8*6,0
23370+ pax_enter_kernel_user
23371+
23372+#ifdef CONFIG_PAX_RANDKSTACK
23373+ pax_erase_kstack
23374+#endif
23375+
23376 /*
23377 * No need to follow this irqs off/on section - it's straight
23378 * and short:
23379 */
23380 ENABLE_INTERRUPTS(CLBR_NONE)
23381- SAVE_ARGS 8,0
23382 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
23383 movq %rcx,RIP-ARGOFFSET(%rsp)
23384 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23385- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23386+ GET_THREAD_INFO(%rcx)
23387+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23388 jnz tracesys
23389 system_call_fastpath:
23390 #if __SYSCALL_MASK == ~0
23391@@ -639,10 +1112,13 @@ sysret_check:
23392 LOCKDEP_SYS_EXIT
23393 DISABLE_INTERRUPTS(CLBR_NONE)
23394 TRACE_IRQS_OFF
23395- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23396+ GET_THREAD_INFO(%rcx)
23397+ movl TI_flags(%rcx),%edx
23398 andl %edi,%edx
23399 jnz sysret_careful
23400 CFI_REMEMBER_STATE
23401+ pax_exit_kernel_user
23402+ pax_erase_kstack
23403 /*
23404 * sysretq will re-enable interrupts:
23405 */
23406@@ -701,6 +1177,9 @@ auditsys:
23407 movq %rax,%rsi /* 2nd arg: syscall number */
23408 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
23409 call __audit_syscall_entry
23410+
23411+ pax_erase_kstack
23412+
23413 LOAD_ARGS 0 /* reload call-clobbered registers */
23414 jmp system_call_fastpath
23415
23416@@ -722,7 +1201,7 @@ sysret_audit:
23417 /* Do syscall tracing */
23418 tracesys:
23419 #ifdef CONFIG_AUDITSYSCALL
23420- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23421+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
23422 jz auditsys
23423 #endif
23424 SAVE_REST
23425@@ -730,12 +1209,15 @@ tracesys:
23426 FIXUP_TOP_OF_STACK %rdi
23427 movq %rsp,%rdi
23428 call syscall_trace_enter
23429+
23430+ pax_erase_kstack
23431+
23432 /*
23433 * Reload arg registers from stack in case ptrace changed them.
23434 * We don't reload %rax because syscall_trace_enter() returned
23435 * the value it wants us to use in the table lookup.
23436 */
23437- LOAD_ARGS ARGOFFSET, 1
23438+ LOAD_ARGS 1
23439 RESTORE_REST
23440 #if __SYSCALL_MASK == ~0
23441 cmpq $__NR_syscall_max,%rax
23442@@ -765,7 +1247,9 @@ GLOBAL(int_with_check)
23443 andl %edi,%edx
23444 jnz int_careful
23445 andl $~TS_COMPAT,TI_status(%rcx)
23446- jmp retint_swapgs
23447+ pax_exit_kernel_user
23448+ pax_erase_kstack
23449+ jmp retint_swapgs_pax
23450
23451 /* Either reschedule or signal or syscall exit tracking needed. */
23452 /* First do a reschedule test. */
23453@@ -811,7 +1295,7 @@ int_restore_rest:
23454 TRACE_IRQS_OFF
23455 jmp int_with_check
23456 CFI_ENDPROC
23457-END(system_call)
23458+ENDPROC(system_call)
23459
23460 .macro FORK_LIKE func
23461 ENTRY(stub_\func)
23462@@ -824,9 +1308,10 @@ ENTRY(stub_\func)
23463 DEFAULT_FRAME 0 8 /* offset 8: return address */
23464 call sys_\func
23465 RESTORE_TOP_OF_STACK %r11, 8
23466- ret $REST_SKIP /* pop extended registers */
23467+ pax_force_retaddr
23468+ ret
23469 CFI_ENDPROC
23470-END(stub_\func)
23471+ENDPROC(stub_\func)
23472 .endm
23473
23474 .macro FIXED_FRAME label,func
23475@@ -836,9 +1321,10 @@ ENTRY(\label)
23476 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23477 call \func
23478 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23479+ pax_force_retaddr
23480 ret
23481 CFI_ENDPROC
23482-END(\label)
23483+ENDPROC(\label)
23484 .endm
23485
23486 FORK_LIKE clone
23487@@ -846,19 +1332,6 @@ END(\label)
23488 FORK_LIKE vfork
23489 FIXED_FRAME stub_iopl, sys_iopl
23490
23491-ENTRY(ptregscall_common)
23492- DEFAULT_FRAME 1 8 /* offset 8: return address */
23493- RESTORE_TOP_OF_STACK %r11, 8
23494- movq_cfi_restore R15+8, r15
23495- movq_cfi_restore R14+8, r14
23496- movq_cfi_restore R13+8, r13
23497- movq_cfi_restore R12+8, r12
23498- movq_cfi_restore RBP+8, rbp
23499- movq_cfi_restore RBX+8, rbx
23500- ret $REST_SKIP /* pop extended registers */
23501- CFI_ENDPROC
23502-END(ptregscall_common)
23503-
23504 ENTRY(stub_execve)
23505 CFI_STARTPROC
23506 addq $8, %rsp
23507@@ -870,7 +1343,7 @@ ENTRY(stub_execve)
23508 RESTORE_REST
23509 jmp int_ret_from_sys_call
23510 CFI_ENDPROC
23511-END(stub_execve)
23512+ENDPROC(stub_execve)
23513
23514 /*
23515 * sigreturn is special because it needs to restore all registers on return.
23516@@ -887,7 +1360,7 @@ ENTRY(stub_rt_sigreturn)
23517 RESTORE_REST
23518 jmp int_ret_from_sys_call
23519 CFI_ENDPROC
23520-END(stub_rt_sigreturn)
23521+ENDPROC(stub_rt_sigreturn)
23522
23523 #ifdef CONFIG_X86_X32_ABI
23524 ENTRY(stub_x32_rt_sigreturn)
23525@@ -901,7 +1374,7 @@ ENTRY(stub_x32_rt_sigreturn)
23526 RESTORE_REST
23527 jmp int_ret_from_sys_call
23528 CFI_ENDPROC
23529-END(stub_x32_rt_sigreturn)
23530+ENDPROC(stub_x32_rt_sigreturn)
23531
23532 ENTRY(stub_x32_execve)
23533 CFI_STARTPROC
23534@@ -915,7 +1388,7 @@ ENTRY(stub_x32_execve)
23535 RESTORE_REST
23536 jmp int_ret_from_sys_call
23537 CFI_ENDPROC
23538-END(stub_x32_execve)
23539+ENDPROC(stub_x32_execve)
23540
23541 #endif
23542
23543@@ -952,7 +1425,7 @@ vector=vector+1
23544 2: jmp common_interrupt
23545 .endr
23546 CFI_ENDPROC
23547-END(irq_entries_start)
23548+ENDPROC(irq_entries_start)
23549
23550 .previous
23551 END(interrupt)
23552@@ -969,8 +1442,8 @@ END(interrupt)
23553 /* 0(%rsp): ~(interrupt number) */
23554 .macro interrupt func
23555 /* reserve pt_regs for scratch regs and rbp */
23556- subq $ORIG_RAX-RBP, %rsp
23557- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23558+ subq $ORIG_RAX, %rsp
23559+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23560 SAVE_ARGS_IRQ
23561 call \func
23562 .endm
23563@@ -997,14 +1470,14 @@ ret_from_intr:
23564
23565 /* Restore saved previous stack */
23566 popq %rsi
23567- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23568- leaq ARGOFFSET-RBP(%rsi), %rsp
23569+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23570+ movq %rsi, %rsp
23571 CFI_DEF_CFA_REGISTER rsp
23572- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23573+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23574
23575 exit_intr:
23576 GET_THREAD_INFO(%rcx)
23577- testl $3,CS-ARGOFFSET(%rsp)
23578+ testb $3,CS-ARGOFFSET(%rsp)
23579 je retint_kernel
23580
23581 /* Interrupt came from user space */
23582@@ -1026,12 +1499,16 @@ retint_swapgs: /* return to user-space */
23583 * The iretq could re-enable interrupts:
23584 */
23585 DISABLE_INTERRUPTS(CLBR_ANY)
23586+ pax_exit_kernel_user
23587+retint_swapgs_pax:
23588 TRACE_IRQS_IRETQ
23589 SWAPGS
23590 jmp restore_args
23591
23592 retint_restore_args: /* return to kernel space */
23593 DISABLE_INTERRUPTS(CLBR_ANY)
23594+ pax_exit_kernel
23595+ pax_force_retaddr (RIP-ARGOFFSET)
23596 /*
23597 * The iretq could re-enable interrupts:
23598 */
23599@@ -1112,7 +1589,7 @@ ENTRY(retint_kernel)
23600 #endif
23601
23602 CFI_ENDPROC
23603-END(common_interrupt)
23604+ENDPROC(common_interrupt)
23605 /*
23606 * End of kprobes section
23607 */
23608@@ -1130,7 +1607,7 @@ ENTRY(\sym)
23609 interrupt \do_sym
23610 jmp ret_from_intr
23611 CFI_ENDPROC
23612-END(\sym)
23613+ENDPROC(\sym)
23614 .endm
23615
23616 #ifdef CONFIG_TRACING
23617@@ -1218,7 +1695,7 @@ ENTRY(\sym)
23618 call \do_sym
23619 jmp error_exit /* %ebx: no swapgs flag */
23620 CFI_ENDPROC
23621-END(\sym)
23622+ENDPROC(\sym)
23623 .endm
23624
23625 .macro paranoidzeroentry sym do_sym
23626@@ -1236,10 +1713,10 @@ ENTRY(\sym)
23627 call \do_sym
23628 jmp paranoid_exit /* %ebx: no swapgs flag */
23629 CFI_ENDPROC
23630-END(\sym)
23631+ENDPROC(\sym)
23632 .endm
23633
23634-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23635+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23636 .macro paranoidzeroentry_ist sym do_sym ist
23637 ENTRY(\sym)
23638 INTR_FRAME
23639@@ -1252,12 +1729,18 @@ ENTRY(\sym)
23640 TRACE_IRQS_OFF_DEBUG
23641 movq %rsp,%rdi /* pt_regs pointer */
23642 xorl %esi,%esi /* no error code */
23643+#ifdef CONFIG_SMP
23644+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23645+ lea init_tss(%r13), %r13
23646+#else
23647+ lea init_tss(%rip), %r13
23648+#endif
23649 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23650 call \do_sym
23651 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23652 jmp paranoid_exit /* %ebx: no swapgs flag */
23653 CFI_ENDPROC
23654-END(\sym)
23655+ENDPROC(\sym)
23656 .endm
23657
23658 .macro errorentry sym do_sym
23659@@ -1275,7 +1758,7 @@ ENTRY(\sym)
23660 call \do_sym
23661 jmp error_exit /* %ebx: no swapgs flag */
23662 CFI_ENDPROC
23663-END(\sym)
23664+ENDPROC(\sym)
23665 .endm
23666
23667 #ifdef CONFIG_TRACING
23668@@ -1306,7 +1789,7 @@ ENTRY(\sym)
23669 call \do_sym
23670 jmp paranoid_exit /* %ebx: no swapgs flag */
23671 CFI_ENDPROC
23672-END(\sym)
23673+ENDPROC(\sym)
23674 .endm
23675
23676 zeroentry divide_error do_divide_error
23677@@ -1336,9 +1819,10 @@ gs_change:
23678 2: mfence /* workaround */
23679 SWAPGS
23680 popfq_cfi
23681+ pax_force_retaddr
23682 ret
23683 CFI_ENDPROC
23684-END(native_load_gs_index)
23685+ENDPROC(native_load_gs_index)
23686
23687 _ASM_EXTABLE(gs_change,bad_gs)
23688 .section .fixup,"ax"
23689@@ -1366,9 +1850,10 @@ ENTRY(do_softirq_own_stack)
23690 CFI_DEF_CFA_REGISTER rsp
23691 CFI_ADJUST_CFA_OFFSET -8
23692 decl PER_CPU_VAR(irq_count)
23693+ pax_force_retaddr
23694 ret
23695 CFI_ENDPROC
23696-END(do_softirq_own_stack)
23697+ENDPROC(do_softirq_own_stack)
23698
23699 #ifdef CONFIG_XEN
23700 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
23701@@ -1406,7 +1891,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
23702 decl PER_CPU_VAR(irq_count)
23703 jmp error_exit
23704 CFI_ENDPROC
23705-END(xen_do_hypervisor_callback)
23706+ENDPROC(xen_do_hypervisor_callback)
23707
23708 /*
23709 * Hypervisor uses this for application faults while it executes.
23710@@ -1465,7 +1950,7 @@ ENTRY(xen_failsafe_callback)
23711 SAVE_ALL
23712 jmp error_exit
23713 CFI_ENDPROC
23714-END(xen_failsafe_callback)
23715+ENDPROC(xen_failsafe_callback)
23716
23717 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
23718 xen_hvm_callback_vector xen_evtchn_do_upcall
23719@@ -1517,18 +2002,33 @@ ENTRY(paranoid_exit)
23720 DEFAULT_FRAME
23721 DISABLE_INTERRUPTS(CLBR_NONE)
23722 TRACE_IRQS_OFF_DEBUG
23723- testl %ebx,%ebx /* swapgs needed? */
23724+ testl $1,%ebx /* swapgs needed? */
23725 jnz paranoid_restore
23726- testl $3,CS(%rsp)
23727+ testb $3,CS(%rsp)
23728 jnz paranoid_userspace
23729+#ifdef CONFIG_PAX_MEMORY_UDEREF
23730+ pax_exit_kernel
23731+ TRACE_IRQS_IRETQ 0
23732+ SWAPGS_UNSAFE_STACK
23733+ RESTORE_ALL 8
23734+ pax_force_retaddr_bts
23735+ jmp irq_return
23736+#endif
23737 paranoid_swapgs:
23738+#ifdef CONFIG_PAX_MEMORY_UDEREF
23739+ pax_exit_kernel_user
23740+#else
23741+ pax_exit_kernel
23742+#endif
23743 TRACE_IRQS_IRETQ 0
23744 SWAPGS_UNSAFE_STACK
23745 RESTORE_ALL 8
23746 jmp irq_return
23747 paranoid_restore:
23748+ pax_exit_kernel
23749 TRACE_IRQS_IRETQ_DEBUG 0
23750 RESTORE_ALL 8
23751+ pax_force_retaddr_bts
23752 jmp irq_return
23753 paranoid_userspace:
23754 GET_THREAD_INFO(%rcx)
23755@@ -1557,7 +2057,7 @@ paranoid_schedule:
23756 TRACE_IRQS_OFF
23757 jmp paranoid_userspace
23758 CFI_ENDPROC
23759-END(paranoid_exit)
23760+ENDPROC(paranoid_exit)
23761
23762 /*
23763 * Exception entry point. This expects an error code/orig_rax on the stack.
23764@@ -1584,12 +2084,23 @@ ENTRY(error_entry)
23765 movq_cfi r14, R14+8
23766 movq_cfi r15, R15+8
23767 xorl %ebx,%ebx
23768- testl $3,CS+8(%rsp)
23769+ testb $3,CS+8(%rsp)
23770 je error_kernelspace
23771 error_swapgs:
23772 SWAPGS
23773 error_sti:
23774+#ifdef CONFIG_PAX_MEMORY_UDEREF
23775+ testb $3, CS+8(%rsp)
23776+ jnz 1f
23777+ pax_enter_kernel
23778+ jmp 2f
23779+1: pax_enter_kernel_user
23780+2:
23781+#else
23782+ pax_enter_kernel
23783+#endif
23784 TRACE_IRQS_OFF
23785+ pax_force_retaddr
23786 ret
23787
23788 /*
23789@@ -1616,7 +2127,7 @@ bstep_iret:
23790 movq %rcx,RIP+8(%rsp)
23791 jmp error_swapgs
23792 CFI_ENDPROC
23793-END(error_entry)
23794+ENDPROC(error_entry)
23795
23796
23797 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
23798@@ -1627,7 +2138,7 @@ ENTRY(error_exit)
23799 DISABLE_INTERRUPTS(CLBR_NONE)
23800 TRACE_IRQS_OFF
23801 GET_THREAD_INFO(%rcx)
23802- testl %eax,%eax
23803+ testl $1,%eax
23804 jne retint_kernel
23805 LOCKDEP_SYS_EXIT_IRQ
23806 movl TI_flags(%rcx),%edx
23807@@ -1636,7 +2147,7 @@ ENTRY(error_exit)
23808 jnz retint_careful
23809 jmp retint_swapgs
23810 CFI_ENDPROC
23811-END(error_exit)
23812+ENDPROC(error_exit)
23813
23814 /*
23815 * Test if a given stack is an NMI stack or not.
23816@@ -1694,9 +2205,11 @@ ENTRY(nmi)
23817 * If %cs was not the kernel segment, then the NMI triggered in user
23818 * space, which means it is definitely not nested.
23819 */
23820+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
23821+ je 1f
23822 cmpl $__KERNEL_CS, 16(%rsp)
23823 jne first_nmi
23824-
23825+1:
23826 /*
23827 * Check the special variable on the stack to see if NMIs are
23828 * executing.
23829@@ -1730,8 +2243,7 @@ nested_nmi:
23830
23831 1:
23832 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
23833- leaq -1*8(%rsp), %rdx
23834- movq %rdx, %rsp
23835+ subq $8, %rsp
23836 CFI_ADJUST_CFA_OFFSET 1*8
23837 leaq -10*8(%rsp), %rdx
23838 pushq_cfi $__KERNEL_DS
23839@@ -1749,6 +2261,7 @@ nested_nmi_out:
23840 CFI_RESTORE rdx
23841
23842 /* No need to check faults here */
23843+# pax_force_retaddr_bts
23844 INTERRUPT_RETURN
23845
23846 CFI_RESTORE_STATE
23847@@ -1845,13 +2358,13 @@ end_repeat_nmi:
23848 subq $ORIG_RAX-R15, %rsp
23849 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
23850 /*
23851- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
23852+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
23853 * as we should not be calling schedule in NMI context.
23854 * Even with normal interrupts enabled. An NMI should not be
23855 * setting NEED_RESCHED or anything that normal interrupts and
23856 * exceptions might do.
23857 */
23858- call save_paranoid
23859+ call save_paranoid_nmi
23860 DEFAULT_FRAME 0
23861
23862 /*
23863@@ -1861,9 +2374,9 @@ end_repeat_nmi:
23864 * NMI itself takes a page fault, the page fault that was preempted
23865 * will read the information from the NMI page fault and not the
23866 * origin fault. Save it off and restore it if it changes.
23867- * Use the r12 callee-saved register.
23868+ * Use the r13 callee-saved register.
23869 */
23870- movq %cr2, %r12
23871+ movq %cr2, %r13
23872
23873 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
23874 movq %rsp,%rdi
23875@@ -1872,31 +2385,36 @@ end_repeat_nmi:
23876
23877 /* Did the NMI take a page fault? Restore cr2 if it did */
23878 movq %cr2, %rcx
23879- cmpq %rcx, %r12
23880+ cmpq %rcx, %r13
23881 je 1f
23882- movq %r12, %cr2
23883+ movq %r13, %cr2
23884 1:
23885
23886- testl %ebx,%ebx /* swapgs needed? */
23887+ testl $1,%ebx /* swapgs needed? */
23888 jnz nmi_restore
23889 nmi_swapgs:
23890 SWAPGS_UNSAFE_STACK
23891 nmi_restore:
23892+ pax_exit_kernel_nmi
23893 /* Pop the extra iret frame at once */
23894 RESTORE_ALL 6*8
23895+ testb $3, 8(%rsp)
23896+ jnz 1f
23897+ pax_force_retaddr_bts
23898+1:
23899
23900 /* Clear the NMI executing stack variable */
23901 movq $0, 5*8(%rsp)
23902 jmp irq_return
23903 CFI_ENDPROC
23904-END(nmi)
23905+ENDPROC(nmi)
23906
23907 ENTRY(ignore_sysret)
23908 CFI_STARTPROC
23909 mov $-ENOSYS,%eax
23910 sysret
23911 CFI_ENDPROC
23912-END(ignore_sysret)
23913+ENDPROC(ignore_sysret)
23914
23915 /*
23916 * End of kprobes section
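
The INIT_TSS_IST change in the paranoidzeroentry_ist macro above swaps the %gs-relative PER_CPU_VAR(init_tss) access for an explicit per-CPU base held in %r13: on SMP the register is loaded with init_tss + cpu_number * TSS_size, and the IST slot is then a fixed displacement off it. A minimal C sketch of that addressing, with illustrative stand-in types (the real TSS_ist/TSS_size offsets come from asm-offsets, and the 4-CPU bound is arbitrary):

#include <stdio.h>

/* Illustrative stand-ins only: the addressing scheme is the point. */
struct tss_stub { unsigned long ist[7]; };
static struct tss_stub init_tss[4];            /* one entry per CPU */

/* What INIT_TSS_IST(\ist) now reaches: %r13 = &init_tss[cpu], and the
 * slot sits at offset TSS_ist + (ist - 1) * 8 from that base. */
static unsigned long *ist_slot(int cpu, int ist)
{
    return &init_tss[cpu].ist[ist - 1];
}

int main(void)
{
    *ist_slot(1, 4) -= 4096;                   /* like subq $EXCEPTION_STKSZ */
    printf("%lx\n", *ist_slot(1, 4));
    return 0;
}

Loading the base once is also what lets the subq/addq pair adjust the IST stack top without a second per-CPU lookup.
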
23917diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
23918index e625319..b9abb9d 100644
23919--- a/arch/x86/kernel/ftrace.c
23920+++ b/arch/x86/kernel/ftrace.c
23921@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
23922 {
23923 unsigned char replaced[MCOUNT_INSN_SIZE];
23924
23925+ ip = ktla_ktva(ip);
23926+
23927 /*
23928 * Note: Due to modules and __init, code can
23929 * disappear and change, we need to protect against faulting
23930@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
23931 unsigned char old[MCOUNT_INSN_SIZE];
23932 int ret;
23933
23934- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
23935+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
23936
23937 ftrace_update_func = ip;
23938 /* Make sure the breakpoints see the ftrace_update_func update */
23939@@ -306,7 +308,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
23940 * kernel identity mapping to modify code.
23941 */
23942 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
23943- ip = (unsigned long)__va(__pa_symbol(ip));
23944+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
23945
23946 return probe_kernel_write((void *)ip, val, size);
23947 }
23948@@ -316,7 +318,7 @@ static int add_break(unsigned long ip, const char *old)
23949 unsigned char replaced[MCOUNT_INSN_SIZE];
23950 unsigned char brk = BREAKPOINT_INSTRUCTION;
23951
23952- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
23953+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
23954 return -EFAULT;
23955
23956 /* Make sure it is what we expect it to be */
23957@@ -664,7 +666,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
23958 return ret;
23959
23960 fail_update:
23961- probe_kernel_write((void *)ip, &old_code[0], 1);
23962+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
23963 goto out;
23964 }
23965
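
A recurring idiom in the ftrace.c hunks above (and in the kgdb, kprobes and jump_label hunks below) is wrapping every address whose instruction bytes get read or written in ktla_ktva(). Under PaX KERNEXEC the address a kernel-text symbol is linked at and the address its bytes are actually accessible through differ, so code patching must go through the translated alias; without KERNEXEC the helpers are identities. A hedged sketch of the shape, with an invented offset -- the real definitions live in PaX's architecture-specific pgtable headers:

/* Sketch only: model a fixed shift between the linked ("ktla") and the
 * actually-mapped ("ktva") kernel text address. TEXT_ALIAS_SHIFT is a
 * made-up illustrative constant, not the PaX value. */
#ifdef CONFIG_PAX_KERNEXEC
#define TEXT_ALIAS_SHIFT 0x10000000UL
#define ktla_ktva(addr) ((addr) + TEXT_ALIAS_SHIFT)
#define ktva_ktla(addr) ((addr) - TEXT_ALIAS_SHIFT)
#else
#define ktla_ktva(addr) (addr)
#define ktva_ktla(addr) (addr)
#endif
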
23966diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
23967index 85126cc..1bbce17 100644
23968--- a/arch/x86/kernel/head64.c
23969+++ b/arch/x86/kernel/head64.c
23970@@ -67,12 +67,12 @@ again:
23971 pgd = *pgd_p;
23972
23973 /*
23974- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
23975- * critical -- __PAGE_OFFSET would point us back into the dynamic
23976+ * The use of __early_va rather than __va here is critical:
23977+ * __va would point us back into the dynamic
23978 * range and we might end up looping forever...
23979 */
23980 if (pgd)
23981- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23982+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
23983 else {
23984 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
23985 reset_early_page_tables();
23986@@ -82,13 +82,13 @@ again:
23987 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
23988 for (i = 0; i < PTRS_PER_PUD; i++)
23989 pud_p[i] = 0;
23990- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
23991+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
23992 }
23993 pud_p += pud_index(address);
23994 pud = *pud_p;
23995
23996 if (pud)
23997- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23998+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
23999 else {
24000 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24001 reset_early_page_tables();
24002@@ -98,7 +98,7 @@ again:
24003 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24004 for (i = 0; i < PTRS_PER_PMD; i++)
24005 pmd_p[i] = 0;
24006- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24007+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24008 }
24009 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24010 pmd_p[pmd_index(address)] = pmd;
24011@@ -175,7 +175,6 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
24012 if (console_loglevel == 10)
24013 early_printk("Kernel alive\n");
24014
24015- clear_page(init_level4_pgt);
24016 /* set init_level4_pgt kernel high mapping*/
24017 init_level4_pgt[511] = early_level4_pgt[511];
24018
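
The head64.c hunks fold open-coded physical/virtual conversions into named helpers, and their meaning can be read straight off the lines they replace: (pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base becomes __early_va(pgd & PTE_PFN_MASK), and (pgdval_t)pud_p - __START_KERNEL_map + phys_base becomes __pa(pud_p). In macro form (the __early_pa name for the inverse is hypothetical; the patch itself uses __pa):

/* Derived from the replaced expressions: early boot runs with the kernel
 * mapped at __START_KERNEL_map plus the relocation offset phys_base. */
#define __early_va(phys) \
    ((void *)((unsigned long)(phys) + __START_KERNEL_map - phys_base))
#define __early_pa(virt) \
    ((unsigned long)(virt) - __START_KERNEL_map + phys_base)
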
24019diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24020index f36bd42..56ee1534 100644
24021--- a/arch/x86/kernel/head_32.S
24022+++ b/arch/x86/kernel/head_32.S
24023@@ -26,6 +26,12 @@
24024 /* Physical address */
24025 #define pa(X) ((X) - __PAGE_OFFSET)
24026
24027+#ifdef CONFIG_PAX_KERNEXEC
24028+#define ta(X) (X)
24029+#else
24030+#define ta(X) ((X) - __PAGE_OFFSET)
24031+#endif
24032+
24033 /*
24034 * References to members of the new_cpu_data structure.
24035 */
24036@@ -55,11 +61,7 @@
24037 * and smaller than max_low_pfn, otherwise we will waste some page table entries
24038 */
24039
24040-#if PTRS_PER_PMD > 1
24041-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24042-#else
24043-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24044-#endif
24045+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24046
24047 /* Number of possible pages in the lowmem region */
24048 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24049@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24050 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24051
24052 /*
24053+ * Real beginning of normal "text" segment
24054+ */
24055+ENTRY(stext)
24056+ENTRY(_stext)
24057+
24058+/*
24059 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24060 * %esi points to the real-mode code as a 32-bit pointer.
24061 * CS and DS must be 4 GB flat segments, but we don't depend on
24062@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24063 * can.
24064 */
24065 __HEAD
24066+
24067+#ifdef CONFIG_PAX_KERNEXEC
24068+ jmp startup_32
24069+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24070+.fill PAGE_SIZE-5,1,0xcc
24071+#endif
24072+
24073 ENTRY(startup_32)
24074 movl pa(stack_start),%ecx
24075
24076@@ -106,6 +121,59 @@ ENTRY(startup_32)
24077 2:
24078 leal -__PAGE_OFFSET(%ecx),%esp
24079
24080+#ifdef CONFIG_SMP
24081+ movl $pa(cpu_gdt_table),%edi
24082+ movl $__per_cpu_load,%eax
24083+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24084+ rorl $16,%eax
24085+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24086+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24087+ movl $__per_cpu_end - 1,%eax
24088+ subl $__per_cpu_start,%eax
24089+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24090+#endif
24091+
24092+#ifdef CONFIG_PAX_MEMORY_UDEREF
24093+ movl $NR_CPUS,%ecx
24094+ movl $pa(cpu_gdt_table),%edi
24095+1:
24096+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24097+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24098+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24099+ addl $PAGE_SIZE_asm,%edi
24100+ loop 1b
24101+#endif
24102+
24103+#ifdef CONFIG_PAX_KERNEXEC
24104+ movl $pa(boot_gdt),%edi
24105+ movl $__LOAD_PHYSICAL_ADDR,%eax
24106+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24107+ rorl $16,%eax
24108+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24109+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24110+ rorl $16,%eax
24111+
24112+ ljmp $(__BOOT_CS),$1f
24113+1:
24114+
24115+ movl $NR_CPUS,%ecx
24116+ movl $pa(cpu_gdt_table),%edi
24117+ addl $__PAGE_OFFSET,%eax
24118+1:
24119+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24120+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24121+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24122+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24123+ rorl $16,%eax
24124+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24125+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24126+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24127+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24128+ rorl $16,%eax
24129+ addl $PAGE_SIZE_asm,%edi
24130+ loop 1b
24131+#endif
24132+
24133 /*
24134 * Clear BSS first so that there are no surprises...
24135 */
24136@@ -201,8 +269,11 @@ ENTRY(startup_32)
24137 movl %eax, pa(max_pfn_mapped)
24138
24139 /* Do early initialization of the fixmap area */
24140- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24141- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24142+#ifdef CONFIG_COMPAT_VDSO
24143+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24144+#else
24145+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24146+#endif
24147 #else /* Not PAE */
24148
24149 page_pde_offset = (__PAGE_OFFSET >> 20);
24150@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24151 movl %eax, pa(max_pfn_mapped)
24152
24153 /* Do early initialization of the fixmap area */
24154- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24155- movl %eax,pa(initial_page_table+0xffc)
24156+#ifdef CONFIG_COMPAT_VDSO
24157+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24158+#else
24159+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24160+#endif
24161 #endif
24162
24163 #ifdef CONFIG_PARAVIRT
24164@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24165 cmpl $num_subarch_entries, %eax
24166 jae bad_subarch
24167
24168- movl pa(subarch_entries)(,%eax,4), %eax
24169- subl $__PAGE_OFFSET, %eax
24170- jmp *%eax
24171+ jmp *pa(subarch_entries)(,%eax,4)
24172
24173 bad_subarch:
24174 WEAK(lguest_entry)
24175@@ -261,10 +333,10 @@ WEAK(xen_entry)
24176 __INITDATA
24177
24178 subarch_entries:
24179- .long default_entry /* normal x86/PC */
24180- .long lguest_entry /* lguest hypervisor */
24181- .long xen_entry /* Xen hypervisor */
24182- .long default_entry /* Moorestown MID */
24183+ .long ta(default_entry) /* normal x86/PC */
24184+ .long ta(lguest_entry) /* lguest hypervisor */
24185+ .long ta(xen_entry) /* Xen hypervisor */
24186+ .long ta(default_entry) /* Moorestown MID */
24187 num_subarch_entries = (. - subarch_entries) / 4
24188 .previous
24189 #else
24190@@ -354,6 +426,7 @@ default_entry:
24191 movl pa(mmu_cr4_features),%eax
24192 movl %eax,%cr4
24193
24194+#ifdef CONFIG_X86_PAE
24195 testb $X86_CR4_PAE, %al # check if PAE is enabled
24196 jz enable_paging
24197
24198@@ -382,6 +455,9 @@ default_entry:
24199 /* Make changes effective */
24200 wrmsr
24201
24202+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24203+#endif
24204+
24205 enable_paging:
24206
24207 /*
24208@@ -449,14 +525,20 @@ is486:
24209 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24210 movl %eax,%ss # after changing gdt.
24211
24212- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24213+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24214 movl %eax,%ds
24215 movl %eax,%es
24216
24217 movl $(__KERNEL_PERCPU), %eax
24218 movl %eax,%fs # set this cpu's percpu
24219
24220+#ifdef CONFIG_CC_STACKPROTECTOR
24221 movl $(__KERNEL_STACK_CANARY),%eax
24222+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24223+ movl $(__USER_DS),%eax
24224+#else
24225+ xorl %eax,%eax
24226+#endif
24227 movl %eax,%gs
24228
24229 xorl %eax,%eax # Clear LDT
24230@@ -512,8 +594,11 @@ setup_once:
24231 * relocation. Manually set base address in stack canary
24232 * segment descriptor.
24233 */
24234- movl $gdt_page,%eax
24235+ movl $cpu_gdt_table,%eax
24236 movl $stack_canary,%ecx
24237+#ifdef CONFIG_SMP
24238+ addl $__per_cpu_load,%ecx
24239+#endif
24240 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24241 shrl $16, %ecx
24242 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24243@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24244 cmpl $2,(%esp) # X86_TRAP_NMI
24245 je is_nmi # Ignore NMI
24246
24247- cmpl $2,%ss:early_recursion_flag
24248+ cmpl $1,%ss:early_recursion_flag
24249 je hlt_loop
24250 incl %ss:early_recursion_flag
24251
24252@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24253 pushl (20+6*4)(%esp) /* trapno */
24254 pushl $fault_msg
24255 call printk
24256-#endif
24257 call dump_stack
24258+#endif
24259 hlt_loop:
24260 hlt
24261 jmp hlt_loop
24262@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24263 /* This is the default interrupt "handler" :-) */
24264 ALIGN
24265 ignore_int:
24266- cld
24267 #ifdef CONFIG_PRINTK
24268+ cmpl $2,%ss:early_recursion_flag
24269+ je hlt_loop
24270+ incl %ss:early_recursion_flag
24271+ cld
24272 pushl %eax
24273 pushl %ecx
24274 pushl %edx
24275@@ -617,9 +705,6 @@ ignore_int:
24276 movl $(__KERNEL_DS),%eax
24277 movl %eax,%ds
24278 movl %eax,%es
24279- cmpl $2,early_recursion_flag
24280- je hlt_loop
24281- incl early_recursion_flag
24282 pushl 16(%esp)
24283 pushl 24(%esp)
24284 pushl 32(%esp)
24285@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24286 /*
24287 * BSS section
24288 */
24289-__PAGE_ALIGNED_BSS
24290- .align PAGE_SIZE
24291 #ifdef CONFIG_X86_PAE
24292+.section .initial_pg_pmd,"a",@progbits
24293 initial_pg_pmd:
24294 .fill 1024*KPMDS,4,0
24295 #else
24296+.section .initial_page_table,"a",@progbits
24297 ENTRY(initial_page_table)
24298 .fill 1024,4,0
24299 #endif
24300+.section .initial_pg_fixmap,"a",@progbits
24301 initial_pg_fixmap:
24302 .fill 1024,4,0
24303+.section .empty_zero_page,"a",@progbits
24304 ENTRY(empty_zero_page)
24305 .fill 4096,1,0
24306+.section .swapper_pg_dir,"a",@progbits
24307 ENTRY(swapper_pg_dir)
24308+#ifdef CONFIG_X86_PAE
24309+ .fill 4,8,0
24310+#else
24311 .fill 1024,4,0
24312+#endif
24313
24314 /*
24315 * This starts the data section.
24316 */
24317 #ifdef CONFIG_X86_PAE
24318-__PAGE_ALIGNED_DATA
24319- /* Page-aligned for the benefit of paravirt? */
24320- .align PAGE_SIZE
24321+.section .initial_page_table,"a",@progbits
24322 ENTRY(initial_page_table)
24323 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24324 # if KPMDS == 3
24325@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24326 # error "Kernel PMDs should be 1, 2 or 3"
24327 # endif
24328 .align PAGE_SIZE /* needs to be page-sized too */
24329+
24330+#ifdef CONFIG_PAX_PER_CPU_PGD
24331+ENTRY(cpu_pgd)
24332+ .rept 2*NR_CPUS
24333+ .fill 4,8,0
24334+ .endr
24335+#endif
24336+
24337 #endif
24338
24339 .data
24340 .balign 4
24341 ENTRY(stack_start)
24342- .long init_thread_union+THREAD_SIZE
24343+ .long init_thread_union+THREAD_SIZE-8
24344
24345 __INITRODATA
24346 int_msg:
24347@@ -727,7 +825,7 @@ fault_msg:
24348 * segment size, and 32-bit linear address value:
24349 */
24350
24351- .data
24352+.section .rodata,"a",@progbits
24353 .globl boot_gdt_descr
24354 .globl idt_descr
24355
24356@@ -736,7 +834,7 @@ fault_msg:
24357 .word 0 # 32 bit align gdt_desc.address
24358 boot_gdt_descr:
24359 .word __BOOT_DS+7
24360- .long boot_gdt - __PAGE_OFFSET
24361+ .long pa(boot_gdt)
24362
24363 .word 0 # 32-bit align idt_desc.address
24364 idt_descr:
24365@@ -747,7 +845,7 @@ idt_descr:
24366 .word 0 # 32 bit align gdt_desc.address
24367 ENTRY(early_gdt_descr)
24368 .word GDT_ENTRIES*8-1
24369- .long gdt_page /* Overwritten for secondary CPUs */
24370+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24371
24372 /*
24373 * The boot_gdt must mirror the equivalent in setup.S and is
24374@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24375 .align L1_CACHE_BYTES
24376 ENTRY(boot_gdt)
24377 .fill GDT_ENTRY_BOOT_CS,8,0
24378- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24379- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24380+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24381+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24382+
24383+ .align PAGE_SIZE_asm
24384+ENTRY(cpu_gdt_table)
24385+ .rept NR_CPUS
24386+ .quad 0x0000000000000000 /* NULL descriptor */
24387+ .quad 0x0000000000000000 /* 0x0b reserved */
24388+ .quad 0x0000000000000000 /* 0x13 reserved */
24389+ .quad 0x0000000000000000 /* 0x1b reserved */
24390+
24391+#ifdef CONFIG_PAX_KERNEXEC
24392+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24393+#else
24394+ .quad 0x0000000000000000 /* 0x20 unused */
24395+#endif
24396+
24397+ .quad 0x0000000000000000 /* 0x28 unused */
24398+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24399+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24400+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24401+ .quad 0x0000000000000000 /* 0x4b reserved */
24402+ .quad 0x0000000000000000 /* 0x53 reserved */
24403+ .quad 0x0000000000000000 /* 0x5b reserved */
24404+
24405+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24406+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24407+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24408+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24409+
24410+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24411+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24412+
24413+ /*
24414+ * Segments used for calling PnP BIOS have byte granularity.
24415+ * The code segments and data segments have fixed 64k limits,
24416+ * the transfer segment sizes are set at run time.
24417+ */
24418+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24419+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24420+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24421+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24422+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24423+
24424+ /*
24425+ * The APM segments have byte granularity and their bases
24426+ * are set at run time. All have 64k limits.
24427+ */
24428+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24429+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24430+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24431+
24432+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
24433+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24434+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24435+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24436+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24437+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24438+
24439+ /* Be sure this is zeroed to avoid false validations in Xen */
24440+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24441+ .endr
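
The cpu_gdt_table added above is one page of raw 8-byte segment descriptors per CPU. The magic constants decode mechanically; a standalone helper (userspace illustration, not kernel code) confirms that 0x00cf9b000000ffff really is a present ring-0 32-bit code segment with base 0 and a 4 GiB page-granular limit, matching its comment:

#include <stdint.h>
#include <stdio.h>

/* Split one GDT descriptor per the x86 layout: limit 0-15, base 16-39,
 * access 40-47, limit 48-51, flags 52-55, base 56-63. */
static void decode_gdt_desc(uint64_t d)
{
    uint32_t base   = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
    uint32_t limit  = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
    uint8_t  access = (d >> 40) & 0xff;
    uint8_t  flags  = (d >> 52) & 0xf;

    if (flags & 0x8)                 /* G bit: limit counts 4 KiB pages */
        limit = (limit << 12) | 0xfff;
    printf("base=%#010x limit=%#010x access=%#04x flags=%#x\n",
           base, limit, access, flags);
}

int main(void)
{
    decode_gdt_desc(0x00cf9b000000ffffULL);  /* kernel 4GB code */
    decode_gdt_desc(0x00cffb000000ffffULL);  /* user 4GB code */
    return 0;
}

Running it shows the kernel and user code entries differ only in the DPL bits of the access byte (0x9b versus 0xfb).
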
24442diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24443index a468c0a..b9aed84 100644
24444--- a/arch/x86/kernel/head_64.S
24445+++ b/arch/x86/kernel/head_64.S
24446@@ -20,6 +20,8 @@
24447 #include <asm/processor-flags.h>
24448 #include <asm/percpu.h>
24449 #include <asm/nops.h>
24450+#include <asm/cpufeature.h>
24451+#include <asm/alternative-asm.h>
24452
24453 #ifdef CONFIG_PARAVIRT
24454 #include <asm/asm-offsets.h>
24455@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24456 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24457 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24458 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24459+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24460+L3_VMALLOC_START = pud_index(VMALLOC_START)
24461+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24462+L3_VMALLOC_END = pud_index(VMALLOC_END)
24463+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24464+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24465
24466 .text
24467 __HEAD
24468@@ -89,11 +97,24 @@ startup_64:
24469 * Fixup the physical addresses in the page table
24470 */
24471 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24472+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24473+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24474+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24475+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24476+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24477
24478- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24479- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24480+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24481+#ifndef CONFIG_XEN
24482+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24483+#endif
24484+
24485+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24486+
24487+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24488+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24489
24490 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24491+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24492
24493 /*
24494 * Set up the identity mapping for the switchover. These
24495@@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
24496 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24497 1:
24498
24499- /* Enable PAE mode and PGE */
24500- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24501+ /* Enable PAE mode and PSE/PGE */
24502+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24503 movq %rcx, %cr4
24504
24505 /* Setup early boot stage 4 level pagetables. */
24506@@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
24507 movl $MSR_EFER, %ecx
24508 rdmsr
24509 btsl $_EFER_SCE, %eax /* Enable System Call */
24510- btl $20,%edi /* No Execute supported? */
24511+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24512 jnc 1f
24513 btsl $_EFER_NX, %eax
24514 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24515+#ifndef CONFIG_EFI
24516+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24517+#endif
24518+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24519+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24520+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24521+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24522+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24523+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24524 1: wrmsr /* Make changes effective */
24525
24526 /* Setup cr0 */
24527@@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
24528 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24529 * address given in m16:64.
24530 */
24531+ pax_set_fptr_mask
24532 movq initial_code(%rip),%rax
24533 pushq $0 # fake return address to stop unwinder
24534 pushq $__KERNEL_CS # set correct cs
24535@@ -391,7 +422,7 @@ ENTRY(early_idt_handler)
24536 call dump_stack
24537 #ifdef CONFIG_KALLSYMS
24538 leaq early_idt_ripmsg(%rip),%rdi
24539- movq 40(%rsp),%rsi # %rip again
24540+ movq 88(%rsp),%rsi # %rip again
24541 call __print_symbol
24542 #endif
24543 #endif /* EARLY_PRINTK */
24544@@ -420,6 +451,7 @@ ENDPROC(early_idt_handler)
24545 early_recursion_flag:
24546 .long 0
24547
24548+ .section .rodata,"a",@progbits
24549 #ifdef CONFIG_EARLY_PRINTK
24550 early_idt_msg:
24551 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24552@@ -447,29 +479,52 @@ NEXT_PAGE(early_level4_pgt)
24553 NEXT_PAGE(early_dynamic_pgts)
24554 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24555
24556- .data
24557+ .section .rodata,"a",@progbits
24558
24559-#ifndef CONFIG_XEN
24560 NEXT_PAGE(init_level4_pgt)
24561- .fill 512,8,0
24562-#else
24563-NEXT_PAGE(init_level4_pgt)
24564- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24565 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24566 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24567+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24568+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24569+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24570+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24571+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24572+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24573 .org init_level4_pgt + L4_START_KERNEL*8, 0
24574 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24575 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24576
24577+#ifdef CONFIG_PAX_PER_CPU_PGD
24578+NEXT_PAGE(cpu_pgd)
24579+ .rept 2*NR_CPUS
24580+ .fill 512,8,0
24581+ .endr
24582+#endif
24583+
24584 NEXT_PAGE(level3_ident_pgt)
24585 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24586+#ifdef CONFIG_XEN
24587 .fill 511, 8, 0
24588+#else
24589+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24590+ .fill 510,8,0
24591+#endif
24592+
24593+NEXT_PAGE(level3_vmalloc_start_pgt)
24594+ .fill 512,8,0
24595+
24596+NEXT_PAGE(level3_vmalloc_end_pgt)
24597+ .fill 512,8,0
24598+
24599+NEXT_PAGE(level3_vmemmap_pgt)
24600+ .fill L3_VMEMMAP_START,8,0
24601+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24602+
24603 NEXT_PAGE(level2_ident_pgt)
24604- /* Since I easily can, map the first 1G.
24605+ /* Since I easily can, map the first 2G.
24606 * Don't set NX because code runs from these pages.
24607 */
24608- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24609-#endif
24610+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24611
24612 NEXT_PAGE(level3_kernel_pgt)
24613 .fill L3_START_KERNEL,8,0
24614@@ -477,6 +532,9 @@ NEXT_PAGE(level3_kernel_pgt)
24615 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24616 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24617
24618+NEXT_PAGE(level2_vmemmap_pgt)
24619+ .fill 512,8,0
24620+
24621 NEXT_PAGE(level2_kernel_pgt)
24622 /*
24623 * 512 MB kernel mapping. We spend a full page on this pagetable
24624@@ -494,28 +552,64 @@ NEXT_PAGE(level2_kernel_pgt)
24625 NEXT_PAGE(level2_fixmap_pgt)
24626 .fill 506,8,0
24627 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24628- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24629- .fill 5,8,0
24630+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24631+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24632+ .fill 4,8,0
24633
24634 NEXT_PAGE(level1_fixmap_pgt)
24635 .fill 512,8,0
24636
24637+NEXT_PAGE(level1_vsyscall_pgt)
24638+ .fill 512,8,0
24639+
24640 #undef PMDS
24641
24642- .data
24643+ .align PAGE_SIZE
24644+ENTRY(cpu_gdt_table)
24645+ .rept NR_CPUS
24646+ .quad 0x0000000000000000 /* NULL descriptor */
24647+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24648+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24649+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24650+ .quad 0x00cffb000000ffff /* __USER32_CS */
24651+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24652+ .quad 0x00affb000000ffff /* __USER_CS */
24653+
24654+#ifdef CONFIG_PAX_KERNEXEC
24655+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24656+#else
24657+ .quad 0x0 /* unused */
24658+#endif
24659+
24660+ .quad 0,0 /* TSS */
24661+ .quad 0,0 /* LDT */
24662+ .quad 0,0,0 /* three TLS descriptors */
24663+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
24664+ /* asm/segment.h:GDT_ENTRIES must match this */
24665+
24666+#ifdef CONFIG_PAX_MEMORY_UDEREF
24667+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
24668+#else
24669+ .quad 0x0 /* unused */
24670+#endif
24671+
24672+ /* zero the remaining page */
24673+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
24674+ .endr
24675+
24676 .align 16
24677 .globl early_gdt_descr
24678 early_gdt_descr:
24679 .word GDT_ENTRIES*8-1
24680 early_gdt_descr_base:
24681- .quad INIT_PER_CPU_VAR(gdt_page)
24682+ .quad cpu_gdt_table
24683
24684 ENTRY(phys_base)
24685 /* This must match the first entry in level2_kernel_pgt */
24686 .quad 0x0000000000000000
24687
24688 #include "../../x86/xen/xen-head.S"
24689-
24690- __PAGE_ALIGNED_BSS
24691+
24692+ .section .rodata,"a",@progbits
24693 NEXT_PAGE(empty_zero_page)
24694 .skip PAGE_SIZE
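
The new L4_VMALLOC_START/L4_VMALLOC_END/L4_VMEMMAP_START symbols above are nothing more than top-level page-table slot numbers, which is what lets the patch pre-wire those regions in init_level4_pgt and NX-mark them at boot. With the stock 4-level x86-64 layout of this era the index macros reduce to a shift and a mask; as a quick check, the 3.13-era direct map base 0xffff880000000000 lands in PGD slot 272:

#include <stdio.h>

/* Stock 4-level x86-64 layout: 9 index bits per level. */
#define PGDIR_SHIFT 39
#define PUD_SHIFT   30
#define PTRS_PER    512UL

#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER - 1))
#define pud_index(a) (((a) >> PUD_SHIFT)   & (PTRS_PER - 1))

int main(void)
{
    unsigned long page_offset = 0xffff880000000000UL;  /* 3.13 direct map */
    printf("pgd_index(PAGE_OFFSET) = %lu\n", pgd_index(page_offset)); /* 272 */
    return 0;
}
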
24695diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
24696index 05fd74f..c3548b1 100644
24697--- a/arch/x86/kernel/i386_ksyms_32.c
24698+++ b/arch/x86/kernel/i386_ksyms_32.c
24699@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
24700 EXPORT_SYMBOL(cmpxchg8b_emu);
24701 #endif
24702
24703+EXPORT_SYMBOL_GPL(cpu_gdt_table);
24704+
24705 /* Networking helper routines. */
24706 EXPORT_SYMBOL(csum_partial_copy_generic);
24707+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
24708+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
24709
24710 EXPORT_SYMBOL(__get_user_1);
24711 EXPORT_SYMBOL(__get_user_2);
24712@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
24713 EXPORT_SYMBOL(___preempt_schedule_context);
24714 #endif
24715 #endif
24716+
24717+#ifdef CONFIG_PAX_KERNEXEC
24718+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
24719+#endif
24720+
24721+#ifdef CONFIG_PAX_PER_CPU_PGD
24722+EXPORT_SYMBOL(cpu_pgd);
24723+#endif
24724diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
24725index d5dd808..b6432cf 100644
24726--- a/arch/x86/kernel/i387.c
24727+++ b/arch/x86/kernel/i387.c
24728@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
24729 static inline bool interrupted_user_mode(void)
24730 {
24731 struct pt_regs *regs = get_irq_regs();
24732- return regs && user_mode_vm(regs);
24733+ return regs && user_mode(regs);
24734 }
24735
24736 /*
24737diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
24738index 2e977b5..5f2c273 100644
24739--- a/arch/x86/kernel/i8259.c
24740+++ b/arch/x86/kernel/i8259.c
24741@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
24742 static void make_8259A_irq(unsigned int irq)
24743 {
24744 disable_irq_nosync(irq);
24745- io_apic_irqs &= ~(1<<irq);
24746+ io_apic_irqs &= ~(1UL<<irq);
24747 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
24748 i8259A_chip.name);
24749 enable_irq(irq);
24750@@ -209,7 +209,7 @@ spurious_8259A_irq:
24751 "spurious 8259A interrupt: IRQ%d.\n", irq);
24752 spurious_irq_mask |= irqmask;
24753 }
24754- atomic_inc(&irq_err_count);
24755+ atomic_inc_unchecked(&irq_err_count);
24756 /*
24757 * Theoretically we do not have to handle this IRQ,
24758 * but in Linux this does not cause problems and is
24759@@ -332,14 +332,16 @@ static void init_8259A(int auto_eoi)
24760 /* (slave's support for AEOI in flat mode is to be investigated) */
24761 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
24762
24763+ pax_open_kernel();
24764 if (auto_eoi)
24765 /*
24766 * In AEOI mode we just have to mask the interrupt
24767 * when acking.
24768 */
24769- i8259A_chip.irq_mask_ack = disable_8259A_irq;
24770+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
24771 else
24772- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24773+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24774+ pax_close_kernel();
24775
24776 udelay(100); /* wait for 8259A to initialize */
24777
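
The pax_open_kernel()/pax_close_kernel() bracket around the irq_mask_ack assignment exists because this patchset moves function-pointer structures like irq_chip into read-only memory; a legitimate one-off write has to lift the protection briefly. The *(void **)& cast, presumably, writes through the const-qualified field once the window is open. As a userspace analogy of the pattern, with mprotect() standing in for the kernel-side mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static char *page;

static void open_kernel_analogue(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
static void close_kernel_analogue(void) { mprotect(page, 4096, PROT_READ); }

int main(void)
{
    page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    strcpy(page, "irq_mask_ack = disable_8259A_irq");
    mprotect(page, 4096, PROT_READ);     /* struct now lives in RO memory */

    open_kernel_analogue();              /* pax_open_kernel() */
    strcpy(page, "irq_mask_ack = mask_and_ack_8259A");
    close_kernel_analogue();             /* pax_close_kernel() */

    puts(page);
    return 0;
}
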
24778diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
24779index a979b5b..1d6db75 100644
24780--- a/arch/x86/kernel/io_delay.c
24781+++ b/arch/x86/kernel/io_delay.c
24782@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
24783 * Quirk table for systems that misbehave (lock up, etc.) if port
24784 * 0x80 is used:
24785 */
24786-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
24787+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
24788 {
24789 .callback = dmi_io_delay_0xed_port,
24790 .ident = "Compaq Presario V6000",
24791diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
24792index 4ddaf66..49d5c18 100644
24793--- a/arch/x86/kernel/ioport.c
24794+++ b/arch/x86/kernel/ioport.c
24795@@ -6,6 +6,7 @@
24796 #include <linux/sched.h>
24797 #include <linux/kernel.h>
24798 #include <linux/capability.h>
24799+#include <linux/security.h>
24800 #include <linux/errno.h>
24801 #include <linux/types.h>
24802 #include <linux/ioport.h>
24803@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24804 return -EINVAL;
24805 if (turn_on && !capable(CAP_SYS_RAWIO))
24806 return -EPERM;
24807+#ifdef CONFIG_GRKERNSEC_IO
24808+ if (turn_on && grsec_disable_privio) {
24809+ gr_handle_ioperm();
24810+ return -ENODEV;
24811+ }
24812+#endif
24813
24814 /*
24815 * If it's the first ioperm() call in this thread's lifetime, set the
24816@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24817 * because the ->io_bitmap_max value must match the bitmap
24818 * contents:
24819 */
24820- tss = &per_cpu(init_tss, get_cpu());
24821+ tss = init_tss + get_cpu();
24822
24823 if (turn_on)
24824 bitmap_clear(t->io_bitmap_ptr, from, num);
24825@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
24826 if (level > old) {
24827 if (!capable(CAP_SYS_RAWIO))
24828 return -EPERM;
24829+#ifdef CONFIG_GRKERNSEC_IO
24830+ if (grsec_disable_privio) {
24831+ gr_handle_iopl();
24832+ return -ENODEV;
24833+ }
24834+#endif
24835 }
24836 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
24837 t->iopl = level << 12;
24838diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
24839index 22d0687..e07b2a5 100644
24840--- a/arch/x86/kernel/irq.c
24841+++ b/arch/x86/kernel/irq.c
24842@@ -21,7 +21,7 @@
24843 #define CREATE_TRACE_POINTS
24844 #include <asm/trace/irq_vectors.h>
24845
24846-atomic_t irq_err_count;
24847+atomic_unchecked_t irq_err_count;
24848
24849 /* Function pointer for generic interrupt vector handling */
24850 void (*x86_platform_ipi_callback)(void) = NULL;
24851@@ -125,9 +125,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
24852 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
24853 seq_printf(p, " Machine check polls\n");
24854 #endif
24855- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
24856+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
24857 #if defined(CONFIG_X86_IO_APIC)
24858- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
24859+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
24860 #endif
24861 return 0;
24862 }
24863@@ -167,7 +167,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
24864
24865 u64 arch_irq_stat(void)
24866 {
24867- u64 sum = atomic_read(&irq_err_count);
24868+ u64 sum = atomic_read_unchecked(&irq_err_count);
24869 return sum;
24870 }
24871
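
irq_err_count becomes atomic_unchecked_t above because, with the patchset's overflow-detecting atomic_t (PAX_REFCOUNT), a plain atomic_inc() traps on signed overflow; counters that may legitimately wrap are opted out through the _unchecked variants. A sketch of the opt-out type with a plain, non-trapping implementation -- the real one is hand-written asm mirroring the normal atomics, not C11:

#include <stdatomic.h>

/* Sketch only: a wrap-tolerant statistics counter. */
typedef struct { atomic_int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static inline int atomic_read_unchecked(atomic_unchecked_t *v)
{
    return atomic_load_explicit(&v->counter, memory_order_relaxed);
}
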
24872diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
24873index d7fcbed..1f747f7 100644
24874--- a/arch/x86/kernel/irq_32.c
24875+++ b/arch/x86/kernel/irq_32.c
24876@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
24877 __asm__ __volatile__("andl %%esp,%0" :
24878 "=r" (sp) : "0" (THREAD_SIZE - 1));
24879
24880- return sp < (sizeof(struct thread_info) + STACK_WARN);
24881+ return sp < STACK_WARN;
24882 }
24883
24884 static void print_stack_overflow(void)
24885@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
24886 * per-CPU IRQ handling contexts (thread information and stack)
24887 */
24888 union irq_ctx {
24889- struct thread_info tinfo;
24890- u32 stack[THREAD_SIZE/sizeof(u32)];
24891+ unsigned long previous_esp;
24892+ u32 stack[THREAD_SIZE/sizeof(u32)];
24893 } __attribute__((aligned(THREAD_SIZE)));
24894
24895 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
24896@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
24897 static inline int
24898 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24899 {
24900- union irq_ctx *curctx, *irqctx;
24901+ union irq_ctx *irqctx;
24902 u32 *isp, arg1, arg2;
24903
24904- curctx = (union irq_ctx *) current_thread_info();
24905 irqctx = __this_cpu_read(hardirq_ctx);
24906
24907 /*
24908@@ -92,13 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24909 * handler) we can't do that and just have to keep using the
24910 * current stack (which is the irq stack already after all)
24911 */
24912- if (unlikely(curctx == irqctx))
24913+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
24914 return 0;
24915
24916 /* build the stack frame on the IRQ stack */
24917- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24918- irqctx->tinfo.task = curctx->tinfo.task;
24919- irqctx->tinfo.previous_esp = current_stack_pointer;
24920+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24921+ irqctx->previous_esp = current_stack_pointer;
24922+
24923+#ifdef CONFIG_PAX_MEMORY_UDEREF
24924+ __set_fs(MAKE_MM_SEG(0));
24925+#endif
24926
24927 if (unlikely(overflow))
24928 call_on_stack(print_stack_overflow, isp);
24929@@ -110,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24930 : "0" (irq), "1" (desc), "2" (isp),
24931 "D" (desc->handle_irq)
24932 : "memory", "cc", "ecx");
24933+
24934+#ifdef CONFIG_PAX_MEMORY_UDEREF
24935+ __set_fs(current_thread_info()->addr_limit);
24936+#endif
24937+
24938 return 1;
24939 }
24940
24941@@ -118,48 +125,34 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24942 */
24943 void irq_ctx_init(int cpu)
24944 {
24945- union irq_ctx *irqctx;
24946-
24947 if (per_cpu(hardirq_ctx, cpu))
24948 return;
24949
24950- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24951- THREADINFO_GFP,
24952- THREAD_SIZE_ORDER));
24953- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24954- irqctx->tinfo.cpu = cpu;
24955- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24956-
24957- per_cpu(hardirq_ctx, cpu) = irqctx;
24958-
24959- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24960- THREADINFO_GFP,
24961- THREAD_SIZE_ORDER));
24962- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24963- irqctx->tinfo.cpu = cpu;
24964- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24965-
24966- per_cpu(softirq_ctx, cpu) = irqctx;
24967-
24968- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
24969- cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
24970+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24971+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24972 }
24973
24974 void do_softirq_own_stack(void)
24975 {
24976- struct thread_info *curctx;
24977 union irq_ctx *irqctx;
24978 u32 *isp;
24979
24980- curctx = current_thread_info();
24981 irqctx = __this_cpu_read(softirq_ctx);
24982- irqctx->tinfo.task = curctx->task;
24983- irqctx->tinfo.previous_esp = current_stack_pointer;
24984+ irqctx->previous_esp = current_stack_pointer;
24985
24986 /* build the stack frame on the softirq stack */
24987- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24988+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24989+
24990+#ifdef CONFIG_PAX_MEMORY_UDEREF
24991+ __set_fs(MAKE_MM_SEG(0));
24992+#endif
24993
24994 call_on_stack(__do_softirq, isp);
24995+
24996+#ifdef CONFIG_PAX_MEMORY_UDEREF
24997+ __set_fs(current_thread_info()->addr_limit);
24998+#endif
24999+
25000 }
25001
25002 bool handle_irq(unsigned irq, struct pt_regs *regs)
25003@@ -173,7 +166,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25004 if (unlikely(!desc))
25005 return false;
25006
25007- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25008+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25009 if (unlikely(overflow))
25010 print_stack_overflow();
25011 desc->handle_irq(irq, desc);
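
With struct thread_info removed from the IRQ stack union above, execute_on_irq_stack() can no longer compare thread_info pointers to detect that it is already on the IRQ stack; instead it checks whether the current stack pointer lies within THREAD_SIZE of the IRQ context. The test, isolated:

/* Sketch of the replacement check: stacks are THREAD_SIZE bytes and
 * THREAD_SIZE-aligned, so sp is on a given stack iff its unsigned offset
 * from the base is below THREAD_SIZE (underflow makes sp < base fail). */
static inline int on_this_stack(unsigned long sp, unsigned long base,
                                unsigned long thread_size)
{
    return sp - base < thread_size;
}
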
25012diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25013index 4d1c746..232961d 100644
25014--- a/arch/x86/kernel/irq_64.c
25015+++ b/arch/x86/kernel/irq_64.c
25016@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25017 u64 estack_top, estack_bottom;
25018 u64 curbase = (u64)task_stack_page(current);
25019
25020- if (user_mode_vm(regs))
25021+ if (user_mode(regs))
25022 return;
25023
25024 if (regs->sp >= curbase + sizeof(struct thread_info) +
25025diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25026index 26d5a55..a01160a 100644
25027--- a/arch/x86/kernel/jump_label.c
25028+++ b/arch/x86/kernel/jump_label.c
25029@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25030 * Jump label is enabled for the first time.
25031 * So we expect a default_nop...
25032 */
25033- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25034+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25035 != 0))
25036 bug_at((void *)entry->code, __LINE__);
25037 } else {
25038@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25039 * ...otherwise expect an ideal_nop. Otherwise
25040 * something went horribly wrong.
25041 */
25042- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25043+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25044 != 0))
25045 bug_at((void *)entry->code, __LINE__);
25046 }
25047@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25048 * are converting the default nop to the ideal nop.
25049 */
25050 if (init) {
25051- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25052+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25053 bug_at((void *)entry->code, __LINE__);
25054 } else {
25055 code.jump = 0xe9;
25056 code.offset = entry->target -
25057 (entry->code + JUMP_LABEL_NOP_SIZE);
25058- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25059+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25060 bug_at((void *)entry->code, __LINE__);
25061 }
25062 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
25063diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25064index 836f832..a8bda67 100644
25065--- a/arch/x86/kernel/kgdb.c
25066+++ b/arch/x86/kernel/kgdb.c
25067@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25068 #ifdef CONFIG_X86_32
25069 switch (regno) {
25070 case GDB_SS:
25071- if (!user_mode_vm(regs))
25072+ if (!user_mode(regs))
25073 *(unsigned long *)mem = __KERNEL_DS;
25074 break;
25075 case GDB_SP:
25076- if (!user_mode_vm(regs))
25077+ if (!user_mode(regs))
25078 *(unsigned long *)mem = kernel_stack_pointer(regs);
25079 break;
25080 case GDB_GS:
25081@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
25082 bp->attr.bp_addr = breakinfo[breakno].addr;
25083 bp->attr.bp_len = breakinfo[breakno].len;
25084 bp->attr.bp_type = breakinfo[breakno].type;
25085- info->address = breakinfo[breakno].addr;
25086+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25087+ info->address = ktla_ktva(breakinfo[breakno].addr);
25088+ else
25089+ info->address = breakinfo[breakno].addr;
25090 info->len = breakinfo[breakno].len;
25091 info->type = breakinfo[breakno].type;
25092 val = arch_install_hw_breakpoint(bp);
25093@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25094 case 'k':
25095 /* clear the trace bit */
25096 linux_regs->flags &= ~X86_EFLAGS_TF;
25097- atomic_set(&kgdb_cpu_doing_single_step, -1);
25098+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25099
25100 /* set the trace bit if we're stepping */
25101 if (remcomInBuffer[0] == 's') {
25102 linux_regs->flags |= X86_EFLAGS_TF;
25103- atomic_set(&kgdb_cpu_doing_single_step,
25104+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25105 raw_smp_processor_id());
25106 }
25107
25108@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25109
25110 switch (cmd) {
25111 case DIE_DEBUG:
25112- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25113+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25114 if (user_mode(regs))
25115 return single_step_cont(regs, args);
25116 break;
25117@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25118 #endif /* CONFIG_DEBUG_RODATA */
25119
25120 bpt->type = BP_BREAKPOINT;
25121- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25122+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25123 BREAK_INSTR_SIZE);
25124 if (err)
25125 return err;
25126- err = probe_kernel_write((char *)bpt->bpt_addr,
25127+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25128 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25129 #ifdef CONFIG_DEBUG_RODATA
25130 if (!err)
25131@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25132 return -EBUSY;
25133 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25134 BREAK_INSTR_SIZE);
25135- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25136+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25137 if (err)
25138 return err;
25139 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25140@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25141 if (mutex_is_locked(&text_mutex))
25142 goto knl_write;
25143 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25144- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25145+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25146 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25147 goto knl_write;
25148 return err;
25149 knl_write:
25150 #endif /* CONFIG_DEBUG_RODATA */
25151- return probe_kernel_write((char *)bpt->bpt_addr,
25152+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25153 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25154 }
25155
25156diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25157index 79a3f96..6ba030a 100644
25158--- a/arch/x86/kernel/kprobes/core.c
25159+++ b/arch/x86/kernel/kprobes/core.c
25160@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
25161 s32 raddr;
25162 } __packed *insn;
25163
25164- insn = (struct __arch_relative_insn *)from;
25165+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25166+
25167+ pax_open_kernel();
25168 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25169 insn->op = op;
25170+ pax_close_kernel();
25171 }
25172
25173 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25174@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
25175 kprobe_opcode_t opcode;
25176 kprobe_opcode_t *orig_opcodes = opcodes;
25177
25178- if (search_exception_tables((unsigned long)opcodes))
25179+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25180 return 0; /* Page fault may occur on this address. */
25181
25182 retry:
25183@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25184 * for the first byte, we can recover the original instruction
25185 * from it and kp->opcode.
25186 */
25187- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25188+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25189 buf[0] = kp->opcode;
25190- return (unsigned long)buf;
25191+ return ktva_ktla((unsigned long)buf);
25192 }
25193
25194 /*
25195@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
25196 /* Another subsystem has put a breakpoint here; we failed to recover */
25197 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25198 return 0;
25199+ pax_open_kernel();
25200 memcpy(dest, insn.kaddr, insn.length);
25201+ pax_close_kernel();
25202
25203 #ifdef CONFIG_X86_64
25204 if (insn_rip_relative(&insn)) {
25205@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
25206 return 0;
25207 }
25208 disp = (u8 *) dest + insn_offset_displacement(&insn);
25209+ pax_open_kernel();
25210 *(s32 *) disp = (s32) newdisp;
25211+ pax_close_kernel();
25212 }
25213 #endif
25214 return insn.length;
25215@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
25216 * nor set current_kprobe, because it doesn't use single
25217 * stepping.
25218 */
25219- regs->ip = (unsigned long)p->ainsn.insn;
25220+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25221 preempt_enable_no_resched();
25222 return;
25223 }
25224@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
25225 regs->flags &= ~X86_EFLAGS_IF;
25226 /* single step inline if the instruction is an int3 */
25227 if (p->opcode == BREAKPOINT_INSTRUCTION)
25228- regs->ip = (unsigned long)p->addr;
25229+ regs->ip = ktla_ktva((unsigned long)p->addr);
25230 else
25231- regs->ip = (unsigned long)p->ainsn.insn;
25232+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25233 }
25234
25235 /*
25236@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
25237 setup_singlestep(p, regs, kcb, 0);
25238 return 1;
25239 }
25240- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25241+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25242 /*
25243 * The breakpoint instruction was removed right
25244 * after we hit it. Another cpu has removed
25245@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
25246 " movq %rax, 152(%rsp)\n"
25247 RESTORE_REGS_STRING
25248 " popfq\n"
25249+#ifdef KERNEXEC_PLUGIN
25250+ " btsq $63,(%rsp)\n"
25251+#endif
25252 #else
25253 " pushf\n"
25254 SAVE_REGS_STRING
25255@@ -779,7 +789,7 @@ static void __kprobes
25256 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
25257 {
25258 unsigned long *tos = stack_addr(regs);
25259- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25260+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25261 unsigned long orig_ip = (unsigned long)p->addr;
25262 kprobe_opcode_t *insn = p->ainsn.insn;
25263
25264@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
25265 struct die_args *args = data;
25266 int ret = NOTIFY_DONE;
25267
25268- if (args->regs && user_mode_vm(args->regs))
25269+ if (args->regs && user_mode(args->regs))
25270 return ret;
25271
25272 switch (val) {
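
The __synthesize_relative_insn() hunk above wraps the write of a 5-byte instruction in pax_open_kernel(): one opcode byte plus a rel32 displacement measured from the end of the instruction. The arithmetic, standalone:

#include <stdint.h>
#include <string.h>

/* Encode a 5-byte x86 jmp/call with a rel32 displacement, exactly as
 * insn->raddr = (s32)((long)(to) - ((long)(from) + 5)) does above. */
static void synth_rel_insn(uint8_t buf[5], uintptr_t from, uintptr_t to,
                           uint8_t op)
{
    int32_t raddr = (int32_t)(to - (from + 5));
    buf[0] = op;                     /* 0xe9 = jmp rel32, 0xe8 = call rel32 */
    memcpy(buf + 1, &raddr, sizeof raddr);
}

This is also why arch_prepare_optimized_kprobe() rejects targets farther than 2 GiB away: rel32 cannot reach them.
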
25273diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25274index 898160b..758cde8 100644
25275--- a/arch/x86/kernel/kprobes/opt.c
25276+++ b/arch/x86/kernel/kprobes/opt.c
25277@@ -79,6 +79,7 @@ found:
25278 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25279 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25280 {
25281+ pax_open_kernel();
25282 #ifdef CONFIG_X86_64
25283 *addr++ = 0x48;
25284 *addr++ = 0xbf;
25285@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
25286 *addr++ = 0xb8;
25287 #endif
25288 *(unsigned long *)addr = val;
25289+ pax_close_kernel();
25290 }
25291
25292 asm (
25293@@ -335,7 +337,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25294 * Verify if the address gap is in 2GB range, because this uses
25295 * a relative jump.
25296 */
25297- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25298+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25299 if (abs(rel) > 0x7fffffff)
25300 return -ERANGE;
25301
25302@@ -350,16 +352,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25303 op->optinsn.size = ret;
25304
25305 /* Copy arch-dep-instance from template */
25306- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25307+ pax_open_kernel();
25308+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25309+ pax_close_kernel();
25310
25311 /* Set probe information */
25312 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25313
25314 /* Set probe function call */
25315- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25316+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25317
25318 /* Set returning jmp instruction at the tail of out-of-line buffer */
25319- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25320+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25321 (u8 *)op->kp.addr + op->optinsn.size);
25322
25323 flush_icache_range((unsigned long) buf,
25324@@ -384,7 +388,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
25325 WARN_ON(kprobe_disabled(&op->kp));
25326
25327 /* Backup instructions which will be replaced by jump address */
25328- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25329+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25330 RELATIVE_ADDR_SIZE);
25331
25332 insn_buf[0] = RELATIVEJUMP_OPCODE;
25333@@ -433,7 +437,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25334 /* This kprobe is really able to run optimized path. */
25335 op = container_of(p, struct optimized_kprobe, kp);
25336 /* Detour through copied instructions */
25337- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25338+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25339 if (!reenter)
25340 reset_current_kprobe();
25341 preempt_enable_no_resched();
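
Every store into the out-of-line instruction buffers is now bracketed by pax_open_kernel()/pax_close_kernel(), since KERNEXEC keeps those buffers non-writable. A minimal sketch of the mechanism, assuming the native implementation temporarily clears CR0.WP with preemption disabled so the write window stays on the local CPU (the patch's real primitives are defined elsewhere and also cover the paravirt case):

/* Sketch of the write-protect window, not the patch's exact code. */
static inline unsigned long sketch_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);	/* ring-0 writes now ignore R/O PTEs */
	return cr0;
}

static inline void sketch_close_kernel(unsigned long cr0)
{
	write_cr0(cr0);			/* restore write protection */
	preempt_enable();
}
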
25342diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25343index ebc9873..1b9724b 100644
25344--- a/arch/x86/kernel/ldt.c
25345+++ b/arch/x86/kernel/ldt.c
25346@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25347 if (reload) {
25348 #ifdef CONFIG_SMP
25349 preempt_disable();
25350- load_LDT(pc);
25351+ load_LDT_nolock(pc);
25352 if (!cpumask_equal(mm_cpumask(current->mm),
25353 cpumask_of(smp_processor_id())))
25354 smp_call_function(flush_ldt, current->mm, 1);
25355 preempt_enable();
25356 #else
25357- load_LDT(pc);
25358+ load_LDT_nolock(pc);
25359 #endif
25360 }
25361 if (oldsize) {
25362@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25363 return err;
25364
25365 for (i = 0; i < old->size; i++)
25366- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25367+ write_ldt_entry(new->ldt, i, old->ldt + i);
25368 return 0;
25369 }
25370
25371@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25372 retval = copy_ldt(&mm->context, &old_mm->context);
25373 mutex_unlock(&old_mm->context.lock);
25374 }
25375+
25376+ if (tsk == current) {
25377+ mm->context.vdso = 0;
25378+
25379+#ifdef CONFIG_X86_32
25380+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25381+ mm->context.user_cs_base = 0UL;
25382+ mm->context.user_cs_limit = ~0UL;
25383+
25384+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25385+ cpus_clear(mm->context.cpu_user_cs_mask);
25386+#endif
25387+
25388+#endif
25389+#endif
25390+
25391+ }
25392+
25393 return retval;
25394 }
25395
25396@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25397 }
25398 }
25399
25400+#ifdef CONFIG_PAX_SEGMEXEC
25401+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25402+ error = -EINVAL;
25403+ goto out_unlock;
25404+ }
25405+#endif
25406+
25407 fill_ldt(&ldt, &ldt_info);
25408 if (oldmode)
25409 ldt.avl = 0;
25410diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25411index 5b19e4d..6476a76 100644
25412--- a/arch/x86/kernel/machine_kexec_32.c
25413+++ b/arch/x86/kernel/machine_kexec_32.c
25414@@ -26,7 +26,7 @@
25415 #include <asm/cacheflush.h>
25416 #include <asm/debugreg.h>
25417
25418-static void set_idt(void *newidt, __u16 limit)
25419+static void set_idt(struct desc_struct *newidt, __u16 limit)
25420 {
25421 struct desc_ptr curidt;
25422
25423@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25424 }
25425
25426
25427-static void set_gdt(void *newgdt, __u16 limit)
25428+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25429 {
25430 struct desc_ptr curgdt;
25431
25432@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25433 }
25434
25435 control_page = page_address(image->control_code_page);
25436- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25437+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25438
25439 relocate_kernel_ptr = control_page;
25440 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25441diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
25442index 15c9876..0a43909 100644
25443--- a/arch/x86/kernel/microcode_core.c
25444+++ b/arch/x86/kernel/microcode_core.c
25445@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
25446 return NOTIFY_OK;
25447 }
25448
25449-static struct notifier_block __refdata mc_cpu_notifier = {
25450+static struct notifier_block mc_cpu_notifier = {
25451 .notifier_call = mc_cpu_callback,
25452 };
25453
25454diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
25455index 5fb2ceb..3ae90bb 100644
25456--- a/arch/x86/kernel/microcode_intel.c
25457+++ b/arch/x86/kernel/microcode_intel.c
25458@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
25459
25460 static int get_ucode_user(void *to, const void *from, size_t n)
25461 {
25462- return copy_from_user(to, from, n);
25463+ return copy_from_user(to, (const void __force_user *)from, n);
25464 }
25465
25466 static enum ucode_state
25467 request_microcode_user(int cpu, const void __user *buf, size_t size)
25468 {
25469- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
25470+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
25471 }
25472
25473 static void microcode_fini_cpu(int cpu)
25474diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25475index 18be189..4a9fe40 100644
25476--- a/arch/x86/kernel/module.c
25477+++ b/arch/x86/kernel/module.c
25478@@ -43,15 +43,60 @@ do { \
25479 } while (0)
25480 #endif
25481
25482-void *module_alloc(unsigned long size)
25483+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25484 {
25485- if (PAGE_ALIGN(size) > MODULES_LEN)
25486+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25487 return NULL;
25488 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
25489- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
25490+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
25491 NUMA_NO_NODE, __builtin_return_address(0));
25492 }
25493
25494+void *module_alloc(unsigned long size)
25495+{
25496+
25497+#ifdef CONFIG_PAX_KERNEXEC
25498+ return __module_alloc(size, PAGE_KERNEL);
25499+#else
25500+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25501+#endif
25502+
25503+}
25504+
25505+#ifdef CONFIG_PAX_KERNEXEC
25506+#ifdef CONFIG_X86_32
25507+void *module_alloc_exec(unsigned long size)
25508+{
25509+ struct vm_struct *area;
25510+
25511+ if (size == 0)
25512+ return NULL;
25513+
25514+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25515+ return area ? area->addr : NULL;
25516+}
25517+EXPORT_SYMBOL(module_alloc_exec);
25518+
25519+void module_free_exec(struct module *mod, void *module_region)
25520+{
25521+ vunmap(module_region);
25522+}
25523+EXPORT_SYMBOL(module_free_exec);
25524+#else
25525+void module_free_exec(struct module *mod, void *module_region)
25526+{
25527+ module_free(mod, module_region);
25528+}
25529+EXPORT_SYMBOL(module_free_exec);
25530+
25531+void *module_alloc_exec(unsigned long size)
25532+{
25533+ return __module_alloc(size, PAGE_KERNEL_RX);
25534+}
25535+EXPORT_SYMBOL(module_alloc_exec);
25536+#endif
25537+#endif
25538+
25539 #ifdef CONFIG_X86_32
25540 int apply_relocate(Elf32_Shdr *sechdrs,
25541 const char *strtab,
25542@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25543 unsigned int i;
25544 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
25545 Elf32_Sym *sym;
25546- uint32_t *location;
25547+ uint32_t *plocation, location;
25548
25549 DEBUGP("Applying relocate section %u to %u\n",
25550 relsec, sechdrs[relsec].sh_info);
25551 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
25552 /* This is where to make the change */
25553- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
25554- + rel[i].r_offset;
25555+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
25556+ location = (uint32_t)plocation;
25557+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
25558+ plocation = ktla_ktva((void *)plocation);
25559 /* This is the symbol it is referring to. Note that all
25560 undefined symbols have been resolved. */
25561 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
25562@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
25563 switch (ELF32_R_TYPE(rel[i].r_info)) {
25564 case R_386_32:
25565 /* We add the value into the location given */
25566- *location += sym->st_value;
25567+ pax_open_kernel();
25568+ *plocation += sym->st_value;
25569+ pax_close_kernel();
25570 break;
25571 case R_386_PC32:
25572 /* Add the value, subtract its position */
25573- *location += sym->st_value - (uint32_t)location;
25574+ pax_open_kernel();
25575+ *plocation += sym->st_value - location;
25576+ pax_close_kernel();
25577 break;
25578 default:
25579 pr_err("%s: Unknown relocation: %u\n",
25580@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
25581 case R_X86_64_NONE:
25582 break;
25583 case R_X86_64_64:
25584+ pax_open_kernel();
25585 *(u64 *)loc = val;
25586+ pax_close_kernel();
25587 break;
25588 case R_X86_64_32:
25589+ pax_open_kernel();
25590 *(u32 *)loc = val;
25591+ pax_close_kernel();
25592 if (val != *(u32 *)loc)
25593 goto overflow;
25594 break;
25595 case R_X86_64_32S:
25596+ pax_open_kernel();
25597 *(s32 *)loc = val;
25598+ pax_close_kernel();
25599 if ((s64)val != *(s32 *)loc)
25600 goto overflow;
25601 break;
25602 case R_X86_64_PC32:
25603 val -= (u64)loc;
25604+ pax_open_kernel();
25605 *(u32 *)loc = val;
25606+ pax_close_kernel();
25607+
25608 #if 0
25609 if ((s64)val != *(s32 *)loc)
25610 goto overflow;
25611diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
25612index 05266b5..3432443 100644
25613--- a/arch/x86/kernel/msr.c
25614+++ b/arch/x86/kernel/msr.c
25615@@ -37,6 +37,7 @@
25616 #include <linux/notifier.h>
25617 #include <linux/uaccess.h>
25618 #include <linux/gfp.h>
25619+#include <linux/grsecurity.h>
25620
25621 #include <asm/processor.h>
25622 #include <asm/msr.h>
25623@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
25624 int err = 0;
25625 ssize_t bytes = 0;
25626
25627+#ifdef CONFIG_GRKERNSEC_KMEM
25628+ gr_handle_msr_write();
25629+ return -EPERM;
25630+#endif
25631+
25632 if (count % 8)
25633 return -EINVAL; /* Invalid chunk size */
25634
25635@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
25636 err = -EBADF;
25637 break;
25638 }
25639+#ifdef CONFIG_GRKERNSEC_KMEM
25640+ gr_handle_msr_write();
25641+ return -EPERM;
25642+#endif
25643 if (copy_from_user(&regs, uregs, sizeof regs)) {
25644 err = -EFAULT;
25645 break;
25646@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
25647 return notifier_from_errno(err);
25648 }
25649
25650-static struct notifier_block __refdata msr_class_cpu_notifier = {
25651+static struct notifier_block msr_class_cpu_notifier = {
25652 .notifier_call = msr_class_cpu_callback,
25653 };
25654
25655diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
25656index 6fcb49c..5b3f4ff 100644
25657--- a/arch/x86/kernel/nmi.c
25658+++ b/arch/x86/kernel/nmi.c
25659@@ -138,7 +138,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
25660 return handled;
25661 }
25662
25663-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25664+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
25665 {
25666 struct nmi_desc *desc = nmi_to_desc(type);
25667 unsigned long flags;
25668@@ -162,9 +162,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25669 * event confuses some handlers (kdump uses this flag)
25670 */
25671 if (action->flags & NMI_FLAG_FIRST)
25672- list_add_rcu(&action->list, &desc->head);
25673+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
25674 else
25675- list_add_tail_rcu(&action->list, &desc->head);
25676+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
25677
25678 spin_unlock_irqrestore(&desc->lock, flags);
25679 return 0;
25680@@ -187,7 +187,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
25681 if (!strcmp(n->name, name)) {
25682 WARN(in_nmi(),
25683 "Trying to free NMI (%s) from NMI context!\n", n->name);
25684- list_del_rcu(&n->list);
25685+ pax_list_del_rcu((struct list_head *)&n->list);
25686 break;
25687 }
25688 }
25689@@ -512,6 +512,17 @@ static inline void nmi_nesting_postprocess(void)
25690 dotraplinkage notrace __kprobes void
25691 do_nmi(struct pt_regs *regs, long error_code)
25692 {
25693+
25694+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25695+ if (!user_mode(regs)) {
25696+ unsigned long cs = regs->cs & 0xFFFF;
25697+ unsigned long ip = ktva_ktla(regs->ip);
25698+
25699+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
25700+ regs->ip = ip;
25701+ }
25702+#endif
25703+
25704 nmi_nesting_preprocess(regs);
25705
25706 nmi_enter();
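
With __register_nmi_handler() taking a const action, the nmiaction (and its embedded list_head) may live in read-only memory, so the list insertion itself needs a write window. The pax_list_* wrappers are assumed here to be the ordinary RCU list operations bracketed by the open/close primitives; a hedged sketch:

/* Sketch of the wrapper shape; the real helpers are added elsewhere
 * in this patch. */
static void sketch_pax_list_add_rcu(struct list_head *new,
				    struct list_head *head)
{
	pax_open_kernel();
	list_add_rcu(new, head);
	pax_close_kernel();
}
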
25707diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
25708index 6d9582e..f746287 100644
25709--- a/arch/x86/kernel/nmi_selftest.c
25710+++ b/arch/x86/kernel/nmi_selftest.c
25711@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
25712 {
25713 /* trap all the unknown NMIs we may generate */
25714 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
25715- __initdata);
25716+ __initconst);
25717 }
25718
25719 static void __init cleanup_nmi_testsuite(void)
25720@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
25721 unsigned long timeout;
25722
25723 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
25724- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
25725+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
25726 nmi_fail = FAILURE;
25727 return;
25728 }
25729diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
25730index bbb6c73..24a58ef 100644
25731--- a/arch/x86/kernel/paravirt-spinlocks.c
25732+++ b/arch/x86/kernel/paravirt-spinlocks.c
25733@@ -8,7 +8,7 @@
25734
25735 #include <asm/paravirt.h>
25736
25737-struct pv_lock_ops pv_lock_ops = {
25738+struct pv_lock_ops pv_lock_ops __read_only = {
25739 #ifdef CONFIG_SMP
25740 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
25741 .unlock_kick = paravirt_nop,
25742diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
25743index 1b10af8..0b58cbc 100644
25744--- a/arch/x86/kernel/paravirt.c
25745+++ b/arch/x86/kernel/paravirt.c
25746@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
25747 {
25748 return x;
25749 }
25750+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25751+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
25752+#endif
25753
25754 void __init default_banner(void)
25755 {
25756@@ -142,15 +145,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
25757 if (opfunc == NULL)
25758 /* If there's no function, patch it with a ud2a (BUG) */
25759 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
25760- else if (opfunc == _paravirt_nop)
25761+ else if (opfunc == (void *)_paravirt_nop)
25762 /* If the operation is a nop, then nop the callsite */
25763 ret = paravirt_patch_nop();
25764
25765 /* identity functions just return their single argument */
25766- else if (opfunc == _paravirt_ident_32)
25767+ else if (opfunc == (void *)_paravirt_ident_32)
25768 ret = paravirt_patch_ident_32(insnbuf, len);
25769- else if (opfunc == _paravirt_ident_64)
25770+ else if (opfunc == (void *)_paravirt_ident_64)
25771 ret = paravirt_patch_ident_64(insnbuf, len);
25772+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25773+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
25774+ ret = paravirt_patch_ident_64(insnbuf, len);
25775+#endif
25776
25777 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
25778 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
25779@@ -175,7 +182,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
25780 if (insn_len > len || start == NULL)
25781 insn_len = len;
25782 else
25783- memcpy(insnbuf, start, insn_len);
25784+ memcpy(insnbuf, ktla_ktva(start), insn_len);
25785
25786 return insn_len;
25787 }
25788@@ -299,7 +306,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
25789 return this_cpu_read(paravirt_lazy_mode);
25790 }
25791
25792-struct pv_info pv_info = {
25793+struct pv_info pv_info __read_only = {
25794 .name = "bare hardware",
25795 .paravirt_enabled = 0,
25796 .kernel_rpl = 0,
25797@@ -310,16 +317,16 @@ struct pv_info pv_info = {
25798 #endif
25799 };
25800
25801-struct pv_init_ops pv_init_ops = {
25802+struct pv_init_ops pv_init_ops __read_only = {
25803 .patch = native_patch,
25804 };
25805
25806-struct pv_time_ops pv_time_ops = {
25807+struct pv_time_ops pv_time_ops __read_only = {
25808 .sched_clock = native_sched_clock,
25809 .steal_clock = native_steal_clock,
25810 };
25811
25812-__visible struct pv_irq_ops pv_irq_ops = {
25813+__visible struct pv_irq_ops pv_irq_ops __read_only = {
25814 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
25815 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
25816 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
25817@@ -331,7 +338,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
25818 #endif
25819 };
25820
25821-__visible struct pv_cpu_ops pv_cpu_ops = {
25822+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
25823 .cpuid = native_cpuid,
25824 .get_debugreg = native_get_debugreg,
25825 .set_debugreg = native_set_debugreg,
25826@@ -389,21 +396,26 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
25827 .end_context_switch = paravirt_nop,
25828 };
25829
25830-struct pv_apic_ops pv_apic_ops = {
 25831+struct pv_apic_ops pv_apic_ops __read_only = {
25832 #ifdef CONFIG_X86_LOCAL_APIC
25833 .startup_ipi_hook = paravirt_nop,
25834 #endif
25835 };
25836
25837-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
25838+#ifdef CONFIG_X86_32
25839+#ifdef CONFIG_X86_PAE
25840+/* 64-bit pagetable entries */
25841+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
25842+#else
25843 /* 32-bit pagetable entries */
25844 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
25845+#endif
25846 #else
25847 /* 64-bit pagetable entries */
25848 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
25849 #endif
25850
25851-struct pv_mmu_ops pv_mmu_ops = {
25852+struct pv_mmu_ops pv_mmu_ops __read_only = {
25853
25854 .read_cr2 = native_read_cr2,
25855 .write_cr2 = native_write_cr2,
25856@@ -453,6 +465,7 @@ struct pv_mmu_ops pv_mmu_ops = {
25857 .make_pud = PTE_IDENT,
25858
25859 .set_pgd = native_set_pgd,
25860+ .set_pgd_batched = native_set_pgd_batched,
25861 #endif
25862 #endif /* PAGETABLE_LEVELS >= 3 */
25863
25864@@ -473,6 +486,12 @@ struct pv_mmu_ops pv_mmu_ops = {
25865 },
25866
25867 .set_fixmap = native_set_fixmap,
25868+
25869+#ifdef CONFIG_PAX_KERNEXEC
25870+ .pax_open_kernel = native_pax_open_kernel,
25871+ .pax_close_kernel = native_pax_close_kernel,
25872+#endif
25873+
25874 };
25875
25876 EXPORT_SYMBOL_GPL(pv_time_ops);
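
Marking pv_info and the pv_*_ops tables __read_only removes a classic escalation target: a kernel write primitive can no longer redirect a paravirt function pointer. A hedged sketch of the pattern, assuming __read_only expands to a section attribute for data that boot code write-protects once initialization is done (the section name below is an assumption, not taken from the patch):

/* Sketch: pinning a function-pointer table in a write-protected
 * section; ".data..read_only" is an assumed section name. */
#define SKETCH_READ_ONLY __attribute__((__section__(".data..read_only")))

struct sketch_ops {
	void (*halt)(void);
	void (*restart)(char *cmd);
};

static void sketch_halt(void) { for (;;) ; }
static void sketch_restart(char *cmd) { (void)cmd; }

static struct sketch_ops ops SKETCH_READ_ONLY = {
	.halt		= sketch_halt,
	.restart	= sketch_restart,
};
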
25877diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
25878index 299d493..2ccb0ee 100644
25879--- a/arch/x86/kernel/pci-calgary_64.c
25880+++ b/arch/x86/kernel/pci-calgary_64.c
25881@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
25882 tce_space = be64_to_cpu(readq(target));
25883 tce_space = tce_space & TAR_SW_BITS;
25884
25885- tce_space = tce_space & (~specified_table_size);
25886+ tce_space = tce_space & (~(unsigned long)specified_table_size);
25887 info->tce_space = (u64 *)__va(tce_space);
25888 }
25889 }
25890diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
25891index 35ccf75..7a15747 100644
25892--- a/arch/x86/kernel/pci-iommu_table.c
25893+++ b/arch/x86/kernel/pci-iommu_table.c
25894@@ -2,7 +2,7 @@
25895 #include <asm/iommu_table.h>
25896 #include <linux/string.h>
25897 #include <linux/kallsyms.h>
25898-
25899+#include <linux/sched.h>
25900
25901 #define DEBUG 1
25902
25903diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
25904index 6c483ba..d10ce2f 100644
25905--- a/arch/x86/kernel/pci-swiotlb.c
25906+++ b/arch/x86/kernel/pci-swiotlb.c
25907@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
25908 void *vaddr, dma_addr_t dma_addr,
25909 struct dma_attrs *attrs)
25910 {
25911- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
25912+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
25913 }
25914
25915 static struct dma_map_ops swiotlb_dma_ops = {
25916diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
25917index ca7f0d5..8996469 100644
25918--- a/arch/x86/kernel/preempt.S
25919+++ b/arch/x86/kernel/preempt.S
25920@@ -3,12 +3,14 @@
25921 #include <asm/dwarf2.h>
25922 #include <asm/asm.h>
25923 #include <asm/calling.h>
25924+#include <asm/alternative-asm.h>
25925
25926 ENTRY(___preempt_schedule)
25927 CFI_STARTPROC
25928 SAVE_ALL
25929 call preempt_schedule
25930 RESTORE_ALL
25931+ pax_force_retaddr
25932 ret
25933 CFI_ENDPROC
25934
25935@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
25936 SAVE_ALL
25937 call preempt_schedule_context
25938 RESTORE_ALL
25939+ pax_force_retaddr
25940 ret
25941 CFI_ENDPROC
25942
25943diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
25944index 3fb8d95..254dc51 100644
25945--- a/arch/x86/kernel/process.c
25946+++ b/arch/x86/kernel/process.c
25947@@ -36,7 +36,8 @@
25948 * section. Since TSS's are completely CPU-local, we want them
25949 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
25950 */
25951-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
25952+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
25953+EXPORT_SYMBOL(init_tss);
25954
25955 #ifdef CONFIG_X86_64
25956 static DEFINE_PER_CPU(unsigned char, is_idle);
25957@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
25958 task_xstate_cachep =
25959 kmem_cache_create("task_xstate", xstate_size,
25960 __alignof__(union thread_xstate),
25961- SLAB_PANIC | SLAB_NOTRACK, NULL);
25962+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
25963 }
25964
25965 /*
25966@@ -105,7 +106,7 @@ void exit_thread(void)
25967 unsigned long *bp = t->io_bitmap_ptr;
25968
25969 if (bp) {
25970- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
25971+ struct tss_struct *tss = init_tss + get_cpu();
25972
25973 t->io_bitmap_ptr = NULL;
25974 clear_thread_flag(TIF_IO_BITMAP);
25975@@ -125,6 +126,9 @@ void flush_thread(void)
25976 {
25977 struct task_struct *tsk = current;
25978
25979+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
25980+ loadsegment(gs, 0);
25981+#endif
25982 flush_ptrace_hw_breakpoint(tsk);
25983 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
25984 drop_init_fpu(tsk);
25985@@ -271,7 +275,7 @@ static void __exit_idle(void)
25986 void exit_idle(void)
25987 {
25988 /* idle loop has pid 0 */
25989- if (current->pid)
25990+ if (task_pid_nr(current))
25991 return;
25992 __exit_idle();
25993 }
25994@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
25995 return ret;
25996 }
25997 #endif
25998-void stop_this_cpu(void *dummy)
25999+__noreturn void stop_this_cpu(void *dummy)
26000 {
26001 local_irq_disable();
26002 /*
26003@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26004 }
26005 early_param("idle", idle_setup);
26006
26007-unsigned long arch_align_stack(unsigned long sp)
26008+#ifdef CONFIG_PAX_RANDKSTACK
26009+void pax_randomize_kstack(struct pt_regs *regs)
26010 {
26011- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26012- sp -= get_random_int() % 8192;
26013- return sp & ~0xf;
26014-}
26015+ struct thread_struct *thread = &current->thread;
26016+ unsigned long time;
26017
26018-unsigned long arch_randomize_brk(struct mm_struct *mm)
26019-{
26020- unsigned long range_end = mm->brk + 0x02000000;
26021- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26022-}
26023+ if (!randomize_va_space)
26024+ return;
26025+
26026+ if (v8086_mode(regs))
26027+ return;
26028
26029+ rdtscl(time);
26030+
26031+ /* P4 seems to return a 0 LSB, ignore it */
26032+#ifdef CONFIG_MPENTIUM4
26033+ time &= 0x3EUL;
26034+ time <<= 2;
26035+#elif defined(CONFIG_X86_64)
26036+ time &= 0xFUL;
26037+ time <<= 4;
26038+#else
26039+ time &= 0x1FUL;
26040+ time <<= 3;
26041+#endif
26042+
26043+ thread->sp0 ^= time;
26044+ load_sp0(init_tss + smp_processor_id(), thread);
26045+
26046+#ifdef CONFIG_X86_64
26047+ this_cpu_write(kernel_stack, thread->sp0);
26048+#endif
26049+}
26050+#endif
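
Under PAX_RANDKSTACK the one-shot arch_align_stack()/arch_randomize_brk() randomization is replaced by pax_randomize_kstack(), which XORs a few TSC-derived bits into thread->sp0 so the kernel stack base moves on qualifying kernel entries. The mask/shift pairs bound and align the perturbation; a standalone worked example of the x86-64 branch's arithmetic:

#include <stdio.h>

/* Worked example of the CONFIG_X86_64 branch above: keep 4 TSC bits
 * (mask 0xF), shift left by 4 -> a 16-byte-aligned offset in [0, 240].
 * The i386 branches differ only in mask and shift width. */
int main(void)
{
	unsigned long tsc = 0xdeadbeefUL;	/* stand-in for rdtscl() */
	unsigned long time = tsc;

	time &= 0xFUL;
	time <<= 4;

	printf("perturbation = %lu bytes\n", time);	/* prints 240 */
	return 0;
}
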
26051diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26052index 6f1236c..fd448d4 100644
26053--- a/arch/x86/kernel/process_32.c
26054+++ b/arch/x86/kernel/process_32.c
26055@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26056 unsigned long thread_saved_pc(struct task_struct *tsk)
26057 {
26058 return ((unsigned long *)tsk->thread.sp)[3];
26059+//XXX return tsk->thread.eip;
26060 }
26061
26062 void __show_regs(struct pt_regs *regs, int all)
26063@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
26064 unsigned long sp;
26065 unsigned short ss, gs;
26066
26067- if (user_mode_vm(regs)) {
26068+ if (user_mode(regs)) {
26069 sp = regs->sp;
26070 ss = regs->ss & 0xffff;
26071- gs = get_user_gs(regs);
26072 } else {
26073 sp = kernel_stack_pointer(regs);
26074 savesegment(ss, ss);
26075- savesegment(gs, gs);
26076 }
26077+ gs = get_user_gs(regs);
26078
26079 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26080 (u16)regs->cs, regs->ip, regs->flags,
26081- smp_processor_id());
26082+ raw_smp_processor_id());
26083 print_symbol("EIP is at %s\n", regs->ip);
26084
26085 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26086@@ -133,20 +133,21 @@ void release_thread(struct task_struct *dead_task)
26087 int copy_thread(unsigned long clone_flags, unsigned long sp,
26088 unsigned long arg, struct task_struct *p)
26089 {
26090- struct pt_regs *childregs = task_pt_regs(p);
26091+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26092 struct task_struct *tsk;
26093 int err;
26094
26095 p->thread.sp = (unsigned long) childregs;
26096 p->thread.sp0 = (unsigned long) (childregs+1);
26097+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26098
26099 if (unlikely(p->flags & PF_KTHREAD)) {
26100 /* kernel thread */
26101 memset(childregs, 0, sizeof(struct pt_regs));
26102 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26103- task_user_gs(p) = __KERNEL_STACK_CANARY;
26104- childregs->ds = __USER_DS;
26105- childregs->es = __USER_DS;
26106+ savesegment(gs, childregs->gs);
26107+ childregs->ds = __KERNEL_DS;
26108+ childregs->es = __KERNEL_DS;
26109 childregs->fs = __KERNEL_PERCPU;
26110 childregs->bx = sp; /* function */
26111 childregs->bp = arg;
26112@@ -253,7 +254,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26113 struct thread_struct *prev = &prev_p->thread,
26114 *next = &next_p->thread;
26115 int cpu = smp_processor_id();
26116- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26117+ struct tss_struct *tss = init_tss + cpu;
26118 fpu_switch_t fpu;
26119
26120 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26121@@ -277,6 +278,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26122 */
26123 lazy_save_gs(prev->gs);
26124
26125+#ifdef CONFIG_PAX_MEMORY_UDEREF
26126+ __set_fs(task_thread_info(next_p)->addr_limit);
26127+#endif
26128+
26129 /*
26130 * Load the per-thread Thread-Local Storage descriptor.
26131 */
26132@@ -315,6 +320,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26133 */
26134 arch_end_context_switch(next_p);
26135
26136+ this_cpu_write(current_task, next_p);
26137+ this_cpu_write(current_tinfo, &next_p->tinfo);
26138+
26139 /*
26140 * Restore %gs if needed (which is common)
26141 */
26142@@ -323,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26143
26144 switch_fpu_finish(next_p, fpu);
26145
26146- this_cpu_write(current_task, next_p);
26147-
26148 return prev_p;
26149 }
26150
26151@@ -354,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
26152 } while (count++ < 16);
26153 return 0;
26154 }
26155-
26156diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26157index 9c0280f..5bbb1c0 100644
26158--- a/arch/x86/kernel/process_64.c
26159+++ b/arch/x86/kernel/process_64.c
26160@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26161 struct pt_regs *childregs;
26162 struct task_struct *me = current;
26163
26164- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26165+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26166 childregs = task_pt_regs(p);
26167 p->thread.sp = (unsigned long) childregs;
26168 p->thread.usersp = me->thread.usersp;
26169+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
26170 set_tsk_thread_flag(p, TIF_FORK);
26171 p->thread.fpu_counter = 0;
26172 p->thread.io_bitmap_ptr = NULL;
26173@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26174 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26175 savesegment(es, p->thread.es);
26176 savesegment(ds, p->thread.ds);
26177+ savesegment(ss, p->thread.ss);
26178+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26179 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26180
26181 if (unlikely(p->flags & PF_KTHREAD)) {
26182@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26183 struct thread_struct *prev = &prev_p->thread;
26184 struct thread_struct *next = &next_p->thread;
26185 int cpu = smp_processor_id();
26186- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26187+ struct tss_struct *tss = init_tss + cpu;
26188 unsigned fsindex, gsindex;
26189 fpu_switch_t fpu;
26190
26191@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26192 if (unlikely(next->ds | prev->ds))
26193 loadsegment(ds, next->ds);
26194
26195+ savesegment(ss, prev->ss);
26196+ if (unlikely(next->ss != prev->ss))
26197+ loadsegment(ss, next->ss);
26198
26199 /* We must save %fs and %gs before load_TLS() because
26200 * %fs and %gs may be cleared by load_TLS().
26201@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26202 prev->usersp = this_cpu_read(old_rsp);
26203 this_cpu_write(old_rsp, next->usersp);
26204 this_cpu_write(current_task, next_p);
26205+ this_cpu_write(current_tinfo, &next_p->tinfo);
26206
26207 /*
26208 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26209@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26210 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26211 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26212
26213- this_cpu_write(kernel_stack,
26214- (unsigned long)task_stack_page(next_p) +
26215- THREAD_SIZE - KERNEL_STACK_OFFSET);
26216+ this_cpu_write(kernel_stack, next->sp0);
26217
26218 /*
26219 * Now maybe reload the debug registers and handle I/O bitmaps
26220@@ -442,12 +447,11 @@ unsigned long get_wchan(struct task_struct *p)
26221 if (!p || p == current || p->state == TASK_RUNNING)
26222 return 0;
26223 stack = (unsigned long)task_stack_page(p);
26224- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26225+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26226 return 0;
26227 fp = *(u64 *)(p->thread.sp);
26228 do {
26229- if (fp < (unsigned long)stack ||
26230- fp >= (unsigned long)stack+THREAD_SIZE)
26231+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26232 return 0;
26233 ip = *(u64 *)(fp+8);
26234 if (!in_sched_functions(ip))
26235diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26236index 7461f50..1334029 100644
26237--- a/arch/x86/kernel/ptrace.c
26238+++ b/arch/x86/kernel/ptrace.c
26239@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26240 {
26241 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
26242 unsigned long sp = (unsigned long)&regs->sp;
26243- struct thread_info *tinfo;
26244
26245- if (context == (sp & ~(THREAD_SIZE - 1)))
26246+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26247 return sp;
26248
26249- tinfo = (struct thread_info *)context;
26250- if (tinfo->previous_esp)
26251- return tinfo->previous_esp;
26252+ sp = *(unsigned long *)context;
26253+ if (sp)
26254+ return sp;
26255
26256 return (unsigned long)regs;
26257 }
26258@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
26259 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26260 {
26261 int i;
26262- int dr7 = 0;
26263+ unsigned long dr7 = 0;
26264 struct arch_hw_breakpoint *info;
26265
26266 for (i = 0; i < HBP_NUM; i++) {
26267@@ -822,7 +821,7 @@ long arch_ptrace(struct task_struct *child, long request,
26268 unsigned long addr, unsigned long data)
26269 {
26270 int ret;
26271- unsigned long __user *datap = (unsigned long __user *)data;
26272+ unsigned long __user *datap = (__force unsigned long __user *)data;
26273
26274 switch (request) {
26275 /* read the word at location addr in the USER area. */
26276@@ -907,14 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
26277 if ((int) addr < 0)
26278 return -EIO;
26279 ret = do_get_thread_area(child, addr,
26280- (struct user_desc __user *)data);
26281+ (__force struct user_desc __user *) data);
26282 break;
26283
26284 case PTRACE_SET_THREAD_AREA:
26285 if ((int) addr < 0)
26286 return -EIO;
26287 ret = do_set_thread_area(child, addr,
26288- (struct user_desc __user *)data, 0);
26289+ (__force struct user_desc __user *) data, 0);
26290 break;
26291 #endif
26292
26293@@ -1292,7 +1291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26294
26295 #ifdef CONFIG_X86_64
26296
26297-static struct user_regset x86_64_regsets[] __read_mostly = {
26298+static user_regset_no_const x86_64_regsets[] __read_only = {
26299 [REGSET_GENERAL] = {
26300 .core_note_type = NT_PRSTATUS,
26301 .n = sizeof(struct user_regs_struct) / sizeof(long),
26302@@ -1333,7 +1332,7 @@ static const struct user_regset_view user_x86_64_view = {
26303 #endif /* CONFIG_X86_64 */
26304
26305 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26306-static struct user_regset x86_32_regsets[] __read_mostly = {
26307+static user_regset_no_const x86_32_regsets[] __read_only = {
26308 [REGSET_GENERAL] = {
26309 .core_note_type = NT_PRSTATUS,
26310 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26311@@ -1386,7 +1385,7 @@ static const struct user_regset_view user_x86_32_view = {
26312 */
26313 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26314
26315-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26316+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26317 {
26318 #ifdef CONFIG_X86_64
26319 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26320@@ -1421,7 +1420,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26321 memset(info, 0, sizeof(*info));
26322 info->si_signo = SIGTRAP;
26323 info->si_code = si_code;
26324- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26325+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26326 }
26327
26328 void user_single_step_siginfo(struct task_struct *tsk,
26329@@ -1450,6 +1449,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
26330 # define IS_IA32 0
26331 #endif
26332
26333+#ifdef CONFIG_GRKERNSEC_SETXID
26334+extern void gr_delayed_cred_worker(void);
26335+#endif
26336+
26337 /*
26338 * We must return the syscall number to actually look up in the table.
26339 * This can be -1L to skip running any syscall at all.
26340@@ -1460,6 +1463,11 @@ long syscall_trace_enter(struct pt_regs *regs)
26341
26342 user_exit();
26343
26344+#ifdef CONFIG_GRKERNSEC_SETXID
26345+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26346+ gr_delayed_cred_worker();
26347+#endif
26348+
26349 /*
26350 * If we stepped into a sysenter/syscall insn, it trapped in
26351 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26352@@ -1515,6 +1523,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26353 */
26354 user_exit();
26355
26356+#ifdef CONFIG_GRKERNSEC_SETXID
26357+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26358+ gr_delayed_cred_worker();
26359+#endif
26360+
26361 audit_syscall_exit(regs);
26362
26363 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26364diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26365index 2f355d2..e75ed0a 100644
26366--- a/arch/x86/kernel/pvclock.c
26367+++ b/arch/x86/kernel/pvclock.c
26368@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26369 reset_hung_task_detector();
26370 }
26371
26372-static atomic64_t last_value = ATOMIC64_INIT(0);
26373+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26374
26375 void pvclock_resume(void)
26376 {
26377- atomic64_set(&last_value, 0);
26378+ atomic64_set_unchecked(&last_value, 0);
26379 }
26380
26381 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26382@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26383 * updating at the same time, and one of them could be slightly behind,
26384 * making the assumption that last_value always go forward fail to hold.
26385 */
26386- last = atomic64_read(&last_value);
26387+ last = atomic64_read_unchecked(&last_value);
26388 do {
26389 if (ret < last)
26390 return last;
26391- last = atomic64_cmpxchg(&last_value, last, ret);
26392+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26393 } while (unlikely(last != ret));
26394
26395 return ret;
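
The switch to atomic64_*_unchecked() only opts last_value out of the overflow instrumentation -- a free-running timestamp is not an overflow-sensitive counter -- and leaves the monotonicity clamp unchanged. For reference, the clamp is the standard compare-exchange loop; a standalone C11 sketch of the same idea:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Never return a timestamp below the largest one already handed out. */
static _Atomic uint64_t last_value;

static uint64_t monotonic_read(uint64_t raw)
{
	uint64_t last = atomic_load(&last_value);

	for (;;) {
		if (raw < last)
			return last;	/* another CPU was ahead of us */
		/* publish raw; on failure, last is reloaded and we retry */
		if (atomic_compare_exchange_weak(&last_value, &last, raw))
			return raw;
	}
}

int main(void)
{
	printf("%llu\n", (unsigned long long)monotonic_read(42));	/* 42 */
	printf("%llu\n", (unsigned long long)monotonic_read(7));	/* 42 */
	return 0;
}
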
26396diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26397index c752cb4..866c432 100644
26398--- a/arch/x86/kernel/reboot.c
26399+++ b/arch/x86/kernel/reboot.c
26400@@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26401
26402 void __noreturn machine_real_restart(unsigned int type)
26403 {
26404+
26405+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26406+ struct desc_struct *gdt;
26407+#endif
26408+
26409 local_irq_disable();
26410
26411 /*
26412@@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
26413
26414 /* Jump to the identity-mapped low memory code */
26415 #ifdef CONFIG_X86_32
26416- asm volatile("jmpl *%0" : :
26417+
26418+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26419+ gdt = get_cpu_gdt_table(smp_processor_id());
26420+ pax_open_kernel();
26421+#ifdef CONFIG_PAX_MEMORY_UDEREF
26422+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26423+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26424+ loadsegment(ds, __KERNEL_DS);
26425+ loadsegment(es, __KERNEL_DS);
26426+ loadsegment(ss, __KERNEL_DS);
26427+#endif
26428+#ifdef CONFIG_PAX_KERNEXEC
26429+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26430+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26431+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26432+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26433+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26434+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26435+#endif
26436+ pax_close_kernel();
26437+#endif
26438+
26439+ asm volatile("ljmpl *%0" : :
26440 "rm" (real_mode_header->machine_real_restart_asm),
26441 "a" (type));
26442 #else
26443@@ -470,7 +497,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26444 * try to force a triple fault and then cycle between hitting the keyboard
26445 * controller and doing that
26446 */
26447-static void native_machine_emergency_restart(void)
26448+static void __noreturn native_machine_emergency_restart(void)
26449 {
26450 int i;
26451 int attempt = 0;
26452@@ -593,13 +620,13 @@ void native_machine_shutdown(void)
26453 #endif
26454 }
26455
26456-static void __machine_emergency_restart(int emergency)
26457+static void __noreturn __machine_emergency_restart(int emergency)
26458 {
26459 reboot_emergency = emergency;
26460 machine_ops.emergency_restart();
26461 }
26462
26463-static void native_machine_restart(char *__unused)
26464+static void __noreturn native_machine_restart(char *__unused)
26465 {
26466 pr_notice("machine restart\n");
26467
26468@@ -608,7 +635,7 @@ static void native_machine_restart(char *__unused)
26469 __machine_emergency_restart(0);
26470 }
26471
26472-static void native_machine_halt(void)
26473+static void __noreturn native_machine_halt(void)
26474 {
26475 /* Stop other cpus and apics */
26476 machine_shutdown();
26477@@ -618,7 +645,7 @@ static void native_machine_halt(void)
26478 stop_this_cpu(NULL);
26479 }
26480
26481-static void native_machine_power_off(void)
26482+static void __noreturn native_machine_power_off(void)
26483 {
26484 if (pm_power_off) {
26485 if (!reboot_force)
26486@@ -627,9 +654,10 @@ static void native_machine_power_off(void)
26487 }
26488 /* A fallback in case there is no PM info available */
26489 tboot_shutdown(TB_SHUTDOWN_HALT);
26490+ unreachable();
26491 }
26492
26493-struct machine_ops machine_ops = {
26494+struct machine_ops machine_ops __read_only = {
26495 .power_off = native_machine_power_off,
26496 .shutdown = native_machine_shutdown,
26497 .emergency_restart = native_machine_emergency_restart,
26498diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
26499index c8e41e9..64049ef 100644
26500--- a/arch/x86/kernel/reboot_fixups_32.c
26501+++ b/arch/x86/kernel/reboot_fixups_32.c
26502@@ -57,7 +57,7 @@ struct device_fixup {
26503 unsigned int vendor;
26504 unsigned int device;
26505 void (*reboot_fixup)(struct pci_dev *);
26506-};
26507+} __do_const;
26508
26509 /*
26510 * PCI ids solely used for fixups_table go here
26511diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
26512index 3fd2c69..16ef367 100644
26513--- a/arch/x86/kernel/relocate_kernel_64.S
26514+++ b/arch/x86/kernel/relocate_kernel_64.S
26515@@ -11,6 +11,7 @@
26516 #include <asm/kexec.h>
26517 #include <asm/processor-flags.h>
26518 #include <asm/pgtable_types.h>
26519+#include <asm/alternative-asm.h>
26520
26521 /*
26522 * Must be relocatable PIC code callable as a C function
26523@@ -96,8 +97,7 @@ relocate_kernel:
26524
26525 /* jump to identity mapped page */
26526 addq $(identity_mapped - relocate_kernel), %r8
26527- pushq %r8
26528- ret
26529+ jmp *%r8
26530
26531 identity_mapped:
26532 /* set return address to 0 if not preserving context */
26533@@ -167,6 +167,7 @@ identity_mapped:
26534 xorl %r14d, %r14d
26535 xorl %r15d, %r15d
26536
26537+ pax_force_retaddr 0, 1
26538 ret
26539
26540 1:
26541diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
26542index cb233bc..23b4879 100644
26543--- a/arch/x86/kernel/setup.c
26544+++ b/arch/x86/kernel/setup.c
26545@@ -110,6 +110,7 @@
26546 #include <asm/mce.h>
26547 #include <asm/alternative.h>
26548 #include <asm/prom.h>
26549+#include <asm/boot.h>
26550
26551 /*
26552 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
26553@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
26554 #endif
26555
26556
26557-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
26558-__visible unsigned long mmu_cr4_features;
26559+#ifdef CONFIG_X86_64
26560+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
26561+#elif defined(CONFIG_X86_PAE)
26562+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
26563 #else
26564-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
26565+__visible unsigned long mmu_cr4_features __read_only;
26566 #endif
26567
26568+void set_in_cr4(unsigned long mask)
26569+{
26570+ unsigned long cr4 = read_cr4();
26571+
26572+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
26573+ return;
26574+
26575+ pax_open_kernel();
26576+ mmu_cr4_features |= mask;
26577+ pax_close_kernel();
26578+
26579+ if (trampoline_cr4_features)
26580+ *trampoline_cr4_features = mmu_cr4_features;
26581+ cr4 |= mask;
26582+ write_cr4(cr4);
26583+}
26584+EXPORT_SYMBOL(set_in_cr4);
26585+
26586+void clear_in_cr4(unsigned long mask)
26587+{
26588+ unsigned long cr4 = read_cr4();
26589+
26590+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
26591+ return;
26592+
26593+ pax_open_kernel();
26594+ mmu_cr4_features &= ~mask;
26595+ pax_close_kernel();
26596+
26597+ if (trampoline_cr4_features)
26598+ *trampoline_cr4_features = mmu_cr4_features;
26599+ cr4 &= ~mask;
26600+ write_cr4(cr4);
26601+}
26602+EXPORT_SYMBOL(clear_in_cr4);
26603+
26604 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
26605 int bootloader_type, bootloader_version;
26606
26607@@ -768,7 +807,7 @@ static void __init trim_bios_range(void)
26608 * area (640->1Mb) as ram even though it is not.
26609 * take them out.
26610 */
26611- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
26612+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
26613
26614 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
26615 }
26616@@ -776,7 +815,7 @@ static void __init trim_bios_range(void)
26617 /* called before trim_bios_range() to spare extra sanitize */
26618 static void __init e820_add_kernel_range(void)
26619 {
26620- u64 start = __pa_symbol(_text);
26621+ u64 start = __pa_symbol(ktla_ktva(_text));
26622 u64 size = __pa_symbol(_end) - start;
26623
26624 /*
26625@@ -838,8 +877,12 @@ static void __init trim_low_memory_range(void)
26626
26627 void __init setup_arch(char **cmdline_p)
26628 {
26629+#ifdef CONFIG_X86_32
26630+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
26631+#else
26632 memblock_reserve(__pa_symbol(_text),
26633 (unsigned long)__bss_stop - (unsigned long)_text);
26634+#endif
26635
26636 early_reserve_initrd();
26637
26638@@ -931,14 +974,14 @@ void __init setup_arch(char **cmdline_p)
26639
26640 if (!boot_params.hdr.root_flags)
26641 root_mountflags &= ~MS_RDONLY;
26642- init_mm.start_code = (unsigned long) _text;
26643- init_mm.end_code = (unsigned long) _etext;
26644+ init_mm.start_code = ktla_ktva((unsigned long) _text);
26645+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
26646 init_mm.end_data = (unsigned long) _edata;
26647 init_mm.brk = _brk_end;
26648
26649- code_resource.start = __pa_symbol(_text);
26650- code_resource.end = __pa_symbol(_etext)-1;
26651- data_resource.start = __pa_symbol(_etext);
26652+ code_resource.start = __pa_symbol(ktla_ktva(_text));
26653+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
26654+ data_resource.start = __pa_symbol(_sdata);
26655 data_resource.end = __pa_symbol(_edata)-1;
26656 bss_resource.start = __pa_symbol(__bss_start);
26657 bss_resource.end = __pa_symbol(__bss_stop)-1;
26658diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
26659index 5cdff03..80fa283 100644
26660--- a/arch/x86/kernel/setup_percpu.c
26661+++ b/arch/x86/kernel/setup_percpu.c
26662@@ -21,19 +21,17 @@
26663 #include <asm/cpu.h>
26664 #include <asm/stackprotector.h>
26665
26666-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
26667+#ifdef CONFIG_SMP
26668+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
26669 EXPORT_PER_CPU_SYMBOL(cpu_number);
26670+#endif
26671
26672-#ifdef CONFIG_X86_64
26673 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
26674-#else
26675-#define BOOT_PERCPU_OFFSET 0
26676-#endif
26677
26678 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
26679 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
26680
26681-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
26682+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
26683 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
26684 };
26685 EXPORT_SYMBOL(__per_cpu_offset);
26686@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
26687 {
26688 #ifdef CONFIG_NEED_MULTIPLE_NODES
26689 pg_data_t *last = NULL;
26690- unsigned int cpu;
26691+ int cpu;
26692
26693 for_each_possible_cpu(cpu) {
26694 int node = early_cpu_to_node(cpu);
26695@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
26696 {
26697 #ifdef CONFIG_X86_32
26698 struct desc_struct gdt;
26699+ unsigned long base = per_cpu_offset(cpu);
26700
26701- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
26702- 0x2 | DESCTYPE_S, 0x8);
26703- gdt.s = 1;
26704+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
26705+ 0x83 | DESCTYPE_S, 0xC);
26706 write_gdt_entry(get_cpu_gdt_table(cpu),
26707 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
26708 #endif
26709@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
26710 /* alrighty, percpu areas up and running */
26711 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
26712 for_each_possible_cpu(cpu) {
26713+#ifdef CONFIG_CC_STACKPROTECTOR
26714+#ifdef CONFIG_X86_32
26715+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
26716+#endif
26717+#endif
26718 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
26719 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
26720 per_cpu(cpu_number, cpu) = cpu;
26721@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
26722 */
26723 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
26724 #endif
26725+#ifdef CONFIG_CC_STACKPROTECTOR
26726+#ifdef CONFIG_X86_32
26727+ if (!cpu)
26728+ per_cpu(stack_canary.canary, cpu) = canary;
26729+#endif
26730+#endif
26731 /*
26732 * Up to this point, the boot CPU has been using .init.data
26733 * area. Reload any changed state for the boot CPU.
26734diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
26735index 9e5de68..16c53cb 100644
26736--- a/arch/x86/kernel/signal.c
26737+++ b/arch/x86/kernel/signal.c
26738@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
26739 * Align the stack pointer according to the i386 ABI,
26740 * i.e. so that on function entry ((sp + 4) & 15) == 0.
26741 */
26742- sp = ((sp + 4) & -16ul) - 4;
26743+ sp = ((sp - 12) & -16ul) - 4;
26744 #else /* !CONFIG_X86_32 */
26745 sp = round_down(sp, 16) - 8;
26746 #endif
26747@@ -298,9 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26748 }
26749
26750 if (current->mm->context.vdso)
26751- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26752+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26753 else
26754- restorer = &frame->retcode;
26755+ restorer = (void __user *)&frame->retcode;
26756 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26757 restorer = ksig->ka.sa.sa_restorer;
26758
26759@@ -314,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26760 * reasons and because gdb uses it as a signature to notice
26761 * signal handler stack frames.
26762 */
26763- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
26764+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
26765
26766 if (err)
26767 return -EFAULT;
26768@@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26769 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
26770
26771 /* Set up to return from userspace. */
26772- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26773+ if (current->mm->context.vdso)
26774+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26775+ else
26776+ restorer = (void __user *)&frame->retcode;
26777 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26778 restorer = ksig->ka.sa.sa_restorer;
26779 put_user_ex(restorer, &frame->pretcode);
26780@@ -373,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26781 * reasons and because gdb uses it as a signature to notice
26782 * signal handler stack frames.
26783 */
26784- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
26785+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
26786 } put_user_catch(err);
26787
26788 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
26789@@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26790 {
26791 int usig = signr_convert(ksig->sig);
26792 sigset_t *set = sigmask_to_save();
26793- compat_sigset_t *cset = (compat_sigset_t *) set;
26794+ sigset_t sigcopy;
26795+ compat_sigset_t *cset;
26796+
26797+ sigcopy = *set;
26798+
26799+ cset = (compat_sigset_t *) &sigcopy;
26800
26801 /* Set up the stack frame */
26802 if (is_ia32_frame()) {
26803@@ -620,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26804 } else if (is_x32_frame()) {
26805 return x32_setup_rt_frame(ksig, cset, regs);
26806 } else {
26807- return __setup_rt_frame(ksig->sig, ksig, set, regs);
26808+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
26809 }
26810 }
26811
26812diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
26813index 7c3a5a6..f0a8961 100644
26814--- a/arch/x86/kernel/smp.c
26815+++ b/arch/x86/kernel/smp.c
26816@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
26817
26818 __setup("nonmi_ipi", nonmi_ipi_setup);
26819
26820-struct smp_ops smp_ops = {
26821+struct smp_ops smp_ops __read_only = {
26822 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
26823 .smp_prepare_cpus = native_smp_prepare_cpus,
26824 .smp_cpus_done = native_smp_cpus_done,
26825diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
26826index 85dc05a..1241266 100644
26827--- a/arch/x86/kernel/smpboot.c
26828+++ b/arch/x86/kernel/smpboot.c
26829@@ -229,14 +229,18 @@ static void notrace start_secondary(void *unused)
26830
26831 enable_start_cpu0 = 0;
26832
26833-#ifdef CONFIG_X86_32
26834- /* switch away from the initial page table */
26835- load_cr3(swapper_pg_dir);
26836- __flush_tlb_all();
26837-#endif
26838-
26839 /* otherwise gcc will move up smp_processor_id before the cpu_init */
26840 barrier();
26841+
26842+ /* switch away from the initial page table */
26843+#ifdef CONFIG_PAX_PER_CPU_PGD
26844+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
26845+ __flush_tlb_all();
26846+#elif defined(CONFIG_X86_32)
26847+ load_cr3(swapper_pg_dir);
26848+ __flush_tlb_all();
26849+#endif
26850+
26851 /*
26852 * Check TSC synchronization with the BP:
26853 */
26854@@ -751,6 +755,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26855 idle->thread.sp = (unsigned long) (((struct pt_regs *)
26856 (THREAD_SIZE + task_stack_page(idle))) - 1);
26857 per_cpu(current_task, cpu) = idle;
26858+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
26859
26860 #ifdef CONFIG_X86_32
26861 /* Stack for startup_32 can be just as for start_secondary onwards */
26862@@ -758,11 +763,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26863 #else
26864 clear_tsk_thread_flag(idle, TIF_FORK);
26865 initial_gs = per_cpu_offset(cpu);
26866- per_cpu(kernel_stack, cpu) =
26867- (unsigned long)task_stack_page(idle) -
26868- KERNEL_STACK_OFFSET + THREAD_SIZE;
26869+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26870 #endif
26871+
26872+ pax_open_kernel();
26873 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
26874+ pax_close_kernel();
26875+
26876 initial_code = (unsigned long)start_secondary;
26877 stack_start = idle->thread.sp;
26878
26879@@ -911,6 +918,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
26880 /* the FPU context is blank, nobody can own it */
26881 __cpu_disable_lazy_restore(cpu);
26882
26883+#ifdef CONFIG_PAX_PER_CPU_PGD
26884+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
26885+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26886+ KERNEL_PGD_PTRS);
26887+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
26888+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26889+ KERNEL_PGD_PTRS);
26890+#endif
26891+
26892 err = do_boot_cpu(apicid, cpu, tidle);
26893 if (err) {
26894 pr_debug("do_boot_cpu failed %d\n", err);
26895diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
26896index 9b4d51d..5d28b58 100644
26897--- a/arch/x86/kernel/step.c
26898+++ b/arch/x86/kernel/step.c
26899@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26900 struct desc_struct *desc;
26901 unsigned long base;
26902
26903- seg &= ~7UL;
26904+ seg >>= 3;
26905
26906 mutex_lock(&child->mm->context.lock);
26907- if (unlikely((seg >> 3) >= child->mm->context.size))
26908+ if (unlikely(seg >= child->mm->context.size))
26909 addr = -1L; /* bogus selector, access would fault */
26910 else {
26911 desc = child->mm->context.ldt + seg;
26912@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26913 addr += base;
26914 }
26915 mutex_unlock(&child->mm->context.lock);
26916- }
26917+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
26918+ addr = ktla_ktva(addr);
26919
26920 return addr;
26921 }
26922@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
26923 unsigned char opcode[15];
26924 unsigned long addr = convert_ip_to_linear(child, regs);
26925
26926+ if (addr == -EINVAL)
26927+ return 0;
26928+
26929 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
26930 for (i = 0; i < copied; i++) {
26931 switch (opcode[i]) {
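
The step.c change folds the selector-to-index conversion into one shift: bits 3..15 of a segment selector hold the descriptor-table index (bit 2 selects GDT/LDT, bits 0..1 are the RPL), so `seg >>= 3` yields the LDT slot that is then bounds-checked against context.size. The second hunk additionally translates kernel code-segment addresses through ktla_ktva() so single-step opcode decoding keeps working when KERNEXEC relocates kernel text. Selector decoding as a standalone sketch:

#include <stdio.h>

struct selector {
        unsigned index;   /* descriptor-table index */
        unsigned ti;      /* 0 = GDT, 1 = LDT */
        unsigned rpl;     /* requested privilege level */
};

static struct selector decode(unsigned short sel)
{
        struct selector s = {
                .index = sel >> 3,        /* what "seg >>= 3" computes */
                .ti    = (sel >> 2) & 1,
                .rpl   = sel & 3,
        };
        return s;
}

int main(void)
{
        struct selector s = decode(0x2b);   /* x86-64 __USER_DS */
        printf("index=%u ti=%u rpl=%u\n", s.index, s.ti, s.rpl);
        return 0;
}
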
26932diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
26933new file mode 100644
26934index 0000000..5877189
26935--- /dev/null
26936+++ b/arch/x86/kernel/sys_i386_32.c
26937@@ -0,0 +1,189 @@
26938+/*
26939+ * This file contains various random system calls that
26940+ * have a non-standard calling sequence on the Linux/i386
26941+ * platform.
26942+ */
26943+
26944+#include <linux/errno.h>
26945+#include <linux/sched.h>
26946+#include <linux/mm.h>
26947+#include <linux/fs.h>
26948+#include <linux/smp.h>
26949+#include <linux/sem.h>
26950+#include <linux/msg.h>
26951+#include <linux/shm.h>
26952+#include <linux/stat.h>
26953+#include <linux/syscalls.h>
26954+#include <linux/mman.h>
26955+#include <linux/file.h>
26956+#include <linux/utsname.h>
26957+#include <linux/ipc.h>
26958+#include <linux/elf.h>
26959+
26960+#include <linux/uaccess.h>
26961+#include <linux/unistd.h>
26962+
26963+#include <asm/syscalls.h>
26964+
26965+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
26966+{
26967+ unsigned long pax_task_size = TASK_SIZE;
26968+
26969+#ifdef CONFIG_PAX_SEGMEXEC
26970+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
26971+ pax_task_size = SEGMEXEC_TASK_SIZE;
26972+#endif
26973+
26974+ if (flags & MAP_FIXED)
26975+ if (len > pax_task_size || addr > pax_task_size - len)
26976+ return -EINVAL;
26977+
26978+ return 0;
26979+}
26980+
26981+/*
26982+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
26983+ */
26984+static unsigned long get_align_mask(void)
26985+{
26986+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
26987+ return 0;
26988+
26989+ if (!(current->flags & PF_RANDOMIZE))
26990+ return 0;
26991+
26992+ return va_align.mask;
26993+}
26994+
26995+unsigned long
26996+arch_get_unmapped_area(struct file *filp, unsigned long addr,
26997+ unsigned long len, unsigned long pgoff, unsigned long flags)
26998+{
26999+ struct mm_struct *mm = current->mm;
27000+ struct vm_area_struct *vma;
27001+ unsigned long pax_task_size = TASK_SIZE;
27002+ struct vm_unmapped_area_info info;
27003+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27004+
27005+#ifdef CONFIG_PAX_SEGMEXEC
27006+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27007+ pax_task_size = SEGMEXEC_TASK_SIZE;
27008+#endif
27009+
27010+ pax_task_size -= PAGE_SIZE;
27011+
27012+ if (len > pax_task_size)
27013+ return -ENOMEM;
27014+
27015+ if (flags & MAP_FIXED)
27016+ return addr;
27017+
27018+#ifdef CONFIG_PAX_RANDMMAP
27019+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27020+#endif
27021+
27022+ if (addr) {
27023+ addr = PAGE_ALIGN(addr);
27024+ if (pax_task_size - len >= addr) {
27025+ vma = find_vma(mm, addr);
27026+ if (check_heap_stack_gap(vma, addr, len, offset))
27027+ return addr;
27028+ }
27029+ }
27030+
27031+ info.flags = 0;
27032+ info.length = len;
27033+ info.align_mask = filp ? get_align_mask() : 0;
27034+ info.align_offset = pgoff << PAGE_SHIFT;
27035+ info.threadstack_offset = offset;
27036+
27037+#ifdef CONFIG_PAX_PAGEEXEC
27038+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27039+ info.low_limit = 0x00110000UL;
27040+ info.high_limit = mm->start_code;
27041+
27042+#ifdef CONFIG_PAX_RANDMMAP
27043+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27044+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27045+#endif
27046+
27047+ if (info.low_limit < info.high_limit) {
27048+ addr = vm_unmapped_area(&info);
27049+ if (!IS_ERR_VALUE(addr))
27050+ return addr;
27051+ }
27052+ } else
27053+#endif
27054+
27055+ info.low_limit = mm->mmap_base;
27056+ info.high_limit = pax_task_size;
27057+
27058+ return vm_unmapped_area(&info);
27059+}
27060+
27061+unsigned long
27062+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27063+ const unsigned long len, const unsigned long pgoff,
27064+ const unsigned long flags)
27065+{
27066+ struct vm_area_struct *vma;
27067+ struct mm_struct *mm = current->mm;
27068+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27069+ struct vm_unmapped_area_info info;
27070+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27071+
27072+#ifdef CONFIG_PAX_SEGMEXEC
27073+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27074+ pax_task_size = SEGMEXEC_TASK_SIZE;
27075+#endif
27076+
27077+ pax_task_size -= PAGE_SIZE;
27078+
27079+ /* requested length too big for entire address space */
27080+ if (len > pax_task_size)
27081+ return -ENOMEM;
27082+
27083+ if (flags & MAP_FIXED)
27084+ return addr;
27085+
27086+#ifdef CONFIG_PAX_PAGEEXEC
27087+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27088+ goto bottomup;
27089+#endif
27090+
27091+#ifdef CONFIG_PAX_RANDMMAP
27092+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27093+#endif
27094+
27095+ /* requesting a specific address */
27096+ if (addr) {
27097+ addr = PAGE_ALIGN(addr);
27098+ if (pax_task_size - len >= addr) {
27099+ vma = find_vma(mm, addr);
27100+ if (check_heap_stack_gap(vma, addr, len, offset))
27101+ return addr;
27102+ }
27103+ }
27104+
27105+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27106+ info.length = len;
27107+ info.low_limit = PAGE_SIZE;
27108+ info.high_limit = mm->mmap_base;
27109+ info.align_mask = filp ? get_align_mask() : 0;
27110+ info.align_offset = pgoff << PAGE_SHIFT;
27111+ info.threadstack_offset = offset;
27112+
27113+ addr = vm_unmapped_area(&info);
27114+ if (!(addr & ~PAGE_MASK))
27115+ return addr;
27116+ VM_BUG_ON(addr != -ENOMEM);
27117+
27118+bottomup:
27119+ /*
27120+ * A failed mmap() very likely causes application failure,
27121+ * so fall back to the bottom-up function here. This scenario
27122+ * can happen with large stack limits and large mmap()
27123+ * allocations.
27124+ */
27125+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27126+}
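
The new sys_i386_32.c re-implements the 32-bit mmap placement helpers so they honor SEGMEXEC's halved address space (SEGMEXEC_TASK_SIZE), ignore address hints under RANDMMAP, and vet every candidate with check_heap_stack_gap() instead of the bare `addr + len <= vma->vm_start` test. A simplified model of that gap test; the guard size is an assumed constant here, and the real helper also handles grows-down stack VMAs:

#include <stdbool.h>

struct vma { unsigned long vm_start; };

#define HEAP_STACK_GAP (64UL << 10)   /* assumed 64 KiB guard, illustrative */

/* candidate range [addr, addr+len) is acceptable only if it leaves the
 * guard (plus the randomized threadstack offset) below the next VMA */
static bool check_heap_stack_gap(const struct vma *next, unsigned long addr,
                                 unsigned long len, unsigned long offset)
{
        unsigned long gap = HEAP_STACK_GAP + offset;

        if (!next)
                return true;              /* nothing mapped above */
        if (next->vm_start < gap || next->vm_start - gap < len)
                return false;             /* guards against wraparound */
        return addr <= next->vm_start - gap - len;
}
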
27127diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27128index 30277e2..5664a29 100644
27129--- a/arch/x86/kernel/sys_x86_64.c
27130+++ b/arch/x86/kernel/sys_x86_64.c
27131@@ -81,8 +81,8 @@ out:
27132 return error;
27133 }
27134
27135-static void find_start_end(unsigned long flags, unsigned long *begin,
27136- unsigned long *end)
27137+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27138+ unsigned long *begin, unsigned long *end)
27139 {
27140 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27141 unsigned long new_begin;
27142@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27143 *begin = new_begin;
27144 }
27145 } else {
27146- *begin = current->mm->mmap_legacy_base;
27147+ *begin = mm->mmap_legacy_base;
27148 *end = TASK_SIZE;
27149 }
27150 }
27151@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27152 struct vm_area_struct *vma;
27153 struct vm_unmapped_area_info info;
27154 unsigned long begin, end;
27155+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27156
27157 if (flags & MAP_FIXED)
27158 return addr;
27159
27160- find_start_end(flags, &begin, &end);
27161+ find_start_end(mm, flags, &begin, &end);
27162
27163 if (len > end)
27164 return -ENOMEM;
27165
27166+#ifdef CONFIG_PAX_RANDMMAP
27167+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27168+#endif
27169+
27170 if (addr) {
27171 addr = PAGE_ALIGN(addr);
27172 vma = find_vma(mm, addr);
27173- if (end - len >= addr &&
27174- (!vma || addr + len <= vma->vm_start))
27175+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27176 return addr;
27177 }
27178
27179@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27180 info.high_limit = end;
27181 info.align_mask = filp ? get_align_mask() : 0;
27182 info.align_offset = pgoff << PAGE_SHIFT;
27183+ info.threadstack_offset = offset;
27184 return vm_unmapped_area(&info);
27185 }
27186
27187@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27188 struct mm_struct *mm = current->mm;
27189 unsigned long addr = addr0;
27190 struct vm_unmapped_area_info info;
27191+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27192
27193 /* requested length too big for entire address space */
27194 if (len > TASK_SIZE)
27195@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27196 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27197 goto bottomup;
27198
27199+#ifdef CONFIG_PAX_RANDMMAP
27200+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27201+#endif
27202+
27203 /* requesting a specific address */
27204 if (addr) {
27205 addr = PAGE_ALIGN(addr);
27206 vma = find_vma(mm, addr);
27207- if (TASK_SIZE - len >= addr &&
27208- (!vma || addr + len <= vma->vm_start))
27209+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27210 return addr;
27211 }
27212
27213@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27214 info.high_limit = mm->mmap_base;
27215 info.align_mask = filp ? get_align_mask() : 0;
27216 info.align_offset = pgoff << PAGE_SHIFT;
27217+ info.threadstack_offset = offset;
27218 addr = vm_unmapped_area(&info);
27219 if (!(addr & ~PAGE_MASK))
27220 return addr;
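
Both x86-64 placement routines gain the same brace-less `#ifdef CONFIG_PAX_RANDMMAP` guard seen in the i386 file: the conditional controls the single statement that follows it, which is the entire `if (addr) { ... }` hint block, so randomized tasks never honor a non-MAP_FIXED address hint. A toy demonstration of that control flow:

#include <stdio.h>

#define MF_PAX_RANDMMAP 0x1UL

static unsigned long place(unsigned long pax_flags, unsigned long hint)
{
        if (!(pax_flags & MF_PAX_RANDMMAP))

        if (hint)                       /* the whole hint block is guarded */
                return hint;

        return 0x70000000UL;            /* fall through: randomized search */
}

int main(void)
{
        printf("%#lx\n", place(0, 0x1000));               /* hint honored */
        printf("%#lx\n", place(MF_PAX_RANDMMAP, 0x1000)); /* hint ignored */
        return 0;
}

Whether to keep that idiom or add explicit braces is a style call; the patch leans on the dangling-statement form throughout.
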
27221diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27222index 91a4496..bb87552 100644
27223--- a/arch/x86/kernel/tboot.c
27224+++ b/arch/x86/kernel/tboot.c
27225@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27226
27227 void tboot_shutdown(u32 shutdown_type)
27228 {
27229- void (*shutdown)(void);
27230+ void (* __noreturn shutdown)(void);
27231
27232 if (!tboot_enabled())
27233 return;
27234@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27235
27236 switch_to_tboot_pt();
27237
27238- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27239+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27240 shutdown();
27241
27242 /* should not reach here */
27243@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27244 return -ENODEV;
27245 }
27246
27247-static atomic_t ap_wfs_count;
27248+static atomic_unchecked_t ap_wfs_count;
27249
27250 static int tboot_wait_for_aps(int num_aps)
27251 {
27252@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27253 {
27254 switch (action) {
27255 case CPU_DYING:
27256- atomic_inc(&ap_wfs_count);
27257+ atomic_inc_unchecked(&ap_wfs_count);
27258 if (num_online_cpus() == 1)
27259- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27260+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27261 return NOTIFY_BAD;
27262 break;
27263 }
27264@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27265
27266 tboot_create_trampoline();
27267
27268- atomic_set(&ap_wfs_count, 0);
27269+ atomic_set_unchecked(&ap_wfs_count, 0);
27270 register_hotcpu_notifier(&tboot_cpu_notifier);
27271
27272 #ifdef CONFIG_DEBUG_FS
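
ap_wfs_count only tracks how many APs reached the wait-for-SIPI loop, so wraparound is harmless; under PAX_REFCOUNT the plain atomic_t ops would trap on signed overflow, hence the conversion to the *_unchecked variants. A userspace model of the split; it is not actually atomic, and abort() stands in for the trap that the real lock-prefixed asm raises with a jno/int $4 sequence (visible in the atomic64 files further down):

#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* illustration only: no lock prefix, so this is not actually atomic */
static void atomic_inc(atomic_t *v)
{
        int res;

        if (__builtin_add_overflow(v->counter, 1, &res))
                abort();                /* models the int $4 overflow trap */
        v->counter = res;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        v->counter++;                   /* wraparound permitted by design */
}
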
27273diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27274index 24d3c91..d06b473 100644
27275--- a/arch/x86/kernel/time.c
27276+++ b/arch/x86/kernel/time.c
27277@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27278 {
27279 unsigned long pc = instruction_pointer(regs);
27280
27281- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27282+ if (!user_mode(regs) && in_lock_functions(pc)) {
27283 #ifdef CONFIG_FRAME_POINTER
27284- return *(unsigned long *)(regs->bp + sizeof(long));
27285+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27286 #else
27287 unsigned long *sp =
27288 (unsigned long *)kernel_stack_pointer(regs);
27289@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27290 * or above a saved flags. Eflags has bits 22-31 zero,
27291 * kernel addresses don't.
27292 */
27293+
27294+#ifdef CONFIG_PAX_KERNEXEC
27295+ return ktla_ktva(sp[0]);
27296+#else
27297 if (sp[0] >> 22)
27298 return sp[0];
27299 if (sp[1] >> 22)
27300 return sp[1];
27301 #endif
27302+
27303+#endif
27304 }
27305 return pc;
27306 }
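
profile_pc() fishes return addresses out of saved stack slots; with KERNEXEC, values taken from the stack live in one view of kernel text and must be shifted through ktla_ktva() before they are usable in the other. A sketch of the translation pair, assuming a constant offset between the linked and executed views of kernel text; the offset value and the direction conventions here are assumptions for illustration:

#define KERNEL_TEXT_OFFSET 0x01000000UL   /* illustrative value only */

static unsigned long ktla_ktva(unsigned long addr)
{
        return addr + KERNEL_TEXT_OFFSET;   /* linear -> execution view */
}

static unsigned long ktva_ktla(unsigned long addr)
{
        return addr - KERNEL_TEXT_OFFSET;   /* execution -> linear view */
}
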
27307diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27308index f7fec09..9991981 100644
27309--- a/arch/x86/kernel/tls.c
27310+++ b/arch/x86/kernel/tls.c
27311@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27312 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27313 return -EINVAL;
27314
27315+#ifdef CONFIG_PAX_SEGMEXEC
27316+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27317+ return -EINVAL;
27318+#endif
27319+
27320 set_tls_desc(p, idx, &info, 1);
27321
27322 return 0;
27323@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27324
27325 if (kbuf)
27326 info = kbuf;
27327- else if (__copy_from_user(infobuf, ubuf, count))
27328+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27329 return -EFAULT;
27330 else
27331 info = infobuf;
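
regset_tls_set() copies a caller-supplied count into a fixed on-stack array of GDT_ENTRY_TLS_ENTRIES descriptors; the added `count > sizeof infobuf` test rejects oversized requests before __copy_from_user() can run off the end of the buffer. The pattern, modelled in userspace with memcpy standing in for the user-copy:

#include <string.h>
#include <errno.h>

#define GDT_ENTRY_TLS_ENTRIES 3

struct user_desc {
        unsigned entry_number, base_addr, limit, flags;
};

static int copy_tls_entries(const void *ubuf, size_t count)
{
        struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];

        if (count > sizeof(infobuf))    /* the added bounds check */
                return -EFAULT;
        memcpy(infobuf, ubuf, count);   /* stands in for __copy_from_user */
        /* ... apply the descriptors in infobuf ... */
        return 0;
}
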
27332diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27333index 1c113db..287b42e 100644
27334--- a/arch/x86/kernel/tracepoint.c
27335+++ b/arch/x86/kernel/tracepoint.c
27336@@ -9,11 +9,11 @@
27337 #include <linux/atomic.h>
27338
27339 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27340-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27341+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27342 (unsigned long) trace_idt_table };
27343
27344 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27345-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27346+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27347
27348 static int trace_irq_vector_refcount;
27349 static DEFINE_MUTEX(irq_vector_mutex);
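
Here (and in traps.c below) the IDTs move from __page_aligned_bss to __page_aligned_rodata: the tables become read-only at run time, and intentional updates have to lift the write protection first, which is what pax_open_kernel()/pax_close_kernel() do by toggling CR0.WP. A userspace analogue using mprotect() on a page-sized table (the sketch relies on Linux permitting mprotect over static data):

#include <stdint.h>
#include <sys/mman.h>

#define NR_VECTORS 256

/* one 4 KiB page: 256 gate descriptors of 16 bytes each */
static uint64_t idt_table[NR_VECTORS * 2]
                __attribute__((aligned(4096)));

static void set_gate(int vec, uint64_t lo, uint64_t hi)
{
        mprotect(idt_table, sizeof(idt_table), PROT_READ | PROT_WRITE);
        idt_table[vec * 2]     = lo;    /* the "open" window is kept as  */
        idt_table[vec * 2 + 1] = hi;    /* short as possible, as in the patch */
        mprotect(idt_table, sizeof(idt_table), PROT_READ);
}
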
27350diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27351index b857ed8..51ae4cb 100644
27352--- a/arch/x86/kernel/traps.c
27353+++ b/arch/x86/kernel/traps.c
27354@@ -66,7 +66,7 @@
27355 #include <asm/proto.h>
27356
27357 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27358-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27359+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27360 #else
27361 #include <asm/processor-flags.h>
27362 #include <asm/setup.h>
27363@@ -75,7 +75,7 @@ asmlinkage int system_call(void);
27364 #endif
27365
27366 /* Must be page-aligned because the real IDT is used in a fixmap. */
27367-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27368+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27369
27370 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27371 EXPORT_SYMBOL_GPL(used_vectors);
27372@@ -107,11 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
27373 }
27374
27375 static int __kprobes
27376-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27377+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27378 struct pt_regs *regs, long error_code)
27379 {
27380 #ifdef CONFIG_X86_32
27381- if (regs->flags & X86_VM_MASK) {
27382+ if (v8086_mode(regs)) {
27383 /*
27384 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27385 * On nmi (interrupt 2), do_trap should not be called.
27386@@ -124,12 +124,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27387 return -1;
27388 }
27389 #endif
27390- if (!user_mode(regs)) {
27391+ if (!user_mode_novm(regs)) {
27392 if (!fixup_exception(regs)) {
27393 tsk->thread.error_code = error_code;
27394 tsk->thread.trap_nr = trapnr;
27395+
27396+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27397+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27398+ str = "PAX: suspicious stack segment fault";
27399+#endif
27400+
27401 die(str, regs, error_code);
27402 }
27403+
27404+#ifdef CONFIG_PAX_REFCOUNT
27405+ if (trapnr == 4)
27406+ pax_report_refcount_overflow(regs);
27407+#endif
27408+
27409 return 0;
27410 }
27411
27412@@ -137,7 +149,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27413 }
27414
27415 static void __kprobes
27416-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27417+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27418 long error_code, siginfo_t *info)
27419 {
27420 struct task_struct *tsk = current;
27421@@ -161,7 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27422 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27423 printk_ratelimit()) {
27424 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27425- tsk->comm, tsk->pid, str,
27426+ tsk->comm, task_pid_nr(tsk), str,
27427 regs->ip, regs->sp, error_code);
27428 print_vma_addr(" in ", regs->ip);
27429 pr_cont("\n");
27430@@ -277,7 +289,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27431 conditional_sti(regs);
27432
27433 #ifdef CONFIG_X86_32
27434- if (regs->flags & X86_VM_MASK) {
27435+ if (v8086_mode(regs)) {
27436 local_irq_enable();
27437 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
27438 goto exit;
27439@@ -285,18 +297,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
27440 #endif
27441
27442 tsk = current;
27443- if (!user_mode(regs)) {
27444+ if (!user_mode_novm(regs)) {
27445 if (fixup_exception(regs))
27446 goto exit;
27447
27448 tsk->thread.error_code = error_code;
27449 tsk->thread.trap_nr = X86_TRAP_GP;
27450 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
27451- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
27452+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
27453+
27454+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27455+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
27456+ die("PAX: suspicious general protection fault", regs, error_code);
27457+ else
27458+#endif
27459+
27460 die("general protection fault", regs, error_code);
27461+ }
27462 goto exit;
27463 }
27464
27465+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27466+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
27467+ struct mm_struct *mm = tsk->mm;
27468+ unsigned long limit;
27469+
27470+ down_write(&mm->mmap_sem);
27471+ limit = mm->context.user_cs_limit;
27472+ if (limit < TASK_SIZE) {
27473+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
27474+ up_write(&mm->mmap_sem);
27475+ return;
27476+ }
27477+ up_write(&mm->mmap_sem);
27478+ }
27479+#endif
27480+
27481 tsk->thread.error_code = error_code;
27482 tsk->thread.trap_nr = X86_TRAP_GP;
27483
27484@@ -457,7 +493,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
27485 /* It's safe to allow irq's after DR6 has been saved */
27486 preempt_conditional_sti(regs);
27487
27488- if (regs->flags & X86_VM_MASK) {
27489+ if (v8086_mode(regs)) {
27490 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
27491 X86_TRAP_DB);
27492 preempt_conditional_cli(regs);
27493@@ -472,7 +508,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
27494 * We already checked v86 mode above, so we can check for kernel mode
27495 * by just checking the CPL of CS.
27496 */
27497- if ((dr6 & DR_STEP) && !user_mode(regs)) {
27498+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
27499 tsk->thread.debugreg6 &= ~DR_STEP;
27500 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
27501 regs->flags &= ~X86_EFLAGS_TF;
27502@@ -504,7 +540,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
27503 return;
27504 conditional_sti(regs);
27505
27506- if (!user_mode_vm(regs))
27507+ if (!user_mode(regs))
27508 {
27509 if (!fixup_exception(regs)) {
27510 task->thread.error_code = error_code;
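
Two threads run through the traps.c hunks: the v8086/kernel-mode classification helpers get PaX-aware names (v8086_mode(), user_mode_novm()), and trap 4 (#OF, the overflow exception raised by `into`/`int $4`) becomes the reporting channel for REFCOUNT: when an instrumented atomic op detects signed overflow it undoes the operation and raises the trap, which do_trap_no_signal() forwards to pax_report_refcount_overflow(). A stub sketch of that dispatch; both function bodies are placeholders:

#define X86_TRAP_OF 4   /* overflow exception, raised by "into"/"int $4" */

struct pt_regs;          /* opaque here */

static void pax_report_refcount_overflow(struct pt_regs *regs)
{
        (void)regs;      /* kernel: log the event and kill the task */
}

static void trap_hook(int trapnr, struct pt_regs *regs)
{
        if (trapnr == X86_TRAP_OF)      /* the "trapnr == 4" test above */
                pax_report_refcount_overflow(regs);
}
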
27511diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
27512index 2ed8459..7cf329f 100644
27513--- a/arch/x86/kernel/uprobes.c
27514+++ b/arch/x86/kernel/uprobes.c
27515@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
27516 int ret = NOTIFY_DONE;
27517
27518 /* We are only interested in userspace traps */
27519- if (regs && !user_mode_vm(regs))
27520+ if (regs && !user_mode(regs))
27521 return NOTIFY_DONE;
27522
27523 switch (val) {
27524@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
27525
27526 if (ncopied != rasize) {
27527 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
27528- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
27529+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
27530
27531 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
27532 }
27533diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
27534index b9242ba..50c5edd 100644
27535--- a/arch/x86/kernel/verify_cpu.S
27536+++ b/arch/x86/kernel/verify_cpu.S
27537@@ -20,6 +20,7 @@
27538 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
27539 * arch/x86/kernel/trampoline_64.S: secondary processor verification
27540 * arch/x86/kernel/head_32.S: processor startup
27541+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
27542 *
27543 * verify_cpu, returns the status of longmode and SSE in register %eax.
27544 * 0: Success 1: Failure
27545diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
27546index e8edcf5..27f9344 100644
27547--- a/arch/x86/kernel/vm86_32.c
27548+++ b/arch/x86/kernel/vm86_32.c
27549@@ -44,6 +44,7 @@
27550 #include <linux/ptrace.h>
27551 #include <linux/audit.h>
27552 #include <linux/stddef.h>
27553+#include <linux/grsecurity.h>
27554
27555 #include <asm/uaccess.h>
27556 #include <asm/io.h>
27557@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
27558 do_exit(SIGSEGV);
27559 }
27560
27561- tss = &per_cpu(init_tss, get_cpu());
27562+ tss = init_tss + get_cpu();
27563 current->thread.sp0 = current->thread.saved_sp0;
27564 current->thread.sysenter_cs = __KERNEL_CS;
27565 load_sp0(tss, &current->thread);
27566@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
27567
27568 if (tsk->thread.saved_sp0)
27569 return -EPERM;
27570+
27571+#ifdef CONFIG_GRKERNSEC_VM86
27572+ if (!capable(CAP_SYS_RAWIO)) {
27573+ gr_handle_vm86();
27574+ return -EPERM;
27575+ }
27576+#endif
27577+
27578 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
27579 offsetof(struct kernel_vm86_struct, vm86plus) -
27580 sizeof(info.regs));
27581@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
27582 int tmp;
27583 struct vm86plus_struct __user *v86;
27584
27585+#ifdef CONFIG_GRKERNSEC_VM86
27586+ if (!capable(CAP_SYS_RAWIO)) {
27587+ gr_handle_vm86();
27588+ return -EPERM;
27589+ }
27590+#endif
27591+
27592 tsk = current;
27593 switch (cmd) {
27594 case VM86_REQUEST_IRQ:
27595@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
27596 tsk->thread.saved_fs = info->regs32->fs;
27597 tsk->thread.saved_gs = get_user_gs(info->regs32);
27598
27599- tss = &per_cpu(init_tss, get_cpu());
27600+ tss = init_tss + get_cpu();
27601 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
27602 if (cpu_has_sep)
27603 tsk->thread.sysenter_cs = 0;
27604@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
27605 goto cannot_handle;
27606 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
27607 goto cannot_handle;
27608- intr_ptr = (unsigned long __user *) (i << 2);
27609+ intr_ptr = (__force unsigned long __user *) (i << 2);
27610 if (get_user(segoffs, intr_ptr))
27611 goto cannot_handle;
27612 if ((segoffs >> 16) == BIOSSEG)
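
CONFIG_GRKERNSEC_VM86 gates both vm86 entry points behind CAP_SYS_RAWIO, closing the 16-bit emulation interface (historically a kernel attack surface) to unprivileged callers while logging the attempt. The shape of the guard, with stubbed capability and audit hooks:

#include <errno.h>
#include <stdbool.h>

static bool capable_sys_rawio(void) { return false; }  /* stub */
static void gr_handle_vm86(void)    { }                /* audit-log stub */

static long vm86_guard(void)
{
        if (!capable_sys_rawio()) {
                gr_handle_vm86();
                return -EPERM;          /* refuse and log, as in the patch */
        }
        return 0;                       /* proceed with the real vm86 work */
}
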
27613diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
27614index da6b35a..977e9cf 100644
27615--- a/arch/x86/kernel/vmlinux.lds.S
27616+++ b/arch/x86/kernel/vmlinux.lds.S
27617@@ -26,6 +26,13 @@
27618 #include <asm/page_types.h>
27619 #include <asm/cache.h>
27620 #include <asm/boot.h>
27621+#include <asm/segment.h>
27622+
27623+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27624+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
27625+#else
27626+#define __KERNEL_TEXT_OFFSET 0
27627+#endif
27628
27629 #undef i386 /* in case the preprocessor is a 32bit one */
27630
27631@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
27632
27633 PHDRS {
27634 text PT_LOAD FLAGS(5); /* R_E */
27635+#ifdef CONFIG_X86_32
27636+ module PT_LOAD FLAGS(5); /* R_E */
27637+#endif
27638+#ifdef CONFIG_XEN
27639+ rodata PT_LOAD FLAGS(5); /* R_E */
27640+#else
27641+ rodata PT_LOAD FLAGS(4); /* R__ */
27642+#endif
27643 data PT_LOAD FLAGS(6); /* RW_ */
27644-#ifdef CONFIG_X86_64
27645+ init.begin PT_LOAD FLAGS(6); /* RW_ */
27646 #ifdef CONFIG_SMP
27647 percpu PT_LOAD FLAGS(6); /* RW_ */
27648 #endif
27649+ text.init PT_LOAD FLAGS(5); /* R_E */
27650+ text.exit PT_LOAD FLAGS(5); /* R_E */
27651 init PT_LOAD FLAGS(7); /* RWE */
27652-#endif
27653 note PT_NOTE FLAGS(0); /* ___ */
27654 }
27655
27656 SECTIONS
27657 {
27658 #ifdef CONFIG_X86_32
27659- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
27660- phys_startup_32 = startup_32 - LOAD_OFFSET;
27661+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
27662 #else
27663- . = __START_KERNEL;
27664- phys_startup_64 = startup_64 - LOAD_OFFSET;
27665+ . = __START_KERNEL;
27666 #endif
27667
27668 /* Text and read-only data */
27669- .text : AT(ADDR(.text) - LOAD_OFFSET) {
27670- _text = .;
27671+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27672 /* bootstrapping code */
27673+#ifdef CONFIG_X86_32
27674+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27675+#else
27676+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27677+#endif
27678+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
27679+ _text = .;
27680 HEAD_TEXT
27681 . = ALIGN(8);
27682 _stext = .;
27683@@ -104,13 +124,47 @@ SECTIONS
27684 IRQENTRY_TEXT
27685 *(.fixup)
27686 *(.gnu.warning)
27687- /* End of text section */
27688- _etext = .;
27689 } :text = 0x9090
27690
27691- NOTES :text :note
27692+ . += __KERNEL_TEXT_OFFSET;
27693
27694- EXCEPTION_TABLE(16) :text = 0x9090
27695+#ifdef CONFIG_X86_32
27696+ . = ALIGN(PAGE_SIZE);
27697+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
27698+
27699+#ifdef CONFIG_PAX_KERNEXEC
27700+ MODULES_EXEC_VADDR = .;
27701+ BYTE(0)
27702+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
27703+ . = ALIGN(HPAGE_SIZE) - 1;
27704+ MODULES_EXEC_END = .;
27705+#endif
27706+
27707+ } :module
27708+#endif
27709+
27710+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
27711+ /* End of text section */
27712+ BYTE(0)
27713+ _etext = . - __KERNEL_TEXT_OFFSET;
27714+ }
27715+
27716+#ifdef CONFIG_X86_32
27717+ . = ALIGN(PAGE_SIZE);
27718+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
27719+ . = ALIGN(PAGE_SIZE);
27720+ *(.empty_zero_page)
27721+ *(.initial_pg_fixmap)
27722+ *(.initial_pg_pmd)
27723+ *(.initial_page_table)
27724+ *(.swapper_pg_dir)
27725+ } :rodata
27726+#endif
27727+
27728+ . = ALIGN(PAGE_SIZE);
27729+ NOTES :rodata :note
27730+
27731+ EXCEPTION_TABLE(16) :rodata
27732
27733 #if defined(CONFIG_DEBUG_RODATA)
27734 /* .text should occupy whole number of pages */
27735@@ -122,16 +176,20 @@ SECTIONS
27736
27737 /* Data */
27738 .data : AT(ADDR(.data) - LOAD_OFFSET) {
27739+
27740+#ifdef CONFIG_PAX_KERNEXEC
27741+ . = ALIGN(HPAGE_SIZE);
27742+#else
27743+ . = ALIGN(PAGE_SIZE);
27744+#endif
27745+
27746 /* Start of data section */
27747 _sdata = .;
27748
27749 /* init_task */
27750 INIT_TASK_DATA(THREAD_SIZE)
27751
27752-#ifdef CONFIG_X86_32
27753- /* 32 bit has nosave before _edata */
27754 NOSAVE_DATA
27755-#endif
27756
27757 PAGE_ALIGNED_DATA(PAGE_SIZE)
27758
27759@@ -172,12 +230,19 @@ SECTIONS
27760 #endif /* CONFIG_X86_64 */
27761
27762 /* Init code and data - will be freed after init */
27763- . = ALIGN(PAGE_SIZE);
27764 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
27765+ BYTE(0)
27766+
27767+#ifdef CONFIG_PAX_KERNEXEC
27768+ . = ALIGN(HPAGE_SIZE);
27769+#else
27770+ . = ALIGN(PAGE_SIZE);
27771+#endif
27772+
27773 __init_begin = .; /* paired with __init_end */
27774- }
27775+ } :init.begin
27776
27777-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
27778+#ifdef CONFIG_SMP
27779 /*
27780 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
27781 * output PHDR, so the next output section - .init.text - should
27782@@ -186,12 +251,27 @@ SECTIONS
27783 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
27784 #endif
27785
27786- INIT_TEXT_SECTION(PAGE_SIZE)
27787-#ifdef CONFIG_X86_64
27788- :init
27789-#endif
27790+ . = ALIGN(PAGE_SIZE);
27791+ init_begin = .;
27792+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
27793+ VMLINUX_SYMBOL(_sinittext) = .;
27794+ INIT_TEXT
27795+ VMLINUX_SYMBOL(_einittext) = .;
27796+ . = ALIGN(PAGE_SIZE);
27797+ } :text.init
27798
27799- INIT_DATA_SECTION(16)
27800+ /*
27801+ * .exit.text is discard at runtime, not link time, to deal with
27802+ * references from .altinstructions and .eh_frame
27803+ */
27804+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27805+ EXIT_TEXT
27806+ . = ALIGN(16);
27807+ } :text.exit
27808+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
27809+
27810+ . = ALIGN(PAGE_SIZE);
27811+ INIT_DATA_SECTION(16) :init
27812
27813 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
27814 __x86_cpu_dev_start = .;
27815@@ -262,19 +342,12 @@ SECTIONS
27816 }
27817
27818 . = ALIGN(8);
27819- /*
27820- * .exit.text is discard at runtime, not link time, to deal with
27821- * references from .altinstructions and .eh_frame
27822- */
27823- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
27824- EXIT_TEXT
27825- }
27826
27827 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
27828 EXIT_DATA
27829 }
27830
27831-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
27832+#ifndef CONFIG_SMP
27833 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
27834 #endif
27835
27836@@ -293,16 +366,10 @@ SECTIONS
27837 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
27838 __smp_locks = .;
27839 *(.smp_locks)
27840- . = ALIGN(PAGE_SIZE);
27841 __smp_locks_end = .;
27842+ . = ALIGN(PAGE_SIZE);
27843 }
27844
27845-#ifdef CONFIG_X86_64
27846- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
27847- NOSAVE_DATA
27848- }
27849-#endif
27850-
27851 /* BSS */
27852 . = ALIGN(PAGE_SIZE);
27853 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
27854@@ -318,6 +385,7 @@ SECTIONS
27855 __brk_base = .;
27856 . += 64 * 1024; /* 64k alignment slop space */
27857 *(.brk_reservation) /* areas brk users have reserved */
27858+ . = ALIGN(HPAGE_SIZE);
27859 __brk_limit = .;
27860 }
27861
27862@@ -344,13 +412,12 @@ SECTIONS
27863 * for the boot processor.
27864 */
27865 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
27866-INIT_PER_CPU(gdt_page);
27867 INIT_PER_CPU(irq_stack_union);
27868
27869 /*
27870 * Build-time check on the image size:
27871 */
27872-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
27873+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
27874 "kernel image bigger than KERNEL_IMAGE_SIZE");
27875
27876 #ifdef CONFIG_SMP
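
The linker-script rework is the heart of 32-bit KERNEXEC: text, rodata (now covering NOTES, the exception table, and the boot page tables), data, and the module region each get their own program header so that, outside the init segment that is freed after boot, no mapping is writable and executable at once. The FLAGS() values are plain ELF p_flags bits (PF_X=1, PF_W=2, PF_R=4); a tiny decoder:

#include <stdio.h>

static void decode_pflags(unsigned f)
{
        printf("%c%c%c\n", f & 4 ? 'R' : '_',
                           f & 2 ? 'W' : '_',
                           f & 1 ? 'E' : '_');
}

int main(void)
{
        decode_pflags(5);   /* text:   R_E */
        decode_pflags(4);   /* rodata: R__ */
        decode_pflags(6);   /* data:   RW_ */
        decode_pflags(7);   /* init:   RWE (discarded after boot) */
        return 0;
}
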
27877diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
27878index 1f96f93..d5c8f7a 100644
27879--- a/arch/x86/kernel/vsyscall_64.c
27880+++ b/arch/x86/kernel/vsyscall_64.c
27881@@ -56,15 +56,13 @@
27882 DEFINE_VVAR(int, vgetcpu_mode);
27883 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
27884
27885-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
27886+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
27887
27888 static int __init vsyscall_setup(char *str)
27889 {
27890 if (str) {
27891 if (!strcmp("emulate", str))
27892 vsyscall_mode = EMULATE;
27893- else if (!strcmp("native", str))
27894- vsyscall_mode = NATIVE;
27895 else if (!strcmp("none", str))
27896 vsyscall_mode = NONE;
27897 else
27898@@ -323,8 +321,7 @@ do_ret:
27899 return true;
27900
27901 sigsegv:
27902- force_sig(SIGSEGV, current);
27903- return true;
27904+ do_group_exit(SIGKILL);
27905 }
27906
27907 /*
27908@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
27909 extern char __vvar_page;
27910 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
27911
27912- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
27913- vsyscall_mode == NATIVE
27914- ? PAGE_KERNEL_VSYSCALL
27915- : PAGE_KERNEL_VVAR);
27916+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
27917 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
27918 (unsigned long)VSYSCALL_START);
27919
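
With the NATIVE mode removed, the vsyscall page is always mapped non-executable (PAGE_KERNEL_VVAR) and calls into it are trapped and emulated; a bogus entry now terminates the whole thread group with SIGKILL rather than delivering a catchable SIGSEGV. The page is a classic ROP target precisely because its entry points sit at fixed, well-known addresses:

#include <stdio.h>

#define VSYSCALL_START 0xffffffffff600000ULL

int main(void)
{
        printf("gettimeofday: %#llx\n", VSYSCALL_START + 0x000);
        printf("time:         %#llx\n", VSYSCALL_START + 0x400);
        printf("getcpu:       %#llx\n", VSYSCALL_START + 0x800);
        return 0;
}

Each slot is 1024 bytes apart, which is what makes the addresses guessable without any information leak.
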
27920diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
27921index 04068192..4d75aa6 100644
27922--- a/arch/x86/kernel/x8664_ksyms_64.c
27923+++ b/arch/x86/kernel/x8664_ksyms_64.c
27924@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
27925 EXPORT_SYMBOL(copy_user_generic_unrolled);
27926 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
27927 EXPORT_SYMBOL(__copy_user_nocache);
27928-EXPORT_SYMBOL(_copy_from_user);
27929-EXPORT_SYMBOL(_copy_to_user);
27930
27931 EXPORT_SYMBOL(copy_page);
27932 EXPORT_SYMBOL(clear_page);
27933@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
27934 EXPORT_SYMBOL(___preempt_schedule_context);
27935 #endif
27936 #endif
27937+
27938+#ifdef CONFIG_PAX_PER_CPU_PGD
27939+EXPORT_SYMBOL(cpu_pgd);
27940+#endif
27941diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
27942index 021783b..6511282 100644
27943--- a/arch/x86/kernel/x86_init.c
27944+++ b/arch/x86/kernel/x86_init.c
27945@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
27946 static void default_nmi_init(void) { };
27947 static int default_i8042_detect(void) { return 1; };
27948
27949-struct x86_platform_ops x86_platform = {
27950+struct x86_platform_ops x86_platform __read_only = {
27951 .calibrate_tsc = native_calibrate_tsc,
27952 .get_wallclock = mach_get_cmos_time,
27953 .set_wallclock = mach_set_rtc_mmss,
27954@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
27955 EXPORT_SYMBOL_GPL(x86_platform);
27956
27957 #if defined(CONFIG_PCI_MSI)
27958-struct x86_msi_ops x86_msi = {
27959+struct x86_msi_ops x86_msi __read_only = {
27960 .setup_msi_irqs = native_setup_msi_irqs,
27961 .compose_msi_msg = native_compose_msi_msg,
27962 .teardown_msi_irq = native_teardown_msi_irq,
27963@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
27964 }
27965 #endif
27966
27967-struct x86_io_apic_ops x86_io_apic_ops = {
27968+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
27969 .init = native_io_apic_init_mappings,
27970 .read = native_io_apic_read,
27971 .write = native_io_apic_write,
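
The x86_init.c hunks continue the __read_only campaign: ops tables full of function pointers move out of writable memory so they cannot be retargeted by an attacker with a kernel write primitive, and the rare legitimate late update is wrapped in pax_open_kernel()/pax_close_kernel() with an explicit cast, exactly as the kvm/vmx.c hunks below do. A simplified model of the pattern; the struct here stays writable so the sketch runs, whereas in the patch __read_only is what makes the cast necessary:

struct msi_ops {
        int  (*setup)(int irq);
        void (*teardown)(int irq);
};

static int  native_setup(int irq)    { return irq; }
static void native_teardown(int irq) { (void)irq; }

/* __read_only would place this table in .rodata in the patched kernel */
static struct msi_ops x86_msi = {
        .setup    = native_setup,
        .teardown = native_teardown,
};

static void drop_teardown(void)
{
        /* pax_open_kernel();   kernel: briefly clear CR0.WP */
        *(void **)&x86_msi.teardown = 0;   /* the "*(void **)&ops->..."
                                              store pattern from the patch */
        /* pax_close_kernel(); */
}
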
27972diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
27973index 422fd82..b2d262e 100644
27974--- a/arch/x86/kernel/xsave.c
27975+++ b/arch/x86/kernel/xsave.c
27976@@ -164,18 +164,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
27977
27978 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
27979 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
27980- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
27981+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
27982
27983 if (!use_xsave())
27984 return err;
27985
27986- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
27987+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
27988
27989 /*
27990 * Read the xstate_bv which we copied (directly from the cpu or
27991 * from the state in task struct) to the user buffers.
27992 */
27993- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
27994+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
27995
27996 /*
27997 * For legacy compatible, we always set FP/SSE bits in the bit
27998@@ -190,7 +190,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
27999 */
28000 xstate_bv |= XSTATE_FPSSE;
28001
28002- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28003+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28004
28005 return err;
28006 }
28007@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28008 {
28009 int err;
28010
28011+ buf = (struct xsave_struct __user *)____m(buf);
28012 if (use_xsave())
28013 err = xsave_user(buf);
28014 else if (use_fxsr())
28015@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28016 */
28017 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28018 {
28019+ buf = (void __user *)____m(buf);
28020 if (use_xsave()) {
28021 if ((unsigned long)buf % 64 || fx_only) {
28022 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
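
Most of the xsave.c churn is sparse hygiene: __user tags the pointers that must only be touched through the user-copy helpers, and ____m()/__force casts move values between the kernel and user address spaces explicitly. The kernel's own definitions reduce to attributes that only the checker sees:

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* under sparse, dereferencing a __user pointer is an error and dropping
 * the tag requires an explicit __force cast -- which is what the
 * __force_user/____m() changes in these hunks are satisfying */
static unsigned int get_user_u32(const unsigned int __user *uptr)
{
        const unsigned int *kptr = (__force const unsigned int *)uptr;

        return *kptr;   /* stand-in for a checked copy_from_user() */
}
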
28023diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28024index c697625..a032162 100644
28025--- a/arch/x86/kvm/cpuid.c
28026+++ b/arch/x86/kvm/cpuid.c
28027@@ -156,15 +156,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28028 struct kvm_cpuid2 *cpuid,
28029 struct kvm_cpuid_entry2 __user *entries)
28030 {
28031- int r;
28032+ int r, i;
28033
28034 r = -E2BIG;
28035 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28036 goto out;
28037 r = -EFAULT;
28038- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28039- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28040+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28041 goto out;
28042+ for (i = 0; i < cpuid->nent; ++i) {
28043+ struct kvm_cpuid_entry2 cpuid_entry;
28044+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28045+ goto out;
28046+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28047+ }
28048 vcpu->arch.cpuid_nent = cpuid->nent;
28049 kvm_apic_set_version(vcpu);
28050 kvm_x86_ops->cpuid_update(vcpu);
28051@@ -179,15 +184,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28052 struct kvm_cpuid2 *cpuid,
28053 struct kvm_cpuid_entry2 __user *entries)
28054 {
28055- int r;
28056+ int r, i;
28057
28058 r = -E2BIG;
28059 if (cpuid->nent < vcpu->arch.cpuid_nent)
28060 goto out;
28061 r = -EFAULT;
28062- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28063- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28064+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28065 goto out;
28066+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28067+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28068+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28069+ goto out;
28070+ }
28071 return 0;
28072
28073 out:
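
Instead of one bulk copy_from_user() into the fixed cpuid_entries array, the ioctl now validates the whole user range with access_ok() and copies one entry at a time with __copy_from_user(); each transfer is bounded by sizeof(cpuid_entry), which keeps the copy sizes constant for the usercopy/size-overflow instrumentation used elsewhere in this patch. A userspace model of the loop:

#include <string.h>
#include <errno.h>

#define KVM_MAX_CPUID_ENTRIES 80

struct cpuid_entry { unsigned function, index, eax, ebx, ecx, edx; };

static struct cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];

static int set_cpuid(const struct cpuid_entry *uentries, unsigned nent)
{
        unsigned i;

        if (nent > KVM_MAX_CPUID_ENTRIES)
                return -E2BIG;
        for (i = 0; i < nent; ++i) {
                struct cpuid_entry e;

                /* one bounded __copy_from_user() per entry in the patch */
                memcpy(&e, &uentries[i], sizeof(e));
                cpuid_entries[i] = e;
        }
        return 0;
}
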
28074diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28075index d86ff15..e77b023 100644
28076--- a/arch/x86/kvm/lapic.c
28077+++ b/arch/x86/kvm/lapic.c
28078@@ -55,7 +55,7 @@
28079 #define APIC_BUS_CYCLE_NS 1
28080
28081 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28082-#define apic_debug(fmt, arg...)
28083+#define apic_debug(fmt, arg...) do {} while (0)
28084
28085 #define APIC_LVT_NUM 6
28086 /* 14 is the version for Xeon and Pentium 8.4.8*/
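
The lapic.c one-liner turns the disabled debug macro from an empty expansion into `do {} while (0)`: the empty form leaves a bare `;` behind, which trips -Wempty-body and invites surprises if the macro ever grows a multi-statement body, while the do-while form is always exactly one statement. Demonstration:

#include <stdio.h>

#define apic_debug(fmt, ...) do {} while (0)

int main(void)
{
        int fault = 0;

        if (fault)
                apic_debug("lapic fault %d\n", fault);  /* one statement */
        else
                printf("ok\n");                         /* else binds here */
        return 0;
}
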
28087diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28088index ad75d77..a679d32 100644
28089--- a/arch/x86/kvm/paging_tmpl.h
28090+++ b/arch/x86/kvm/paging_tmpl.h
28091@@ -331,7 +331,7 @@ retry_walk:
28092 if (unlikely(kvm_is_error_hva(host_addr)))
28093 goto error;
28094
28095- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28096+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28097 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28098 goto error;
28099 walker->ptep_user[walker->level - 1] = ptep_user;
28100diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28101index 532add1..59eb241 100644
28102--- a/arch/x86/kvm/svm.c
28103+++ b/arch/x86/kvm/svm.c
28104@@ -3495,7 +3495,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28105 int cpu = raw_smp_processor_id();
28106
28107 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28108+
28109+ pax_open_kernel();
28110 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28111+ pax_close_kernel();
28112+
28113 load_TR_desc();
28114 }
28115
28116@@ -3898,6 +3902,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28117 #endif
28118 #endif
28119
28120+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28121+ __set_fs(current_thread_info()->addr_limit);
28122+#endif
28123+
28124 reload_tss(vcpu);
28125
28126 local_irq_disable();
28127diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28128index dcc4de3..6bf73f4 100644
28129--- a/arch/x86/kvm/vmx.c
28130+++ b/arch/x86/kvm/vmx.c
28131@@ -1316,12 +1316,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28132 #endif
28133 }
28134
28135-static void vmcs_clear_bits(unsigned long field, u32 mask)
28136+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28137 {
28138 vmcs_writel(field, vmcs_readl(field) & ~mask);
28139 }
28140
28141-static void vmcs_set_bits(unsigned long field, u32 mask)
28142+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28143 {
28144 vmcs_writel(field, vmcs_readl(field) | mask);
28145 }
28146@@ -1522,7 +1522,11 @@ static void reload_tss(void)
28147 struct desc_struct *descs;
28148
28149 descs = (void *)gdt->address;
28150+
28151+ pax_open_kernel();
28152 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28153+ pax_close_kernel();
28154+
28155 load_TR_desc();
28156 }
28157
28158@@ -1746,6 +1750,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28159 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28160 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28161
28162+#ifdef CONFIG_PAX_PER_CPU_PGD
28163+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28164+#endif
28165+
28166 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28167 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28168 vmx->loaded_vmcs->cpu = cpu;
28169@@ -2033,7 +2041,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28170 * reads and returns guest's timestamp counter "register"
28171 * guest_tsc = host_tsc + tsc_offset -- 21.3
28172 */
28173-static u64 guest_read_tsc(void)
28174+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28175 {
28176 u64 host_tsc, tsc_offset;
28177
28178@@ -2987,8 +2995,11 @@ static __init int hardware_setup(void)
28179 if (!cpu_has_vmx_flexpriority())
28180 flexpriority_enabled = 0;
28181
28182- if (!cpu_has_vmx_tpr_shadow())
28183- kvm_x86_ops->update_cr8_intercept = NULL;
28184+ if (!cpu_has_vmx_tpr_shadow()) {
28185+ pax_open_kernel();
28186+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28187+ pax_close_kernel();
28188+ }
28189
28190 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28191 kvm_disable_largepages();
28192@@ -2999,13 +3010,15 @@ static __init int hardware_setup(void)
28193 if (!cpu_has_vmx_apicv())
28194 enable_apicv = 0;
28195
28196+ pax_open_kernel();
28197 if (enable_apicv)
28198- kvm_x86_ops->update_cr8_intercept = NULL;
28199+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28200 else {
28201- kvm_x86_ops->hwapic_irr_update = NULL;
28202- kvm_x86_ops->deliver_posted_interrupt = NULL;
28203- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28204+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28205+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28206+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28207 }
28208+ pax_close_kernel();
28209
28210 if (nested)
28211 nested_vmx_setup_ctls_msrs();
28212@@ -4134,7 +4147,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28213
28214 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28215 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
28216+
28217+#ifndef CONFIG_PAX_PER_CPU_PGD
28218 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28219+#endif
28220
28221 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
28222 #ifdef CONFIG_X86_64
28223@@ -4156,7 +4172,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28224 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28225 vmx->host_idt_base = dt.address;
28226
28227- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28228+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28229
28230 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28231 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28232@@ -7219,6 +7235,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28233 "jmp 2f \n\t"
28234 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28235 "2: "
28236+
28237+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28238+ "ljmp %[cs],$3f\n\t"
28239+ "3: "
28240+#endif
28241+
28242 /* Save guest registers, load host registers, keep flags */
28243 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28244 "pop %0 \n\t"
28245@@ -7271,6 +7293,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28246 #endif
28247 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28248 [wordsize]"i"(sizeof(ulong))
28249+
28250+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28251+ ,[cs]"i"(__KERNEL_CS)
28252+#endif
28253+
28254 : "cc", "memory"
28255 #ifdef CONFIG_X86_64
28256 , "rax", "rbx", "rdi", "rsi"
28257@@ -7284,7 +7311,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28258 if (debugctlmsr)
28259 update_debugctlmsr(debugctlmsr);
28260
28261-#ifndef CONFIG_X86_64
28262+#ifdef CONFIG_X86_32
28263 /*
28264 * The sysexit path does not restore ds/es, so we must set them to
28265 * a reasonable value ourselves.
28266@@ -7293,8 +7320,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28267 * may be executed in interrupt context, which saves and restore segments
28268 * around it, nullifying its effect.
28269 */
28270- loadsegment(ds, __USER_DS);
28271- loadsegment(es, __USER_DS);
28272+ loadsegment(ds, __KERNEL_DS);
28273+ loadsegment(es, __KERNEL_DS);
28274+ loadsegment(ss, __KERNEL_DS);
28275+
28276+#ifdef CONFIG_PAX_KERNEXEC
28277+ loadsegment(fs, __KERNEL_PERCPU);
28278+#endif
28279+
28280+#ifdef CONFIG_PAX_MEMORY_UDEREF
28281+ __set_fs(current_thread_info()->addr_limit);
28282+#endif
28283+
28284 #endif
28285
28286 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
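
The first vmx.c hunk widens the helper mask from u32 to unsigned long, and it matters: several VMCS fields are 64-bit or natural-width, and `~mask` computed in 32 bits zero-extends, so vmcs_clear_bits() with a u32 mask silently wipes the upper half of the field. Two lines make the truncation visible:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned long long field  = 0xffffffff00000001ULL;
        uint32_t           mask32 = 0x1;
        unsigned long long mask64 = 0x1;

        printf("%#llx\n", field & ~mask32);   /* prints 0: upper half wiped */
        printf("%#llx\n", field & ~mask64);   /* 0xffffffff00000000 */
        return 0;
}
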
28287diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28288index 4e33b85..fa94855 100644
28289--- a/arch/x86/kvm/x86.c
28290+++ b/arch/x86/kvm/x86.c
28291@@ -1791,8 +1791,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28292 {
28293 struct kvm *kvm = vcpu->kvm;
28294 int lm = is_long_mode(vcpu);
28295- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28296- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28297+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28298+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28299 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28300 : kvm->arch.xen_hvm_config.blob_size_32;
28301 u32 page_num = data & ~PAGE_MASK;
28302@@ -2676,6 +2676,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28303 if (n < msr_list.nmsrs)
28304 goto out;
28305 r = -EFAULT;
28306+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28307+ goto out;
28308 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28309 num_msrs_to_save * sizeof(u32)))
28310 goto out;
28311@@ -5485,7 +5487,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28312 };
28313 #endif
28314
28315-int kvm_arch_init(void *opaque)
28316+int kvm_arch_init(const void *opaque)
28317 {
28318 int r;
28319 struct kvm_x86_ops *ops = opaque;
28320diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28321index bdf8532..f63c587 100644
28322--- a/arch/x86/lguest/boot.c
28323+++ b/arch/x86/lguest/boot.c
28324@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28325 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28326 * Launcher to reboot us.
28327 */
28328-static void lguest_restart(char *reason)
28329+static __noreturn void lguest_restart(char *reason)
28330 {
28331 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28332+ BUG();
28333 }
28334
28335 /*G:050
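
Marking lguest_restart() __noreturn lets callers and the compiler drop dead return paths, and the added BUG() backstops the promise in case the shutdown hypercall ever does return. The same pattern in a userspace analogue, with abort() standing in for both the hypercall and BUG():

#include <stdlib.h>

__attribute__((noreturn))
static void lguest_restart_model(const char *reason)
{
        (void)reason;     /* hcall(LHCALL_SHUTDOWN, ...) in the kernel */
        abort();          /* BUG() stand-in: must never fall through */
}

int main(void)
{
        lguest_restart_model("reboot");
}
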
28336diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28337index 00933d5..3a64af9 100644
28338--- a/arch/x86/lib/atomic64_386_32.S
28339+++ b/arch/x86/lib/atomic64_386_32.S
28340@@ -48,6 +48,10 @@ BEGIN(read)
28341 movl (v), %eax
28342 movl 4(v), %edx
28343 RET_ENDP
28344+BEGIN(read_unchecked)
28345+ movl (v), %eax
28346+ movl 4(v), %edx
28347+RET_ENDP
28348 #undef v
28349
28350 #define v %esi
28351@@ -55,6 +59,10 @@ BEGIN(set)
28352 movl %ebx, (v)
28353 movl %ecx, 4(v)
28354 RET_ENDP
28355+BEGIN(set_unchecked)
28356+ movl %ebx, (v)
28357+ movl %ecx, 4(v)
28358+RET_ENDP
28359 #undef v
28360
28361 #define v %esi
28362@@ -70,6 +78,20 @@ RET_ENDP
28363 BEGIN(add)
28364 addl %eax, (v)
28365 adcl %edx, 4(v)
28366+
28367+#ifdef CONFIG_PAX_REFCOUNT
28368+ jno 0f
28369+ subl %eax, (v)
28370+ sbbl %edx, 4(v)
28371+ int $4
28372+0:
28373+ _ASM_EXTABLE(0b, 0b)
28374+#endif
28375+
28376+RET_ENDP
28377+BEGIN(add_unchecked)
28378+ addl %eax, (v)
28379+ adcl %edx, 4(v)
28380 RET_ENDP
28381 #undef v
28382
28383@@ -77,6 +99,24 @@ RET_ENDP
28384 BEGIN(add_return)
28385 addl (v), %eax
28386 adcl 4(v), %edx
28387+
28388+#ifdef CONFIG_PAX_REFCOUNT
28389+ into
28390+1234:
28391+ _ASM_EXTABLE(1234b, 2f)
28392+#endif
28393+
28394+ movl %eax, (v)
28395+ movl %edx, 4(v)
28396+
28397+#ifdef CONFIG_PAX_REFCOUNT
28398+2:
28399+#endif
28400+
28401+RET_ENDP
28402+BEGIN(add_return_unchecked)
28403+ addl (v), %eax
28404+ adcl 4(v), %edx
28405 movl %eax, (v)
28406 movl %edx, 4(v)
28407 RET_ENDP
28408@@ -86,6 +126,20 @@ RET_ENDP
28409 BEGIN(sub)
28410 subl %eax, (v)
28411 sbbl %edx, 4(v)
28412+
28413+#ifdef CONFIG_PAX_REFCOUNT
28414+ jno 0f
28415+ addl %eax, (v)
28416+ adcl %edx, 4(v)
28417+ int $4
28418+0:
28419+ _ASM_EXTABLE(0b, 0b)
28420+#endif
28421+
28422+RET_ENDP
28423+BEGIN(sub_unchecked)
28424+ subl %eax, (v)
28425+ sbbl %edx, 4(v)
28426 RET_ENDP
28427 #undef v
28428
28429@@ -96,6 +150,27 @@ BEGIN(sub_return)
28430 sbbl $0, %edx
28431 addl (v), %eax
28432 adcl 4(v), %edx
28433+
28434+#ifdef CONFIG_PAX_REFCOUNT
28435+ into
28436+1234:
28437+ _ASM_EXTABLE(1234b, 2f)
28438+#endif
28439+
28440+ movl %eax, (v)
28441+ movl %edx, 4(v)
28442+
28443+#ifdef CONFIG_PAX_REFCOUNT
28444+2:
28445+#endif
28446+
28447+RET_ENDP
28448+BEGIN(sub_return_unchecked)
28449+ negl %edx
28450+ negl %eax
28451+ sbbl $0, %edx
28452+ addl (v), %eax
28453+ adcl 4(v), %edx
28454 movl %eax, (v)
28455 movl %edx, 4(v)
28456 RET_ENDP
28457@@ -105,6 +180,20 @@ RET_ENDP
28458 BEGIN(inc)
28459 addl $1, (v)
28460 adcl $0, 4(v)
28461+
28462+#ifdef CONFIG_PAX_REFCOUNT
28463+ jno 0f
28464+ subl $1, (v)
28465+ sbbl $0, 4(v)
28466+ int $4
28467+0:
28468+ _ASM_EXTABLE(0b, 0b)
28469+#endif
28470+
28471+RET_ENDP
28472+BEGIN(inc_unchecked)
28473+ addl $1, (v)
28474+ adcl $0, 4(v)
28475 RET_ENDP
28476 #undef v
28477
28478@@ -114,6 +203,26 @@ BEGIN(inc_return)
28479 movl 4(v), %edx
28480 addl $1, %eax
28481 adcl $0, %edx
28482+
28483+#ifdef CONFIG_PAX_REFCOUNT
28484+ into
28485+1234:
28486+ _ASM_EXTABLE(1234b, 2f)
28487+#endif
28488+
28489+ movl %eax, (v)
28490+ movl %edx, 4(v)
28491+
28492+#ifdef CONFIG_PAX_REFCOUNT
28493+2:
28494+#endif
28495+
28496+RET_ENDP
28497+BEGIN(inc_return_unchecked)
28498+ movl (v), %eax
28499+ movl 4(v), %edx
28500+ addl $1, %eax
28501+ adcl $0, %edx
28502 movl %eax, (v)
28503 movl %edx, 4(v)
28504 RET_ENDP
28505@@ -123,6 +232,20 @@ RET_ENDP
28506 BEGIN(dec)
28507 subl $1, (v)
28508 sbbl $0, 4(v)
28509+
28510+#ifdef CONFIG_PAX_REFCOUNT
28511+ jno 0f
28512+ addl $1, (v)
28513+ adcl $0, 4(v)
28514+ int $4
28515+0:
28516+ _ASM_EXTABLE(0b, 0b)
28517+#endif
28518+
28519+RET_ENDP
28520+BEGIN(dec_unchecked)
28521+ subl $1, (v)
28522+ sbbl $0, 4(v)
28523 RET_ENDP
28524 #undef v
28525
28526@@ -132,6 +255,26 @@ BEGIN(dec_return)
28527 movl 4(v), %edx
28528 subl $1, %eax
28529 sbbl $0, %edx
28530+
28531+#ifdef CONFIG_PAX_REFCOUNT
28532+ into
28533+1234:
28534+ _ASM_EXTABLE(1234b, 2f)
28535+#endif
28536+
28537+ movl %eax, (v)
28538+ movl %edx, 4(v)
28539+
28540+#ifdef CONFIG_PAX_REFCOUNT
28541+2:
28542+#endif
28543+
28544+RET_ENDP
28545+BEGIN(dec_return_unchecked)
28546+ movl (v), %eax
28547+ movl 4(v), %edx
28548+ subl $1, %eax
28549+ sbbl $0, %edx
28550 movl %eax, (v)
28551 movl %edx, 4(v)
28552 RET_ENDP
28553@@ -143,6 +286,13 @@ BEGIN(add_unless)
28554 adcl %edx, %edi
28555 addl (v), %eax
28556 adcl 4(v), %edx
28557+
28558+#ifdef CONFIG_PAX_REFCOUNT
28559+ into
28560+1234:
28561+ _ASM_EXTABLE(1234b, 2f)
28562+#endif
28563+
28564 cmpl %eax, %ecx
28565 je 3f
28566 1:
28567@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
28568 1:
28569 addl $1, %eax
28570 adcl $0, %edx
28571+
28572+#ifdef CONFIG_PAX_REFCOUNT
28573+ into
28574+1234:
28575+ _ASM_EXTABLE(1234b, 2f)
28576+#endif
28577+
28578 movl %eax, (v)
28579 movl %edx, 4(v)
28580 movl $1, %eax
28581@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
28582 movl 4(v), %edx
28583 subl $1, %eax
28584 sbbl $0, %edx
28585+
28586+#ifdef CONFIG_PAX_REFCOUNT
28587+ into
28588+1234:
28589+ _ASM_EXTABLE(1234b, 1f)
28590+#endif
28591+
28592 js 1f
28593 movl %eax, (v)
28594 movl %edx, 4(v)
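
The 386 fallback implementations gain the REFCOUNT instrumentation inline: after each add/adc (or sub/sbb) pair, `jno` skips ahead when no signed overflow occurred; otherwise the operation is undone and `int $4` raises the overflow trap that traps.c routes to pax_report_refcount_overflow(), with an exception-table entry covering the trapping instruction. The same semantics in portable C; not atomic, with abort() modelling the trap:

#include <stdint.h>
#include <stdlib.h>

/* illustration only: no lock, so this is not actually atomic */
static void atomic64_add_checked(int64_t delta, volatile int64_t *v)
{
        int64_t result;

        if (__builtin_add_overflow(*v, delta, &result))
                abort();        /* asm version: undo with subl/sbbl, then
                                   raise the trap with "int $4" */
        *v = result;
}
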
28595diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
28596index f5cc9eb..51fa319 100644
28597--- a/arch/x86/lib/atomic64_cx8_32.S
28598+++ b/arch/x86/lib/atomic64_cx8_32.S
28599@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
28600 CFI_STARTPROC
28601
28602 read64 %ecx
28603+ pax_force_retaddr
28604 ret
28605 CFI_ENDPROC
28606 ENDPROC(atomic64_read_cx8)
28607
28608+ENTRY(atomic64_read_unchecked_cx8)
28609+ CFI_STARTPROC
28610+
28611+ read64 %ecx
28612+ pax_force_retaddr
28613+ ret
28614+ CFI_ENDPROC
28615+ENDPROC(atomic64_read_unchecked_cx8)
28616+
28617 ENTRY(atomic64_set_cx8)
28618 CFI_STARTPROC
28619
28620@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
28621 cmpxchg8b (%esi)
28622 jne 1b
28623
28624+ pax_force_retaddr
28625 ret
28626 CFI_ENDPROC
28627 ENDPROC(atomic64_set_cx8)
28628
28629+ENTRY(atomic64_set_unchecked_cx8)
28630+ CFI_STARTPROC
28631+
28632+1:
28633+/* we don't need LOCK_PREFIX since aligned 64-bit writes
28634+ * are atomic on 586 and newer */
28635+ cmpxchg8b (%esi)
28636+ jne 1b
28637+
28638+ pax_force_retaddr
28639+ ret
28640+ CFI_ENDPROC
28641+ENDPROC(atomic64_set_unchecked_cx8)
28642+
28643 ENTRY(atomic64_xchg_cx8)
28644 CFI_STARTPROC
28645
28646@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
28647 cmpxchg8b (%esi)
28648 jne 1b
28649
28650+ pax_force_retaddr
28651 ret
28652 CFI_ENDPROC
28653 ENDPROC(atomic64_xchg_cx8)
28654
28655-.macro addsub_return func ins insc
28656-ENTRY(atomic64_\func\()_return_cx8)
28657+.macro addsub_return func ins insc unchecked=""
28658+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
28659 CFI_STARTPROC
28660 SAVE ebp
28661 SAVE ebx
28662@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
28663 movl %edx, %ecx
28664 \ins\()l %esi, %ebx
28665 \insc\()l %edi, %ecx
28666+
28667+.ifb \unchecked
28668+#ifdef CONFIG_PAX_REFCOUNT
28669+ into
28670+2:
28671+ _ASM_EXTABLE(2b, 3f)
28672+#endif
28673+.endif
28674+
28675 LOCK_PREFIX
28676 cmpxchg8b (%ebp)
28677 jne 1b
28678-
28679-10:
28680 movl %ebx, %eax
28681 movl %ecx, %edx
28682+
28683+.ifb \unchecked
28684+#ifdef CONFIG_PAX_REFCOUNT
28685+3:
28686+#endif
28687+.endif
28688+
28689 RESTORE edi
28690 RESTORE esi
28691 RESTORE ebx
28692 RESTORE ebp
28693+ pax_force_retaddr
28694 ret
28695 CFI_ENDPROC
28696-ENDPROC(atomic64_\func\()_return_cx8)
28697+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28698 .endm
28699
28700 addsub_return add add adc
28701 addsub_return sub sub sbb
28702+addsub_return add add adc _unchecked
28703+addsub_return sub sub sbb _unchecked
28704
28705-.macro incdec_return func ins insc
28706-ENTRY(atomic64_\func\()_return_cx8)
28707+.macro incdec_return func ins insc unchecked=""
28708+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
28709 CFI_STARTPROC
28710 SAVE ebx
28711
28712@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
28713 movl %edx, %ecx
28714 \ins\()l $1, %ebx
28715 \insc\()l $0, %ecx
28716+
28717+.ifb \unchecked
28718+#ifdef CONFIG_PAX_REFCOUNT
28719+ into
28720+2:
28721+ _ASM_EXTABLE(2b, 3f)
28722+#endif
28723+.endif
28724+
28725 LOCK_PREFIX
28726 cmpxchg8b (%esi)
28727 jne 1b
28728
28729-10:
28730 movl %ebx, %eax
28731 movl %ecx, %edx
28732+
28733+.ifb \unchecked
28734+#ifdef CONFIG_PAX_REFCOUNT
28735+3:
28736+#endif
28737+.endif
28738+
28739 RESTORE ebx
28740+ pax_force_retaddr
28741 ret
28742 CFI_ENDPROC
28743-ENDPROC(atomic64_\func\()_return_cx8)
28744+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28745 .endm
28746
28747 incdec_return inc add adc
28748 incdec_return dec sub sbb
28749+incdec_return inc add adc _unchecked
28750+incdec_return dec sub sbb _unchecked
28751
28752 ENTRY(atomic64_dec_if_positive_cx8)
28753 CFI_STARTPROC
28754@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
28755 movl %edx, %ecx
28756 subl $1, %ebx
28757 sbb $0, %ecx
28758+
28759+#ifdef CONFIG_PAX_REFCOUNT
28760+ into
28761+1234:
28762+ _ASM_EXTABLE(1234b, 2f)
28763+#endif
28764+
28765 js 2f
28766 LOCK_PREFIX
28767 cmpxchg8b (%esi)
28768@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
28769 movl %ebx, %eax
28770 movl %ecx, %edx
28771 RESTORE ebx
28772+ pax_force_retaddr
28773 ret
28774 CFI_ENDPROC
28775 ENDPROC(atomic64_dec_if_positive_cx8)
28776@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
28777 movl %edx, %ecx
28778 addl %ebp, %ebx
28779 adcl %edi, %ecx
28780+
28781+#ifdef CONFIG_PAX_REFCOUNT
28782+ into
28783+1234:
28784+ _ASM_EXTABLE(1234b, 3f)
28785+#endif
28786+
28787 LOCK_PREFIX
28788 cmpxchg8b (%esi)
28789 jne 1b
28790@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
28791 CFI_ADJUST_CFA_OFFSET -8
28792 RESTORE ebx
28793 RESTORE ebp
28794+ pax_force_retaddr
28795 ret
28796 4:
28797 cmpl %edx, 4(%esp)
28798@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
28799 xorl %ecx, %ecx
28800 addl $1, %ebx
28801 adcl %edx, %ecx
28802+
28803+#ifdef CONFIG_PAX_REFCOUNT
28804+ into
28805+1234:
28806+ _ASM_EXTABLE(1234b, 3f)
28807+#endif
28808+
28809 LOCK_PREFIX
28810 cmpxchg8b (%esi)
28811 jne 1b
28812@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
28813 movl $1, %eax
28814 3:
28815 RESTORE ebx
28816+ pax_force_retaddr
28817 ret
28818 CFI_ENDPROC
28819 ENDPROC(atomic64_inc_not_zero_cx8)
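A pattern worth calling out in the atomic64 hunks above: under CONFIG_PAX_REFCOUNT the add/adc (or add $1/adc $0) pair is followed by into, which raises the overflow exception when the signed 64-bit result wrapped, and the _ASM_EXTABLE entry routes the fixup past the cmpxchg8b, so the wrapped value is never stored back. A C sketch of the same semantics (the function name is illustrative; the real overflow handler also reports the event):

    #include <stdint.h>
    #include <stdbool.h>

    static int64_t checked_atomic64_add_return(int64_t delta, int64_t *v)
    {
        int64_t old, sum;

        do {
            old = __atomic_load_n(v, __ATOMIC_RELAXED);
            if (__builtin_add_overflow(old, delta, &sum))
                return old; /* overflow: abandon the update, as the
                             * extable fixup does by skipping cmpxchg8b */
        } while (!__atomic_compare_exchange_n(v, &old, sum, false,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
        return sum;
    }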
28820diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
28821index e78b8ee..7e173a8 100644
28822--- a/arch/x86/lib/checksum_32.S
28823+++ b/arch/x86/lib/checksum_32.S
28824@@ -29,7 +29,8 @@
28825 #include <asm/dwarf2.h>
28826 #include <asm/errno.h>
28827 #include <asm/asm.h>
28828-
28829+#include <asm/segment.h>
28830+
28831 /*
28832 * computes a partial checksum, e.g. for TCP/UDP fragments
28833 */
28834@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
28835
28836 #define ARGBASE 16
28837 #define FP 12
28838-
28839-ENTRY(csum_partial_copy_generic)
28840+
28841+ENTRY(csum_partial_copy_generic_to_user)
28842 CFI_STARTPROC
28843+
28844+#ifdef CONFIG_PAX_MEMORY_UDEREF
28845+ pushl_cfi %gs
28846+ popl_cfi %es
28847+ jmp csum_partial_copy_generic
28848+#endif
28849+
28850+ENTRY(csum_partial_copy_generic_from_user)
28851+
28852+#ifdef CONFIG_PAX_MEMORY_UDEREF
28853+ pushl_cfi %gs
28854+ popl_cfi %ds
28855+#endif
28856+
28857+ENTRY(csum_partial_copy_generic)
28858 subl $4,%esp
28859 CFI_ADJUST_CFA_OFFSET 4
28860 pushl_cfi %edi
28861@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
28862 jmp 4f
28863 SRC(1: movw (%esi), %bx )
28864 addl $2, %esi
28865-DST( movw %bx, (%edi) )
28866+DST( movw %bx, %es:(%edi) )
28867 addl $2, %edi
28868 addw %bx, %ax
28869 adcl $0, %eax
28870@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
28871 SRC(1: movl (%esi), %ebx )
28872 SRC( movl 4(%esi), %edx )
28873 adcl %ebx, %eax
28874-DST( movl %ebx, (%edi) )
28875+DST( movl %ebx, %es:(%edi) )
28876 adcl %edx, %eax
28877-DST( movl %edx, 4(%edi) )
28878+DST( movl %edx, %es:4(%edi) )
28879
28880 SRC( movl 8(%esi), %ebx )
28881 SRC( movl 12(%esi), %edx )
28882 adcl %ebx, %eax
28883-DST( movl %ebx, 8(%edi) )
28884+DST( movl %ebx, %es:8(%edi) )
28885 adcl %edx, %eax
28886-DST( movl %edx, 12(%edi) )
28887+DST( movl %edx, %es:12(%edi) )
28888
28889 SRC( movl 16(%esi), %ebx )
28890 SRC( movl 20(%esi), %edx )
28891 adcl %ebx, %eax
28892-DST( movl %ebx, 16(%edi) )
28893+DST( movl %ebx, %es:16(%edi) )
28894 adcl %edx, %eax
28895-DST( movl %edx, 20(%edi) )
28896+DST( movl %edx, %es:20(%edi) )
28897
28898 SRC( movl 24(%esi), %ebx )
28899 SRC( movl 28(%esi), %edx )
28900 adcl %ebx, %eax
28901-DST( movl %ebx, 24(%edi) )
28902+DST( movl %ebx, %es:24(%edi) )
28903 adcl %edx, %eax
28904-DST( movl %edx, 28(%edi) )
28905+DST( movl %edx, %es:28(%edi) )
28906
28907 lea 32(%esi), %esi
28908 lea 32(%edi), %edi
28909@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
28910 shrl $2, %edx # This clears CF
28911 SRC(3: movl (%esi), %ebx )
28912 adcl %ebx, %eax
28913-DST( movl %ebx, (%edi) )
28914+DST( movl %ebx, %es:(%edi) )
28915 lea 4(%esi), %esi
28916 lea 4(%edi), %edi
28917 dec %edx
28918@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
28919 jb 5f
28920 SRC( movw (%esi), %cx )
28921 leal 2(%esi), %esi
28922-DST( movw %cx, (%edi) )
28923+DST( movw %cx, %es:(%edi) )
28924 leal 2(%edi), %edi
28925 je 6f
28926 shll $16,%ecx
28927 SRC(5: movb (%esi), %cl )
28928-DST( movb %cl, (%edi) )
28929+DST( movb %cl, %es:(%edi) )
28930 6: addl %ecx, %eax
28931 adcl $0, %eax
28932 7:
28933@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
28934
28935 6001:
28936 movl ARGBASE+20(%esp), %ebx # src_err_ptr
28937- movl $-EFAULT, (%ebx)
28938+ movl $-EFAULT, %ss:(%ebx)
28939
28940 # zero the complete destination - computing the rest
28941 # is too much work
28942@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
28943
28944 6002:
28945 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28946- movl $-EFAULT,(%ebx)
28947+ movl $-EFAULT,%ss:(%ebx)
28948 jmp 5000b
28949
28950 .previous
28951
28952+ pushl_cfi %ss
28953+ popl_cfi %ds
28954+ pushl_cfi %ss
28955+ popl_cfi %es
28956 popl_cfi %ebx
28957 CFI_RESTORE ebx
28958 popl_cfi %esi
28959@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
28960 popl_cfi %ecx # equivalent to addl $4,%esp
28961 ret
28962 CFI_ENDPROC
28963-ENDPROC(csum_partial_copy_generic)
28964+ENDPROC(csum_partial_copy_generic_to_user)
28965
28966 #else
28967
28968 /* Version for PentiumII/PPro */
28969
28970 #define ROUND1(x) \
28971+ nop; nop; nop; \
28972 SRC(movl x(%esi), %ebx ) ; \
28973 addl %ebx, %eax ; \
28974- DST(movl %ebx, x(%edi) ) ;
28975+ DST(movl %ebx, %es:x(%edi)) ;
28976
28977 #define ROUND(x) \
28978+ nop; nop; nop; \
28979 SRC(movl x(%esi), %ebx ) ; \
28980 adcl %ebx, %eax ; \
28981- DST(movl %ebx, x(%edi) ) ;
28982+ DST(movl %ebx, %es:x(%edi)) ;
28983
28984 #define ARGBASE 12
28985-
28986-ENTRY(csum_partial_copy_generic)
28987+
28988+ENTRY(csum_partial_copy_generic_to_user)
28989 CFI_STARTPROC
28990+
28991+#ifdef CONFIG_PAX_MEMORY_UDEREF
28992+ pushl_cfi %gs
28993+ popl_cfi %es
28994+ jmp csum_partial_copy_generic
28995+#endif
28996+
28997+ENTRY(csum_partial_copy_generic_from_user)
28998+
28999+#ifdef CONFIG_PAX_MEMORY_UDEREF
29000+ pushl_cfi %gs
29001+ popl_cfi %ds
29002+#endif
29003+
29004+ENTRY(csum_partial_copy_generic)
29005 pushl_cfi %ebx
29006 CFI_REL_OFFSET ebx, 0
29007 pushl_cfi %edi
29008@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29009 subl %ebx, %edi
29010 lea -1(%esi),%edx
29011 andl $-32,%edx
29012- lea 3f(%ebx,%ebx), %ebx
29013+ lea 3f(%ebx,%ebx,2), %ebx
29014 testl %esi, %esi
29015 jmp *%ebx
29016 1: addl $64,%esi
29017@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29018 jb 5f
29019 SRC( movw (%esi), %dx )
29020 leal 2(%esi), %esi
29021-DST( movw %dx, (%edi) )
29022+DST( movw %dx, %es:(%edi) )
29023 leal 2(%edi), %edi
29024 je 6f
29025 shll $16,%edx
29026 5:
29027 SRC( movb (%esi), %dl )
29028-DST( movb %dl, (%edi) )
29029+DST( movb %dl, %es:(%edi) )
29030 6: addl %edx, %eax
29031 adcl $0, %eax
29032 7:
29033 .section .fixup, "ax"
29034 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29035- movl $-EFAULT, (%ebx)
29036+ movl $-EFAULT, %ss:(%ebx)
29037 # zero the complete destination (computing the rest is too much work)
29038 movl ARGBASE+8(%esp),%edi # dst
29039 movl ARGBASE+12(%esp),%ecx # len
29040@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29041 rep; stosb
29042 jmp 7b
29043 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29044- movl $-EFAULT, (%ebx)
29045+ movl $-EFAULT, %ss:(%ebx)
29046 jmp 7b
29047 .previous
29048
29049+#ifdef CONFIG_PAX_MEMORY_UDEREF
29050+ pushl_cfi %ss
29051+ popl_cfi %ds
29052+ pushl_cfi %ss
29053+ popl_cfi %es
29054+#endif
29055+
29056 popl_cfi %esi
29057 CFI_RESTORE esi
29058 popl_cfi %edi
29059@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29060 CFI_RESTORE ebx
29061 ret
29062 CFI_ENDPROC
29063-ENDPROC(csum_partial_copy_generic)
29064+ENDPROC(csum_partial_copy_generic_to_user)
29065
29066 #undef ROUND
29067 #undef ROUND1
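In the checksum_32.S hunks above, UDEREF gets dedicated entry points: _to_user loads the user segment from %gs into %es (so destination stores grow %es: overrides), _from_user loads it into %ds, and both fall into the common body; the exit and fixup paths reload %ds/%es from %ss. On the PentiumII path the unrolled loop is entered by a computed jump, which is why each ROUND is padded with three nops: the %es: prefix grows a ROUND from 8 to 9 code bytes per 4 data bytes, and the padding brings it to 12, matching the lea scale change from 3f(%ebx,%ebx) to 3f(%ebx,%ebx,2). The C analogue of jumping into the middle of an unrolled loop is Duff's device:

    /* Sketch: entering an unrolled loop at a computed offset, the C way. */
    static unsigned int sum_bytes_unrolled(const unsigned char *p, int n)
    {
        unsigned int sum = 0;
        int i = 0;

        switch (n & 3) {            /* jump into the unrolled body */
        case 0: while (i < n) { sum += p[i++];
        case 3:                 sum += p[i++];
        case 2:                 sum += p[i++];
        case 1:                 sum += p[i++];
                }
        }
        return sum;
    }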
29068diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29069index f2145cf..cea889d 100644
29070--- a/arch/x86/lib/clear_page_64.S
29071+++ b/arch/x86/lib/clear_page_64.S
29072@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29073 movl $4096/8,%ecx
29074 xorl %eax,%eax
29075 rep stosq
29076+ pax_force_retaddr
29077 ret
29078 CFI_ENDPROC
29079 ENDPROC(clear_page_c)
29080@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29081 movl $4096,%ecx
29082 xorl %eax,%eax
29083 rep stosb
29084+ pax_force_retaddr
29085 ret
29086 CFI_ENDPROC
29087 ENDPROC(clear_page_c_e)
29088@@ -43,6 +45,7 @@ ENTRY(clear_page)
29089 leaq 64(%rdi),%rdi
29090 jnz .Lloop
29091 nop
29092+ pax_force_retaddr
29093 ret
29094 CFI_ENDPROC
29095 .Lclear_page_end:
29096@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29097
29098 #include <asm/cpufeature.h>
29099
29100- .section .altinstr_replacement,"ax"
29101+ .section .altinstr_replacement,"a"
29102 1: .byte 0xeb /* jmp <disp8> */
29103 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29104 2: .byte 0xeb /* jmp <disp8> */
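Another change repeated across clear_page_64.S, copy_page_64.S, memcpy_64.S, memmove_64.S, and memset_64.S: .altinstr_replacement loses its x flag ("ax" becomes "a"). The replacement bytes are only memcpy()'d over the original instructions by the alternatives framework at boot; they never execute where they sit, so under KERNEXEC the section need not be mapped executable. A compilable sketch of parking bytes in an allocatable, non-executable section (the section name here is made up):

    /* Top-level GNU C asm: the bytes land in an allocatable ("a") but not
     * executable section, mirroring the flag change above. */
    asm(".pushsection .demo_replacement, \"a\", @progbits\n\t"
        ".byte 0x90, 0x90\n\t"      /* payload to be patched in elsewhere */
        ".popsection");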
29105diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29106index 1e572c5..2a162cd 100644
29107--- a/arch/x86/lib/cmpxchg16b_emu.S
29108+++ b/arch/x86/lib/cmpxchg16b_emu.S
29109@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
29110
29111 popf
29112 mov $1, %al
29113+ pax_force_retaddr
29114 ret
29115
29116 not_same:
29117 popf
29118 xor %al,%al
29119+ pax_force_retaddr
29120 ret
29121
29122 CFI_ENDPROC
29123diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29124index 176cca6..e0d658e 100644
29125--- a/arch/x86/lib/copy_page_64.S
29126+++ b/arch/x86/lib/copy_page_64.S
29127@@ -9,6 +9,7 @@ copy_page_rep:
29128 CFI_STARTPROC
29129 movl $4096/8, %ecx
29130 rep movsq
29131+ pax_force_retaddr
29132 ret
29133 CFI_ENDPROC
29134 ENDPROC(copy_page_rep)
29135@@ -24,8 +25,8 @@ ENTRY(copy_page)
29136 CFI_ADJUST_CFA_OFFSET 2*8
29137 movq %rbx, (%rsp)
29138 CFI_REL_OFFSET rbx, 0
29139- movq %r12, 1*8(%rsp)
29140- CFI_REL_OFFSET r12, 1*8
29141+ movq %r13, 1*8(%rsp)
29142+ CFI_REL_OFFSET r13, 1*8
29143
29144 movl $(4096/64)-5, %ecx
29145 .p2align 4
29146@@ -38,7 +39,7 @@ ENTRY(copy_page)
29147 movq 0x8*4(%rsi), %r9
29148 movq 0x8*5(%rsi), %r10
29149 movq 0x8*6(%rsi), %r11
29150- movq 0x8*7(%rsi), %r12
29151+ movq 0x8*7(%rsi), %r13
29152
29153 prefetcht0 5*64(%rsi)
29154
29155@@ -49,7 +50,7 @@ ENTRY(copy_page)
29156 movq %r9, 0x8*4(%rdi)
29157 movq %r10, 0x8*5(%rdi)
29158 movq %r11, 0x8*6(%rdi)
29159- movq %r12, 0x8*7(%rdi)
29160+ movq %r13, 0x8*7(%rdi)
29161
29162 leaq 64 (%rsi), %rsi
29163 leaq 64 (%rdi), %rdi
29164@@ -68,7 +69,7 @@ ENTRY(copy_page)
29165 movq 0x8*4(%rsi), %r9
29166 movq 0x8*5(%rsi), %r10
29167 movq 0x8*6(%rsi), %r11
29168- movq 0x8*7(%rsi), %r12
29169+ movq 0x8*7(%rsi), %r13
29170
29171 movq %rax, 0x8*0(%rdi)
29172 movq %rbx, 0x8*1(%rdi)
29173@@ -77,7 +78,7 @@ ENTRY(copy_page)
29174 movq %r9, 0x8*4(%rdi)
29175 movq %r10, 0x8*5(%rdi)
29176 movq %r11, 0x8*6(%rdi)
29177- movq %r12, 0x8*7(%rdi)
29178+ movq %r13, 0x8*7(%rdi)
29179
29180 leaq 64(%rdi), %rdi
29181 leaq 64(%rsi), %rsi
29182@@ -85,10 +86,11 @@ ENTRY(copy_page)
29183
29184 movq (%rsp), %rbx
29185 CFI_RESTORE rbx
29186- movq 1*8(%rsp), %r12
29187- CFI_RESTORE r12
29188+ movq 1*8(%rsp), %r13
29189+ CFI_RESTORE r13
29190 addq $2*8, %rsp
29191 CFI_ADJUST_CFA_OFFSET -2*8
29192+ pax_force_retaddr
29193 ret
29194 .Lcopy_page_end:
29195 CFI_ENDPROC
29196@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29197
29198 #include <asm/cpufeature.h>
29199
29200- .section .altinstr_replacement,"ax"
29201+ .section .altinstr_replacement,"a"
29202 1: .byte 0xeb /* jmp <disp8> */
29203 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29204 2:
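copy_page's scratch register moves from %r12 to %r13 (and csum-copy_64.S below uses %r15 for the same reason): the PaX/grsecurity build presumably reserves %r12 on amd64 for the KERNEXEC gcc plugin's register-based method, so hand-written assembly has to stay off it. In C, pinning a register out of the allocator's reach looks like this (a sketch; the real reservation is toolchain-wide):

    /* GCC global register variable: %r12 is never allocated for anything
     * else in this translation unit.  Illustrative stand-in for what the
     * plugin does across the whole kernel. */
    register unsigned long kernexec_reserved asm("r12");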
29205diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29206index a30ca15..407412b 100644
29207--- a/arch/x86/lib/copy_user_64.S
29208+++ b/arch/x86/lib/copy_user_64.S
29209@@ -18,31 +18,7 @@
29210 #include <asm/alternative-asm.h>
29211 #include <asm/asm.h>
29212 #include <asm/smap.h>
29213-
29214-/*
29215- * By placing feature2 after feature1 in altinstructions section, we logically
29216- * implement:
29217- * If CPU has feature2, jmp to alt2 is used
29218- * else if CPU has feature1, jmp to alt1 is used
29219- * else jmp to orig is used.
29220- */
29221- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29222-0:
29223- .byte 0xe9 /* 32bit jump */
29224- .long \orig-1f /* by default jump to orig */
29225-1:
29226- .section .altinstr_replacement,"ax"
29227-2: .byte 0xe9 /* near jump with 32bit immediate */
29228- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29229-3: .byte 0xe9 /* near jump with 32bit immediate */
29230- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29231- .previous
29232-
29233- .section .altinstructions,"a"
29234- altinstruction_entry 0b,2b,\feature1,5,5
29235- altinstruction_entry 0b,3b,\feature2,5,5
29236- .previous
29237- .endm
29238+#include <asm/pgtable.h>
29239
29240 .macro ALIGN_DESTINATION
29241 #ifdef FIX_ALIGNMENT
29242@@ -70,52 +46,6 @@
29243 #endif
29244 .endm
29245
29246-/* Standard copy_to_user with segment limit checking */
29247-ENTRY(_copy_to_user)
29248- CFI_STARTPROC
29249- GET_THREAD_INFO(%rax)
29250- movq %rdi,%rcx
29251- addq %rdx,%rcx
29252- jc bad_to_user
29253- cmpq TI_addr_limit(%rax),%rcx
29254- ja bad_to_user
29255- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29256- copy_user_generic_unrolled,copy_user_generic_string, \
29257- copy_user_enhanced_fast_string
29258- CFI_ENDPROC
29259-ENDPROC(_copy_to_user)
29260-
29261-/* Standard copy_from_user with segment limit checking */
29262-ENTRY(_copy_from_user)
29263- CFI_STARTPROC
29264- GET_THREAD_INFO(%rax)
29265- movq %rsi,%rcx
29266- addq %rdx,%rcx
29267- jc bad_from_user
29268- cmpq TI_addr_limit(%rax),%rcx
29269- ja bad_from_user
29270- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29271- copy_user_generic_unrolled,copy_user_generic_string, \
29272- copy_user_enhanced_fast_string
29273- CFI_ENDPROC
29274-ENDPROC(_copy_from_user)
29275-
29276- .section .fixup,"ax"
29277- /* must zero dest */
29278-ENTRY(bad_from_user)
29279-bad_from_user:
29280- CFI_STARTPROC
29281- movl %edx,%ecx
29282- xorl %eax,%eax
29283- rep
29284- stosb
29285-bad_to_user:
29286- movl %edx,%eax
29287- ret
29288- CFI_ENDPROC
29289-ENDPROC(bad_from_user)
29290- .previous
29291-
29292 /*
29293 * copy_user_generic_unrolled - memory copy with exception handling.
29294 * This version is for CPUs like P4 that don't have efficient micro
29295@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29296 */
29297 ENTRY(copy_user_generic_unrolled)
29298 CFI_STARTPROC
29299+ ASM_PAX_OPEN_USERLAND
29300 ASM_STAC
29301 cmpl $8,%edx
29302 jb 20f /* less than 8 bytes, go to byte copy loop */
29303@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29304 jnz 21b
29305 23: xor %eax,%eax
29306 ASM_CLAC
29307+ ASM_PAX_CLOSE_USERLAND
29308+ pax_force_retaddr
29309 ret
29310
29311 .section .fixup,"ax"
29312@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29313 */
29314 ENTRY(copy_user_generic_string)
29315 CFI_STARTPROC
29316+ ASM_PAX_OPEN_USERLAND
29317 ASM_STAC
29318 andl %edx,%edx
29319 jz 4f
29320@@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
29321 movsb
29322 4: xorl %eax,%eax
29323 ASM_CLAC
29324+ ASM_PAX_CLOSE_USERLAND
29325+ pax_force_retaddr
29326 ret
29327
29328 .section .fixup,"ax"
29329@@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
29330 */
29331 ENTRY(copy_user_enhanced_fast_string)
29332 CFI_STARTPROC
29333+ ASM_PAX_OPEN_USERLAND
29334 ASM_STAC
29335 andl %edx,%edx
29336 jz 2f
29337@@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
29338 movsb
29339 2: xorl %eax,%eax
29340 ASM_CLAC
29341+ ASM_PAX_CLOSE_USERLAND
29342+ pax_force_retaddr
29343 ret
29344
29345 .section .fixup,"ax"
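The copy_user_64.S hunk deletes the ALTERNATIVE_JUMP macro together with the _copy_to_user/_copy_from_user stubs and their bad_*_user fixups; the stubs' job moves to C wrappers elsewhere in the patch (outside this section), where the PaX userland open/close hooks are easier to add, while the remaining asm bodies gain ASM_PAX_OPEN_USERLAND/ASM_PAX_CLOSE_USERLAND around the actual copy. A sketch of the wrapper shape, with assumed helper names standing in for GET_THREAD_INFO/TI_addr_limit:

    #include <stddef.h>

    extern unsigned long current_addr_limit(void);   /* assumed helper */
    extern void pax_open_userland(void);
    extern void pax_close_userland(void);
    extern unsigned long copy_user_generic(void *to, const void *from,
                                           unsigned long n);

    static unsigned long copy_to_user_sketch(void *to, const void *from,
                                             unsigned long n)
    {
        unsigned long end = (unsigned long)to + n;

        if (end < (unsigned long)to || end > current_addr_limit())
            return n;            /* bad range: report nothing copied */

        pax_open_userland();     /* ASM_PAX_OPEN_USERLAND in the asm */
        n = copy_user_generic(to, from, n);
        pax_close_userland();    /* ASM_PAX_CLOSE_USERLAND */
        return n;
    }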
29346diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
29347index 6a4f43c..c70fb52 100644
29348--- a/arch/x86/lib/copy_user_nocache_64.S
29349+++ b/arch/x86/lib/copy_user_nocache_64.S
29350@@ -8,6 +8,7 @@
29351
29352 #include <linux/linkage.h>
29353 #include <asm/dwarf2.h>
29354+#include <asm/alternative-asm.h>
29355
29356 #define FIX_ALIGNMENT 1
29357
29358@@ -16,6 +17,7 @@
29359 #include <asm/thread_info.h>
29360 #include <asm/asm.h>
29361 #include <asm/smap.h>
29362+#include <asm/pgtable.h>
29363
29364 .macro ALIGN_DESTINATION
29365 #ifdef FIX_ALIGNMENT
29366@@ -49,6 +51,16 @@
29367 */
29368 ENTRY(__copy_user_nocache)
29369 CFI_STARTPROC
29370+
29371+#ifdef CONFIG_PAX_MEMORY_UDEREF
29372+ mov pax_user_shadow_base,%rcx
29373+ cmp %rcx,%rsi
29374+ jae 1f
29375+ add %rcx,%rsi
29376+1:
29377+#endif
29378+
29379+ ASM_PAX_OPEN_USERLAND
29380 ASM_STAC
29381 cmpl $8,%edx
29382 jb 20f /* less than 8 bytes, go to byte copy loop */
29383@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
29384 jnz 21b
29385 23: xorl %eax,%eax
29386 ASM_CLAC
29387+ ASM_PAX_CLOSE_USERLAND
29388 sfence
29389+ pax_force_retaddr
29390 ret
29391
29392 .section .fixup,"ax"
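__copy_user_nocache's new UDEREF prologue rebases a userland source pointer into the kernel's shadow mapping of userland: if %rsi is below pax_user_shadow_base, the base is added to it. The csum-wrappers_64.c hunk further down does the same job in C through ____m(); a standalone sketch of that rebase:

    extern unsigned long pax_user_shadow_base;  /* provided by the patch */

    /* Sketch of the ____m() rebase used in csum-wrappers_64.c below. */
    static inline const void *shadow_rebase(const void *p)
    {
        unsigned long addr = (unsigned long)p;

        if (addr < pax_user_shadow_base)    /* userland pointer */
            addr += pax_user_shadow_base;   /* shift into the shadow map */
        return (const void *)addr;
    }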
29393diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
29394index 2419d5f..fe52d0e 100644
29395--- a/arch/x86/lib/csum-copy_64.S
29396+++ b/arch/x86/lib/csum-copy_64.S
29397@@ -9,6 +9,7 @@
29398 #include <asm/dwarf2.h>
29399 #include <asm/errno.h>
29400 #include <asm/asm.h>
29401+#include <asm/alternative-asm.h>
29402
29403 /*
29404 * Checksum copy with exception handling.
29405@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
29406 CFI_ADJUST_CFA_OFFSET 7*8
29407 movq %rbx, 2*8(%rsp)
29408 CFI_REL_OFFSET rbx, 2*8
29409- movq %r12, 3*8(%rsp)
29410- CFI_REL_OFFSET r12, 3*8
29411+ movq %r15, 3*8(%rsp)
29412+ CFI_REL_OFFSET r15, 3*8
29413 movq %r14, 4*8(%rsp)
29414 CFI_REL_OFFSET r14, 4*8
29415 movq %r13, 5*8(%rsp)
29416@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
29417 movl %edx, %ecx
29418
29419 xorl %r9d, %r9d
29420- movq %rcx, %r12
29421+ movq %rcx, %r15
29422
29423- shrq $6, %r12
29424+ shrq $6, %r15
29425 jz .Lhandle_tail /* < 64 */
29426
29427 clc
29428
29429 /* main loop. clear in 64 byte blocks */
29430 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
29431- /* r11: temp3, rdx: temp4, r12 loopcnt */
29432+ /* r11: temp3, rdx: temp4, r15 loopcnt */
29433 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
29434 .p2align 4
29435 .Lloop:
29436@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
29437 adcq %r14, %rax
29438 adcq %r13, %rax
29439
29440- decl %r12d
29441+ decl %r15d
29442
29443 dest
29444 movq %rbx, (%rsi)
29445@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
29446 .Lende:
29447 movq 2*8(%rsp), %rbx
29448 CFI_RESTORE rbx
29449- movq 3*8(%rsp), %r12
29450- CFI_RESTORE r12
29451+ movq 3*8(%rsp), %r15
29452+ CFI_RESTORE r15
29453 movq 4*8(%rsp), %r14
29454 CFI_RESTORE r14
29455 movq 5*8(%rsp), %r13
29456@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
29457 CFI_RESTORE rbp
29458 addq $7*8, %rsp
29459 CFI_ADJUST_CFA_OFFSET -7*8
29460+ pax_force_retaddr
29461 ret
29462 CFI_RESTORE_STATE
29463
29464diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
29465index 7609e0e..b449b98 100644
29466--- a/arch/x86/lib/csum-wrappers_64.c
29467+++ b/arch/x86/lib/csum-wrappers_64.c
29468@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
29469 len -= 2;
29470 }
29471 }
29472+ pax_open_userland();
29473 stac();
29474- isum = csum_partial_copy_generic((__force const void *)src,
29475+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
29476 dst, len, isum, errp, NULL);
29477 clac();
29478+ pax_close_userland();
29479 if (unlikely(*errp))
29480 goto out_err;
29481
29482@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
29483 }
29484
29485 *errp = 0;
29486+ pax_open_userland();
29487 stac();
29488- ret = csum_partial_copy_generic(src, (void __force *)dst,
29489+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
29490 len, isum, NULL, errp);
29491 clac();
29492+ pax_close_userland();
29493 return ret;
29494 }
29495 EXPORT_SYMBOL(csum_partial_copy_to_user);
29496diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
29497index a451235..1daa956 100644
29498--- a/arch/x86/lib/getuser.S
29499+++ b/arch/x86/lib/getuser.S
29500@@ -33,17 +33,40 @@
29501 #include <asm/thread_info.h>
29502 #include <asm/asm.h>
29503 #include <asm/smap.h>
29504+#include <asm/segment.h>
29505+#include <asm/pgtable.h>
29506+#include <asm/alternative-asm.h>
29507+
29508+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29509+#define __copyuser_seg gs;
29510+#else
29511+#define __copyuser_seg
29512+#endif
29513
29514 .text
29515 ENTRY(__get_user_1)
29516 CFI_STARTPROC
29517+
29518+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29519 GET_THREAD_INFO(%_ASM_DX)
29520 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29521 jae bad_get_user
29522 ASM_STAC
29523-1: movzbl (%_ASM_AX),%edx
29524+
29525+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29526+ mov pax_user_shadow_base,%_ASM_DX
29527+ cmp %_ASM_DX,%_ASM_AX
29528+ jae 1234f
29529+ add %_ASM_DX,%_ASM_AX
29530+1234:
29531+#endif
29532+
29533+#endif
29534+
29535+1: __copyuser_seg movzbl (%_ASM_AX),%edx
29536 xor %eax,%eax
29537 ASM_CLAC
29538+ pax_force_retaddr
29539 ret
29540 CFI_ENDPROC
29541 ENDPROC(__get_user_1)
29542@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
29543 ENTRY(__get_user_2)
29544 CFI_STARTPROC
29545 add $1,%_ASM_AX
29546+
29547+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29548 jc bad_get_user
29549 GET_THREAD_INFO(%_ASM_DX)
29550 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29551 jae bad_get_user
29552 ASM_STAC
29553-2: movzwl -1(%_ASM_AX),%edx
29554+
29555+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29556+ mov pax_user_shadow_base,%_ASM_DX
29557+ cmp %_ASM_DX,%_ASM_AX
29558+ jae 1234f
29559+ add %_ASM_DX,%_ASM_AX
29560+1234:
29561+#endif
29562+
29563+#endif
29564+
29565+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
29566 xor %eax,%eax
29567 ASM_CLAC
29568+ pax_force_retaddr
29569 ret
29570 CFI_ENDPROC
29571 ENDPROC(__get_user_2)
29572@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
29573 ENTRY(__get_user_4)
29574 CFI_STARTPROC
29575 add $3,%_ASM_AX
29576+
29577+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29578 jc bad_get_user
29579 GET_THREAD_INFO(%_ASM_DX)
29580 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29581 jae bad_get_user
29582 ASM_STAC
29583-3: movl -3(%_ASM_AX),%edx
29584+
29585+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29586+ mov pax_user_shadow_base,%_ASM_DX
29587+ cmp %_ASM_DX,%_ASM_AX
29588+ jae 1234f
29589+ add %_ASM_DX,%_ASM_AX
29590+1234:
29591+#endif
29592+
29593+#endif
29594+
29595+3: __copyuser_seg movl -3(%_ASM_AX),%edx
29596 xor %eax,%eax
29597 ASM_CLAC
29598+ pax_force_retaddr
29599 ret
29600 CFI_ENDPROC
29601 ENDPROC(__get_user_4)
29602@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
29603 GET_THREAD_INFO(%_ASM_DX)
29604 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29605 jae bad_get_user
29606+
29607+#ifdef CONFIG_PAX_MEMORY_UDEREF
29608+ mov pax_user_shadow_base,%_ASM_DX
29609+ cmp %_ASM_DX,%_ASM_AX
29610+ jae 1234f
29611+ add %_ASM_DX,%_ASM_AX
29612+1234:
29613+#endif
29614+
29615 ASM_STAC
29616 4: movq -7(%_ASM_AX),%rdx
29617 xor %eax,%eax
29618 ASM_CLAC
29619+ pax_force_retaddr
29620 ret
29621 #else
29622 add $7,%_ASM_AX
29623@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
29624 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
29625 jae bad_get_user_8
29626 ASM_STAC
29627-4: movl -7(%_ASM_AX),%edx
29628-5: movl -3(%_ASM_AX),%ecx
29629+4: __copyuser_seg movl -7(%_ASM_AX),%edx
29630+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
29631 xor %eax,%eax
29632 ASM_CLAC
29633+ pax_force_retaddr
29634 ret
29635 #endif
29636 CFI_ENDPROC
29637@@ -113,6 +175,7 @@ bad_get_user:
29638 xor %edx,%edx
29639 mov $(-EFAULT),%_ASM_AX
29640 ASM_CLAC
29641+ pax_force_retaddr
29642 ret
29643 CFI_ENDPROC
29644 END(bad_get_user)
29645@@ -124,6 +187,7 @@ bad_get_user_8:
29646 xor %ecx,%ecx
29647 mov $(-EFAULT),%_ASM_AX
29648 ASM_CLAC
29649+ pax_force_retaddr
29650 ret
29651 CFI_ENDPROC
29652 END(bad_get_user_8)
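getuser.S shows the i386 side of UDEREF: user loads are issued through %gs (the __copyuser_seg prefix), which holds the userland data segment, so a flat kernel %ds dereference of a user pointer no longer works implicitly; on amd64 the shadow-base rebase is used instead, and the addr_limit check is compiled out where the segment or rebase already enforces the boundary. A 32-bit inline-asm sketch of a %gs-relative load (illustrative helper name):

    static inline unsigned int gs_load_byte(const void *uaddr)
    {
        unsigned int v;

        /* The %gs: override is what __copyuser_seg expands to above. */
        asm volatile("movzbl %%gs:(%1), %0" : "=r" (v) : "r" (uaddr));
        return v;
    }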
29653diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
29654index 54fcffe..7be149e 100644
29655--- a/arch/x86/lib/insn.c
29656+++ b/arch/x86/lib/insn.c
29657@@ -20,8 +20,10 @@
29658
29659 #ifdef __KERNEL__
29660 #include <linux/string.h>
29661+#include <asm/pgtable_types.h>
29662 #else
29663 #include <string.h>
29664+#define ktla_ktva(addr) addr
29665 #endif
29666 #include <asm/inat.h>
29667 #include <asm/insn.h>
29668@@ -53,8 +55,8 @@
29669 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
29670 {
29671 memset(insn, 0, sizeof(*insn));
29672- insn->kaddr = kaddr;
29673- insn->next_byte = kaddr;
29674+ insn->kaddr = ktla_ktva(kaddr);
29675+ insn->next_byte = ktla_ktva(kaddr);
29676 insn->x86_64 = x86_64 ? 1 : 0;
29677 insn->opnd_bytes = 4;
29678 if (x86_64)
29679diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
29680index 05a95e7..326f2fa 100644
29681--- a/arch/x86/lib/iomap_copy_64.S
29682+++ b/arch/x86/lib/iomap_copy_64.S
29683@@ -17,6 +17,7 @@
29684
29685 #include <linux/linkage.h>
29686 #include <asm/dwarf2.h>
29687+#include <asm/alternative-asm.h>
29688
29689 /*
29690 * override generic version in lib/iomap_copy.c
29691@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
29692 CFI_STARTPROC
29693 movl %edx,%ecx
29694 rep movsd
29695+ pax_force_retaddr
29696 ret
29697 CFI_ENDPROC
29698 ENDPROC(__iowrite32_copy)
29699diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
29700index 56313a3..0db417e 100644
29701--- a/arch/x86/lib/memcpy_64.S
29702+++ b/arch/x86/lib/memcpy_64.S
29703@@ -24,7 +24,7 @@
29704 * This gets patched over the unrolled variant (below) via the
29705 * alternative instructions framework:
29706 */
29707- .section .altinstr_replacement, "ax", @progbits
29708+ .section .altinstr_replacement, "a", @progbits
29709 .Lmemcpy_c:
29710 movq %rdi, %rax
29711 movq %rdx, %rcx
29712@@ -33,6 +33,7 @@
29713 rep movsq
29714 movl %edx, %ecx
29715 rep movsb
29716+ pax_force_retaddr
29717 ret
29718 .Lmemcpy_e:
29719 .previous
29720@@ -44,11 +45,12 @@
29721 * This gets patched over the unrolled variant (below) via the
29722 * alternative instructions framework:
29723 */
29724- .section .altinstr_replacement, "ax", @progbits
29725+ .section .altinstr_replacement, "a", @progbits
29726 .Lmemcpy_c_e:
29727 movq %rdi, %rax
29728 movq %rdx, %rcx
29729 rep movsb
29730+ pax_force_retaddr
29731 ret
29732 .Lmemcpy_e_e:
29733 .previous
29734@@ -136,6 +138,7 @@ ENTRY(memcpy)
29735 movq %r9, 1*8(%rdi)
29736 movq %r10, -2*8(%rdi, %rdx)
29737 movq %r11, -1*8(%rdi, %rdx)
29738+ pax_force_retaddr
29739 retq
29740 .p2align 4
29741 .Lless_16bytes:
29742@@ -148,6 +151,7 @@ ENTRY(memcpy)
29743 movq -1*8(%rsi, %rdx), %r9
29744 movq %r8, 0*8(%rdi)
29745 movq %r9, -1*8(%rdi, %rdx)
29746+ pax_force_retaddr
29747 retq
29748 .p2align 4
29749 .Lless_8bytes:
29750@@ -161,6 +165,7 @@ ENTRY(memcpy)
29751 movl -4(%rsi, %rdx), %r8d
29752 movl %ecx, (%rdi)
29753 movl %r8d, -4(%rdi, %rdx)
29754+ pax_force_retaddr
29755 retq
29756 .p2align 4
29757 .Lless_3bytes:
29758@@ -179,6 +184,7 @@ ENTRY(memcpy)
29759 movb %cl, (%rdi)
29760
29761 .Lend:
29762+ pax_force_retaddr
29763 retq
29764 CFI_ENDPROC
29765 ENDPROC(memcpy)
29766diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
29767index 65268a6..dd1de11 100644
29768--- a/arch/x86/lib/memmove_64.S
29769+++ b/arch/x86/lib/memmove_64.S
29770@@ -202,14 +202,16 @@ ENTRY(memmove)
29771 movb (%rsi), %r11b
29772 movb %r11b, (%rdi)
29773 13:
29774+ pax_force_retaddr
29775 retq
29776 CFI_ENDPROC
29777
29778- .section .altinstr_replacement,"ax"
29779+ .section .altinstr_replacement,"a"
29780 .Lmemmove_begin_forward_efs:
29781 /* Forward moving data. */
29782 movq %rdx, %rcx
29783 rep movsb
29784+ pax_force_retaddr
29785 retq
29786 .Lmemmove_end_forward_efs:
29787 .previous
29788diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
29789index 2dcb380..2eb79fe 100644
29790--- a/arch/x86/lib/memset_64.S
29791+++ b/arch/x86/lib/memset_64.S
29792@@ -16,7 +16,7 @@
29793 *
29794 * rax original destination
29795 */
29796- .section .altinstr_replacement, "ax", @progbits
29797+ .section .altinstr_replacement, "a", @progbits
29798 .Lmemset_c:
29799 movq %rdi,%r9
29800 movq %rdx,%rcx
29801@@ -30,6 +30,7 @@
29802 movl %edx,%ecx
29803 rep stosb
29804 movq %r9,%rax
29805+ pax_force_retaddr
29806 ret
29807 .Lmemset_e:
29808 .previous
29809@@ -45,13 +46,14 @@
29810 *
29811 * rax original destination
29812 */
29813- .section .altinstr_replacement, "ax", @progbits
29814+ .section .altinstr_replacement, "a", @progbits
29815 .Lmemset_c_e:
29816 movq %rdi,%r9
29817 movb %sil,%al
29818 movq %rdx,%rcx
29819 rep stosb
29820 movq %r9,%rax
29821+ pax_force_retaddr
29822 ret
29823 .Lmemset_e_e:
29824 .previous
29825@@ -118,6 +120,7 @@ ENTRY(__memset)
29826
29827 .Lende:
29828 movq %r10,%rax
29829+ pax_force_retaddr
29830 ret
29831
29832 CFI_RESTORE_STATE
29833diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
29834index c9f2d9b..e7fd2c0 100644
29835--- a/arch/x86/lib/mmx_32.c
29836+++ b/arch/x86/lib/mmx_32.c
29837@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29838 {
29839 void *p;
29840 int i;
29841+ unsigned long cr0;
29842
29843 if (unlikely(in_interrupt()))
29844 return __memcpy(to, from, len);
29845@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29846 kernel_fpu_begin();
29847
29848 __asm__ __volatile__ (
29849- "1: prefetch (%0)\n" /* This set is 28 bytes */
29850- " prefetch 64(%0)\n"
29851- " prefetch 128(%0)\n"
29852- " prefetch 192(%0)\n"
29853- " prefetch 256(%0)\n"
29854+ "1: prefetch (%1)\n" /* This set is 28 bytes */
29855+ " prefetch 64(%1)\n"
29856+ " prefetch 128(%1)\n"
29857+ " prefetch 192(%1)\n"
29858+ " prefetch 256(%1)\n"
29859 "2: \n"
29860 ".section .fixup, \"ax\"\n"
29861- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29862+ "3: \n"
29863+
29864+#ifdef CONFIG_PAX_KERNEXEC
29865+ " movl %%cr0, %0\n"
29866+ " movl %0, %%eax\n"
29867+ " andl $0xFFFEFFFF, %%eax\n"
29868+ " movl %%eax, %%cr0\n"
29869+#endif
29870+
29871+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29872+
29873+#ifdef CONFIG_PAX_KERNEXEC
29874+ " movl %0, %%cr0\n"
29875+#endif
29876+
29877 " jmp 2b\n"
29878 ".previous\n"
29879 _ASM_EXTABLE(1b, 3b)
29880- : : "r" (from));
29881+ : "=&r" (cr0) : "r" (from) : "ax");
29882
29883 for ( ; i > 5; i--) {
29884 __asm__ __volatile__ (
29885- "1: prefetch 320(%0)\n"
29886- "2: movq (%0), %%mm0\n"
29887- " movq 8(%0), %%mm1\n"
29888- " movq 16(%0), %%mm2\n"
29889- " movq 24(%0), %%mm3\n"
29890- " movq %%mm0, (%1)\n"
29891- " movq %%mm1, 8(%1)\n"
29892- " movq %%mm2, 16(%1)\n"
29893- " movq %%mm3, 24(%1)\n"
29894- " movq 32(%0), %%mm0\n"
29895- " movq 40(%0), %%mm1\n"
29896- " movq 48(%0), %%mm2\n"
29897- " movq 56(%0), %%mm3\n"
29898- " movq %%mm0, 32(%1)\n"
29899- " movq %%mm1, 40(%1)\n"
29900- " movq %%mm2, 48(%1)\n"
29901- " movq %%mm3, 56(%1)\n"
29902+ "1: prefetch 320(%1)\n"
29903+ "2: movq (%1), %%mm0\n"
29904+ " movq 8(%1), %%mm1\n"
29905+ " movq 16(%1), %%mm2\n"
29906+ " movq 24(%1), %%mm3\n"
29907+ " movq %%mm0, (%2)\n"
29908+ " movq %%mm1, 8(%2)\n"
29909+ " movq %%mm2, 16(%2)\n"
29910+ " movq %%mm3, 24(%2)\n"
29911+ " movq 32(%1), %%mm0\n"
29912+ " movq 40(%1), %%mm1\n"
29913+ " movq 48(%1), %%mm2\n"
29914+ " movq 56(%1), %%mm3\n"
29915+ " movq %%mm0, 32(%2)\n"
29916+ " movq %%mm1, 40(%2)\n"
29917+ " movq %%mm2, 48(%2)\n"
29918+ " movq %%mm3, 56(%2)\n"
29919 ".section .fixup, \"ax\"\n"
29920- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29921+ "3:\n"
29922+
29923+#ifdef CONFIG_PAX_KERNEXEC
29924+ " movl %%cr0, %0\n"
29925+ " movl %0, %%eax\n"
29926+ " andl $0xFFFEFFFF, %%eax\n"
29927+ " movl %%eax, %%cr0\n"
29928+#endif
29929+
29930+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29931+
29932+#ifdef CONFIG_PAX_KERNEXEC
29933+ " movl %0, %%cr0\n"
29934+#endif
29935+
29936 " jmp 2b\n"
29937 ".previous\n"
29938 _ASM_EXTABLE(1b, 3b)
29939- : : "r" (from), "r" (to) : "memory");
29940+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29941
29942 from += 64;
29943 to += 64;
29944@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
29945 static void fast_copy_page(void *to, void *from)
29946 {
29947 int i;
29948+ unsigned long cr0;
29949
29950 kernel_fpu_begin();
29951
29952@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
29953 * but that is for later. -AV
29954 */
29955 __asm__ __volatile__(
29956- "1: prefetch (%0)\n"
29957- " prefetch 64(%0)\n"
29958- " prefetch 128(%0)\n"
29959- " prefetch 192(%0)\n"
29960- " prefetch 256(%0)\n"
29961+ "1: prefetch (%1)\n"
29962+ " prefetch 64(%1)\n"
29963+ " prefetch 128(%1)\n"
29964+ " prefetch 192(%1)\n"
29965+ " prefetch 256(%1)\n"
29966 "2: \n"
29967 ".section .fixup, \"ax\"\n"
29968- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29969+ "3: \n"
29970+
29971+#ifdef CONFIG_PAX_KERNEXEC
29972+ " movl %%cr0, %0\n"
29973+ " movl %0, %%eax\n"
29974+ " andl $0xFFFEFFFF, %%eax\n"
29975+ " movl %%eax, %%cr0\n"
29976+#endif
29977+
29978+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29979+
29980+#ifdef CONFIG_PAX_KERNEXEC
29981+ " movl %0, %%cr0\n"
29982+#endif
29983+
29984 " jmp 2b\n"
29985 ".previous\n"
29986- _ASM_EXTABLE(1b, 3b) : : "r" (from));
29987+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
29988
29989 for (i = 0; i < (4096-320)/64; i++) {
29990 __asm__ __volatile__ (
29991- "1: prefetch 320(%0)\n"
29992- "2: movq (%0), %%mm0\n"
29993- " movntq %%mm0, (%1)\n"
29994- " movq 8(%0), %%mm1\n"
29995- " movntq %%mm1, 8(%1)\n"
29996- " movq 16(%0), %%mm2\n"
29997- " movntq %%mm2, 16(%1)\n"
29998- " movq 24(%0), %%mm3\n"
29999- " movntq %%mm3, 24(%1)\n"
30000- " movq 32(%0), %%mm4\n"
30001- " movntq %%mm4, 32(%1)\n"
30002- " movq 40(%0), %%mm5\n"
30003- " movntq %%mm5, 40(%1)\n"
30004- " movq 48(%0), %%mm6\n"
30005- " movntq %%mm6, 48(%1)\n"
30006- " movq 56(%0), %%mm7\n"
30007- " movntq %%mm7, 56(%1)\n"
30008+ "1: prefetch 320(%1)\n"
30009+ "2: movq (%1), %%mm0\n"
30010+ " movntq %%mm0, (%2)\n"
30011+ " movq 8(%1), %%mm1\n"
30012+ " movntq %%mm1, 8(%2)\n"
30013+ " movq 16(%1), %%mm2\n"
30014+ " movntq %%mm2, 16(%2)\n"
30015+ " movq 24(%1), %%mm3\n"
30016+ " movntq %%mm3, 24(%2)\n"
30017+ " movq 32(%1), %%mm4\n"
30018+ " movntq %%mm4, 32(%2)\n"
30019+ " movq 40(%1), %%mm5\n"
30020+ " movntq %%mm5, 40(%2)\n"
30021+ " movq 48(%1), %%mm6\n"
30022+ " movntq %%mm6, 48(%2)\n"
30023+ " movq 56(%1), %%mm7\n"
30024+ " movntq %%mm7, 56(%2)\n"
30025 ".section .fixup, \"ax\"\n"
30026- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30027+ "3:\n"
30028+
30029+#ifdef CONFIG_PAX_KERNEXEC
30030+ " movl %%cr0, %0\n"
30031+ " movl %0, %%eax\n"
30032+ " andl $0xFFFEFFFF, %%eax\n"
30033+ " movl %%eax, %%cr0\n"
30034+#endif
30035+
30036+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30037+
30038+#ifdef CONFIG_PAX_KERNEXEC
30039+ " movl %0, %%cr0\n"
30040+#endif
30041+
30042 " jmp 2b\n"
30043 ".previous\n"
30044- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30045+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30046
30047 from += 64;
30048 to += 64;
30049@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30050 static void fast_copy_page(void *to, void *from)
30051 {
30052 int i;
30053+ unsigned long cr0;
30054
30055 kernel_fpu_begin();
30056
30057 __asm__ __volatile__ (
30058- "1: prefetch (%0)\n"
30059- " prefetch 64(%0)\n"
30060- " prefetch 128(%0)\n"
30061- " prefetch 192(%0)\n"
30062- " prefetch 256(%0)\n"
30063+ "1: prefetch (%1)\n"
30064+ " prefetch 64(%1)\n"
30065+ " prefetch 128(%1)\n"
30066+ " prefetch 192(%1)\n"
30067+ " prefetch 256(%1)\n"
30068 "2: \n"
30069 ".section .fixup, \"ax\"\n"
30070- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30071+ "3: \n"
30072+
30073+#ifdef CONFIG_PAX_KERNEXEC
30074+ " movl %%cr0, %0\n"
30075+ " movl %0, %%eax\n"
30076+ " andl $0xFFFEFFFF, %%eax\n"
30077+ " movl %%eax, %%cr0\n"
30078+#endif
30079+
30080+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30081+
30082+#ifdef CONFIG_PAX_KERNEXEC
30083+ " movl %0, %%cr0\n"
30084+#endif
30085+
30086 " jmp 2b\n"
30087 ".previous\n"
30088- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30089+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30090
30091 for (i = 0; i < 4096/64; i++) {
30092 __asm__ __volatile__ (
30093- "1: prefetch 320(%0)\n"
30094- "2: movq (%0), %%mm0\n"
30095- " movq 8(%0), %%mm1\n"
30096- " movq 16(%0), %%mm2\n"
30097- " movq 24(%0), %%mm3\n"
30098- " movq %%mm0, (%1)\n"
30099- " movq %%mm1, 8(%1)\n"
30100- " movq %%mm2, 16(%1)\n"
30101- " movq %%mm3, 24(%1)\n"
30102- " movq 32(%0), %%mm0\n"
30103- " movq 40(%0), %%mm1\n"
30104- " movq 48(%0), %%mm2\n"
30105- " movq 56(%0), %%mm3\n"
30106- " movq %%mm0, 32(%1)\n"
30107- " movq %%mm1, 40(%1)\n"
30108- " movq %%mm2, 48(%1)\n"
30109- " movq %%mm3, 56(%1)\n"
30110+ "1: prefetch 320(%1)\n"
30111+ "2: movq (%1), %%mm0\n"
30112+ " movq 8(%1), %%mm1\n"
30113+ " movq 16(%1), %%mm2\n"
30114+ " movq 24(%1), %%mm3\n"
30115+ " movq %%mm0, (%2)\n"
30116+ " movq %%mm1, 8(%2)\n"
30117+ " movq %%mm2, 16(%2)\n"
30118+ " movq %%mm3, 24(%2)\n"
30119+ " movq 32(%1), %%mm0\n"
30120+ " movq 40(%1), %%mm1\n"
30121+ " movq 48(%1), %%mm2\n"
30122+ " movq 56(%1), %%mm3\n"
30123+ " movq %%mm0, 32(%2)\n"
30124+ " movq %%mm1, 40(%2)\n"
30125+ " movq %%mm2, 48(%2)\n"
30126+ " movq %%mm3, 56(%2)\n"
30127 ".section .fixup, \"ax\"\n"
30128- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30129+ "3:\n"
30130+
30131+#ifdef CONFIG_PAX_KERNEXEC
30132+ " movl %%cr0, %0\n"
30133+ " movl %0, %%eax\n"
30134+ " andl $0xFFFEFFFF, %%eax\n"
30135+ " movl %%eax, %%cr0\n"
30136+#endif
30137+
30138+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30139+
30140+#ifdef CONFIG_PAX_KERNEXEC
30141+ " movl %0, %%cr0\n"
30142+#endif
30143+
30144 " jmp 2b\n"
30145 ".previous\n"
30146 _ASM_EXTABLE(1b, 3b)
30147- : : "r" (from), "r" (to) : "memory");
30148+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30149
30150 from += 64;
30151 to += 64;
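mmx_32.c's fixup paths patch a jmp over the faulting prefetch instruction, i.e. they write to kernel text, so under KERNEXEC each fixup now brackets that store with a CR0.WP toggle (the 0xFFFEFFFF mask clears bit 16). CR0 becomes output operand %0, which is why the prefetch/copy operands shift from %0/%1 to %1/%2 and %eax joins the clobber list. A sketch of the WP dance using the kernel's real CR0 accessors (declared here so the fragment is self-contained):

    extern unsigned long native_read_cr0(void);
    extern void native_write_cr0(unsigned long);

    #define X86_CR0_WP_BIT 16   /* the bit the 0xFFFEFFFF mask clears */

    static void patch_jmp_over_prefetch(void *insn, unsigned short jmp_word)
    {
        unsigned long cr0 = native_read_cr0();

        native_write_cr0(cr0 & ~(1UL << X86_CR0_WP_BIT)); /* text writable */
        *(volatile unsigned short *)insn = jmp_word; /* e.g. 0x1AEB: jmp .+0x1c */
        native_write_cr0(cr0);                       /* WP back on */
    }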
30152diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30153index f6d13ee..d789440 100644
30154--- a/arch/x86/lib/msr-reg.S
30155+++ b/arch/x86/lib/msr-reg.S
30156@@ -3,6 +3,7 @@
30157 #include <asm/dwarf2.h>
30158 #include <asm/asm.h>
30159 #include <asm/msr.h>
30160+#include <asm/alternative-asm.h>
30161
30162 #ifdef CONFIG_X86_64
30163 /*
30164@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30165 movl %edi, 28(%r10)
30166 popq_cfi %rbp
30167 popq_cfi %rbx
30168+ pax_force_retaddr
30169 ret
30170 3:
30171 CFI_RESTORE_STATE
30172diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30173index fc6ba17..d4d989d 100644
30174--- a/arch/x86/lib/putuser.S
30175+++ b/arch/x86/lib/putuser.S
30176@@ -16,7 +16,9 @@
30177 #include <asm/errno.h>
30178 #include <asm/asm.h>
30179 #include <asm/smap.h>
30180-
30181+#include <asm/segment.h>
30182+#include <asm/pgtable.h>
30183+#include <asm/alternative-asm.h>
30184
30185 /*
30186 * __put_user_X
30187@@ -30,57 +32,125 @@
30188 * as they get called from within inline assembly.
30189 */
30190
30191-#define ENTER CFI_STARTPROC ; \
30192- GET_THREAD_INFO(%_ASM_BX)
30193-#define EXIT ASM_CLAC ; \
30194- ret ; \
30195+#define ENTER CFI_STARTPROC
30196+#define EXIT ASM_CLAC ; \
30197+ pax_force_retaddr ; \
30198+ ret ; \
30199 CFI_ENDPROC
30200
30201+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30202+#define _DEST %_ASM_CX,%_ASM_BX
30203+#else
30204+#define _DEST %_ASM_CX
30205+#endif
30206+
30207+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30208+#define __copyuser_seg gs;
30209+#else
30210+#define __copyuser_seg
30211+#endif
30212+
30213 .text
30214 ENTRY(__put_user_1)
30215 ENTER
30216+
30217+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30218+ GET_THREAD_INFO(%_ASM_BX)
30219 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30220 jae bad_put_user
30221 ASM_STAC
30222-1: movb %al,(%_ASM_CX)
30223+
30224+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30225+ mov pax_user_shadow_base,%_ASM_BX
30226+ cmp %_ASM_BX,%_ASM_CX
30227+ jb 1234f
30228+ xor %ebx,%ebx
30229+1234:
30230+#endif
30231+
30232+#endif
30233+
30234+1: __copyuser_seg movb %al,(_DEST)
30235 xor %eax,%eax
30236 EXIT
30237 ENDPROC(__put_user_1)
30238
30239 ENTRY(__put_user_2)
30240 ENTER
30241+
30242+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30243+ GET_THREAD_INFO(%_ASM_BX)
30244 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30245 sub $1,%_ASM_BX
30246 cmp %_ASM_BX,%_ASM_CX
30247 jae bad_put_user
30248 ASM_STAC
30249-2: movw %ax,(%_ASM_CX)
30250+
30251+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30252+ mov pax_user_shadow_base,%_ASM_BX
30253+ cmp %_ASM_BX,%_ASM_CX
30254+ jb 1234f
30255+ xor %ebx,%ebx
30256+1234:
30257+#endif
30258+
30259+#endif
30260+
30261+2: __copyuser_seg movw %ax,(_DEST)
30262 xor %eax,%eax
30263 EXIT
30264 ENDPROC(__put_user_2)
30265
30266 ENTRY(__put_user_4)
30267 ENTER
30268+
30269+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30270+ GET_THREAD_INFO(%_ASM_BX)
30271 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30272 sub $3,%_ASM_BX
30273 cmp %_ASM_BX,%_ASM_CX
30274 jae bad_put_user
30275 ASM_STAC
30276-3: movl %eax,(%_ASM_CX)
30277+
30278+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30279+ mov pax_user_shadow_base,%_ASM_BX
30280+ cmp %_ASM_BX,%_ASM_CX
30281+ jb 1234f
30282+ xor %ebx,%ebx
30283+1234:
30284+#endif
30285+
30286+#endif
30287+
30288+3: __copyuser_seg movl %eax,(_DEST)
30289 xor %eax,%eax
30290 EXIT
30291 ENDPROC(__put_user_4)
30292
30293 ENTRY(__put_user_8)
30294 ENTER
30295+
30296+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30297+ GET_THREAD_INFO(%_ASM_BX)
30298 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30299 sub $7,%_ASM_BX
30300 cmp %_ASM_BX,%_ASM_CX
30301 jae bad_put_user
30302 ASM_STAC
30303-4: mov %_ASM_AX,(%_ASM_CX)
30304+
30305+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30306+ mov pax_user_shadow_base,%_ASM_BX
30307+ cmp %_ASM_BX,%_ASM_CX
30308+ jb 1234f
30309+ xor %ebx,%ebx
30310+1234:
30311+#endif
30312+
30313+#endif
30314+
30315+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30316 #ifdef CONFIG_X86_32
30317-5: movl %edx,4(%_ASM_CX)
30318+5: __copyuser_seg movl %edx,4(_DEST)
30319 #endif
30320 xor %eax,%eax
30321 EXIT
30322diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
30323index 1cad221..de671ee 100644
30324--- a/arch/x86/lib/rwlock.S
30325+++ b/arch/x86/lib/rwlock.S
30326@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
30327 FRAME
30328 0: LOCK_PREFIX
30329 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
30330+
30331+#ifdef CONFIG_PAX_REFCOUNT
30332+ jno 1234f
30333+ LOCK_PREFIX
30334+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
30335+ int $4
30336+1234:
30337+ _ASM_EXTABLE(1234b, 1234b)
30338+#endif
30339+
30340 1: rep; nop
30341 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
30342 jne 1b
30343 LOCK_PREFIX
30344 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
30345+
30346+#ifdef CONFIG_PAX_REFCOUNT
30347+ jno 1234f
30348+ LOCK_PREFIX
30349+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
30350+ int $4
30351+1234:
30352+ _ASM_EXTABLE(1234b, 1234b)
30353+#endif
30354+
30355 jnz 0b
30356 ENDFRAME
30357+ pax_force_retaddr
30358 ret
30359 CFI_ENDPROC
30360 END(__write_lock_failed)
30361@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
30362 FRAME
30363 0: LOCK_PREFIX
30364 READ_LOCK_SIZE(inc) (%__lock_ptr)
30365+
30366+#ifdef CONFIG_PAX_REFCOUNT
30367+ jno 1234f
30368+ LOCK_PREFIX
30369+ READ_LOCK_SIZE(dec) (%__lock_ptr)
30370+ int $4
30371+1234:
30372+ _ASM_EXTABLE(1234b, 1234b)
30373+#endif
30374+
30375 1: rep; nop
30376 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
30377 js 1b
30378 LOCK_PREFIX
30379 READ_LOCK_SIZE(dec) (%__lock_ptr)
30380+
30381+#ifdef CONFIG_PAX_REFCOUNT
30382+ jno 1234f
30383+ LOCK_PREFIX
30384+ READ_LOCK_SIZE(inc) (%__lock_ptr)
30385+ int $4
30386+1234:
30387+ _ASM_EXTABLE(1234b, 1234b)
30388+#endif
30389+
30390 js 0b
30391 ENDFRAME
30392+ pax_force_retaddr
30393 ret
30394 CFI_ENDPROC
30395 END(__read_lock_failed)
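The rwlock hunks apply the REFCOUNT idea to the lock counters with a different recovery shape than the atomic64 code: on overflow (jno not taken) the locked add/inc is undone, int $4 raises the overflow exception, and the self-referencing extable entry (_ASM_EXTABLE(1234b, 1234b)) resumes execution at the same spot once PaX has reported the event. A C analogue with assumed names (__builtin_trap() stands in for int $4 and, unlike it, does not resume):

    static void write_lock_add_checked(int *lock, int bias)
    {
        int tmp;
        int old = __atomic_fetch_add(lock, bias, __ATOMIC_SEQ_CST);

        if (__builtin_add_overflow(old, bias, &tmp)) {
            __atomic_fetch_sub(lock, bias, __ATOMIC_SEQ_CST); /* undo */
            __builtin_trap();   /* report, like int $4 */
        }
    }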
30396diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30397index 5dff5f0..cadebf4 100644
30398--- a/arch/x86/lib/rwsem.S
30399+++ b/arch/x86/lib/rwsem.S
30400@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30401 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30402 CFI_RESTORE __ASM_REG(dx)
30403 restore_common_regs
30404+ pax_force_retaddr
30405 ret
30406 CFI_ENDPROC
30407 ENDPROC(call_rwsem_down_read_failed)
30408@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30409 movq %rax,%rdi
30410 call rwsem_down_write_failed
30411 restore_common_regs
30412+ pax_force_retaddr
30413 ret
30414 CFI_ENDPROC
30415 ENDPROC(call_rwsem_down_write_failed)
30416@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
30417 movq %rax,%rdi
30418 call rwsem_wake
30419 restore_common_regs
30420-1: ret
30421+1: pax_force_retaddr
30422+ ret
30423 CFI_ENDPROC
30424 ENDPROC(call_rwsem_wake)
30425
30426@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
30427 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30428 CFI_RESTORE __ASM_REG(dx)
30429 restore_common_regs
30430+ pax_force_retaddr
30431 ret
30432 CFI_ENDPROC
30433 ENDPROC(call_rwsem_downgrade_wake)
30434diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
30435index a63efd6..8149fbe 100644
30436--- a/arch/x86/lib/thunk_64.S
30437+++ b/arch/x86/lib/thunk_64.S
30438@@ -8,6 +8,7 @@
30439 #include <linux/linkage.h>
30440 #include <asm/dwarf2.h>
30441 #include <asm/calling.h>
30442+#include <asm/alternative-asm.h>
30443
30444 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
30445 .macro THUNK name, func, put_ret_addr_in_rdi=0
30446@@ -15,11 +16,11 @@
30447 \name:
30448 CFI_STARTPROC
30449
30450- /* this one pushes 9 elems, the next one would be %rIP */
30451- SAVE_ARGS
30452+ /* this one pushes 15+1 elems, the next one would be %rIP */
30453+ SAVE_ARGS 8
30454
30455 .if \put_ret_addr_in_rdi
30456- movq_cfi_restore 9*8, rdi
30457+ movq_cfi_restore RIP, rdi
30458 .endif
30459
30460 call \func
30461@@ -38,8 +39,9 @@
30462
30463 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
30464 CFI_STARTPROC
30465- SAVE_ARGS
30466+ SAVE_ARGS 8
30467 restore:
30468- RESTORE_ARGS
30469+ RESTORE_ARGS 1,8
30470+ pax_force_retaddr
30471 ret
30472 CFI_ENDPROC
30473diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
30474index e2f5e21..4b22130 100644
30475--- a/arch/x86/lib/usercopy_32.c
30476+++ b/arch/x86/lib/usercopy_32.c
30477@@ -42,11 +42,13 @@ do { \
30478 int __d0; \
30479 might_fault(); \
30480 __asm__ __volatile__( \
30481+ __COPYUSER_SET_ES \
30482 ASM_STAC "\n" \
30483 "0: rep; stosl\n" \
30484 " movl %2,%0\n" \
30485 "1: rep; stosb\n" \
30486 "2: " ASM_CLAC "\n" \
30487+ __COPYUSER_RESTORE_ES \
30488 ".section .fixup,\"ax\"\n" \
30489 "3: lea 0(%2,%0,4),%0\n" \
30490 " jmp 2b\n" \
30491@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
30492
30493 #ifdef CONFIG_X86_INTEL_USERCOPY
30494 static unsigned long
30495-__copy_user_intel(void __user *to, const void *from, unsigned long size)
30496+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
30497 {
30498 int d0, d1;
30499 __asm__ __volatile__(
30500@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
30501 " .align 2,0x90\n"
30502 "3: movl 0(%4), %%eax\n"
30503 "4: movl 4(%4), %%edx\n"
30504- "5: movl %%eax, 0(%3)\n"
30505- "6: movl %%edx, 4(%3)\n"
30506+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
30507+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
30508 "7: movl 8(%4), %%eax\n"
30509 "8: movl 12(%4),%%edx\n"
30510- "9: movl %%eax, 8(%3)\n"
30511- "10: movl %%edx, 12(%3)\n"
30512+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
30513+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
30514 "11: movl 16(%4), %%eax\n"
30515 "12: movl 20(%4), %%edx\n"
30516- "13: movl %%eax, 16(%3)\n"
30517- "14: movl %%edx, 20(%3)\n"
30518+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
30519+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
30520 "15: movl 24(%4), %%eax\n"
30521 "16: movl 28(%4), %%edx\n"
30522- "17: movl %%eax, 24(%3)\n"
30523- "18: movl %%edx, 28(%3)\n"
30524+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
30525+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
30526 "19: movl 32(%4), %%eax\n"
30527 "20: movl 36(%4), %%edx\n"
30528- "21: movl %%eax, 32(%3)\n"
30529- "22: movl %%edx, 36(%3)\n"
30530+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
30531+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
30532 "23: movl 40(%4), %%eax\n"
30533 "24: movl 44(%4), %%edx\n"
30534- "25: movl %%eax, 40(%3)\n"
30535- "26: movl %%edx, 44(%3)\n"
30536+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
30537+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
30538 "27: movl 48(%4), %%eax\n"
30539 "28: movl 52(%4), %%edx\n"
30540- "29: movl %%eax, 48(%3)\n"
30541- "30: movl %%edx, 52(%3)\n"
30542+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
30543+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
30544 "31: movl 56(%4), %%eax\n"
30545 "32: movl 60(%4), %%edx\n"
30546- "33: movl %%eax, 56(%3)\n"
30547- "34: movl %%edx, 60(%3)\n"
30548+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
30549+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
30550 " addl $-64, %0\n"
30551 " addl $64, %4\n"
30552 " addl $64, %3\n"
30553@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
30554 " shrl $2, %0\n"
30555 " andl $3, %%eax\n"
30556 " cld\n"
30557+ __COPYUSER_SET_ES
30558 "99: rep; movsl\n"
30559 "36: movl %%eax, %0\n"
30560 "37: rep; movsb\n"
30561 "100:\n"
30562+ __COPYUSER_RESTORE_ES
30563+ ".section .fixup,\"ax\"\n"
30564+ "101: lea 0(%%eax,%0,4),%0\n"
30565+ " jmp 100b\n"
30566+ ".previous\n"
30567+ _ASM_EXTABLE(1b,100b)
30568+ _ASM_EXTABLE(2b,100b)
30569+ _ASM_EXTABLE(3b,100b)
30570+ _ASM_EXTABLE(4b,100b)
30571+ _ASM_EXTABLE(5b,100b)
30572+ _ASM_EXTABLE(6b,100b)
30573+ _ASM_EXTABLE(7b,100b)
30574+ _ASM_EXTABLE(8b,100b)
30575+ _ASM_EXTABLE(9b,100b)
30576+ _ASM_EXTABLE(10b,100b)
30577+ _ASM_EXTABLE(11b,100b)
30578+ _ASM_EXTABLE(12b,100b)
30579+ _ASM_EXTABLE(13b,100b)
30580+ _ASM_EXTABLE(14b,100b)
30581+ _ASM_EXTABLE(15b,100b)
30582+ _ASM_EXTABLE(16b,100b)
30583+ _ASM_EXTABLE(17b,100b)
30584+ _ASM_EXTABLE(18b,100b)
30585+ _ASM_EXTABLE(19b,100b)
30586+ _ASM_EXTABLE(20b,100b)
30587+ _ASM_EXTABLE(21b,100b)
30588+ _ASM_EXTABLE(22b,100b)
30589+ _ASM_EXTABLE(23b,100b)
30590+ _ASM_EXTABLE(24b,100b)
30591+ _ASM_EXTABLE(25b,100b)
30592+ _ASM_EXTABLE(26b,100b)
30593+ _ASM_EXTABLE(27b,100b)
30594+ _ASM_EXTABLE(28b,100b)
30595+ _ASM_EXTABLE(29b,100b)
30596+ _ASM_EXTABLE(30b,100b)
30597+ _ASM_EXTABLE(31b,100b)
30598+ _ASM_EXTABLE(32b,100b)
30599+ _ASM_EXTABLE(33b,100b)
30600+ _ASM_EXTABLE(34b,100b)
30601+ _ASM_EXTABLE(35b,100b)
30602+ _ASM_EXTABLE(36b,100b)
30603+ _ASM_EXTABLE(37b,100b)
30604+ _ASM_EXTABLE(99b,101b)
30605+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
30606+ : "1"(to), "2"(from), "0"(size)
30607+ : "eax", "edx", "memory");
30608+ return size;
30609+}
30610+
30611+static unsigned long
30612+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
30613+{
30614+ int d0, d1;
30615+ __asm__ __volatile__(
30616+ " .align 2,0x90\n"
30617+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
30618+ " cmpl $67, %0\n"
30619+ " jbe 3f\n"
30620+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
30621+ " .align 2,0x90\n"
30622+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
30623+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
30624+ "5: movl %%eax, 0(%3)\n"
30625+ "6: movl %%edx, 4(%3)\n"
30626+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
30627+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
30628+ "9: movl %%eax, 8(%3)\n"
30629+ "10: movl %%edx, 12(%3)\n"
30630+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
30631+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
30632+ "13: movl %%eax, 16(%3)\n"
30633+ "14: movl %%edx, 20(%3)\n"
30634+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
30635+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
30636+ "17: movl %%eax, 24(%3)\n"
30637+ "18: movl %%edx, 28(%3)\n"
30638+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
30639+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
30640+ "21: movl %%eax, 32(%3)\n"
30641+ "22: movl %%edx, 36(%3)\n"
30642+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
30643+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
30644+ "25: movl %%eax, 40(%3)\n"
30645+ "26: movl %%edx, 44(%3)\n"
30646+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
30647+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
30648+ "29: movl %%eax, 48(%3)\n"
30649+ "30: movl %%edx, 52(%3)\n"
30650+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
30651+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
30652+ "33: movl %%eax, 56(%3)\n"
30653+ "34: movl %%edx, 60(%3)\n"
30654+ " addl $-64, %0\n"
30655+ " addl $64, %4\n"
30656+ " addl $64, %3\n"
30657+ " cmpl $63, %0\n"
30658+ " ja 1b\n"
30659+ "35: movl %0, %%eax\n"
30660+ " shrl $2, %0\n"
30661+ " andl $3, %%eax\n"
30662+ " cld\n"
30663+ "99: rep; "__copyuser_seg" movsl\n"
30664+ "36: movl %%eax, %0\n"
30665+ "37: rep; "__copyuser_seg" movsb\n"
30666+ "100:\n"
30667 ".section .fixup,\"ax\"\n"
30668 "101: lea 0(%%eax,%0,4),%0\n"
30669 " jmp 100b\n"
30670@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30671 int d0, d1;
30672 __asm__ __volatile__(
30673 " .align 2,0x90\n"
30674- "0: movl 32(%4), %%eax\n"
30675+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30676 " cmpl $67, %0\n"
30677 " jbe 2f\n"
30678- "1: movl 64(%4), %%eax\n"
30679+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30680 " .align 2,0x90\n"
30681- "2: movl 0(%4), %%eax\n"
30682- "21: movl 4(%4), %%edx\n"
30683+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30684+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30685 " movl %%eax, 0(%3)\n"
30686 " movl %%edx, 4(%3)\n"
30687- "3: movl 8(%4), %%eax\n"
30688- "31: movl 12(%4),%%edx\n"
30689+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30690+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30691 " movl %%eax, 8(%3)\n"
30692 " movl %%edx, 12(%3)\n"
30693- "4: movl 16(%4), %%eax\n"
30694- "41: movl 20(%4), %%edx\n"
30695+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30696+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30697 " movl %%eax, 16(%3)\n"
30698 " movl %%edx, 20(%3)\n"
30699- "10: movl 24(%4), %%eax\n"
30700- "51: movl 28(%4), %%edx\n"
30701+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30702+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30703 " movl %%eax, 24(%3)\n"
30704 " movl %%edx, 28(%3)\n"
30705- "11: movl 32(%4), %%eax\n"
30706- "61: movl 36(%4), %%edx\n"
30707+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30708+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30709 " movl %%eax, 32(%3)\n"
30710 " movl %%edx, 36(%3)\n"
30711- "12: movl 40(%4), %%eax\n"
30712- "71: movl 44(%4), %%edx\n"
30713+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30714+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30715 " movl %%eax, 40(%3)\n"
30716 " movl %%edx, 44(%3)\n"
30717- "13: movl 48(%4), %%eax\n"
30718- "81: movl 52(%4), %%edx\n"
30719+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30720+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30721 " movl %%eax, 48(%3)\n"
30722 " movl %%edx, 52(%3)\n"
30723- "14: movl 56(%4), %%eax\n"
30724- "91: movl 60(%4), %%edx\n"
30725+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30726+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30727 " movl %%eax, 56(%3)\n"
30728 " movl %%edx, 60(%3)\n"
30729 " addl $-64, %0\n"
30730@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30731 " shrl $2, %0\n"
30732 " andl $3, %%eax\n"
30733 " cld\n"
30734- "6: rep; movsl\n"
30735+ "6: rep; "__copyuser_seg" movsl\n"
30736 " movl %%eax,%0\n"
30737- "7: rep; movsb\n"
30738+ "7: rep; "__copyuser_seg" movsb\n"
30739 "8:\n"
30740 ".section .fixup,\"ax\"\n"
30741 "9: lea 0(%%eax,%0,4),%0\n"
30742@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30743
30744 __asm__ __volatile__(
30745 " .align 2,0x90\n"
30746- "0: movl 32(%4), %%eax\n"
30747+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30748 " cmpl $67, %0\n"
30749 " jbe 2f\n"
30750- "1: movl 64(%4), %%eax\n"
30751+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30752 " .align 2,0x90\n"
30753- "2: movl 0(%4), %%eax\n"
30754- "21: movl 4(%4), %%edx\n"
30755+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30756+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30757 " movnti %%eax, 0(%3)\n"
30758 " movnti %%edx, 4(%3)\n"
30759- "3: movl 8(%4), %%eax\n"
30760- "31: movl 12(%4),%%edx\n"
30761+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30762+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30763 " movnti %%eax, 8(%3)\n"
30764 " movnti %%edx, 12(%3)\n"
30765- "4: movl 16(%4), %%eax\n"
30766- "41: movl 20(%4), %%edx\n"
30767+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30768+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30769 " movnti %%eax, 16(%3)\n"
30770 " movnti %%edx, 20(%3)\n"
30771- "10: movl 24(%4), %%eax\n"
30772- "51: movl 28(%4), %%edx\n"
30773+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30774+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30775 " movnti %%eax, 24(%3)\n"
30776 " movnti %%edx, 28(%3)\n"
30777- "11: movl 32(%4), %%eax\n"
30778- "61: movl 36(%4), %%edx\n"
30779+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30780+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30781 " movnti %%eax, 32(%3)\n"
30782 " movnti %%edx, 36(%3)\n"
30783- "12: movl 40(%4), %%eax\n"
30784- "71: movl 44(%4), %%edx\n"
30785+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30786+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30787 " movnti %%eax, 40(%3)\n"
30788 " movnti %%edx, 44(%3)\n"
30789- "13: movl 48(%4), %%eax\n"
30790- "81: movl 52(%4), %%edx\n"
30791+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30792+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30793 " movnti %%eax, 48(%3)\n"
30794 " movnti %%edx, 52(%3)\n"
30795- "14: movl 56(%4), %%eax\n"
30796- "91: movl 60(%4), %%edx\n"
30797+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30798+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30799 " movnti %%eax, 56(%3)\n"
30800 " movnti %%edx, 60(%3)\n"
30801 " addl $-64, %0\n"
30802@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30803 " shrl $2, %0\n"
30804 " andl $3, %%eax\n"
30805 " cld\n"
30806- "6: rep; movsl\n"
30807+ "6: rep; "__copyuser_seg" movsl\n"
30808 " movl %%eax,%0\n"
30809- "7: rep; movsb\n"
30810+ "7: rep; "__copyuser_seg" movsb\n"
30811 "8:\n"
30812 ".section .fixup,\"ax\"\n"
30813 "9: lea 0(%%eax,%0,4),%0\n"
30814@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
30815
30816 __asm__ __volatile__(
30817 " .align 2,0x90\n"
30818- "0: movl 32(%4), %%eax\n"
30819+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30820 " cmpl $67, %0\n"
30821 " jbe 2f\n"
30822- "1: movl 64(%4), %%eax\n"
30823+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30824 " .align 2,0x90\n"
30825- "2: movl 0(%4), %%eax\n"
30826- "21: movl 4(%4), %%edx\n"
30827+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30828+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30829 " movnti %%eax, 0(%3)\n"
30830 " movnti %%edx, 4(%3)\n"
30831- "3: movl 8(%4), %%eax\n"
30832- "31: movl 12(%4),%%edx\n"
30833+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30834+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30835 " movnti %%eax, 8(%3)\n"
30836 " movnti %%edx, 12(%3)\n"
30837- "4: movl 16(%4), %%eax\n"
30838- "41: movl 20(%4), %%edx\n"
30839+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30840+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30841 " movnti %%eax, 16(%3)\n"
30842 " movnti %%edx, 20(%3)\n"
30843- "10: movl 24(%4), %%eax\n"
30844- "51: movl 28(%4), %%edx\n"
30845+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30846+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30847 " movnti %%eax, 24(%3)\n"
30848 " movnti %%edx, 28(%3)\n"
30849- "11: movl 32(%4), %%eax\n"
30850- "61: movl 36(%4), %%edx\n"
30851+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30852+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30853 " movnti %%eax, 32(%3)\n"
30854 " movnti %%edx, 36(%3)\n"
30855- "12: movl 40(%4), %%eax\n"
30856- "71: movl 44(%4), %%edx\n"
30857+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30858+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30859 " movnti %%eax, 40(%3)\n"
30860 " movnti %%edx, 44(%3)\n"
30861- "13: movl 48(%4), %%eax\n"
30862- "81: movl 52(%4), %%edx\n"
30863+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30864+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30865 " movnti %%eax, 48(%3)\n"
30866 " movnti %%edx, 52(%3)\n"
30867- "14: movl 56(%4), %%eax\n"
30868- "91: movl 60(%4), %%edx\n"
30869+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30870+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30871 " movnti %%eax, 56(%3)\n"
30872 " movnti %%edx, 60(%3)\n"
30873 " addl $-64, %0\n"
30874@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
30875 " shrl $2, %0\n"
30876 " andl $3, %%eax\n"
30877 " cld\n"
30878- "6: rep; movsl\n"
30879+ "6: rep; "__copyuser_seg" movsl\n"
30880 " movl %%eax,%0\n"
30881- "7: rep; movsb\n"
30882+ "7: rep; "__copyuser_seg" movsb\n"
30883 "8:\n"
30884 ".section .fixup,\"ax\"\n"
30885 "9: lea 0(%%eax,%0,4),%0\n"
30886@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
30887 */
30888 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
30889 unsigned long size);
30890-unsigned long __copy_user_intel(void __user *to, const void *from,
30891+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
30892+ unsigned long size);
30893+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
30894 unsigned long size);
30895 unsigned long __copy_user_zeroing_intel_nocache(void *to,
30896 const void __user *from, unsigned long size);
30897 #endif /* CONFIG_X86_INTEL_USERCOPY */
30898
30899 /* Generic arbitrary sized copy. */
30900-#define __copy_user(to, from, size) \
30901+#define __copy_user(to, from, size, prefix, set, restore) \
30902 do { \
30903 int __d0, __d1, __d2; \
30904 __asm__ __volatile__( \
30905+ set \
30906 " cmp $7,%0\n" \
30907 " jbe 1f\n" \
30908 " movl %1,%0\n" \
30909 " negl %0\n" \
30910 " andl $7,%0\n" \
30911 " subl %0,%3\n" \
30912- "4: rep; movsb\n" \
30913+ "4: rep; "prefix"movsb\n" \
30914 " movl %3,%0\n" \
30915 " shrl $2,%0\n" \
30916 " andl $3,%3\n" \
30917 " .align 2,0x90\n" \
30918- "0: rep; movsl\n" \
30919+ "0: rep; "prefix"movsl\n" \
30920 " movl %3,%0\n" \
30921- "1: rep; movsb\n" \
30922+ "1: rep; "prefix"movsb\n" \
30923 "2:\n" \
30924+ restore \
30925 ".section .fixup,\"ax\"\n" \
30926 "5: addl %3,%0\n" \
30927 " jmp 2b\n" \
30928@@ -538,14 +650,14 @@ do { \
30929 " negl %0\n" \
30930 " andl $7,%0\n" \
30931 " subl %0,%3\n" \
30932- "4: rep; movsb\n" \
30933+ "4: rep; "__copyuser_seg"movsb\n" \
30934 " movl %3,%0\n" \
30935 " shrl $2,%0\n" \
30936 " andl $3,%3\n" \
30937 " .align 2,0x90\n" \
30938- "0: rep; movsl\n" \
30939+ "0: rep; "__copyuser_seg"movsl\n" \
30940 " movl %3,%0\n" \
30941- "1: rep; movsb\n" \
30942+ "1: rep; "__copyuser_seg"movsb\n" \
30943 "2:\n" \
30944 ".section .fixup,\"ax\"\n" \
30945 "5: addl %3,%0\n" \
30946@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
30947 {
30948 stac();
30949 if (movsl_is_ok(to, from, n))
30950- __copy_user(to, from, n);
30951+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
30952 else
30953- n = __copy_user_intel(to, from, n);
30954+ n = __generic_copy_to_user_intel(to, from, n);
30955 clac();
30956 return n;
30957 }
30958@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
30959 {
30960 stac();
30961 if (movsl_is_ok(to, from, n))
30962- __copy_user(to, from, n);
30963+ __copy_user(to, from, n, __copyuser_seg, "", "");
30964 else
30965- n = __copy_user_intel((void __user *)to,
30966- (const void *)from, n);
30967+ n = __generic_copy_from_user_intel(to, from, n);
30968 clac();
30969 return n;
30970 }
30971@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
30972 if (n > 64 && cpu_has_xmm2)
30973 n = __copy_user_intel_nocache(to, from, n);
30974 else
30975- __copy_user(to, from, n);
30976+ __copy_user(to, from, n, __copyuser_seg, "", "");
30977 #else
30978- __copy_user(to, from, n);
30979+ __copy_user(to, from, n, __copyuser_seg, "", "");
30980 #endif
30981 clac();
30982 return n;
30983 }
30984 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
30985
30986-/**
30987- * copy_to_user: - Copy a block of data into user space.
30988- * @to: Destination address, in user space.
30989- * @from: Source address, in kernel space.
30990- * @n: Number of bytes to copy.
30991- *
30992- * Context: User context only. This function may sleep.
30993- *
30994- * Copy data from kernel space to user space.
30995- *
30996- * Returns number of bytes that could not be copied.
30997- * On success, this will be zero.
30998- */
30999-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31000+#ifdef CONFIG_PAX_MEMORY_UDEREF
31001+void __set_fs(mm_segment_t x)
31002 {
31003- if (access_ok(VERIFY_WRITE, to, n))
31004- n = __copy_to_user(to, from, n);
31005- return n;
31006+ switch (x.seg) {
31007+ case 0:
31008+ loadsegment(gs, 0);
31009+ break;
31010+ case TASK_SIZE_MAX:
31011+ loadsegment(gs, __USER_DS);
31012+ break;
31013+ case -1UL:
31014+ loadsegment(gs, __KERNEL_DS);
31015+ break;
31016+ default:
31017+ BUG();
31018+ }
31019 }
31020-EXPORT_SYMBOL(_copy_to_user);
31021+EXPORT_SYMBOL(__set_fs);
31022
31023-/**
31024- * copy_from_user: - Copy a block of data from user space.
31025- * @to: Destination address, in kernel space.
31026- * @from: Source address, in user space.
31027- * @n: Number of bytes to copy.
31028- *
31029- * Context: User context only. This function may sleep.
31030- *
31031- * Copy data from user space to kernel space.
31032- *
31033- * Returns number of bytes that could not be copied.
31034- * On success, this will be zero.
31035- *
31036- * If some data could not be copied, this function will pad the copied
31037- * data to the requested size using zero bytes.
31038- */
31039-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31040+void set_fs(mm_segment_t x)
31041 {
31042- if (access_ok(VERIFY_READ, from, n))
31043- n = __copy_from_user(to, from, n);
31044- else
31045- memset(to, 0, n);
31046- return n;
31047+ current_thread_info()->addr_limit = x;
31048+ __set_fs(x);
31049 }
31050-EXPORT_SYMBOL(_copy_from_user);
31051+EXPORT_SYMBOL(set_fs);
31052+#endif
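
Under UDEREF, set_fs() is no longer a bare store to addr_limit: the matching selector has to be loaded into %gs so the per-instruction override lands in the right address range. __set_fs() maps USER_DS (TASK_SIZE_MAX) to __USER_DS, KERNEL_DS (-1UL) to __KERNEL_DS, and a zero limit to the null selector. The usual save/restore idiom is unchanged for callers (sketch):

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* addr_limit = -1UL, %gs = __KERNEL_DS */
	/* user accessors may now legitimately reach kernel addresses */
	set_fs(old_fs);		/* restore limit and %gs together */
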
31053diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31054index c905e89..01ab928 100644
31055--- a/arch/x86/lib/usercopy_64.c
31056+++ b/arch/x86/lib/usercopy_64.c
31057@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31058 might_fault();
31059 /* no memory constraint because it doesn't change any memory gcc knows
31060 about */
31061+ pax_open_userland();
31062 stac();
31063 asm volatile(
31064 " testq %[size8],%[size8]\n"
31065@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31066 _ASM_EXTABLE(0b,3b)
31067 _ASM_EXTABLE(1b,2b)
31068 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31069- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31070+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31071 [zero] "r" (0UL), [eight] "r" (8UL));
31072 clac();
31073+ pax_close_userland();
31074 return size;
31075 }
31076 EXPORT_SYMBOL(__clear_user);
31077@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31078 }
31079 EXPORT_SYMBOL(clear_user);
31080
31081-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31082+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31083 {
31084- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31085- return copy_user_generic((__force void *)to, (__force void *)from, len);
31086- }
31087- return len;
31088+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31089+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31090+ return len;
31091 }
31092 EXPORT_SYMBOL(copy_in_user);
31093
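
amd64 has no spare segment to replay the i386 trick, so UDEREF reaches userland through a shifted alias instead: ____m() rebases a user pointer by pax_user_shadow_base, the same constant the fault-handler hunk further down subtracts back out of kernel-mode fault addresses. A rough approximation of the assumed helper, ignoring the guards the real definition applies:

	/* illustrative only: not the patch's exact definition */
	#define ____m(x)  ((typeof(x))((unsigned long)(x) + pax_user_shadow_base))
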
31094@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31095 * it is not necessary to optimize tail handling.
31096 */
31097 __visible unsigned long
31098-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31099+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31100 {
31101 char c;
31102 unsigned zero_len;
31103
31104+ clac();
31105+ pax_close_userland();
31106 for (; len; --len, to++) {
31107 if (__get_user_nocheck(c, from++, sizeof(char)))
31108 break;
31109@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31110 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31111 if (__put_user_nocheck(c, to++, sizeof(char)))
31112 break;
31113- clac();
31114 return len;
31115 }
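
Moving clac() (and adding pax_close_userland()) to the top of copy_user_handle_tail() closes the wide access window the interrupted bulk copy had opened: the byte-wise tail goes through __get_user_nocheck()/__put_user_nocheck(), each of which opens and closes its own SMAP/UDEREF window. The resulting shape:

	clac();			/* close the caller's SMAP window */
	pax_close_userland();	/* close the caller's UDEREF window */
	/* ... byte-wise copy and zero-fill loops via __get_user_nocheck()/
	 * __put_user_nocheck(), each doing its own stac()/clac() pair ... */
	return len;
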
31116diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31117index 6a19ad9..1c48f9a 100644
31118--- a/arch/x86/mm/Makefile
31119+++ b/arch/x86/mm/Makefile
31120@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
31121 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31122
31123 obj-$(CONFIG_MEMTEST) += memtest.o
31124+
31125+quote:="
31126+obj-$(CONFIG_X86_64) += uderef_64.o
31127+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
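
Kconfig string symbols reach make with their double quotes intact, and those must not leak onto the compiler command line; the quote:=" / $(subst ...) pair above is the usual idiom for stripping them, here so uderef_64.o is built with the register-preserving flags held in CONFIG_ARCH_HWEIGHT_CFLAGS:

	# effect of the subst above (illustrative excerpt of the value):
	#   CONFIG_ARCH_HWEIGHT_CFLAGS = "-fcall-saved-rdi -fcall-saved-rsi"
	#   CFLAGS_uderef_64.o         =  -fcall-saved-rdi -fcall-saved-rsi
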
31128diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31129index 903ec1e..c4166b2 100644
31130--- a/arch/x86/mm/extable.c
31131+++ b/arch/x86/mm/extable.c
31132@@ -6,12 +6,24 @@
31133 static inline unsigned long
31134 ex_insn_addr(const struct exception_table_entry *x)
31135 {
31136- return (unsigned long)&x->insn + x->insn;
31137+ unsigned long reloc = 0;
31138+
31139+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31140+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31141+#endif
31142+
31143+ return (unsigned long)&x->insn + x->insn + reloc;
31144 }
31145 static inline unsigned long
31146 ex_fixup_addr(const struct exception_table_entry *x)
31147 {
31148- return (unsigned long)&x->fixup + x->fixup;
31149+ unsigned long reloc = 0;
31150+
31151+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31152+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31153+#endif
31154+
31155+ return (unsigned long)&x->fixup + x->fixup + reloc;
31156 }
31157
31158 int fixup_exception(struct pt_regs *regs)
31159@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31160 unsigned long new_ip;
31161
31162 #ifdef CONFIG_PNPBIOS
31163- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31164+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31165 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31166 extern u32 pnp_bios_is_utter_crap;
31167 pnp_bios_is_utter_crap = 1;
31168@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31169 i += 4;
31170 p->fixup -= i;
31171 i += 4;
31172+
31173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31174+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31175+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31176+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31177+#endif
31178+
31179 }
31180 }
31181
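
Both extable helpers compensate for KERNEXEC on i386, which moves the kernel's load address: the 32-bit self-relative entries would otherwise resolve ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR bytes away from the real code, so the delta is added at lookup and subtracted again in sort_extable() once the build-time sort has adjusted the raw offsets. Condensed:

	unsigned long reloc = 0;

	#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
	reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;	/* KERNEXEC shift */
	#endif

	/* entries are self-relative: address = &field + field (+ shift) */
	insn_addr  = (unsigned long)&x->insn  + x->insn  + reloc;
	fixup_addr = (unsigned long)&x->fixup + x->fixup + reloc;
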
31182diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31183index 6dea040..31e52ff 100644
31184--- a/arch/x86/mm/fault.c
31185+++ b/arch/x86/mm/fault.c
31186@@ -14,11 +14,18 @@
31187 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31188 #include <linux/prefetch.h> /* prefetchw */
31189 #include <linux/context_tracking.h> /* exception_enter(), ... */
31190+#include <linux/unistd.h>
31191+#include <linux/compiler.h>
31192
31193 #include <asm/traps.h> /* dotraplinkage, ... */
31194 #include <asm/pgalloc.h> /* pgd_*(), ... */
31195 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31196 #include <asm/fixmap.h> /* VSYSCALL_START */
31197+#include <asm/tlbflush.h>
31198+
31199+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31200+#include <asm/stacktrace.h>
31201+#endif
31202
31203 #define CREATE_TRACE_POINTS
31204 #include <asm/trace/exceptions.h>
31205@@ -59,7 +66,7 @@ static inline int __kprobes kprobes_fault(struct pt_regs *regs)
31206 int ret = 0;
31207
31208 /* kprobe_running() needs smp_processor_id() */
31209- if (kprobes_built_in() && !user_mode_vm(regs)) {
31210+ if (kprobes_built_in() && !user_mode(regs)) {
31211 preempt_disable();
31212 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31213 ret = 1;
31214@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31215 return !instr_lo || (instr_lo>>1) == 1;
31216 case 0x00:
31217 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31218- if (probe_kernel_address(instr, opcode))
31219+ if (user_mode(regs)) {
31220+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31221+ return 0;
31222+ } else if (probe_kernel_address(instr, opcode))
31223 return 0;
31224
31225 *prefetch = (instr_lo == 0xF) &&
31226@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31227 while (instr < max_instr) {
31228 unsigned char opcode;
31229
31230- if (probe_kernel_address(instr, opcode))
31231+ if (user_mode(regs)) {
31232+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31233+ break;
31234+ } else if (probe_kernel_address(instr, opcode))
31235 break;
31236
31237 instr++;
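
Both opcode-decode sites above get the same treatment: when the fault came from user mode the instruction bytes live in userland, and under UDEREF the plain kernel dereference done by probe_kernel_address() would itself fault, so the bytes are pulled through the user accessors instead. The shared pattern:

	if (user_mode(regs)) {
		/* user IP: fetch through the (UDEREF-aware) user accessors */
		if (__copy_from_user_inatomic(&opcode,
		    (unsigned char __force_user *)instr, 1))
			return 0;
	} else if (probe_kernel_address(instr, opcode))	/* kernel IP: as before */
		return 0;
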
31238@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31239 force_sig_info(si_signo, &info, tsk);
31240 }
31241
31242+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31243+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31244+#endif
31245+
31246+#ifdef CONFIG_PAX_EMUTRAMP
31247+static int pax_handle_fetch_fault(struct pt_regs *regs);
31248+#endif
31249+
31250+#ifdef CONFIG_PAX_PAGEEXEC
31251+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31252+{
31253+ pgd_t *pgd;
31254+ pud_t *pud;
31255+ pmd_t *pmd;
31256+
31257+ pgd = pgd_offset(mm, address);
31258+ if (!pgd_present(*pgd))
31259+ return NULL;
31260+ pud = pud_offset(pgd, address);
31261+ if (!pud_present(*pud))
31262+ return NULL;
31263+ pmd = pmd_offset(pud, address);
31264+ if (!pmd_present(*pmd))
31265+ return NULL;
31266+ return pmd;
31267+}
31268+#endif
31269+
31270 DEFINE_SPINLOCK(pgd_lock);
31271 LIST_HEAD(pgd_list);
31272
31273@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31274 for (address = VMALLOC_START & PMD_MASK;
31275 address >= TASK_SIZE && address < FIXADDR_TOP;
31276 address += PMD_SIZE) {
31277+
31278+#ifdef CONFIG_PAX_PER_CPU_PGD
31279+ unsigned long cpu;
31280+#else
31281 struct page *page;
31282+#endif
31283
31284 spin_lock(&pgd_lock);
31285+
31286+#ifdef CONFIG_PAX_PER_CPU_PGD
31287+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31288+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31289+ pmd_t *ret;
31290+
31291+ ret = vmalloc_sync_one(pgd, address);
31292+ if (!ret)
31293+ break;
31294+ pgd = get_cpu_pgd(cpu, kernel);
31295+#else
31296 list_for_each_entry(page, &pgd_list, lru) {
31297+ pgd_t *pgd;
31298 spinlock_t *pgt_lock;
31299 pmd_t *ret;
31300
31301@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31302 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31303
31304 spin_lock(pgt_lock);
31305- ret = vmalloc_sync_one(page_address(page), address);
31306+ pgd = page_address(page);
31307+#endif
31308+
31309+ ret = vmalloc_sync_one(pgd, address);
31310+
31311+#ifndef CONFIG_PAX_PER_CPU_PGD
31312 spin_unlock(pgt_lock);
31313+#endif
31314
31315 if (!ret)
31316 break;
31317@@ -281,6 +345,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
31318 * an interrupt in the middle of a task switch..
31319 */
31320 pgd_paddr = read_cr3();
31321+
31322+#ifdef CONFIG_PAX_PER_CPU_PGD
31323+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31324+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31325+#endif
31326+
31327 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31328 if (!pmd_k)
31329 return -1;
31330@@ -376,11 +446,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
31331 * happen within a race in page table update. In the later
31332 * case just flush:
31333 */
31334- pgd = pgd_offset(current->active_mm, address);
31335+
31336 pgd_ref = pgd_offset_k(address);
31337 if (pgd_none(*pgd_ref))
31338 return -1;
31339
31340+#ifdef CONFIG_PAX_PER_CPU_PGD
31341+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31342+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31343+ if (pgd_none(*pgd)) {
31344+ set_pgd(pgd, *pgd_ref);
31345+ arch_flush_lazy_mmu_mode();
31346+ } else {
31347+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31348+ }
31349+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31350+#else
31351+ pgd = pgd_offset(current->active_mm, address);
31352+#endif
31353+
31354 if (pgd_none(*pgd)) {
31355 set_pgd(pgd, *pgd_ref);
31356 arch_flush_lazy_mmu_mode();
31357@@ -546,7 +630,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31358 static int is_errata100(struct pt_regs *regs, unsigned long address)
31359 {
31360 #ifdef CONFIG_X86_64
31361- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31362+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31363 return 1;
31364 #endif
31365 return 0;
31366@@ -573,7 +657,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31367 }
31368
31369 static const char nx_warning[] = KERN_CRIT
31370-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31371+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31372
31373 static void
31374 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31375@@ -582,15 +666,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31376 if (!oops_may_print())
31377 return;
31378
31379- if (error_code & PF_INSTR) {
31380+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31381 unsigned int level;
31382
31383 pte_t *pte = lookup_address(address, &level);
31384
31385 if (pte && pte_present(*pte) && !pte_exec(*pte))
31386- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31387+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31388 }
31389
31390+#ifdef CONFIG_PAX_KERNEXEC
31391+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31392+ if (current->signal->curr_ip)
31393+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31394+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31395+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31396+ else
31397+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
31398+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
31399+ }
31400+#endif
31401+
31402 printk(KERN_ALERT "BUG: unable to handle kernel ");
31403 if (address < PAGE_SIZE)
31404 printk(KERN_CONT "NULL pointer dereference");
31405@@ -771,6 +867,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
31406 return;
31407 }
31408 #endif
31409+
31410+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31411+ if (pax_is_fetch_fault(regs, error_code, address)) {
31412+
31413+#ifdef CONFIG_PAX_EMUTRAMP
31414+ switch (pax_handle_fetch_fault(regs)) {
31415+ case 2:
31416+ return;
31417+ }
31418+#endif
31419+
31420+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31421+ do_group_exit(SIGKILL);
31422+ }
31423+#endif
31424+
31425 /* Kernel addresses are always protection faults: */
31426 if (address >= TASK_SIZE)
31427 error_code |= PF_PROT;
31428@@ -856,7 +968,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
31429 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
31430 printk(KERN_ERR
31431 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
31432- tsk->comm, tsk->pid, address);
31433+ tsk->comm, task_pid_nr(tsk), address);
31434 code = BUS_MCEERR_AR;
31435 }
31436 #endif
31437@@ -910,6 +1022,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
31438 return 1;
31439 }
31440
31441+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
31442+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
31443+{
31444+ pte_t *pte;
31445+ pmd_t *pmd;
31446+ spinlock_t *ptl;
31447+ unsigned char pte_mask;
31448+
31449+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
31450+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
31451+ return 0;
31452+
31453+ /* PaX: it's our fault, let's handle it if we can */
31454+
31455+ /* PaX: take a look at read faults before acquiring any locks */
31456+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
31457+ /* instruction fetch attempt from a protected page in user mode */
31458+ up_read(&mm->mmap_sem);
31459+
31460+#ifdef CONFIG_PAX_EMUTRAMP
31461+ switch (pax_handle_fetch_fault(regs)) {
31462+ case 2:
31463+ return 1;
31464+ }
31465+#endif
31466+
31467+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
31468+ do_group_exit(SIGKILL);
31469+ }
31470+
31471+ pmd = pax_get_pmd(mm, address);
31472+ if (unlikely(!pmd))
31473+ return 0;
31474+
31475+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
31476+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
31477+ pte_unmap_unlock(pte, ptl);
31478+ return 0;
31479+ }
31480+
31481+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
31482+ /* write attempt to a protected page in user mode */
31483+ pte_unmap_unlock(pte, ptl);
31484+ return 0;
31485+ }
31486+
31487+#ifdef CONFIG_SMP
31488+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
31489+#else
31490+ if (likely(address > get_limit(regs->cs)))
31491+#endif
31492+ {
31493+ set_pte(pte, pte_mkread(*pte));
31494+ __flush_tlb_one(address);
31495+ pte_unmap_unlock(pte, ptl);
31496+ up_read(&mm->mmap_sem);
31497+ return 1;
31498+ }
31499+
31500+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
31501+
31502+ /*
31503+ * PaX: fill DTLB with user rights and retry
31504+ */
31505+ __asm__ __volatile__ (
31506+ "orb %2,(%1)\n"
31507+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
31508+/*
31509+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
31510+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
31511+ * page fault when examined during a TLB load attempt. this is true not only
31512+ * for PTEs holding a non-present entry but also present entries that will
31513+ * raise a page fault (such as those set up by PaX, or the copy-on-write
31514+ * mechanism). in effect it means that we do *not* need to flush the TLBs
31515+ * for our target pages since their PTEs are simply not in the TLBs at all.
31516+
31517+ * the best thing about omitting it is that we gain around 15-20% speed in the
31518+ * fast path of the page fault handler and can get rid of tracing since we
31519+ * can no longer flush unintended entries.
31520+ */
31521+ "invlpg (%0)\n"
31522+#endif
31523+ __copyuser_seg"testb $0,(%0)\n"
31524+ "xorb %3,(%1)\n"
31525+ :
31526+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
31527+ : "memory", "cc");
31528+ pte_unmap_unlock(pte, ptl);
31529+ up_read(&mm->mmap_sem);
31530+ return 1;
31531+}
31532+#endif
31533+
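
The inline asm closing pax_handle_pageexec_fault() is the core of non-PAE PAGEEXEC: user pages are kept supervisor-only in their PTEs so instruction fetches always fault, and a legitimate data access is satisfied by briefly granting user rights, touching the page through the user segment so only the data TLB caches the translation, then revoking the rights. The same sequence, step-annotated:

	__asm__ __volatile__ (
		"orb %2,(%1)\n"			/* 1: set USER (+ACCESSED, +DIRTY on
						      write) in the PTE */
		__copyuser_seg"testb $0,(%0)\n"	/* 2: touch via the user segment; the
						      DTLB caches the translation */
		"xorb %3,(%1)\n"		/* 3: clear USER again; the ITLB never
						      saw it, so fetches keep faulting */
		:
		: "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
		: "memory", "cc");
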
31534 /*
31535 * Handle a spurious fault caused by a stale TLB entry.
31536 *
31537@@ -976,6 +1181,9 @@ int show_unhandled_signals = 1;
31538 static inline int
31539 access_error(unsigned long error_code, struct vm_area_struct *vma)
31540 {
31541+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
31542+ return 1;
31543+
31544 if (error_code & PF_WRITE) {
31545 /* write, present and write, not present: */
31546 if (unlikely(!(vma->vm_flags & VM_WRITE)))
31547@@ -1010,7 +1218,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
31548 if (error_code & PF_USER)
31549 return false;
31550
31551- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
31552+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
31553 return false;
31554
31555 return true;
31556@@ -1037,6 +1245,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
31557 /* Get the faulting address: */
31558 address = read_cr2();
31559
31560+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31561+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
31562+ if (!search_exception_tables(regs->ip)) {
31563+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
31564+ bad_area_nosemaphore(regs, error_code, address);
31565+ return;
31566+ }
31567+ if (address < pax_user_shadow_base) {
31568+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
31569+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
31570+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
31571+ } else
31572+ address -= pax_user_shadow_base;
31573+ }
31574+#endif
31575+
31576 /*
31577 * Detect and handle instructions that would cause a page fault for
31578 * both a tracked kernel page and a userspace page.
31579@@ -1114,7 +1338,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
31580 * User-mode registers count as a user access even for any
31581 * potential system fault or CPU buglet:
31582 */
31583- if (user_mode_vm(regs)) {
31584+ if (user_mode(regs)) {
31585 local_irq_enable();
31586 error_code |= PF_USER;
31587 flags |= FAULT_FLAG_USER;
31588@@ -1161,6 +1385,11 @@ retry:
31589 might_sleep();
31590 }
31591
31592+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
31593+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
31594+ return;
31595+#endif
31596+
31597 vma = find_vma(mm, address);
31598 if (unlikely(!vma)) {
31599 bad_area(regs, error_code, address);
31600@@ -1172,18 +1401,24 @@ retry:
31601 bad_area(regs, error_code, address);
31602 return;
31603 }
31604- if (error_code & PF_USER) {
31605- /*
31606- * Accessing the stack below %sp is always a bug.
31607- * The large cushion allows instructions like enter
31608- * and pusha to work. ("enter $65535, $31" pushes
31609- * 32 pointers and then decrements %sp by 65535.)
31610- */
31611- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
31612- bad_area(regs, error_code, address);
31613- return;
31614- }
31615+ /*
31616+ * Accessing the stack below %sp is always a bug.
31617+ * The large cushion allows instructions like enter
31618+ * and pusha to work. ("enter $65535, $31" pushes
31619+ * 32 pointers and then decrements %sp by 65535.)
31620+ */
31621+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
31622+ bad_area(regs, error_code, address);
31623+ return;
31624 }
31625+
31626+#ifdef CONFIG_PAX_SEGMEXEC
31627+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
31628+ bad_area(regs, error_code, address);
31629+ return;
31630+ }
31631+#endif
31632+
31633 if (unlikely(expand_stack(vma, address))) {
31634 bad_area(regs, error_code, address);
31635 return;
31636@@ -1277,3 +1512,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
31637 __do_page_fault(regs, error_code);
31638 exception_exit(prev_state);
31639 }
31640+
31641+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31642+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
31643+{
31644+ struct mm_struct *mm = current->mm;
31645+ unsigned long ip = regs->ip;
31646+
31647+ if (v8086_mode(regs))
31648+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
31649+
31650+#ifdef CONFIG_PAX_PAGEEXEC
31651+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
31652+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
31653+ return true;
31654+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
31655+ return true;
31656+ return false;
31657+ }
31658+#endif
31659+
31660+#ifdef CONFIG_PAX_SEGMEXEC
31661+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
31662+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
31663+ return true;
31664+ return false;
31665+ }
31666+#endif
31667+
31668+ return false;
31669+}
31670+#endif
31671+
31672+#ifdef CONFIG_PAX_EMUTRAMP
31673+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
31674+{
31675+ int err;
31676+
31677+ do { /* PaX: libffi trampoline emulation */
31678+ unsigned char mov, jmp;
31679+ unsigned int addr1, addr2;
31680+
31681+#ifdef CONFIG_X86_64
31682+ if ((regs->ip + 9) >> 32)
31683+ break;
31684+#endif
31685+
31686+ err = get_user(mov, (unsigned char __user *)regs->ip);
31687+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31688+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31689+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31690+
31691+ if (err)
31692+ break;
31693+
31694+ if (mov == 0xB8 && jmp == 0xE9) {
31695+ regs->ax = addr1;
31696+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31697+ return 2;
31698+ }
31699+ } while (0);
31700+
31701+ do { /* PaX: gcc trampoline emulation #1 */
31702+ unsigned char mov1, mov2;
31703+ unsigned short jmp;
31704+ unsigned int addr1, addr2;
31705+
31706+#ifdef CONFIG_X86_64
31707+ if ((regs->ip + 11) >> 32)
31708+ break;
31709+#endif
31710+
31711+ err = get_user(mov1, (unsigned char __user *)regs->ip);
31712+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31713+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
31714+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31715+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
31716+
31717+ if (err)
31718+ break;
31719+
31720+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
31721+ regs->cx = addr1;
31722+ regs->ax = addr2;
31723+ regs->ip = addr2;
31724+ return 2;
31725+ }
31726+ } while (0);
31727+
31728+ do { /* PaX: gcc trampoline emulation #2 */
31729+ unsigned char mov, jmp;
31730+ unsigned int addr1, addr2;
31731+
31732+#ifdef CONFIG_X86_64
31733+ if ((regs->ip + 9) >> 32)
31734+ break;
31735+#endif
31736+
31737+ err = get_user(mov, (unsigned char __user *)regs->ip);
31738+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31739+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31740+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31741+
31742+ if (err)
31743+ break;
31744+
31745+ if (mov == 0xB9 && jmp == 0xE9) {
31746+ regs->cx = addr1;
31747+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31748+ return 2;
31749+ }
31750+ } while (0);
31751+
31752+ return 1; /* PaX in action */
31753+}
31754+
31755+#ifdef CONFIG_X86_64
31756+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
31757+{
31758+ int err;
31759+
31760+ do { /* PaX: libffi trampoline emulation */
31761+ unsigned short mov1, mov2, jmp1;
31762+ unsigned char stcclc, jmp2;
31763+ unsigned long addr1, addr2;
31764+
31765+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31766+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31767+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31768+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31769+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
31770+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
31771+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
31772+
31773+ if (err)
31774+ break;
31775+
31776+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31777+ regs->r11 = addr1;
31778+ regs->r10 = addr2;
31779+ if (stcclc == 0xF8)
31780+ regs->flags &= ~X86_EFLAGS_CF;
31781+ else
31782+ regs->flags |= X86_EFLAGS_CF;
31783+ regs->ip = addr1;
31784+ return 2;
31785+ }
31786+ } while (0);
31787+
31788+ do { /* PaX: gcc trampoline emulation #1 */
31789+ unsigned short mov1, mov2, jmp1;
31790+ unsigned char jmp2;
31791+ unsigned int addr1;
31792+ unsigned long addr2;
31793+
31794+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31795+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
31796+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
31797+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
31798+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
31799+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
31800+
31801+ if (err)
31802+ break;
31803+
31804+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31805+ regs->r11 = addr1;
31806+ regs->r10 = addr2;
31807+ regs->ip = addr1;
31808+ return 2;
31809+ }
31810+ } while (0);
31811+
31812+ do { /* PaX: gcc trampoline emulation #2 */
31813+ unsigned short mov1, mov2, jmp1;
31814+ unsigned char jmp2;
31815+ unsigned long addr1, addr2;
31816+
31817+ err = get_user(mov1, (unsigned short __user *)regs->ip);
31818+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31819+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31820+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31821+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
31822+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
31823+
31824+ if (err)
31825+ break;
31826+
31827+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31828+ regs->r11 = addr1;
31829+ regs->r10 = addr2;
31830+ regs->ip = addr1;
31831+ return 2;
31832+ }
31833+ } while (0);
31834+
31835+ return 1; /* PaX in action */
31836+}
31837+#endif
31838+
31839+/*
31840+ * PaX: decide what to do with offenders (regs->ip = fault address)
31841+ *
31842+ * returns 1 when task should be killed
31843+ * 2 when gcc trampoline was detected
31844+ */
31845+static int pax_handle_fetch_fault(struct pt_regs *regs)
31846+{
31847+ if (v8086_mode(regs))
31848+ return 1;
31849+
31850+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
31851+ return 1;
31852+
31853+#ifdef CONFIG_X86_32
31854+ return pax_handle_fetch_fault_32(regs);
31855+#else
31856+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
31857+ return pax_handle_fetch_fault_32(regs);
31858+ else
31859+ return pax_handle_fetch_fault_64(regs);
31860+#endif
31861+}
31862+#endif
31863+
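
Each do-while block above pattern-matches one literal instruction sequence that a runtime emits on the stack; EMUTRAMP performs its effect in software rather than letting a non-executable page be executed. For instance, "gcc trampoline emulation #1" on i386 matches this 12-byte layout:

	/*	offset	bytes		instruction
	 *	ip+0	b9 <addr1:4>	movl $addr1, %ecx	; static chain
	 *	ip+5	b8 <addr2:4>	movl $addr2, %eax	; nested function
	 *	ip+10	ff e0		jmp  *%eax
	 *
	 * on a match the handler transplants the effect into the register
	 * file instead: cx = addr1, ax = addr2, ip = addr2 (return code 2).
	 */
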
31864+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31865+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
31866+{
31867+ long i;
31868+
31869+ printk(KERN_ERR "PAX: bytes at PC: ");
31870+ for (i = 0; i < 20; i++) {
31871+ unsigned char c;
31872+ if (get_user(c, (unsigned char __force_user *)pc+i))
31873+ printk(KERN_CONT "?? ");
31874+ else
31875+ printk(KERN_CONT "%02x ", c);
31876+ }
31877+ printk("\n");
31878+
31879+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
31880+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
31881+ unsigned long c;
31882+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
31883+#ifdef CONFIG_X86_32
31884+ printk(KERN_CONT "???????? ");
31885+#else
31886+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
31887+ printk(KERN_CONT "???????? ???????? ");
31888+ else
31889+ printk(KERN_CONT "???????????????? ");
31890+#endif
31891+ } else {
31892+#ifdef CONFIG_X86_64
31893+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
31894+ printk(KERN_CONT "%08x ", (unsigned int)c);
31895+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
31896+ } else
31897+#endif
31898+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
31899+ }
31900+ }
31901+ printk("\n");
31902+}
31903+#endif
31904+
31905+/**
31906+ * probe_kernel_write(): safely attempt to write to a location
31907+ * @dst: address to write to
31908+ * @src: pointer to the data that shall be written
31909+ * @size: size of the data chunk
31910+ *
31911+ * Safely write to address @dst from the buffer at @src. If a kernel fault
31912+ * happens, handle that and return -EFAULT.
31913+ */
31914+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
31915+{
31916+ long ret;
31917+ mm_segment_t old_fs = get_fs();
31918+
31919+ set_fs(KERNEL_DS);
31920+ pagefault_disable();
31921+ pax_open_kernel();
31922+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
31923+ pax_close_kernel();
31924+ pagefault_enable();
31925+ set_fs(old_fs);
31926+
31927+ return ret ? -EFAULT : 0;
31928+}
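
probe_kernel_write() ties together pieces added earlier in the patch: KERNEL_DS via the new set_fs() (so the UDEREF segment permits kernel targets), pagefault_disable() so a bad address yields -EFAULT instead of an oops, and pax_open_kernel()/pax_close_kernel() so the store may land in memory KERNEXEC keeps read-only. Typical use (a sketch; addr stands in for some kernel text address):

	unsigned char nop = 0x90;

	if (probe_kernel_write(addr, &nop, 1))	/* 0 on success, -EFAULT on fault */
		pr_err("text patching failed\n");
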
31929diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
31930index 0596e8e..5626789 100644
31931--- a/arch/x86/mm/gup.c
31932+++ b/arch/x86/mm/gup.c
31933@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
31934 addr = start;
31935 len = (unsigned long) nr_pages << PAGE_SHIFT;
31936 end = start + len;
31937- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31938+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
31939 (void __user *)start, len)))
31940 return 0;
31941
31942@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
31943 goto slow_irqon;
31944 #endif
31945
31946+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
31947+ (void __user *)start, len)))
31948+ return 0;
31949+
31950 /*
31951 * XXX: batch / limit 'nr', to avoid large irq off latency
31952 * needs some instrumenting to determine the common sizes used by
31953diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
31954index 4500142..53a363c 100644
31955--- a/arch/x86/mm/highmem_32.c
31956+++ b/arch/x86/mm/highmem_32.c
31957@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
31958 idx = type + KM_TYPE_NR*smp_processor_id();
31959 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31960 BUG_ON(!pte_none(*(kmap_pte-idx)));
31961+
31962+ pax_open_kernel();
31963 set_pte(kmap_pte-idx, mk_pte(page, prot));
31964+ pax_close_kernel();
31965+
31966 arch_flush_lazy_mmu_mode();
31967
31968 return (void *)vaddr;
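
kmap_atomic_prot() writes a kernel PTE, and KERNEXEC write-protects the page tables against the kernel itself; pax_open_kernel()/pax_close_kernel() bracket the single set_pte() so the write is permitted for exactly that window (on native x86 this is assumed to toggle CR0.WP). The idiom recurs throughout the patch:

	pax_open_kernel();	/* lift write protection (e.g. CR0.WP) */
	set_pte(kmap_pte - idx, mk_pte(page, prot));
	pax_close_kernel();	/* restore it immediately */
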
31969diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
31970index 9d980d8..6bbfacb 100644
31971--- a/arch/x86/mm/hugetlbpage.c
31972+++ b/arch/x86/mm/hugetlbpage.c
31973@@ -92,23 +92,30 @@ int pmd_huge_support(void)
31974 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
31975 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
31976 unsigned long addr, unsigned long len,
31977- unsigned long pgoff, unsigned long flags)
31978+ unsigned long pgoff, unsigned long flags, unsigned long offset)
31979 {
31980 struct hstate *h = hstate_file(file);
31981 struct vm_unmapped_area_info info;
31982-
31983+
31984 info.flags = 0;
31985 info.length = len;
31986 info.low_limit = TASK_UNMAPPED_BASE;
31987+
31988+#ifdef CONFIG_PAX_RANDMMAP
31989+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
31990+ info.low_limit += current->mm->delta_mmap;
31991+#endif
31992+
31993 info.high_limit = TASK_SIZE;
31994 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
31995 info.align_offset = 0;
31996+ info.threadstack_offset = offset;
31997 return vm_unmapped_area(&info);
31998 }
31999
32000 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32001 unsigned long addr0, unsigned long len,
32002- unsigned long pgoff, unsigned long flags)
32003+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32004 {
32005 struct hstate *h = hstate_file(file);
32006 struct vm_unmapped_area_info info;
32007@@ -120,6 +127,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32008 info.high_limit = current->mm->mmap_base;
32009 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32010 info.align_offset = 0;
32011+ info.threadstack_offset = offset;
32012 addr = vm_unmapped_area(&info);
32013
32014 /*
32015@@ -132,6 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32016 VM_BUG_ON(addr != -ENOMEM);
32017 info.flags = 0;
32018 info.low_limit = TASK_UNMAPPED_BASE;
32019+
32020+#ifdef CONFIG_PAX_RANDMMAP
32021+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32022+ info.low_limit += current->mm->delta_mmap;
32023+#endif
32024+
32025 info.high_limit = TASK_SIZE;
32026 addr = vm_unmapped_area(&info);
32027 }
32028@@ -146,10 +160,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32029 struct hstate *h = hstate_file(file);
32030 struct mm_struct *mm = current->mm;
32031 struct vm_area_struct *vma;
32032+ unsigned long pax_task_size = TASK_SIZE;
32033+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32034
32035 if (len & ~huge_page_mask(h))
32036 return -EINVAL;
32037- if (len > TASK_SIZE)
32038+
32039+#ifdef CONFIG_PAX_SEGMEXEC
32040+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32041+ pax_task_size = SEGMEXEC_TASK_SIZE;
32042+#endif
32043+
32044+ pax_task_size -= PAGE_SIZE;
32045+
32046+ if (len > pax_task_size)
32047 return -ENOMEM;
32048
32049 if (flags & MAP_FIXED) {
32050@@ -158,19 +182,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32051 return addr;
32052 }
32053
32054+#ifdef CONFIG_PAX_RANDMMAP
32055+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32056+#endif
32057+
32058 if (addr) {
32059 addr = ALIGN(addr, huge_page_size(h));
32060 vma = find_vma(mm, addr);
32061- if (TASK_SIZE - len >= addr &&
32062- (!vma || addr + len <= vma->vm_start))
32063+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32064 return addr;
32065 }
32066 if (mm->get_unmapped_area == arch_get_unmapped_area)
32067 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32068- pgoff, flags);
32069+ pgoff, flags, offset);
32070 else
32071 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32072- pgoff, flags);
32073+ pgoff, flags, offset);
32074 }
32075
32076 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
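
The hugetlb paths are brought in line with the regular mmap paths: under RANDMMAP the bottom-up search floor is shifted by the per-mm delta, under SEGMEXEC the usable task size is halved (the upper half holds the executable mirror), and candidates must pass check_heap_stack_gap() rather than the bare vma->vm_start comparison. The effective floor, restated:

	info.low_limit = TASK_UNMAPPED_BASE;
	#ifdef CONFIG_PAX_RANDMMAP
		if (current->mm->pax_flags & MF_PAX_RANDMMAP)
			info.low_limit += current->mm->delta_mmap;	/* randomized base */
	#endif
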
32077diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32078index f971306..e83e0f6 100644
32079--- a/arch/x86/mm/init.c
32080+++ b/arch/x86/mm/init.c
32081@@ -4,6 +4,7 @@
32082 #include <linux/swap.h>
32083 #include <linux/memblock.h>
32084 #include <linux/bootmem.h> /* for max_low_pfn */
32085+#include <linux/tboot.h>
32086
32087 #include <asm/cacheflush.h>
32088 #include <asm/e820.h>
32089@@ -17,6 +18,8 @@
32090 #include <asm/proto.h>
32091 #include <asm/dma.h> /* for MAX_DMA_PFN */
32092 #include <asm/microcode.h>
32093+#include <asm/desc.h>
32094+#include <asm/bios_ebda.h>
32095
32096 #include "mm_internal.h"
32097
32098@@ -563,7 +566,18 @@ void __init init_mem_mapping(void)
32099 early_ioremap_page_table_range_init();
32100 #endif
32101
32102+#ifdef CONFIG_PAX_PER_CPU_PGD
32103+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32104+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32105+ KERNEL_PGD_PTRS);
32106+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32107+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32108+ KERNEL_PGD_PTRS);
32109+ load_cr3(get_cpu_pgd(0, kernel));
32110+#else
32111 load_cr3(swapper_pg_dir);
32112+#endif
32113+
32114 __flush_tlb_all();
32115
32116 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32117@@ -579,10 +593,40 @@ void __init init_mem_mapping(void)
32118 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32119 * mmio resources as well as potential bios/acpi data regions.
32120 */
32121+
32122+#ifdef CONFIG_GRKERNSEC_KMEM
32123+static unsigned int ebda_start __read_only;
32124+static unsigned int ebda_end __read_only;
32125+#endif
32126+
32127 int devmem_is_allowed(unsigned long pagenr)
32128 {
32129- if (pagenr < 256)
32130+#ifdef CONFIG_GRKERNSEC_KMEM
32131+ /* allow BDA */
32132+ if (!pagenr)
32133 return 1;
32134+ /* allow EBDA */
32135+ if (pagenr >= ebda_start && pagenr < ebda_end)
32136+ return 1;
32137+ /* if tboot is in use, allow access to its hardcoded serial log range */
32138+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32139+ return 1;
32140+#else
32141+ if (!pagenr)
32142+ return 1;
32143+#ifdef CONFIG_VM86
32144+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32145+ return 1;
32146+#endif
32147+#endif
32148+
32149+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32150+ return 1;
32151+#ifdef CONFIG_GRKERNSEC_KMEM
32152+ /* throw out everything else below 1MB */
32153+ if (pagenr <= 256)
32154+ return 0;
32155+#endif
32156 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32157 return 0;
32158 if (!page_is_ram(pagenr))
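
Under GRKERNSEC_KMEM the /dev/mem policy for low memory flips from "everything below 1 MB" to an explicit allowlist: the BIOS data area, the EBDA, the tboot serial-log window and the ISA hole; everything else under 1 MB is refused. In 4 KiB page numbers:

	/*	region		pages (hex)
	 *	BDA		0x00
	 *	EBDA		ebda_start .. ebda_end (probed; see gr_init_ebda() below)
	 *	tboot log	0x60000>>12 = 0x60  ..  0x68000>>12 = 0x68 (exclusive)
	 *	ISA hole	0xa0000>>12 = 0xa0  ..  0x100000>>12 = 0x100 (exclusive)
	 */
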
32159@@ -628,8 +672,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32160 #endif
32161 }
32162
32163+#ifdef CONFIG_GRKERNSEC_KMEM
32164+static inline void gr_init_ebda(void)
32165+{
32166+ unsigned int ebda_addr;
32167+ unsigned int ebda_size = 0;
32168+
32169+ ebda_addr = get_bios_ebda();
32170+ if (ebda_addr) {
32171+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32172+ ebda_size <<= 10;
32173+ }
32174+ if (ebda_addr && ebda_size) {
32175+ ebda_start = ebda_addr >> PAGE_SHIFT;
32176+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32177+ } else {
32178+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32179+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32180+ }
32181+}
32182+#else
32183+static inline void gr_init_ebda(void) { }
32184+#endif
32185+
32186 void free_initmem(void)
32187 {
32188+#ifdef CONFIG_PAX_KERNEXEC
32189+#ifdef CONFIG_X86_32
32190+ /* PaX: limit KERNEL_CS to actual size */
32191+ unsigned long addr, limit;
32192+ struct desc_struct d;
32193+ int cpu;
32194+#else
32195+ pgd_t *pgd;
32196+ pud_t *pud;
32197+ pmd_t *pmd;
32198+ unsigned long addr, end;
32199+#endif
32200+#endif
32201+
32202+ gr_init_ebda();
32203+
32204+#ifdef CONFIG_PAX_KERNEXEC
32205+#ifdef CONFIG_X86_32
32206+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32207+ limit = (limit - 1UL) >> PAGE_SHIFT;
32208+
32209+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32210+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32211+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32212+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32213+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32214+ }
32215+
32216+ /* PaX: make KERNEL_CS read-only */
32217+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32218+ if (!paravirt_enabled())
32219+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32220+/*
32221+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32222+ pgd = pgd_offset_k(addr);
32223+ pud = pud_offset(pgd, addr);
32224+ pmd = pmd_offset(pud, addr);
32225+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32226+ }
32227+*/
32228+#ifdef CONFIG_X86_PAE
32229+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32230+/*
32231+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32232+ pgd = pgd_offset_k(addr);
32233+ pud = pud_offset(pgd, addr);
32234+ pmd = pmd_offset(pud, addr);
32235+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32236+ }
32237+*/
32238+#endif
32239+
32240+#ifdef CONFIG_MODULES
32241+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32242+#endif
32243+
32244+#else
32245+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32246+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32247+ pgd = pgd_offset_k(addr);
32248+ pud = pud_offset(pgd, addr);
32249+ pmd = pmd_offset(pud, addr);
32250+ if (!pmd_present(*pmd))
32251+ continue;
32252+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32253+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32254+ else
32255+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32256+ }
32257+
32258+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32259+ end = addr + KERNEL_IMAGE_SIZE;
32260+ for (; addr < end; addr += PMD_SIZE) {
32261+ pgd = pgd_offset_k(addr);
32262+ pud = pud_offset(pgd, addr);
32263+ pmd = pmd_offset(pud, addr);
32264+ if (!pmd_present(*pmd))
32265+ continue;
32266+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32267+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32268+ }
32269+#endif
32270+
32271+ flush_tlb_all();
32272+#endif
32273+
32274 free_init_pages("unused kernel",
32275 (unsigned long)(&__init_begin),
32276 (unsigned long)(&__init_end));
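
free_initmem() is where KERNEXEC locks the final layout in: on i386 the limit of KERNEL_CS (and of the added KERNEXEC alias selector) is shrunk so nothing past _etext is even fetchable, with .text additionally set read-only; on amd64 the kernel-image PMDs are walked, text and rodata made read-only, everything else non-executable. The per-PMD rule on amd64, in miniature:

	if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));	/* text/rodata: read-only */
	else
		set_pmd(pmd, __pmd(pmd_val(*pmd) |
				   (_PAGE_NX & __supported_pte_mask)));	/* the rest: NX */
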
32277diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32278index 4287f1f..3b99c71 100644
32279--- a/arch/x86/mm/init_32.c
32280+++ b/arch/x86/mm/init_32.c
32281@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32282 bool __read_mostly __vmalloc_start_set = false;
32283
32284 /*
32285- * Creates a middle page table and puts a pointer to it in the
32286- * given global directory entry. This only returns the gd entry
32287- * in non-PAE compilation mode, since the middle layer is folded.
32288- */
32289-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32290-{
32291- pud_t *pud;
32292- pmd_t *pmd_table;
32293-
32294-#ifdef CONFIG_X86_PAE
32295- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32296- pmd_table = (pmd_t *)alloc_low_page();
32297- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32298- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32299- pud = pud_offset(pgd, 0);
32300- BUG_ON(pmd_table != pmd_offset(pud, 0));
32301-
32302- return pmd_table;
32303- }
32304-#endif
32305- pud = pud_offset(pgd, 0);
32306- pmd_table = pmd_offset(pud, 0);
32307-
32308- return pmd_table;
32309-}
32310-
32311-/*
32312 * Create a page table and place a pointer to it in a middle page
32313 * directory entry:
32314 */
32315@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32316 pte_t *page_table = (pte_t *)alloc_low_page();
32317
32318 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32319+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32320+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32321+#else
32322 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32323+#endif
32324 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32325 }
32326
32327 return pte_offset_kernel(pmd, 0);
32328 }
32329
32330+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32331+{
32332+ pud_t *pud;
32333+ pmd_t *pmd_table;
32334+
32335+ pud = pud_offset(pgd, 0);
32336+ pmd_table = pmd_offset(pud, 0);
32337+
32338+ return pmd_table;
32339+}
32340+
32341 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32342 {
32343 int pgd_idx = pgd_index(vaddr);
32344@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32345 int pgd_idx, pmd_idx;
32346 unsigned long vaddr;
32347 pgd_t *pgd;
32348+ pud_t *pud;
32349 pmd_t *pmd;
32350 pte_t *pte = NULL;
32351 unsigned long count = page_table_range_init_count(start, end);
32352@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32353 pgd = pgd_base + pgd_idx;
32354
32355 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32356- pmd = one_md_table_init(pgd);
32357- pmd = pmd + pmd_index(vaddr);
32358+ pud = pud_offset(pgd, vaddr);
32359+ pmd = pmd_offset(pud, vaddr);
32360+
32361+#ifdef CONFIG_X86_PAE
32362+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32363+#endif
32364+
32365 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32366 pmd++, pmd_idx++) {
32367 pte = page_table_kmap_check(one_page_table_init(pmd),
32368@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32369 }
32370 }
32371
32372-static inline int is_kernel_text(unsigned long addr)
32373+static inline int is_kernel_text(unsigned long start, unsigned long end)
32374 {
32375- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32376- return 1;
32377- return 0;
32378+ if ((start > ktla_ktva((unsigned long)_etext) ||
32379+ end <= ktla_ktva((unsigned long)_stext)) &&
32380+ (start > ktla_ktva((unsigned long)_einittext) ||
32381+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32382+
32383+#ifdef CONFIG_ACPI_SLEEP
32384+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32385+#endif
32386+
32387+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32388+ return 0;
32389+ return 1;
32390 }
32391
32392 /*
32393@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32394 unsigned long last_map_addr = end;
32395 unsigned long start_pfn, end_pfn;
32396 pgd_t *pgd_base = swapper_pg_dir;
32397- int pgd_idx, pmd_idx, pte_ofs;
32398+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32399 unsigned long pfn;
32400 pgd_t *pgd;
32401+ pud_t *pud;
32402 pmd_t *pmd;
32403 pte_t *pte;
32404 unsigned pages_2m, pages_4k;
32405@@ -291,8 +295,13 @@ repeat:
32406 pfn = start_pfn;
32407 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32408 pgd = pgd_base + pgd_idx;
32409- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
32410- pmd = one_md_table_init(pgd);
32411+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
32412+ pud = pud_offset(pgd, 0);
32413+ pmd = pmd_offset(pud, 0);
32414+
32415+#ifdef CONFIG_X86_PAE
32416+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32417+#endif
32418
32419 if (pfn >= end_pfn)
32420 continue;
32421@@ -304,14 +313,13 @@ repeat:
32422 #endif
32423 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
32424 pmd++, pmd_idx++) {
32425- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
32426+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
32427
32428 /*
32429 * Map with big pages if possible, otherwise
32430 * create normal page tables:
32431 */
32432 if (use_pse) {
32433- unsigned int addr2;
32434 pgprot_t prot = PAGE_KERNEL_LARGE;
32435 /*
32436 * first pass will use the same initial
32437@@ -322,11 +330,7 @@ repeat:
32438 _PAGE_PSE);
32439
32440 pfn &= PMD_MASK >> PAGE_SHIFT;
32441- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
32442- PAGE_OFFSET + PAGE_SIZE-1;
32443-
32444- if (is_kernel_text(addr) ||
32445- is_kernel_text(addr2))
32446+ if (is_kernel_text(address, address + PMD_SIZE))
32447 prot = PAGE_KERNEL_LARGE_EXEC;
32448
32449 pages_2m++;
32450@@ -343,7 +347,7 @@ repeat:
32451 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
32452 pte += pte_ofs;
32453 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
32454- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
32455+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
32456 pgprot_t prot = PAGE_KERNEL;
32457 /*
32458 * first pass will use the same initial
32459@@ -351,7 +355,7 @@ repeat:
32460 */
32461 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
32462
32463- if (is_kernel_text(addr))
32464+ if (is_kernel_text(address, address + PAGE_SIZE))
32465 prot = PAGE_KERNEL_EXEC;
32466
32467 pages_4k++;
32468@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
32469
32470 pud = pud_offset(pgd, va);
32471 pmd = pmd_offset(pud, va);
32472- if (!pmd_present(*pmd))
32473+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
32474 break;
32475
32476 /* should not be large page here */
32477@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
32478
32479 static void __init pagetable_init(void)
32480 {
32481- pgd_t *pgd_base = swapper_pg_dir;
32482-
32483- permanent_kmaps_init(pgd_base);
32484+ permanent_kmaps_init(swapper_pg_dir);
32485 }
32486
32487-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
32488+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
32489 EXPORT_SYMBOL_GPL(__supported_pte_mask);
32490
32491 /* user-defined highmem size */
32492@@ -787,10 +789,10 @@ void __init mem_init(void)
32493 ((unsigned long)&__init_end -
32494 (unsigned long)&__init_begin) >> 10,
32495
32496- (unsigned long)&_etext, (unsigned long)&_edata,
32497- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
32498+ (unsigned long)&_sdata, (unsigned long)&_edata,
32499+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
32500
32501- (unsigned long)&_text, (unsigned long)&_etext,
32502+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
32503 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
32504
32505 /*
32506@@ -880,6 +882,7 @@ void set_kernel_text_rw(void)
32507 if (!kernel_set_to_readonly)
32508 return;
32509
32510+ start = ktla_ktva(start);
32511 pr_debug("Set kernel text: %lx - %lx for read write\n",
32512 start, start+size);
32513
32514@@ -894,6 +897,7 @@ void set_kernel_text_ro(void)
32515 if (!kernel_set_to_readonly)
32516 return;
32517
32518+ start = ktla_ktva(start);
32519 pr_debug("Set kernel text: %lx - %lx for read only\n",
32520 start, start+size);
32521
32522@@ -922,6 +926,7 @@ void mark_rodata_ro(void)
32523 unsigned long start = PFN_ALIGN(_text);
32524 unsigned long size = PFN_ALIGN(_etext) - start;
32525
32526+ start = ktla_ktva(start);
32527 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
32528 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
32529 size >> 10);
32530diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
32531index 104d56a..62ba13f1 100644
32532--- a/arch/x86/mm/init_64.c
32533+++ b/arch/x86/mm/init_64.c
32534@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
32535 * around without checking the pgd every time.
32536 */
32537
32538-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
32539+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
32540 EXPORT_SYMBOL_GPL(__supported_pte_mask);
32541
32542 int force_personality32;
32543@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32544
32545 for (address = start; address <= end; address += PGDIR_SIZE) {
32546 const pgd_t *pgd_ref = pgd_offset_k(address);
32547+
32548+#ifdef CONFIG_PAX_PER_CPU_PGD
32549+ unsigned long cpu;
32550+#else
32551 struct page *page;
32552+#endif
32553
32554 if (pgd_none(*pgd_ref))
32555 continue;
32556
32557 spin_lock(&pgd_lock);
32558+
32559+#ifdef CONFIG_PAX_PER_CPU_PGD
32560+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32561+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
32562+
32563+ if (pgd_none(*pgd))
32564+ set_pgd(pgd, *pgd_ref);
32565+ else
32566+ BUG_ON(pgd_page_vaddr(*pgd)
32567+ != pgd_page_vaddr(*pgd_ref));
32568+ pgd = pgd_offset_cpu(cpu, kernel, address);
32569+#else
32570 list_for_each_entry(page, &pgd_list, lru) {
32571 pgd_t *pgd;
32572 spinlock_t *pgt_lock;
32573@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32574 /* the pgt_lock only for Xen */
32575 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32576 spin_lock(pgt_lock);
32577+#endif
32578
32579 if (pgd_none(*pgd))
32580 set_pgd(pgd, *pgd_ref);
32581@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
32582 BUG_ON(pgd_page_vaddr(*pgd)
32583 != pgd_page_vaddr(*pgd_ref));
32584
32585+#ifndef CONFIG_PAX_PER_CPU_PGD
32586 spin_unlock(pgt_lock);
32587+#endif
32588+
32589 }
32590 spin_unlock(&pgd_lock);
32591 }
32592@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
32593 {
32594 if (pgd_none(*pgd)) {
32595 pud_t *pud = (pud_t *)spp_getpage();
32596- pgd_populate(&init_mm, pgd, pud);
32597+ pgd_populate_kernel(&init_mm, pgd, pud);
32598 if (pud != pud_offset(pgd, 0))
32599 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
32600 pud, pud_offset(pgd, 0));
32601@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
32602 {
32603 if (pud_none(*pud)) {
32604 pmd_t *pmd = (pmd_t *) spp_getpage();
32605- pud_populate(&init_mm, pud, pmd);
32606+ pud_populate_kernel(&init_mm, pud, pmd);
32607 if (pmd != pmd_offset(pud, 0))
32608 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
32609 pmd, pmd_offset(pud, 0));
32610@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
32611 pmd = fill_pmd(pud, vaddr);
32612 pte = fill_pte(pmd, vaddr);
32613
32614+ pax_open_kernel();
32615 set_pte(pte, new_pte);
32616+ pax_close_kernel();
32617
32618 /*
32619 * It's enough to flush this one mapping.
32620@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
32621 pgd = pgd_offset_k((unsigned long)__va(phys));
32622 if (pgd_none(*pgd)) {
32623 pud = (pud_t *) spp_getpage();
32624- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
32625- _PAGE_USER));
32626+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
32627 }
32628 pud = pud_offset(pgd, (unsigned long)__va(phys));
32629 if (pud_none(*pud)) {
32630 pmd = (pmd_t *) spp_getpage();
32631- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
32632- _PAGE_USER));
32633+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
32634 }
32635 pmd = pmd_offset(pud, phys);
32636 BUG_ON(!pmd_none(*pmd));
32637@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
32638 prot);
32639
32640 spin_lock(&init_mm.page_table_lock);
32641- pud_populate(&init_mm, pud, pmd);
32642+ pud_populate_kernel(&init_mm, pud, pmd);
32643 spin_unlock(&init_mm.page_table_lock);
32644 }
32645 __flush_tlb_all();
32646@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
32647 page_size_mask);
32648
32649 spin_lock(&init_mm.page_table_lock);
32650- pgd_populate(&init_mm, pgd, pud);
32651+ pgd_populate_kernel(&init_mm, pgd, pud);
32652 spin_unlock(&init_mm.page_table_lock);
32653 pgd_changed = true;
32654 }
32655@@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr)
32656 static struct vm_area_struct gate_vma = {
32657 .vm_start = VSYSCALL_START,
32658 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
32659- .vm_page_prot = PAGE_READONLY_EXEC,
32660- .vm_flags = VM_READ | VM_EXEC
32661+ .vm_page_prot = PAGE_READONLY,
32662+ .vm_flags = VM_READ
32663 };
32664
32665 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
32666@@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long addr)
32667
32668 const char *arch_vma_name(struct vm_area_struct *vma)
32669 {
32670- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
32671+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
32672 return "[vdso]";
32673 if (vma == &gate_vma)
32674 return "[vsyscall]";
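[Editor's sketch] Under CONFIG_PAX_PER_CPU_PGD each CPU owns a private pair of top-level page tables, so sync_global_pgds() above must replicate a new kernel PGD entry into every CPU's copy instead of walking the shared pgd_list. The toy model below reduces a page-table entry to an unsigned long and get/set to array accesses; names and sizes are illustrative, and the real function additionally shadows the entry into the user half and holds pgd_lock, which is omitted here.

#include <assert.h>
#include <stdio.h>

#define NR_CPUS        4
#define PTRS_PER_PGD 512

/* Toy per-CPU top-level tables: [cpu][index] -> entry (0 == none). */
static unsigned long cpu_pgd[NR_CPUS][PTRS_PER_PGD];
static unsigned long init_pgd[PTRS_PER_PGD];      /* reference table */

/* Replicate one reference entry into every CPU's private PGD,
 * mirroring the PAX_PER_CPU_PGD branch of sync_global_pgds(). */
static void sync_global_pgd(int idx)
{
	unsigned long ref = init_pgd[idx];
	int cpu;

	if (!ref)
		return;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_pgd[cpu][idx])
			cpu_pgd[cpu][idx] = ref;
		else            /* an existing entry must already agree */
			assert(cpu_pgd[cpu][idx] == ref);
	}
}

int main(void)
{
	init_pgd[273] = 0x1234000UL;   /* pretend the vmalloc area grew */
	sync_global_pgd(273);
	printf("cpu2 slot 273 = %#lx\n", cpu_pgd[2][273]);
	return 0;
}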
32675diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
32676index 7b179b4..6bd17777 100644
32677--- a/arch/x86/mm/iomap_32.c
32678+++ b/arch/x86/mm/iomap_32.c
32679@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
32680 type = kmap_atomic_idx_push();
32681 idx = type + KM_TYPE_NR * smp_processor_id();
32682 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32683+
32684+ pax_open_kernel();
32685 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
32686+ pax_close_kernel();
32687+
32688 arch_flush_lazy_mmu_mode();
32689
32690 return (void *)vaddr;
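[Editor's sketch] pax_open_kernel()/pax_close_kernel() open a short window during which otherwise read-only kernel data (here the kmap PTE slot) may be written; on x86 this is typically done by toggling CR0.WP or switching page tables. A userspace analogue of the pattern using mprotect() follows, assuming a POSIX system; the function names are ours, not the kernel's, and error checking is omitted for brevity.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *ro_page;
static size_t pgsz;

/* Analogue of pax_open_kernel(): make the protected page writable. */
static void open_kernel(void)  { mprotect(ro_page, pgsz, PROT_READ | PROT_WRITE); }
/* Analogue of pax_close_kernel(): restore read-only protection.    */
static void close_kernel(void) { mprotect(ro_page, pgsz, PROT_READ); }

int main(void)
{
	pgsz = (size_t)sysconf(_SC_PAGESIZE);
	ro_page = mmap(NULL, pgsz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	strcpy(ro_page, "initial");
	mprotect(ro_page, pgsz, PROT_READ);     /* data is now read-only */

	open_kernel();                          /* briefly writable...   */
	strcpy(ro_page, "updated");             /* ...perform the write  */
	close_kernel();                         /* ...and lock it again  */

	puts((const char *)ro_page);            /* prints "updated"      */
	return 0;
}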
32691diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
32692index 799580c..72f9fe0 100644
32693--- a/arch/x86/mm/ioremap.c
32694+++ b/arch/x86/mm/ioremap.c
32695@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
32696 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
32697 int is_ram = page_is_ram(pfn);
32698
32699- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
32700+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
32701 return NULL;
32702 WARN_ON_ONCE(is_ram);
32703 }
32704@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
32705 *
32706 * Caller must ensure there is only one unmapping for the same pointer.
32707 */
32708-void iounmap(volatile void __iomem *addr)
32709+void iounmap(const volatile void __iomem *addr)
32710 {
32711 struct vm_struct *p, *o;
32712
32713@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32714
32715 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
32716 if (page_is_ram(start >> PAGE_SHIFT))
32717+#ifdef CONFIG_HIGHMEM
32718+ if ((start >> PAGE_SHIFT) < max_low_pfn)
32719+#endif
32720 return __va(phys);
32721
32722 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
32723@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32724 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
32725 {
32726 if (page_is_ram(phys >> PAGE_SHIFT))
32727+#ifdef CONFIG_HIGHMEM
32728+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
32729+#endif
32730 return;
32731
32732 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
32733@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
32734 early_param("early_ioremap_debug", early_ioremap_debug_setup);
32735
32736 static __initdata int after_paging_init;
32737-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
32738+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
32739
32740 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
32741 {
32742@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
32743 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
32744
32745 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
32746- memset(bm_pte, 0, sizeof(bm_pte));
32747- pmd_populate_kernel(&init_mm, pmd, bm_pte);
32748+ pmd_populate_user(&init_mm, pmd, bm_pte);
32749
32750 /*
32751 * The boot-ioremap range spans multiple pmds, for which
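[Editor's sketch] The xlate_dev_mem_ptr() hunks above add a highmem guard: __va() only yields a valid pointer for pages inside the direct mapping, i.e. pfn < max_low_pfn on 32-bit highmem configs, and anything above must take the ioremap_cache() path; the earlier __ioremap_caller() hunk likewise permits remapping reserved pages below pfn 0x100 (the first megabyte of BIOS/real-mode data). A small predicate capturing the direct-map rule, with an invented max_low_pfn value:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: on a real 32-bit highmem kernel max_low_pfn is
 * computed at boot (roughly 896 MiB worth of 4 KiB pages). */
static const unsigned long max_low_pfn = 0x38000UL;

/* May this physical address be translated with __va(), or must the
 * caller ioremap it?  Mirrors the CONFIG_HIGHMEM guard in the patch. */
static bool in_direct_map(unsigned long phys)
{
	return (phys >> 12) < max_low_pfn;      /* 12 == PAGE_SHIFT */
}

int main(void)
{
	printf("1MiB: %s\n", in_direct_map(0x100000UL)   ? "__va" : "ioremap");
	printf("2GiB: %s\n", in_direct_map(0x80000000UL) ? "__va" : "ioremap");
	return 0;
}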
32752diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
32753index d87dd6d..bf3fa66 100644
32754--- a/arch/x86/mm/kmemcheck/kmemcheck.c
32755+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
32756@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
32757 * memory (e.g. tracked pages)? For now, we need this to avoid
32758 * invoking kmemcheck for PnP BIOS calls.
32759 */
32760- if (regs->flags & X86_VM_MASK)
32761+ if (v8086_mode(regs))
32762 return false;
32763- if (regs->cs != __KERNEL_CS)
32764+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
32765 return false;
32766
32767 pte = kmemcheck_pte_lookup(address);
32768diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
32769index 25e7e13..1964579 100644
32770--- a/arch/x86/mm/mmap.c
32771+++ b/arch/x86/mm/mmap.c
32772@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
32773 * Leave an at least ~128 MB hole with possible stack randomization.
32774 */
32775 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
32776-#define MAX_GAP (TASK_SIZE/6*5)
32777+#define MAX_GAP (pax_task_size/6*5)
32778
32779 static int mmap_is_legacy(void)
32780 {
32781@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
32782 return rnd << PAGE_SHIFT;
32783 }
32784
32785-static unsigned long mmap_base(void)
32786+static unsigned long mmap_base(struct mm_struct *mm)
32787 {
32788 unsigned long gap = rlimit(RLIMIT_STACK);
32789+ unsigned long pax_task_size = TASK_SIZE;
32790+
32791+#ifdef CONFIG_PAX_SEGMEXEC
32792+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32793+ pax_task_size = SEGMEXEC_TASK_SIZE;
32794+#endif
32795
32796 if (gap < MIN_GAP)
32797 gap = MIN_GAP;
32798 else if (gap > MAX_GAP)
32799 gap = MAX_GAP;
32800
32801- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
32802+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
32803 }
32804
32805 /*
32806 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
32807 * does, but not when emulating X86_32
32808 */
32809-static unsigned long mmap_legacy_base(void)
32810+static unsigned long mmap_legacy_base(struct mm_struct *mm)
32811 {
32812- if (mmap_is_ia32())
32813+ if (mmap_is_ia32()) {
32814+
32815+#ifdef CONFIG_PAX_SEGMEXEC
32816+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32817+ return SEGMEXEC_TASK_UNMAPPED_BASE;
32818+ else
32819+#endif
32820+
32821 return TASK_UNMAPPED_BASE;
32822- else
32823+ } else
32824 return TASK_UNMAPPED_BASE + mmap_rnd();
32825 }
32826
32827@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
32828 */
32829 void arch_pick_mmap_layout(struct mm_struct *mm)
32830 {
32831- mm->mmap_legacy_base = mmap_legacy_base();
32832- mm->mmap_base = mmap_base();
32833+ mm->mmap_legacy_base = mmap_legacy_base(mm);
32834+ mm->mmap_base = mmap_base(mm);
32835+
32836+#ifdef CONFIG_PAX_RANDMMAP
32837+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
32838+ mm->mmap_legacy_base += mm->delta_mmap;
32839+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
32840+ }
32841+#endif
32842
32843 if (mmap_is_legacy()) {
32844 mm->mmap_base = mm->mmap_legacy_base;
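[Editor's sketch] mmap_base() above places the top-down mmap region at task size minus the stack gap, clamped between MIN_GAP (about 128 MiB plus stack randomization) and MAX_GAP (5/6 of the task size), minus a random slide; PAX_RANDMMAP then shifts the bottom-up base upward by delta_mmap and the top-down base downward by delta_mmap + delta_stack. A worked model with invented deltas (in PaX they come from random bits chosen at exec time); alignment is rounded down here for simplicity where the kernel uses PAGE_ALIGN:

#include <stdio.h>

#define PAGE_MASK (~0xfffUL)
#define TASK_SIZE 0xc0000000UL           /* 3 GiB, i386-style split */

static unsigned long clamp_gap(unsigned long gap)
{
	unsigned long min_gap = 128UL << 20;        /* + stack rnd, omitted */
	unsigned long max_gap = TASK_SIZE / 6 * 5;

	if (gap < min_gap) return min_gap;
	if (gap > max_gap) return max_gap;
	return gap;
}

int main(void)
{
	unsigned long stack_rlimit = 8UL << 20;     /* RLIMIT_STACK = 8 MiB */
	unsigned long mmap_rnd     = 0x00a31000UL;  /* pretend random slide */
	unsigned long delta_mmap   = 0x01000000UL;  /* PAX_RANDMMAP deltas, */
	unsigned long delta_stack  = 0x00200000UL;  /* invented for demo    */

	unsigned long base = (TASK_SIZE - clamp_gap(stack_rlimit) - mmap_rnd)
			     & PAGE_MASK;
	printf("mmap_base         = %#lx\n", base);
	printf("with PAX_RANDMMAP = %#lx\n", base - delta_mmap - delta_stack);
	return 0;
}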
32845diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
32846index 0057a7a..95c7edd 100644
32847--- a/arch/x86/mm/mmio-mod.c
32848+++ b/arch/x86/mm/mmio-mod.c
32849@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
32850 break;
32851 default:
32852 {
32853- unsigned char *ip = (unsigned char *)instptr;
32854+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
32855 my_trace->opcode = MMIO_UNKNOWN_OP;
32856 my_trace->width = 0;
32857 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
32858@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
32859 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32860 void __iomem *addr)
32861 {
32862- static atomic_t next_id;
32863+ static atomic_unchecked_t next_id;
32864 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
32865 /* These are page-unaligned. */
32866 struct mmiotrace_map map = {
32867@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32868 .private = trace
32869 },
32870 .phys = offset,
32871- .id = atomic_inc_return(&next_id)
32872+ .id = atomic_inc_return_unchecked(&next_id)
32873 };
32874 map.map_id = trace->id;
32875
32876@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
32877 ioremap_trace_core(offset, size, addr);
32878 }
32879
32880-static void iounmap_trace_core(volatile void __iomem *addr)
32881+static void iounmap_trace_core(const volatile void __iomem *addr)
32882 {
32883 struct mmiotrace_map map = {
32884 .phys = 0,
32885@@ -328,7 +328,7 @@ not_enabled:
32886 }
32887 }
32888
32889-void mmiotrace_iounmap(volatile void __iomem *addr)
32890+void mmiotrace_iounmap(const volatile void __iomem *addr)
32891 {
32892 might_sleep();
32893 if (is_enabled()) /* recheck and proper locking in *_core() */
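[Editor's sketch] next_id switches from atomic_t to atomic_unchecked_t above because, under PaX REFCOUNT, ordinary atomic_t increments trap on overflow to stop reference-count overflow exploits; counters that are mere IDs and may wrap harmlessly opt out via the _unchecked variants. The distinction can be modeled with the compiler overflow builtins (GCC/Clang); the function names below are ours:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* "Checked" increment: abort on signed overflow, the way REFCOUNT
 * turns an overflowing atomic_inc() into a trap. */
static int inc_checked(int v)
{
	int r;
	if (__builtin_add_overflow(v, 1, &r)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
	return r;
}

/* "Unchecked" increment: plain wraparound, fine for cosmetic IDs
 * such as mmiotrace's next_id. */
static unsigned int inc_unchecked(unsigned int v) { return v + 1; }

int main(void)
{
	printf("%u\n", inc_unchecked(UINT_MAX));    /* wraps to 0, harmless */
	printf("%d\n", inc_checked(INT_MAX));       /* aborts               */
	return 0;
}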
32894diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
32895index 24aec58..c39fe8b 100644
32896--- a/arch/x86/mm/numa.c
32897+++ b/arch/x86/mm/numa.c
32898@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
32899 return true;
32900 }
32901
32902-static int __init numa_register_memblks(struct numa_meminfo *mi)
32903+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
32904 {
32905 unsigned long uninitialized_var(pfn_align);
32906 int i, nid;
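[Editor's sketch] __intentional_overflow(-1) is an annotation consumed by PaX's size_overflow GCC plugin: it marks functions whose arithmetic is allowed to wrap so the plugin does not instrument them, here the pfn math in numa_register_memblks(). The closest portable analogue, offered only as an analogy and not the plugin's mechanism, is exempting one function from a sanitizer that instruments the rest of the build:

#include <stdio.h>

/* Analogy for __intentional_overflow: exempt one function from the
 * overflow instrumentation applied elsewhere.  Attribute spelling is
 * Clang's; recent GCC accepts it too.  Note the uninstrumented
 * addition is still formally undefined in C; this is a demo only. */
__attribute__((no_sanitize("signed-integer-overflow")))
static int wrapping_sum(int a, int b)
{
	return a + b;           /* may wrap; deliberately uninstrumented */
}

int main(void)
{
	/* Build with: cc -fsanitize=signed-integer-overflow demo.c */
	printf("%d\n", wrapping_sum(0x7fffffff, 1));
	return 0;
}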
32907diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
32908index d0b1773..4c3327c 100644
32909--- a/arch/x86/mm/pageattr-test.c
32910+++ b/arch/x86/mm/pageattr-test.c
32911@@ -36,7 +36,7 @@ enum {
32912
32913 static int pte_testbit(pte_t pte)
32914 {
32915- return pte_flags(pte) & _PAGE_UNUSED1;
32916+ return pte_flags(pte) & _PAGE_CPA_TEST;
32917 }
32918
32919 struct split_state {
32920diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
32921index bb32480..75f2f5e 100644
32922--- a/arch/x86/mm/pageattr.c
32923+++ b/arch/x86/mm/pageattr.c
32924@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32925 */
32926 #ifdef CONFIG_PCI_BIOS
32927 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
32928- pgprot_val(forbidden) |= _PAGE_NX;
32929+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32930 #endif
32931
32932 /*
32933@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32934 * Does not cover __inittext since that is gone later on. On
32935 * 64bit we do not enforce !NX on the low mapping
32936 */
32937- if (within(address, (unsigned long)_text, (unsigned long)_etext))
32938- pgprot_val(forbidden) |= _PAGE_NX;
32939+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
32940+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32941
32942+#ifdef CONFIG_DEBUG_RODATA
32943 /*
32944 * The .rodata section needs to be read-only. Using the pfn
32945 * catches all aliases.
32946@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32947 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
32948 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
32949 pgprot_val(forbidden) |= _PAGE_RW;
32950+#endif
32951
32952 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
32953 /*
32954@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32955 }
32956 #endif
32957
32958+#ifdef CONFIG_PAX_KERNEXEC
32959+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
32960+ pgprot_val(forbidden) |= _PAGE_RW;
32961+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32962+ }
32963+#endif
32964+
32965 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
32966
32967 return prot;
32968@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
32969 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
32970 {
32971 /* change init_mm */
32972+ pax_open_kernel();
32973 set_pte_atomic(kpte, pte);
32974+
32975 #ifdef CONFIG_X86_32
32976 if (!SHARED_KERNEL_PMD) {
32977+
32978+#ifdef CONFIG_PAX_PER_CPU_PGD
32979+ unsigned long cpu;
32980+#else
32981 struct page *page;
32982+#endif
32983
32984+#ifdef CONFIG_PAX_PER_CPU_PGD
32985+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32986+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
32987+#else
32988 list_for_each_entry(page, &pgd_list, lru) {
32989- pgd_t *pgd;
32990+ pgd_t *pgd = (pgd_t *)page_address(page);
32991+#endif
32992+
32993 pud_t *pud;
32994 pmd_t *pmd;
32995
32996- pgd = (pgd_t *)page_address(page) + pgd_index(address);
32997+ pgd += pgd_index(address);
32998 pud = pud_offset(pgd, address);
32999 pmd = pmd_offset(pud, address);
33000 set_pte_atomic((pte_t *)pmd, pte);
33001 }
33002 }
33003 #endif
33004+ pax_close_kernel();
33005 }
33006
33007 static int
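[Editor's sketch] static_protections() accumulates a mask of permission bits that must never be granted for a given address/pfn; the KERNEXEC hunk above adds "everything from _text to _sdata stays read-only and executable" by forbidding both _PAGE_RW and _PAGE_NX there. A sketch of the forbidden-bits pattern with toy flag values and invented section bounds:

#include <stdio.h>

#define PAGE_RW 0x2UL
#define PAGE_NX 0x4UL   /* toy flag values, not the real pte bits */

struct region { unsigned long start, end; };
static const struct region text   = { 0x1000000, 0x1400000 };
static const struct region rodata = { 0x1400000, 0x1500000 };

static int within(unsigned long a, unsigned long s, unsigned long e)
{
	return a >= s && a < e;
}

/* Mirror of the static_protections() idea: start from the caller's
 * requested protection and strip whatever is forbidden for this
 * address.  Under KERNEXEC the whole text range must stay RO and
 * executable, so both RW and NX land in the forbidden mask. */
static unsigned long apply_static_protections(unsigned long prot,
					      unsigned long addr)
{
	unsigned long forbidden = 0;

	if (within(addr, text.start, text.end))
		forbidden |= PAGE_RW | PAGE_NX;     /* keep text RO + exec */
	if (within(addr, rodata.start, rodata.end))
		forbidden |= PAGE_RW;               /* keep rodata RO      */

	return prot & ~forbidden;
}

int main(void)
{
	/* request RW+NX inside .text: both bits are stripped, prints 0 */
	printf("%#lx\n", apply_static_protections(PAGE_RW | PAGE_NX, 0x1200000));
	return 0;
}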
33008diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33009index 6574388..87e9bef 100644
33010--- a/arch/x86/mm/pat.c
33011+++ b/arch/x86/mm/pat.c
33012@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
33013
33014 if (!entry) {
33015 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33016- current->comm, current->pid, start, end - 1);
33017+ current->comm, task_pid_nr(current), start, end - 1);
33018 return -EINVAL;
33019 }
33020
33021@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33022
33023 while (cursor < to) {
33024 if (!devmem_is_allowed(pfn)) {
33025- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33026- current->comm, from, to - 1);
33027+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33028+ current->comm, from, to - 1, cursor);
33029 return 0;
33030 }
33031 cursor += PAGE_SIZE;
33032@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
33033 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
33034 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33035 "for [mem %#010Lx-%#010Lx]\n",
33036- current->comm, current->pid,
33037+ current->comm, task_pid_nr(current),
33038 cattr_name(flags),
33039 base, (unsigned long long)(base + size-1));
33040 return -EINVAL;
33041@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33042 flags = lookup_memtype(paddr);
33043 if (want_flags != flags) {
33044 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33045- current->comm, current->pid,
33046+ current->comm, task_pid_nr(current),
33047 cattr_name(want_flags),
33048 (unsigned long long)paddr,
33049 (unsigned long long)(paddr + size - 1),
33050@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33051 free_memtype(paddr, paddr + size);
33052 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33053 " for [mem %#010Lx-%#010Lx], got %s\n",
33054- current->comm, current->pid,
33055+ current->comm, task_pid_nr(current),
33056 cattr_name(want_flags),
33057 (unsigned long long)paddr,
33058 (unsigned long long)(paddr + size - 1),
33059diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33060index 415f6c4..d319983 100644
33061--- a/arch/x86/mm/pat_rbtree.c
33062+++ b/arch/x86/mm/pat_rbtree.c
33063@@ -160,7 +160,7 @@ success:
33064
33065 failure:
33066 printk(KERN_INFO "%s:%d conflicting memory types "
33067- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33068+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33069 end, cattr_name(found_type), cattr_name(match->type));
33070 return -EBUSY;
33071 }
33072diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33073index 9f0614d..92ae64a 100644
33074--- a/arch/x86/mm/pf_in.c
33075+++ b/arch/x86/mm/pf_in.c
33076@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33077 int i;
33078 enum reason_type rv = OTHERS;
33079
33080- p = (unsigned char *)ins_addr;
33081+ p = (unsigned char *)ktla_ktva(ins_addr);
33082 p += skip_prefix(p, &prf);
33083 p += get_opcode(p, &opcode);
33084
33085@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33086 struct prefix_bits prf;
33087 int i;
33088
33089- p = (unsigned char *)ins_addr;
33090+ p = (unsigned char *)ktla_ktva(ins_addr);
33091 p += skip_prefix(p, &prf);
33092 p += get_opcode(p, &opcode);
33093
33094@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33095 struct prefix_bits prf;
33096 int i;
33097
33098- p = (unsigned char *)ins_addr;
33099+ p = (unsigned char *)ktla_ktva(ins_addr);
33100 p += skip_prefix(p, &prf);
33101 p += get_opcode(p, &opcode);
33102
33103@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33104 struct prefix_bits prf;
33105 int i;
33106
33107- p = (unsigned char *)ins_addr;
33108+ p = (unsigned char *)ktla_ktva(ins_addr);
33109 p += skip_prefix(p, &prf);
33110 p += get_opcode(p, &opcode);
33111 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33112@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33113 struct prefix_bits prf;
33114 int i;
33115
33116- p = (unsigned char *)ins_addr;
33117+ p = (unsigned char *)ktla_ktva(ins_addr);
33118 p += skip_prefix(p, &prf);
33119 p += get_opcode(p, &opcode);
33120 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33121diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33122index c96314a..433b127 100644
33123--- a/arch/x86/mm/pgtable.c
33124+++ b/arch/x86/mm/pgtable.c
33125@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33126 list_del(&page->lru);
33127 }
33128
33129-#define UNSHARED_PTRS_PER_PGD \
33130- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33131+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33132+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33133
33134+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33135+{
33136+ unsigned int count = USER_PGD_PTRS;
33137
33138+ if (!pax_user_shadow_base)
33139+ return;
33140+
33141+ while (count--)
33142+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33143+}
33144+#endif
33145+
33146+#ifdef CONFIG_PAX_PER_CPU_PGD
33147+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33148+{
33149+ unsigned int count = USER_PGD_PTRS;
33150+
33151+ while (count--) {
33152+ pgd_t pgd;
33153+
33154+#ifdef CONFIG_X86_64
33155+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33156+#else
33157+ pgd = *src++;
33158+#endif
33159+
33160+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33161+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33162+#endif
33163+
33164+ *dst++ = pgd;
33165+ }
33166+
33167+}
33168+#endif
33169+
33170+#ifdef CONFIG_X86_64
33171+#define pxd_t pud_t
33172+#define pyd_t pgd_t
33173+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33174+#define pgtable_pxd_page_ctor(page) true
33175+#define pgtable_pxd_page_dtor(page)
33176+#define pxd_free(mm, pud) pud_free((mm), (pud))
33177+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33178+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33179+#define PYD_SIZE PGDIR_SIZE
33180+#else
33181+#define pxd_t pmd_t
33182+#define pyd_t pud_t
33183+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33184+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33185+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33186+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33187+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33188+#define pyd_offset(mm, address) pud_offset((mm), (address))
33189+#define PYD_SIZE PUD_SIZE
33190+#endif
33191+
33192+#ifdef CONFIG_PAX_PER_CPU_PGD
33193+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33194+static inline void pgd_dtor(pgd_t *pgd) {}
33195+#else
33196 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33197 {
33198 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33199@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33200 pgd_list_del(pgd);
33201 spin_unlock(&pgd_lock);
33202 }
33203+#endif
33204
33205 /*
33206 * List of all pgd's needed for non-PAE so it can invalidate entries
33207@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33208 * -- nyc
33209 */
33210
33211-#ifdef CONFIG_X86_PAE
33212+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33213 /*
33214 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33215 * updating the top-level pagetable entries to guarantee the
33216@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33217 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33218 * and initialize the kernel pmds here.
33219 */
33220-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33221+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33222
33223 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33224 {
33225@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33226 */
33227 flush_tlb_mm(mm);
33228 }
33229+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33230+#define PREALLOCATED_PXDS USER_PGD_PTRS
33231 #else /* !CONFIG_X86_PAE */
33232
33233 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33234-#define PREALLOCATED_PMDS 0
33235+#define PREALLOCATED_PXDS 0
33236
33237 #endif /* CONFIG_X86_PAE */
33238
33239-static void free_pmds(pmd_t *pmds[])
33240+static void free_pxds(pxd_t *pxds[])
33241 {
33242 int i;
33243
33244- for(i = 0; i < PREALLOCATED_PMDS; i++)
33245- if (pmds[i]) {
33246- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33247- free_page((unsigned long)pmds[i]);
33248+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33249+ if (pxds[i]) {
33250+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33251+ free_page((unsigned long)pxds[i]);
33252 }
33253 }
33254
33255-static int preallocate_pmds(pmd_t *pmds[])
33256+static int preallocate_pxds(pxd_t *pxds[])
33257 {
33258 int i;
33259 bool failed = false;
33260
33261- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33262- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33263- if (!pmd)
33264+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33265+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33266+ if (!pxd)
33267 failed = true;
33268- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33269- free_page((unsigned long)pmd);
33270- pmd = NULL;
33271+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33272+ free_page((unsigned long)pxd);
33273+ pxd = NULL;
33274 failed = true;
33275 }
33276- pmds[i] = pmd;
33277+ pxds[i] = pxd;
33278 }
33279
33280 if (failed) {
33281- free_pmds(pmds);
33282+ free_pxds(pxds);
33283 return -ENOMEM;
33284 }
33285
33286@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33287 * preallocate which never got a corresponding vma will need to be
33288 * freed manually.
33289 */
33290-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33291+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33292 {
33293 int i;
33294
33295- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33296+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33297 pgd_t pgd = pgdp[i];
33298
33299 if (pgd_val(pgd) != 0) {
33300- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33301+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33302
33303- pgdp[i] = native_make_pgd(0);
33304+ set_pgd(pgdp + i, native_make_pgd(0));
33305
33306- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33307- pmd_free(mm, pmd);
33308+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33309+ pxd_free(mm, pxd);
33310 }
33311 }
33312 }
33313
33314-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33315+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33316 {
33317- pud_t *pud;
33318+ pyd_t *pyd;
33319 int i;
33320
33321- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33322+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33323 return;
33324
33325- pud = pud_offset(pgd, 0);
33326-
33327- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33328- pmd_t *pmd = pmds[i];
33329+#ifdef CONFIG_X86_64
33330+ pyd = pyd_offset(mm, 0L);
33331+#else
33332+ pyd = pyd_offset(pgd, 0L);
33333+#endif
33334
33335+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33336+ pxd_t *pxd = pxds[i];
33337 if (i >= KERNEL_PGD_BOUNDARY)
33338- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33339- sizeof(pmd_t) * PTRS_PER_PMD);
33340+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33341+ sizeof(pxd_t) * PTRS_PER_PMD);
33342
33343- pud_populate(mm, pud, pmd);
33344+ pyd_populate(mm, pyd, pxd);
33345 }
33346 }
33347
33348 pgd_t *pgd_alloc(struct mm_struct *mm)
33349 {
33350 pgd_t *pgd;
33351- pmd_t *pmds[PREALLOCATED_PMDS];
33352+ pxd_t *pxds[PREALLOCATED_PXDS];
33353
33354 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33355
33356@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33357
33358 mm->pgd = pgd;
33359
33360- if (preallocate_pmds(pmds) != 0)
33361+ if (preallocate_pxds(pxds) != 0)
33362 goto out_free_pgd;
33363
33364 if (paravirt_pgd_alloc(mm) != 0)
33365- goto out_free_pmds;
33366+ goto out_free_pxds;
33367
33368 /*
33369 * Make sure that pre-populating the pmds is atomic with
33370@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33371 spin_lock(&pgd_lock);
33372
33373 pgd_ctor(mm, pgd);
33374- pgd_prepopulate_pmd(mm, pgd, pmds);
33375+ pgd_prepopulate_pxd(mm, pgd, pxds);
33376
33377 spin_unlock(&pgd_lock);
33378
33379 return pgd;
33380
33381-out_free_pmds:
33382- free_pmds(pmds);
33383+out_free_pxds:
33384+ free_pxds(pxds);
33385 out_free_pgd:
33386 free_page((unsigned long)pgd);
33387 out:
33388@@ -313,7 +380,7 @@ out:
33389
33390 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
33391 {
33392- pgd_mop_up_pmds(mm, pgd);
33393+ pgd_mop_up_pxds(mm, pgd);
33394 pgd_dtor(pgd);
33395 paravirt_pgd_free(mm, pgd);
33396 free_page((unsigned long)pgd);
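[Editor's sketch] The pxd_t/pyd_t macro layer above lets one preallocation path serve both geometries: on 32-bit PAE the prepopulated level is PMDs hanging off PUDs, on 64-bit with per-CPU PGDs it is PUDs hanging off the PGD itself, and the macros rename the level plus its ctor/dtor, free and populate operations so free_pxds()/preallocate_pxds()/pgd_prepopulate_pxd() compile to the right calls on each arch. A compile-time sketch of the same trick with toy types; select the 64-bit shape with -DSIXTY_FOUR, and note the free path is elided for brevity:

#include <stdio.h>
#include <stdlib.h>

/* Pick the middle level once; everything below is level-agnostic,
 * just as the pxd_t/pyd_t macros make pgtable.c level-agnostic. */
#ifdef SIXTY_FOUR
typedef struct { unsigned long v; } pud_level;
#define pxd_t        pud_level
#define pxd_name     "pud"
#else
typedef struct { unsigned long v; } pmd_level;
#define pxd_t        pmd_level
#define pxd_name     "pmd"
#endif

#define PREALLOCATED_PXDS 4

static int preallocate_pxds(pxd_t *pxds[])
{
	int i;
	for (i = 0; i < PREALLOCATED_PXDS; i++) {
		pxds[i] = calloc(1, sizeof(pxd_t));
		if (!pxds[i])
			return -1;      /* real code frees what it got */
	}
	return 0;
}

int main(void)
{
	pxd_t *pxds[PREALLOCATED_PXDS];
	if (preallocate_pxds(pxds) == 0)
		printf("preallocated %d %s pages\n", PREALLOCATED_PXDS, pxd_name);
	return 0;
}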
33397diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
33398index a69bcb8..19068ab 100644
33399--- a/arch/x86/mm/pgtable_32.c
33400+++ b/arch/x86/mm/pgtable_32.c
33401@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
33402 return;
33403 }
33404 pte = pte_offset_kernel(pmd, vaddr);
33405+
33406+ pax_open_kernel();
33407 if (pte_val(pteval))
33408 set_pte_at(&init_mm, vaddr, pte, pteval);
33409 else
33410 pte_clear(&init_mm, vaddr, pte);
33411+ pax_close_kernel();
33412
33413 /*
33414 * It's enough to flush this one mapping.
33415diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
33416index e666cbb..61788c45 100644
33417--- a/arch/x86/mm/physaddr.c
33418+++ b/arch/x86/mm/physaddr.c
33419@@ -10,7 +10,7 @@
33420 #ifdef CONFIG_X86_64
33421
33422 #ifdef CONFIG_DEBUG_VIRTUAL
33423-unsigned long __phys_addr(unsigned long x)
33424+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
33425 {
33426 unsigned long y = x - __START_KERNEL_map;
33427
33428@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
33429 #else
33430
33431 #ifdef CONFIG_DEBUG_VIRTUAL
33432-unsigned long __phys_addr(unsigned long x)
33433+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
33434 {
33435 unsigned long phys_addr = x - PAGE_OFFSET;
33436 /* VMALLOC_* aren't constants */
33437diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
33438index 90555bf..f5f1828 100644
33439--- a/arch/x86/mm/setup_nx.c
33440+++ b/arch/x86/mm/setup_nx.c
33441@@ -5,8 +5,10 @@
33442 #include <asm/pgtable.h>
33443 #include <asm/proto.h>
33444
33445+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
33446 static int disable_nx;
33447
33448+#ifndef CONFIG_PAX_PAGEEXEC
33449 /*
33450 * noexec = on|off
33451 *
33452@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
33453 return 0;
33454 }
33455 early_param("noexec", noexec_setup);
33456+#endif
33457+
33458+#endif
33459
33460 void x86_configure_nx(void)
33461 {
33462+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
33463 if (cpu_has_nx && !disable_nx)
33464 __supported_pte_mask |= _PAGE_NX;
33465 else
33466+#endif
33467 __supported_pte_mask &= ~_PAGE_NX;
33468 }
33469
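[Editor's sketch] x86_configure_nx() only has an NX bit to manage on 64-bit or 32-bit PAE builds, so the hunk compiles the cpu_has_nx branch out elsewhere, and it drops the "noexec=" parameter when PAX_PAGEEXEC owns non-executable-page policy. A tiny model of the resulting mask computation (bit 63 as on x86-64; assumes a 64-bit unsigned long):

#include <stdio.h>

#define PAGE_NX (1UL << 63)

/* Mirror of x86_configure_nx(): include _PAGE_NX in the supported
 * mask only when the hardware has it and it was not disabled. */
static unsigned long configure_nx(unsigned long mask,
				  int cpu_has_nx, int disable_nx)
{
	if (cpu_has_nx && !disable_nx)
		return mask | PAGE_NX;
	return mask & ~PAGE_NX;
}

int main(void)
{
	printf("%#lx\n", configure_nx(0, 1, 0));    /* NX kept     */
	printf("%#lx\n", configure_nx(~0UL, 0, 0)); /* NX stripped */
	return 0;
}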
33470diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
33471index ae699b3..f1b2ad2 100644
33472--- a/arch/x86/mm/tlb.c
33473+++ b/arch/x86/mm/tlb.c
33474@@ -48,7 +48,11 @@ void leave_mm(int cpu)
33475 BUG();
33476 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
33477 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
33478+
33479+#ifndef CONFIG_PAX_PER_CPU_PGD
33480 load_cr3(swapper_pg_dir);
33481+#endif
33482+
33483 }
33484 }
33485 EXPORT_SYMBOL_GPL(leave_mm);
33486diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
33487new file mode 100644
33488index 0000000..dace51c
33489--- /dev/null
33490+++ b/arch/x86/mm/uderef_64.c
33491@@ -0,0 +1,37 @@
33492+#include <linux/mm.h>
33493+#include <asm/pgtable.h>
33494+#include <asm/uaccess.h>
33495+
33496+#ifdef CONFIG_PAX_MEMORY_UDEREF
33497+/* PaX: due to the special call convention these functions must
33498+ * - remain leaf functions under all configurations,
33499+ * - never be called directly, only dereferenced from the wrappers.
33500+ */
33501+void __pax_open_userland(void)
33502+{
33503+ unsigned int cpu;
33504+
33505+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
33506+ return;
33507+
33508+ cpu = raw_get_cpu();
33509+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
33510+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
33511+ raw_put_cpu_no_resched();
33512+}
33513+EXPORT_SYMBOL(__pax_open_userland);
33514+
33515+void __pax_close_userland(void)
33516+{
33517+ unsigned int cpu;
33518+
33519+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
33520+ return;
33521+
33522+ cpu = raw_get_cpu();
33523+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
33524+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
33525+ raw_put_cpu_no_resched();
33526+}
33527+EXPORT_SYMBOL(__pax_close_userland);
33528+#endif
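[Editor's sketch] The new uderef_64.c implements UDEREF's fast path on amd64 with PCIDs: each CPU keeps a kernel and a user top-level table, and __pax_open_userland()/__pax_close_userland() flip CR3 between them with the no-flush bit set, so userland is only mapped while a uaccess window is open. CR3 writes are privileged, so a userspace sketch can only model the invariant the BUG_ONs assert, namely that open and close strictly alternate per CPU; the names below are ours:

#include <assert.h>
#include <stdio.h>

enum pcid { PCID_KERNEL = 0, PCID_USER = 1 };

/* Per-CPU model of "which PGD is live in CR3 right now". */
static enum pcid cr3_pcid = PCID_KERNEL;

/* Model of __pax_open_userland(): must find the kernel PGD live, then
 * switch to the user PGD (the real code writes CR3 with
 * PCID_USER | PCID_NOFLUSH). */
static void open_userland(void)
{
	assert(cr3_pcid == PCID_KERNEL);    /* the BUG_ON in the patch */
	cr3_pcid = PCID_USER;
}

static void close_userland(void)
{
	assert(cr3_pcid == PCID_USER);
	cr3_pcid = PCID_KERNEL;
}

int main(void)
{
	open_userland();
	/* ... a copy_to/from_user() would run here, userland mapped ... */
	close_userland();
	puts("balanced open/close, invariant held");
	return 0;
}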
33529diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
33530index 877b9a1..f746de8 100644
33531--- a/arch/x86/net/bpf_jit.S
33532+++ b/arch/x86/net/bpf_jit.S
33533@@ -9,6 +9,7 @@
33534 */
33535 #include <linux/linkage.h>
33536 #include <asm/dwarf2.h>
33537+#include <asm/alternative-asm.h>
33538
33539 /*
33540 * Calling convention :
33541@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
33542 jle bpf_slow_path_word
33543 mov (SKBDATA,%rsi),%eax
33544 bswap %eax /* ntohl() */
33545+ pax_force_retaddr
33546 ret
33547
33548 sk_load_half:
33549@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
33550 jle bpf_slow_path_half
33551 movzwl (SKBDATA,%rsi),%eax
33552 rol $8,%ax # ntohs()
33553+ pax_force_retaddr
33554 ret
33555
33556 sk_load_byte:
33557@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
33558 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
33559 jle bpf_slow_path_byte
33560 movzbl (SKBDATA,%rsi),%eax
33561+ pax_force_retaddr
33562 ret
33563
33564 /**
33565@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
33566 movzbl (SKBDATA,%rsi),%ebx
33567 and $15,%bl
33568 shl $2,%bl
33569+ pax_force_retaddr
33570 ret
33571
33572 /* rsi contains offset and can be scratched */
33573@@ -109,6 +114,7 @@ bpf_slow_path_word:
33574 js bpf_error
33575 mov -12(%rbp),%eax
33576 bswap %eax
33577+ pax_force_retaddr
33578 ret
33579
33580 bpf_slow_path_half:
33581@@ -117,12 +123,14 @@ bpf_slow_path_half:
33582 mov -12(%rbp),%ax
33583 rol $8,%ax
33584 movzwl %ax,%eax
33585+ pax_force_retaddr
33586 ret
33587
33588 bpf_slow_path_byte:
33589 bpf_slow_path_common(1)
33590 js bpf_error
33591 movzbl -12(%rbp),%eax
33592+ pax_force_retaddr
33593 ret
33594
33595 bpf_slow_path_byte_msh:
33596@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
33597 and $15,%al
33598 shl $2,%al
33599 xchg %eax,%ebx
33600+ pax_force_retaddr
33601 ret
33602
33603 #define sk_negative_common(SIZE) \
33604@@ -140,7 +149,7 @@ bpf_slow_path_byte_msh:
33605 push %r9; \
33606 push SKBDATA; \
33607 /* rsi already has offset */ \
33608- mov $SIZE,%ecx; /* size */ \
33609+ mov $SIZE,%edx; /* size */ \
33610 call bpf_internal_load_pointer_neg_helper; \
33611 test %rax,%rax; \
33612 pop SKBDATA; \
33613@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
33614 sk_negative_common(4)
33615 mov (%rax), %eax
33616 bswap %eax
33617+ pax_force_retaddr
33618 ret
33619
33620 bpf_slow_path_half_neg:
33621@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
33622 mov (%rax),%ax
33623 rol $8,%ax
33624 movzwl %ax,%eax
33625+ pax_force_retaddr
33626 ret
33627
33628 bpf_slow_path_byte_neg:
33629@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
33630 .globl sk_load_byte_negative_offset
33631 sk_negative_common(1)
33632 movzbl (%rax), %eax
33633+ pax_force_retaddr
33634 ret
33635
33636 bpf_slow_path_byte_msh_neg:
33637@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
33638 and $15,%al
33639 shl $2,%al
33640 xchg %eax,%ebx
33641+ pax_force_retaddr
33642 ret
33643
33644 bpf_error:
33645@@ -197,4 +210,5 @@ bpf_error:
33646 xor %eax,%eax
33647 mov -8(%rbp),%rbx
33648 leaveq
33649+ pax_force_retaddr
33650 ret
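[Editor's sketch] Every ret in the JIT helpers above gains a pax_force_retaddr. Our reading, stated here as an assumption rather than fact: under the amd64 KERNEXEC plugin, kernel return addresses carry a distinguishing top bit, and the macro sets bit 63 of the saved return address on the stack before returning, so a corrupted return address pointing into userland cannot be followed; hand-written assembly needs the fixup applied manually because the plugin only instruments compiled code. A C model of the masking idea under that assumption:

#include <stdio.h>

/* ASSUMPTION for illustration: KERNEXEC tags kernel code pointers by
 * forcing the top address bit, so user pointers (bit 63 clear) can
 * never be used as kernel return targets. */
static unsigned long force_retaddr(unsigned long ret)
{
	return ret | (1UL << 63);
}

int main(void)
{
	unsigned long smuggled_user_ptr = 0x00007f0012345678UL;
	printf("after fixup: %#lx\n", force_retaddr(smuggled_user_ptr));
	/* 0x80007f00... is non-canonical on x86-64, so the bogus return
	 * faults instead of executing attacker-mapped user memory. */
	return 0;
}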
33651diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
33652index 4ed75dd..3cf24f0b 100644
33653--- a/arch/x86/net/bpf_jit_comp.c
33654+++ b/arch/x86/net/bpf_jit_comp.c
33655@@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
33656 return ptr + len;
33657 }
33658
33659+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33660+#define MAX_INSTR_CODE_SIZE 96
33661+#else
33662+#define MAX_INSTR_CODE_SIZE 64
33663+#endif
33664+
33665 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
33666
33667 #define EMIT1(b1) EMIT(b1, 1)
33668 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
33669 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
33670 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
33671+
33672+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33673+/* original constant will appear in ecx */
33674+#define DILUTE_CONST_SEQUENCE(_off, _key) \
33675+do { \
33676+ /* mov ecx, randkey */ \
33677+ EMIT1(0xb9); \
33678+ EMIT(_key, 4); \
33679+ /* xor ecx, randkey ^ off */ \
33680+ EMIT2(0x81, 0xf1); \
33681+ EMIT((_key) ^ (_off), 4); \
33682+} while (0)
33683+
33684+#define EMIT1_off32(b1, _off) \
33685+do { \
33686+ switch (b1) { \
33687+ case 0x05: /* add eax, imm32 */ \
33688+ case 0x2d: /* sub eax, imm32 */ \
33689+ case 0x25: /* and eax, imm32 */ \
33690+ case 0x0d: /* or eax, imm32 */ \
33691+ case 0xb8: /* mov eax, imm32 */ \
33692+ case 0x35: /* xor eax, imm32 */ \
33693+ case 0x3d: /* cmp eax, imm32 */ \
33694+ case 0xa9: /* test eax, imm32 */ \
33695+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33696+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
33697+ break; \
33698+ case 0xbb: /* mov ebx, imm32 */ \
33699+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33700+ /* mov ebx, ecx */ \
33701+ EMIT2(0x89, 0xcb); \
33702+ break; \
33703+ case 0xbe: /* mov esi, imm32 */ \
33704+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33705+ /* mov esi, ecx */ \
33706+ EMIT2(0x89, 0xce); \
33707+ break; \
33708+ case 0xe8: /* call rel imm32, always to known funcs */ \
33709+ EMIT1(b1); \
33710+ EMIT(_off, 4); \
33711+ break; \
33712+ case 0xe9: /* jmp rel imm32 */ \
33713+ EMIT1(b1); \
33714+ EMIT(_off, 4); \
33715+ /* prevent fall-through, we're not called if off = 0 */ \
33716+ EMIT(0xcccccccc, 4); \
33717+ EMIT(0xcccccccc, 4); \
33718+ break; \
33719+ default: \
33720+ BUILD_BUG(); \
33721+ } \
33722+} while (0)
33723+
33724+#define EMIT2_off32(b1, b2, _off) \
33725+do { \
33726+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
33727+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
33728+ EMIT(randkey, 4); \
33729+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
33730+ EMIT((_off) - randkey, 4); \
33731+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
33732+ DILUTE_CONST_SEQUENCE(_off, randkey); \
33733+ /* imul eax, ecx */ \
33734+ EMIT3(0x0f, 0xaf, 0xc1); \
33735+ } else { \
33736+ BUILD_BUG(); \
33737+ } \
33738+} while (0)
33739+#else
33740 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
33741+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
33742+#endif
33743
33744 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
33745 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
33746@@ -91,6 +168,24 @@ do { \
33747 #define X86_JBE 0x76
33748 #define X86_JA 0x77
33749
33750+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33751+#define APPEND_FLOW_VERIFY() \
33752+do { \
33753+ /* mov ecx, randkey */ \
33754+ EMIT1(0xb9); \
33755+ EMIT(randkey, 4); \
33756+ /* cmp ecx, randkey */ \
33757+ EMIT2(0x81, 0xf9); \
33758+ EMIT(randkey, 4); \
33759+ /* jz after 8 int 3s */ \
33760+ EMIT2(0x74, 0x08); \
33761+ EMIT(0xcccccccc, 4); \
33762+ EMIT(0xcccccccc, 4); \
33763+} while (0)
33764+#else
33765+#define APPEND_FLOW_VERIFY() do { } while (0)
33766+#endif
33767+
33768 #define EMIT_COND_JMP(op, offset) \
33769 do { \
33770 if (is_near(offset)) \
33771@@ -98,6 +193,7 @@ do { \
33772 else { \
33773 EMIT2(0x0f, op + 0x10); \
33774 EMIT(offset, 4); /* jxx .+off32 */ \
33775+ APPEND_FLOW_VERIFY(); \
33776 } \
33777 } while (0)
33778
33779@@ -145,55 +241,54 @@ static int pkt_type_offset(void)
33780 return -1;
33781 }
33782
33783-struct bpf_binary_header {
33784- unsigned int pages;
33785- /* Note : for security reasons, bpf code will follow a randomly
33786- * sized amount of int3 instructions
33787- */
33788- u8 image[];
33789-};
33790-
33791-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
33792+/* Note : for security reasons, bpf code will follow a randomly
33793+ * sized amount of int3 instructions
33794+ */
33795+static u8 *bpf_alloc_binary(unsigned int proglen,
33796 u8 **image_ptr)
33797 {
33798 unsigned int sz, hole;
33799- struct bpf_binary_header *header;
33800+ u8 *header;
33801
33802 /* Most of BPF filters are really small,
33803 * but if some of them fill a page, allow at least
33804 * 128 extra bytes to insert a random section of int3
33805 */
33806- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
33807- header = module_alloc(sz);
33808+ sz = round_up(proglen + 128, PAGE_SIZE);
33809+ header = module_alloc_exec(sz);
33810 if (!header)
33811 return NULL;
33812
33813+ pax_open_kernel();
33814 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
33815+ pax_close_kernel();
33816
33817- header->pages = sz / PAGE_SIZE;
33818- hole = sz - (proglen + sizeof(*header));
33819+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
33820
33821 /* insert a random number of int3 instructions before BPF code */
33822- *image_ptr = &header->image[prandom_u32() % hole];
33823+ *image_ptr = &header[prandom_u32() % hole];
33824 return header;
33825 }
33826
33827 void bpf_jit_compile(struct sk_filter *fp)
33828 {
33829- u8 temp[64];
33830+ u8 temp[MAX_INSTR_CODE_SIZE];
33831 u8 *prog;
33832 unsigned int proglen, oldproglen = 0;
33833 int ilen, i;
33834 int t_offset, f_offset;
33835 u8 t_op, f_op, seen = 0, pass;
33836 u8 *image = NULL;
33837- struct bpf_binary_header *header = NULL;
33838+ u8 *header = NULL;
33839 u8 *func;
33840 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
33841 unsigned int cleanup_addr; /* epilogue code offset */
33842 unsigned int *addrs;
33843 const struct sock_filter *filter = fp->insns;
33844 int flen = fp->len;
33845+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33846+ unsigned int randkey;
33847+#endif
33848
33849 if (!bpf_jit_enable)
33850 return;
33851@@ -203,10 +298,10 @@ void bpf_jit_compile(struct sk_filter *fp)
33852 return;
33853
33854 /* Before first pass, make a rough estimation of addrs[]
33855- * each bpf instruction is translated to less than 64 bytes
33856+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
33857 */
33858 for (proglen = 0, i = 0; i < flen; i++) {
33859- proglen += 64;
33860+ proglen += MAX_INSTR_CODE_SIZE;
33861 addrs[i] = proglen;
33862 }
33863 cleanup_addr = proglen; /* epilogue address */
33864@@ -285,6 +380,10 @@ void bpf_jit_compile(struct sk_filter *fp)
33865 for (i = 0; i < flen; i++) {
33866 unsigned int K = filter[i].k;
33867
33868+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33869+ randkey = prandom_u32();
33870+#endif
33871+
33872 switch (filter[i].code) {
33873 case BPF_S_ALU_ADD_X: /* A += X; */
33874 seen |= SEEN_XREG;
33875@@ -317,10 +416,8 @@ void bpf_jit_compile(struct sk_filter *fp)
33876 case BPF_S_ALU_MUL_K: /* A *= K */
33877 if (is_imm8(K))
33878 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
33879- else {
33880- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
33881- EMIT(K, 4);
33882- }
33883+ else
33884+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
33885 break;
33886 case BPF_S_ALU_DIV_X: /* A /= X; */
33887 seen |= SEEN_XREG;
33888@@ -364,7 +461,11 @@ void bpf_jit_compile(struct sk_filter *fp)
33889 break;
33890 }
33891 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33892+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33893+ DILUTE_CONST_SEQUENCE(K, randkey);
33894+#else
33895 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33896+#endif
33897 EMIT2(0xf7, 0xf1); /* div %ecx */
33898 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
33899 break;
33900@@ -372,7 +473,11 @@ void bpf_jit_compile(struct sk_filter *fp)
33901 if (K == 1)
33902 break;
33903 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33904+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33905+ DILUTE_CONST_SEQUENCE(K, randkey);
33906+#else
33907 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33908+#endif
33909 EMIT2(0xf7, 0xf1); /* div %ecx */
33910 break;
33911 case BPF_S_ALU_AND_X:
33912@@ -643,8 +748,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
33913 if (is_imm8(K)) {
33914 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
33915 } else {
33916- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
33917- EMIT(K, 4);
33918+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
33919 }
33920 } else {
33921 EMIT2(0x89,0xde); /* mov %ebx,%esi */
33922@@ -734,10 +838,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33923 if (unlikely(proglen + ilen > oldproglen)) {
33924 pr_err("bpb_jit_compile fatal error\n");
33925 kfree(addrs);
33926- module_free(NULL, header);
33927+ module_free_exec(NULL, image);
33928 return;
33929 }
33930+ pax_open_kernel();
33931 memcpy(image + proglen, temp, ilen);
33932+ pax_close_kernel();
33933 }
33934 proglen += ilen;
33935 addrs[i] = proglen;
33936@@ -770,7 +876,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33937
33938 if (image) {
33939 bpf_flush_icache(header, image + proglen);
33940- set_memory_ro((unsigned long)header, header->pages);
33941 fp->bpf_func = (void *)image;
33942 }
33943 out:
33944@@ -782,10 +887,9 @@ static void bpf_jit_free_deferred(struct work_struct *work)
33945 {
33946 struct sk_filter *fp = container_of(work, struct sk_filter, work);
33947 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
33948- struct bpf_binary_header *header = (void *)addr;
33949
33950- set_memory_rw(addr, header->pages);
33951- module_free(NULL, header);
33952+ set_memory_rw(addr, 1);
33953+ module_free_exec(NULL, (void *)addr);
33954 kfree(fp);
33955 }
33956
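[Editor's sketch] GRKERNSEC_JIT_HARDEN blinds attacker-supplied BPF immediates: instead of emitting op eax, K, whose four immediate bytes the attacker chose and could target with a misaligned jump, DILUTE_CONST_SEQUENCE emits mov ecx, key followed by xor ecx, key ^ K, so only key and key ^ K, each useless without the other, ever appear in executable memory, and the operation then runs register-to-register. A standalone demonstration of the identity the sequence relies on, with the per-instruction prandom_u32() key replaced by a fixed demo value:

#include <stdio.h>
#include <stdint.h>

/* Blind one 32-bit immediate: return the two constants that will be
 * embedded in the JIT output in place of K itself. */
static void dilute_const(uint32_t K, uint32_t key,
			 uint32_t *imm1, uint32_t *imm2)
{
	*imm1 = key;            /* mov ecx, imm1 */
	*imm2 = key ^ K;        /* xor ecx, imm2  ->  ecx == K */
}

int main(void)
{
	uint32_t K   = 0xdeadbeef;          /* attacker-chosen constant   */
	uint32_t key = 0x13572468;          /* per-insn random key (demo) */
	uint32_t a, b;

	dilute_const(K, key, &a, &b);
	printf("emitted: %#x %#x\n", a, b);  /* neither equals K          */
	printf("ecx ends up: %#x\n", a ^ b); /* reconstructs K at runtime */
	return 0;
}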
33957diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
33958index 5d04be5..2beeaa2 100644
33959--- a/arch/x86/oprofile/backtrace.c
33960+++ b/arch/x86/oprofile/backtrace.c
33961@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
33962 struct stack_frame_ia32 *fp;
33963 unsigned long bytes;
33964
33965- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33966+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33967 if (bytes != 0)
33968 return NULL;
33969
33970- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
33971+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
33972
33973 oprofile_add_trace(bufhead[0].return_address);
33974
33975@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
33976 struct stack_frame bufhead[2];
33977 unsigned long bytes;
33978
33979- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33980+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33981 if (bytes != 0)
33982 return NULL;
33983
33984@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
33985 {
33986 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
33987
33988- if (!user_mode_vm(regs)) {
33989+ if (!user_mode(regs)) {
33990 unsigned long stack = kernel_stack_pointer(regs);
33991 if (depth)
33992 dump_trace(NULL, regs, (unsigned long *)stack, 0,
33993diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
33994index 6890d84..1dad1f1 100644
33995--- a/arch/x86/oprofile/nmi_int.c
33996+++ b/arch/x86/oprofile/nmi_int.c
33997@@ -23,6 +23,7 @@
33998 #include <asm/nmi.h>
33999 #include <asm/msr.h>
34000 #include <asm/apic.h>
34001+#include <asm/pgtable.h>
34002
34003 #include "op_counter.h"
34004 #include "op_x86_model.h"
34005@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34006 if (ret)
34007 return ret;
34008
34009- if (!model->num_virt_counters)
34010- model->num_virt_counters = model->num_counters;
34011+ if (!model->num_virt_counters) {
34012+ pax_open_kernel();
34013+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34014+ pax_close_kernel();
34015+ }
34016
34017 mux_init(ops);
34018
34019diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34020index 50d86c0..7985318 100644
34021--- a/arch/x86/oprofile/op_model_amd.c
34022+++ b/arch/x86/oprofile/op_model_amd.c
34023@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34024 num_counters = AMD64_NUM_COUNTERS;
34025 }
34026
34027- op_amd_spec.num_counters = num_counters;
34028- op_amd_spec.num_controls = num_counters;
34029- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34030+ pax_open_kernel();
34031+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34032+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34033+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34034+ pax_close_kernel();
34035
34036 return 0;
34037 }
34038diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34039index d90528e..0127e2b 100644
34040--- a/arch/x86/oprofile/op_model_ppro.c
34041+++ b/arch/x86/oprofile/op_model_ppro.c
34042@@ -19,6 +19,7 @@
34043 #include <asm/msr.h>
34044 #include <asm/apic.h>
34045 #include <asm/nmi.h>
34046+#include <asm/pgtable.h>
34047
34048 #include "op_x86_model.h"
34049 #include "op_counter.h"
34050@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34051
34052 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34053
34054- op_arch_perfmon_spec.num_counters = num_counters;
34055- op_arch_perfmon_spec.num_controls = num_counters;
34056+ pax_open_kernel();
34057+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34058+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34059+ pax_close_kernel();
34060 }
34061
34062 static int arch_perfmon_init(struct oprofile_operations *ignore)
34063diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34064index 71e8a67..6a313bb 100644
34065--- a/arch/x86/oprofile/op_x86_model.h
34066+++ b/arch/x86/oprofile/op_x86_model.h
34067@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34068 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34069 struct op_msrs const * const msrs);
34070 #endif
34071-};
34072+} __do_const;
34073
34074 struct op_counter_config;
34075
34076diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34077index 51384ca..a25f51e 100644
34078--- a/arch/x86/pci/intel_mid_pci.c
34079+++ b/arch/x86/pci/intel_mid_pci.c
34080@@ -241,7 +241,7 @@ int __init intel_mid_pci_init(void)
34081 pr_info("Intel MID platform detected, using MID PCI ops\n");
34082 pci_mmcfg_late_init();
34083 pcibios_enable_irq = intel_mid_pci_irq_enable;
34084- pci_root_ops = intel_mid_pci_ops;
34085+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34086 pci_soc_mode = 1;
34087 /* Continue with standard init */
34088 return 1;
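The same constification shows up here as a whole-structure install: pci_root_ops is const-qualified after the change, so plain structure assignment no longer compiles, and the patch copies through a cast instead. A hedged sketch of the pattern (names illustrative):

	/* Installing into a const-qualified ops object once at boot;
	 * memcpy through a (void *) cast replaces `dst = src'. */
	static const struct pci_ops dst;

	static void __init install_ops(const struct pci_ops *src)
	{
		memcpy((void *)&dst, src, sizeof(dst));
	}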
34089diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34090index 372e9b8..e775a6c 100644
34091--- a/arch/x86/pci/irq.c
34092+++ b/arch/x86/pci/irq.c
34093@@ -50,7 +50,7 @@ struct irq_router {
34094 struct irq_router_handler {
34095 u16 vendor;
34096 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34097-};
34098+} __do_const;
34099
34100 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34101 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
34102@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34103 return 0;
34104 }
34105
34106-static __initdata struct irq_router_handler pirq_routers[] = {
34107+static __initconst const struct irq_router_handler pirq_routers[] = {
34108 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34109 { PCI_VENDOR_ID_AL, ali_router_probe },
34110 { PCI_VENDOR_ID_ITE, ite_router_probe },
34111@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
34112 static void __init pirq_find_router(struct irq_router *r)
34113 {
34114 struct irq_routing_table *rt = pirq_table;
34115- struct irq_router_handler *h;
34116+ const struct irq_router_handler *h;
34117
34118 #ifdef CONFIG_PCI_BIOS
34119 if (!rt->signature) {
34120@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34121 return 0;
34122 }
34123
34124-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34125+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34126 {
34127 .callback = fix_broken_hp_bios_irq9,
34128 .ident = "HP Pavilion N5400 Series Laptop",
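Both tables in this file move from __initdata to const __initconst, and the walker takes a const pointer, so the router list is never writable after build time. A small sketch of the resulting const-correct table walk (handler names are hypothetical, not from the patch):

	struct handler {
		u16 vendor;
		int (*probe)(u16 device);
	};

	static int __init dummy_probe(u16 device) { return 0; }

	static const struct handler handlers[] __initconst = {
		{ 0x8086, dummy_probe },
		{ 0, NULL }
	};

	static int __init find_handler(u16 vendor, u16 device)
	{
		const struct handler *h;

		for (h = handlers; h->vendor; h++)
			if (h->vendor == vendor && h->probe(device))
				return 1;
		return 0;
	}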
34129diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34130index c77b24a..c979855 100644
34131--- a/arch/x86/pci/pcbios.c
34132+++ b/arch/x86/pci/pcbios.c
34133@@ -79,7 +79,7 @@ union bios32 {
34134 static struct {
34135 unsigned long address;
34136 unsigned short segment;
34137-} bios32_indirect = { 0, __KERNEL_CS };
34138+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
34139
34140 /*
34141 * Returns the entry point for the given service, NULL on error
34142@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
34143 unsigned long length; /* %ecx */
34144 unsigned long entry; /* %edx */
34145 unsigned long flags;
34146+ struct desc_struct d, *gdt;
34147
34148 local_irq_save(flags);
34149- __asm__("lcall *(%%edi); cld"
34150+
34151+ gdt = get_cpu_gdt_table(smp_processor_id());
34152+
34153+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34154+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34155+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34156+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34157+
34158+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34159 : "=a" (return_code),
34160 "=b" (address),
34161 "=c" (length),
34162 "=d" (entry)
34163 : "0" (service),
34164 "1" (0),
34165- "D" (&bios32_indirect));
34166+ "D" (&bios32_indirect),
34167+ "r"(__PCIBIOS_DS)
34168+ : "memory");
34169+
34170+ pax_open_kernel();
34171+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34172+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34173+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34174+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34175+ pax_close_kernel();
34176+
34177 local_irq_restore(flags);
34178
34179 switch (return_code) {
34180- case 0:
34181- return address + entry;
34182- case 0x80: /* Not present */
34183- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34184- return 0;
34185- default: /* Shouldn't happen */
34186- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34187- service, return_code);
34188+ case 0: {
34189+ int cpu;
34190+ unsigned char flags;
34191+
34192+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34193+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34194+ printk(KERN_WARNING "bios32_service: not valid\n");
34195 return 0;
34196+ }
34197+ address = address + PAGE_OFFSET;
34198+ length += 16UL; /* some BIOSs underreport this... */
34199+ flags = 4;
34200+ if (length >= 64*1024*1024) {
34201+ length >>= PAGE_SHIFT;
34202+ flags |= 8;
34203+ }
34204+
34205+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34206+ gdt = get_cpu_gdt_table(cpu);
34207+ pack_descriptor(&d, address, length, 0x9b, flags);
34208+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34209+ pack_descriptor(&d, address, length, 0x93, flags);
34210+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34211+ }
34212+ return entry;
34213+ }
34214+ case 0x80: /* Not present */
34215+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34216+ return 0;
34217+ default: /* Shouldn't happen */
34218+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34219+ service, return_code);
34220+ return 0;
34221 }
34222 }
34223
34224 static struct {
34225 unsigned long address;
34226 unsigned short segment;
34227-} pci_indirect = { 0, __KERNEL_CS };
34228+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34229
34230-static int pci_bios_present;
34231+static int pci_bios_present __read_only;
34232
34233 static int check_pcibios(void)
34234 {
34235@@ -131,11 +174,13 @@ static int check_pcibios(void)
34236 unsigned long flags, pcibios_entry;
34237
34238 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34239- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34240+ pci_indirect.address = pcibios_entry;
34241
34242 local_irq_save(flags);
34243- __asm__(
34244- "lcall *(%%edi); cld\n\t"
34245+ __asm__("movw %w6, %%ds\n\t"
34246+ "lcall *%%ss:(%%edi); cld\n\t"
34247+ "push %%ss\n\t"
34248+ "pop %%ds\n\t"
34249 "jc 1f\n\t"
34250 "xor %%ah, %%ah\n"
34251 "1:"
34252@@ -144,7 +189,8 @@ static int check_pcibios(void)
34253 "=b" (ebx),
34254 "=c" (ecx)
34255 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34256- "D" (&pci_indirect)
34257+ "D" (&pci_indirect),
34258+ "r" (__PCIBIOS_DS)
34259 : "memory");
34260 local_irq_restore(flags);
34261
34262@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34263
34264 switch (len) {
34265 case 1:
34266- __asm__("lcall *(%%esi); cld\n\t"
34267+ __asm__("movw %w6, %%ds\n\t"
34268+ "lcall *%%ss:(%%esi); cld\n\t"
34269+ "push %%ss\n\t"
34270+ "pop %%ds\n\t"
34271 "jc 1f\n\t"
34272 "xor %%ah, %%ah\n"
34273 "1:"
34274@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34275 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34276 "b" (bx),
34277 "D" ((long)reg),
34278- "S" (&pci_indirect));
34279+ "S" (&pci_indirect),
34280+ "r" (__PCIBIOS_DS));
34281 /*
34282 * Zero-extend the result beyond 8 bits, do not trust the
34283 * BIOS having done it:
34284@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34285 *value &= 0xff;
34286 break;
34287 case 2:
34288- __asm__("lcall *(%%esi); cld\n\t"
34289+ __asm__("movw %w6, %%ds\n\t"
34290+ "lcall *%%ss:(%%esi); cld\n\t"
34291+ "push %%ss\n\t"
34292+ "pop %%ds\n\t"
34293 "jc 1f\n\t"
34294 "xor %%ah, %%ah\n"
34295 "1:"
34296@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34297 : "1" (PCIBIOS_READ_CONFIG_WORD),
34298 "b" (bx),
34299 "D" ((long)reg),
34300- "S" (&pci_indirect));
34301+ "S" (&pci_indirect),
34302+ "r" (__PCIBIOS_DS));
34303 /*
34304 * Zero-extend the result beyond 16 bits, do not trust the
34305 * BIOS having done it:
34306@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34307 *value &= 0xffff;
34308 break;
34309 case 4:
34310- __asm__("lcall *(%%esi); cld\n\t"
34311+ __asm__("movw %w6, %%ds\n\t"
34312+ "lcall *%%ss:(%%esi); cld\n\t"
34313+ "push %%ss\n\t"
34314+ "pop %%ds\n\t"
34315 "jc 1f\n\t"
34316 "xor %%ah, %%ah\n"
34317 "1:"
34318@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34319 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34320 "b" (bx),
34321 "D" ((long)reg),
34322- "S" (&pci_indirect));
34323+ "S" (&pci_indirect),
34324+ "r" (__PCIBIOS_DS));
34325 break;
34326 }
34327
34328@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34329
34330 switch (len) {
34331 case 1:
34332- __asm__("lcall *(%%esi); cld\n\t"
34333+ __asm__("movw %w6, %%ds\n\t"
34334+ "lcall *%%ss:(%%esi); cld\n\t"
34335+ "push %%ss\n\t"
34336+ "pop %%ds\n\t"
34337 "jc 1f\n\t"
34338 "xor %%ah, %%ah\n"
34339 "1:"
34340@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34341 "c" (value),
34342 "b" (bx),
34343 "D" ((long)reg),
34344- "S" (&pci_indirect));
34345+ "S" (&pci_indirect),
34346+ "r" (__PCIBIOS_DS));
34347 break;
34348 case 2:
34349- __asm__("lcall *(%%esi); cld\n\t"
34350+ __asm__("movw %w6, %%ds\n\t"
34351+ "lcall *%%ss:(%%esi); cld\n\t"
34352+ "push %%ss\n\t"
34353+ "pop %%ds\n\t"
34354 "jc 1f\n\t"
34355 "xor %%ah, %%ah\n"
34356 "1:"
34357@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34358 "c" (value),
34359 "b" (bx),
34360 "D" ((long)reg),
34361- "S" (&pci_indirect));
34362+ "S" (&pci_indirect),
34363+ "r" (__PCIBIOS_DS));
34364 break;
34365 case 4:
34366- __asm__("lcall *(%%esi); cld\n\t"
34367+ __asm__("movw %w6, %%ds\n\t"
34368+ "lcall *%%ss:(%%esi); cld\n\t"
34369+ "push %%ss\n\t"
34370+ "pop %%ds\n\t"
34371 "jc 1f\n\t"
34372 "xor %%ah, %%ah\n"
34373 "1:"
34374@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34375 "c" (value),
34376 "b" (bx),
34377 "D" ((long)reg),
34378- "S" (&pci_indirect));
34379+ "S" (&pci_indirect),
34380+ "r" (__PCIBIOS_DS));
34381 break;
34382 }
34383
34384@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34385
34386 DBG("PCI: Fetching IRQ routing table... ");
34387 __asm__("push %%es\n\t"
34388+ "movw %w8, %%ds\n\t"
34389 "push %%ds\n\t"
34390 "pop %%es\n\t"
34391- "lcall *(%%esi); cld\n\t"
34392+ "lcall *%%ss:(%%esi); cld\n\t"
34393 "pop %%es\n\t"
34394+ "push %%ss\n\t"
34395+ "pop %%ds\n"
34396 "jc 1f\n\t"
34397 "xor %%ah, %%ah\n"
34398 "1:"
34399@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34400 "1" (0),
34401 "D" ((long) &opt),
34402 "S" (&pci_indirect),
34403- "m" (opt)
34404+ "m" (opt),
34405+ "r" (__PCIBIOS_DS)
34406 : "memory");
34407 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34408 if (ret & 0xff00)
34409@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34410 {
34411 int ret;
34412
34413- __asm__("lcall *(%%esi); cld\n\t"
34414+ __asm__("movw %w5, %%ds\n\t"
34415+ "lcall *%%ss:(%%esi); cld\n\t"
34416+ "push %%ss\n\t"
34417+ "pop %%ds\n"
34418 "jc 1f\n\t"
34419 "xor %%ah, %%ah\n"
34420 "1:"
34421@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34422 : "0" (PCIBIOS_SET_PCI_HW_INT),
34423 "b" ((dev->bus->number << 8) | dev->devfn),
34424 "c" ((irq << 8) | (pin + 10)),
34425- "S" (&pci_indirect));
34426+ "S" (&pci_indirect),
34427+ "r" (__PCIBIOS_DS));
34428 return !(ret & 0xff00);
34429 }
34430 EXPORT_SYMBOL(pcibios_set_irq_routing);
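The thread running through this file: under KERNEXEC the kernel code segment no longer covers the BIOS entry points with a flat base, so every BIOS32/PCI BIOS call goes through dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors, with %ds switched around the lcall and restored from %ss afterwards. The carrier for the far call is a packed {offset, segment} pair; a stripped-down sketch of that idiom (32-bit only, the selector value is assumed to come from the PaX GDT layout):

	/* Far call through an {offset, segment} pair in memory, as with
	 * pci_indirect above; the asm mirrors the patch's "lcall *(%%edi)". */
	static struct {
		unsigned long offset;
		unsigned short segment;
	} far_ptr __read_only = { 0, __PCIBIOS_CS };

	static unsigned long call_far(unsigned long arg)
	{
		unsigned long ret;

		__asm__ __volatile__("lcall *(%%edi); cld"
				     : "=a" (ret)
				     : "0" (arg), "D" (&far_ptr)
				     : "memory");
		return ret;
	}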
34431diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34432index 40e4469..d915bf9 100644
34433--- a/arch/x86/platform/efi/efi_32.c
34434+++ b/arch/x86/platform/efi/efi_32.c
34435@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
34436 {
34437 struct desc_ptr gdt_descr;
34438
34439+#ifdef CONFIG_PAX_KERNEXEC
34440+ struct desc_struct d;
34441+#endif
34442+
34443 local_irq_save(efi_rt_eflags);
34444
34445 load_cr3(initial_page_table);
34446 __flush_tlb_all();
34447
34448+#ifdef CONFIG_PAX_KERNEXEC
34449+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34450+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34451+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34452+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34453+#endif
34454+
34455 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34456 gdt_descr.size = GDT_SIZE - 1;
34457 load_gdt(&gdt_descr);
34458@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
34459 {
34460 struct desc_ptr gdt_descr;
34461
34462+#ifdef CONFIG_PAX_KERNEXEC
34463+ struct desc_struct d;
34464+
34465+ memset(&d, 0, sizeof d);
34466+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34467+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34468+#endif
34469+
34470 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34471 gdt_descr.size = GDT_SIZE - 1;
34472 load_gdt(&gdt_descr);
34473
34474+#ifdef CONFIG_PAX_PER_CPU_PGD
34475+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34476+#else
34477 load_cr3(swapper_pg_dir);
34478+#endif
34479+
34480 __flush_tlb_all();
34481
34482 local_irq_restore(efi_rt_eflags);
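For the 32-bit EFI path the prolog builds two temporary flat descriptors so the firmware can be entered while the identity mapping is live, and the epilog wipes them again. Spelling out the pack_descriptor() arguments used above: base 0, limit 0xFFFFF with 4 KiB granularity (flags 0xC, 32-bit), access byte 0x9B for a present/DPL0/execute-read code segment and 0x93 for a present/DPL0/read-write data segment. An annotated restatement of the prolog's calls:

	struct desc_struct d;

	/* code: present, DPL0, exec/read; limit 0xFFFFF pages = 4 GiB */
	pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
	/* data: present, DPL0, read/write; same base and limit */
	pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);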
34483diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34484index 39a0e7f1..872396e 100644
34485--- a/arch/x86/platform/efi/efi_64.c
34486+++ b/arch/x86/platform/efi/efi_64.c
34487@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
34488 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34489 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34490 }
34491+
34492+#ifdef CONFIG_PAX_PER_CPU_PGD
34493+ load_cr3(swapper_pg_dir);
34494+#endif
34495+
34496 __flush_tlb_all();
34497 }
34498
34499@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
34500 for (pgd = 0; pgd < n_pgds; pgd++)
34501 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34502 kfree(save_pgd);
34503+
34504+#ifdef CONFIG_PAX_PER_CPU_PGD
34505+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34506+#endif
34507+
34508 __flush_tlb_all();
34509 local_irq_restore(efi_flags);
34510 early_code_mapping_set_exec(0);
34511diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34512index fbe66e6..eae5e38 100644
34513--- a/arch/x86/platform/efi/efi_stub_32.S
34514+++ b/arch/x86/platform/efi/efi_stub_32.S
34515@@ -6,7 +6,9 @@
34516 */
34517
34518 #include <linux/linkage.h>
34519+#include <linux/init.h>
34520 #include <asm/page_types.h>
34521+#include <asm/segment.h>
34522
34523 /*
34524 * efi_call_phys(void *, ...) is a function with variable parameters.
34525@@ -20,7 +22,7 @@
34526 * service functions will comply with gcc calling convention, too.
34527 */
34528
34529-.text
34530+__INIT
34531 ENTRY(efi_call_phys)
34532 /*
34533 * 0. The function can only be called in Linux kernel. So CS has been
34534@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34535 * The mapping of lower virtual memory has been created in prelog and
34536 * epilog.
34537 */
34538- movl $1f, %edx
34539- subl $__PAGE_OFFSET, %edx
34540- jmp *%edx
34541+#ifdef CONFIG_PAX_KERNEXEC
34542+ movl $(__KERNEXEC_EFI_DS), %edx
34543+ mov %edx, %ds
34544+ mov %edx, %es
34545+ mov %edx, %ss
34546+ addl $2f,(1f)
34547+ ljmp *(1f)
34548+
34549+__INITDATA
34550+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34551+.previous
34552+
34553+2:
34554+ subl $2b,(1b)
34555+#else
34556+ jmp 1f-__PAGE_OFFSET
34557 1:
34558+#endif
34559
34560 /*
34561 * 2. Now on the top of stack is the return
34562@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34563 * parameter 2, ..., param n. To make things easy, we save the return
34564 * address of efi_call_phys in a global variable.
34565 */
34566- popl %edx
34567- movl %edx, saved_return_addr
34568- /* get the function pointer into ECX*/
34569- popl %ecx
34570- movl %ecx, efi_rt_function_ptr
34571- movl $2f, %edx
34572- subl $__PAGE_OFFSET, %edx
34573- pushl %edx
34574+ popl (saved_return_addr)
34575+ popl (efi_rt_function_ptr)
34576
34577 /*
34578 * 3. Clear PG bit in %CR0.
34579@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34580 /*
34581 * 5. Call the physical function.
34582 */
34583- jmp *%ecx
34584+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
34585
34586-2:
34587 /*
34588 * 6. After EFI runtime service returns, control will return to
34589 * following instruction. We'd better readjust stack pointer first.
34590@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
34591 movl %cr0, %edx
34592 orl $0x80000000, %edx
34593 movl %edx, %cr0
34594- jmp 1f
34595-1:
34596+
34597 /*
34598 * 8. Now restore the virtual mode from flat mode by
34599 * adding EIP with PAGE_OFFSET.
34600 */
34601- movl $1f, %edx
34602- jmp *%edx
34603+#ifdef CONFIG_PAX_KERNEXEC
34604+ movl $(__KERNEL_DS), %edx
34605+ mov %edx, %ds
34606+ mov %edx, %es
34607+ mov %edx, %ss
34608+ ljmp $(__KERNEL_CS),$1f
34609+#else
34610+ jmp 1f+__PAGE_OFFSET
34611+#endif
34612 1:
34613
34614 /*
34615 * 9. Balance the stack. And because EAX contain the return value,
34616 * we'd better not clobber it.
34617 */
34618- leal efi_rt_function_ptr, %edx
34619- movl (%edx), %ecx
34620- pushl %ecx
34621+ pushl (efi_rt_function_ptr)
34622
34623 /*
34624- * 10. Push the saved return address onto the stack and return.
34625+ * 10. Return to the saved return address.
34626 */
34627- leal saved_return_addr, %edx
34628- movl (%edx), %ecx
34629- pushl %ecx
34630- ret
34631+ jmpl *(saved_return_addr)
34632 ENDPROC(efi_call_phys)
34633 .previous
34634
34635-.data
34636+__INITDATA
34637 saved_return_addr:
34638 .long 0
34639 efi_rt_function_ptr:
34640diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
34641index 4c07cca..2c8427d 100644
34642--- a/arch/x86/platform/efi/efi_stub_64.S
34643+++ b/arch/x86/platform/efi/efi_stub_64.S
34644@@ -7,6 +7,7 @@
34645 */
34646
34647 #include <linux/linkage.h>
34648+#include <asm/alternative-asm.h>
34649
34650 #define SAVE_XMM \
34651 mov %rsp, %rax; \
34652@@ -40,6 +41,7 @@ ENTRY(efi_call0)
34653 call *%rdi
34654 addq $32, %rsp
34655 RESTORE_XMM
34656+ pax_force_retaddr 0, 1
34657 ret
34658 ENDPROC(efi_call0)
34659
34660@@ -50,6 +52,7 @@ ENTRY(efi_call1)
34661 call *%rdi
34662 addq $32, %rsp
34663 RESTORE_XMM
34664+ pax_force_retaddr 0, 1
34665 ret
34666 ENDPROC(efi_call1)
34667
34668@@ -60,6 +63,7 @@ ENTRY(efi_call2)
34669 call *%rdi
34670 addq $32, %rsp
34671 RESTORE_XMM
34672+ pax_force_retaddr 0, 1
34673 ret
34674 ENDPROC(efi_call2)
34675
34676@@ -71,6 +75,7 @@ ENTRY(efi_call3)
34677 call *%rdi
34678 addq $32, %rsp
34679 RESTORE_XMM
34680+ pax_force_retaddr 0, 1
34681 ret
34682 ENDPROC(efi_call3)
34683
34684@@ -83,6 +88,7 @@ ENTRY(efi_call4)
34685 call *%rdi
34686 addq $32, %rsp
34687 RESTORE_XMM
34688+ pax_force_retaddr 0, 1
34689 ret
34690 ENDPROC(efi_call4)
34691
34692@@ -96,6 +102,7 @@ ENTRY(efi_call5)
34693 call *%rdi
34694 addq $48, %rsp
34695 RESTORE_XMM
34696+ pax_force_retaddr 0, 1
34697 ret
34698 ENDPROC(efi_call5)
34699
34700@@ -112,5 +119,6 @@ ENTRY(efi_call6)
34701 call *%rdi
34702 addq $48, %rsp
34703 RESTORE_XMM
34704+ pax_force_retaddr 0, 1
34705 ret
34706 ENDPROC(efi_call6)
34707diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
34708index f90e290..435f0dd 100644
34709--- a/arch/x86/platform/intel-mid/intel-mid.c
34710+++ b/arch/x86/platform/intel-mid/intel-mid.c
34711@@ -65,9 +65,10 @@ static void intel_mid_power_off(void)
34712 {
34713 }
34714
34715-static void intel_mid_reboot(void)
34716+static void __noreturn intel_mid_reboot(void)
34717 {
34718 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
34719+ BUG();
34720 }
34721
34722 static unsigned long __init intel_mid_calibrate_tsc(void)
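intel_mid_reboot() gains __noreturn plus a trailing BUG() so the annotation is actually honored: if the IPC command fails to reset the machine, control must still never return to the caller. A minimal sketch of the contract (hypothetical function, not from the patch):

	/* A __noreturn hook must end in something the compiler can see
	 * diverges: BUG(), panic(), or an idle loop. */
	static void __noreturn example_halt(void)
	{
		for (;;)
			cpu_relax();
	}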
34723diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
34724index d6ee929..3637cb5 100644
34725--- a/arch/x86/platform/olpc/olpc_dt.c
34726+++ b/arch/x86/platform/olpc/olpc_dt.c
34727@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
34728 return res;
34729 }
34730
34731-static struct of_pdt_ops prom_olpc_ops __initdata = {
34732+static struct of_pdt_ops prom_olpc_ops __initconst = {
34733 .nextprop = olpc_dt_nextprop,
34734 .getproplen = olpc_dt_getproplen,
34735 .getproperty = olpc_dt_getproperty,
34736diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
34737index 424f4c9..f2a2988 100644
34738--- a/arch/x86/power/cpu.c
34739+++ b/arch/x86/power/cpu.c
34740@@ -137,11 +137,8 @@ static void do_fpu_end(void)
34741 static void fix_processor_context(void)
34742 {
34743 int cpu = smp_processor_id();
34744- struct tss_struct *t = &per_cpu(init_tss, cpu);
34745-#ifdef CONFIG_X86_64
34746- struct desc_struct *desc = get_cpu_gdt_table(cpu);
34747- tss_desc tss;
34748-#endif
34749+ struct tss_struct *t = init_tss + cpu;
34750+
34751 set_tss_desc(cpu, t); /*
34752 * This just modifies memory; should not be
34753 * necessary. But... This is necessary, because
34754@@ -150,10 +147,6 @@ static void fix_processor_context(void)
34755 */
34756
34757 #ifdef CONFIG_X86_64
34758- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
34759- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
34760- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
34761-
34762 syscall_init(); /* This sets MSR_*STAR and related */
34763 #endif
34764 load_TR_desc(); /* This does ltr */
34765diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
34766index a44f457..9140171 100644
34767--- a/arch/x86/realmode/init.c
34768+++ b/arch/x86/realmode/init.c
34769@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
34770 __va(real_mode_header->trampoline_header);
34771
34772 #ifdef CONFIG_X86_32
34773- trampoline_header->start = __pa_symbol(startup_32_smp);
34774+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
34775+
34776+#ifdef CONFIG_PAX_KERNEXEC
34777+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
34778+#endif
34779+
34780+ trampoline_header->boot_cs = __BOOT_CS;
34781 trampoline_header->gdt_limit = __BOOT_DS + 7;
34782 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
34783 #else
34784@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
34785 *trampoline_cr4_features = read_cr4();
34786
34787 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
34788- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
34789+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
34790 trampoline_pgd[511] = init_level4_pgt[511].pgd;
34791 #endif
34792 }
34793diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
34794index 9cac825..4890b25 100644
34795--- a/arch/x86/realmode/rm/Makefile
34796+++ b/arch/x86/realmode/rm/Makefile
34797@@ -79,5 +79,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
34798 $(call cc-option, -fno-unit-at-a-time)) \
34799 $(call cc-option, -fno-stack-protector) \
34800 $(call cc-option, -mpreferred-stack-boundary=2)
34801+ifdef CONSTIFY_PLUGIN
34802+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
34803+endif
34804 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
34805 GCOV_PROFILE := n
34806diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
34807index a28221d..93c40f1 100644
34808--- a/arch/x86/realmode/rm/header.S
34809+++ b/arch/x86/realmode/rm/header.S
34810@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
34811 #endif
34812 /* APM/BIOS reboot */
34813 .long pa_machine_real_restart_asm
34814-#ifdef CONFIG_X86_64
34815+#ifdef CONFIG_X86_32
34816+ .long __KERNEL_CS
34817+#else
34818 .long __KERNEL32_CS
34819 #endif
34820 END(real_mode_header)
34821diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
34822index c1b2791..f9e31c7 100644
34823--- a/arch/x86/realmode/rm/trampoline_32.S
34824+++ b/arch/x86/realmode/rm/trampoline_32.S
34825@@ -25,6 +25,12 @@
34826 #include <asm/page_types.h>
34827 #include "realmode.h"
34828
34829+#ifdef CONFIG_PAX_KERNEXEC
34830+#define ta(X) (X)
34831+#else
34832+#define ta(X) (pa_ ## X)
34833+#endif
34834+
34835 .text
34836 .code16
34837
34838@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
34839
34840 cli # We should be safe anyway
34841
34842- movl tr_start, %eax # where we need to go
34843-
34844 movl $0xA5A5A5A5, trampoline_status
34845 # write marker for master knows we're running
34846
34847@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
34848 movw $1, %dx # protected mode (PE) bit
34849 lmsw %dx # into protected mode
34850
34851- ljmpl $__BOOT_CS, $pa_startup_32
34852+ ljmpl *(trampoline_header)
34853
34854 .section ".text32","ax"
34855 .code32
34856@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
34857 .balign 8
34858 GLOBAL(trampoline_header)
34859 tr_start: .space 4
34860- tr_gdt_pad: .space 2
34861+ tr_boot_cs: .space 2
34862 tr_gdt: .space 6
34863 END(trampoline_header)
34864
34865diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
34866index bb360dc..d0fd8f8 100644
34867--- a/arch/x86/realmode/rm/trampoline_64.S
34868+++ b/arch/x86/realmode/rm/trampoline_64.S
34869@@ -94,6 +94,7 @@ ENTRY(startup_32)
34870 movl %edx, %gs
34871
34872 movl pa_tr_cr4, %eax
34873+ andl $~X86_CR4_PCIDE, %eax
34874 movl %eax, %cr4 # Enable PAE mode
34875
34876 # Setup trampoline 4 level pagetables
34877@@ -107,7 +108,7 @@ ENTRY(startup_32)
34878 wrmsr
34879
34880 # Enable paging and in turn activate Long Mode
34881- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
34882+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
34883 movl %eax, %cr0
34884
34885 /*
34886diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
34887index e812034..c747134 100644
34888--- a/arch/x86/tools/Makefile
34889+++ b/arch/x86/tools/Makefile
34890@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
34891
34892 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
34893
34894-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
34895+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
34896 hostprogs-y += relocs
34897 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
34898 relocs: $(obj)/relocs
34899diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
34900index f7bab68..b6d9886 100644
34901--- a/arch/x86/tools/relocs.c
34902+++ b/arch/x86/tools/relocs.c
34903@@ -1,5 +1,7 @@
34904 /* This is included from relocs_32/64.c */
34905
34906+#include "../../../include/generated/autoconf.h"
34907+
34908 #define ElfW(type) _ElfW(ELF_BITS, type)
34909 #define _ElfW(bits, type) __ElfW(bits, type)
34910 #define __ElfW(bits, type) Elf##bits##_##type
34911@@ -11,6 +13,7 @@
34912 #define Elf_Sym ElfW(Sym)
34913
34914 static Elf_Ehdr ehdr;
34915+static Elf_Phdr *phdr;
34916
34917 struct relocs {
34918 uint32_t *offset;
34919@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
34920 }
34921 }
34922
34923+static void read_phdrs(FILE *fp)
34924+{
34925+ unsigned int i;
34926+
34927+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
34928+ if (!phdr) {
34929+ die("Unable to allocate %d program headers\n",
34930+ ehdr.e_phnum);
34931+ }
34932+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
34933+ die("Seek to %d failed: %s\n",
34934+ ehdr.e_phoff, strerror(errno));
34935+ }
34936+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
34937+ die("Cannot read ELF program headers: %s\n",
34938+ strerror(errno));
34939+ }
34940+ for(i = 0; i < ehdr.e_phnum; i++) {
34941+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
34942+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
34943+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
34944+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
34945+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
34946+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
34947+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
34948+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
34949+ }
34950+
34951+}
34952+
34953 static void read_shdrs(FILE *fp)
34954 {
34955- int i;
34956+ unsigned int i;
34957 Elf_Shdr shdr;
34958
34959 secs = calloc(ehdr.e_shnum, sizeof(struct section));
34960@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
34961
34962 static void read_strtabs(FILE *fp)
34963 {
34964- int i;
34965+ unsigned int i;
34966 for (i = 0; i < ehdr.e_shnum; i++) {
34967 struct section *sec = &secs[i];
34968 if (sec->shdr.sh_type != SHT_STRTAB) {
34969@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
34970
34971 static void read_symtabs(FILE *fp)
34972 {
34973- int i,j;
34974+ unsigned int i,j;
34975 for (i = 0; i < ehdr.e_shnum; i++) {
34976 struct section *sec = &secs[i];
34977 if (sec->shdr.sh_type != SHT_SYMTAB) {
34978@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
34979 }
34980
34981
34982-static void read_relocs(FILE *fp)
34983+static void read_relocs(FILE *fp, int use_real_mode)
34984 {
34985- int i,j;
34986+ unsigned int i,j;
34987+ uint32_t base;
34988+
34989 for (i = 0; i < ehdr.e_shnum; i++) {
34990 struct section *sec = &secs[i];
34991 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34992@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
34993 die("Cannot read symbol table: %s\n",
34994 strerror(errno));
34995 }
34996+ base = 0;
34997+
34998+#ifdef CONFIG_X86_32
34999+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35000+ if (phdr[j].p_type != PT_LOAD )
35001+ continue;
35002+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35003+ continue;
35004+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35005+ break;
35006+ }
35007+#endif
35008+
35009 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35010 Elf_Rel *rel = &sec->reltab[j];
35011- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35012+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35013 rel->r_info = elf_xword_to_cpu(rel->r_info);
35014 #if (SHT_REL_TYPE == SHT_RELA)
35015 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35016@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
35017
35018 static void print_absolute_symbols(void)
35019 {
35020- int i;
35021+ unsigned int i;
35022 const char *format;
35023
35024 if (ELF_BITS == 64)
35025@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
35026 for (i = 0; i < ehdr.e_shnum; i++) {
35027 struct section *sec = &secs[i];
35028 char *sym_strtab;
35029- int j;
35030+ unsigned int j;
35031
35032 if (sec->shdr.sh_type != SHT_SYMTAB) {
35033 continue;
35034@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
35035
35036 static void print_absolute_relocs(void)
35037 {
35038- int i, printed = 0;
35039+ unsigned int i, printed = 0;
35040 const char *format;
35041
35042 if (ELF_BITS == 64)
35043@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
35044 struct section *sec_applies, *sec_symtab;
35045 char *sym_strtab;
35046 Elf_Sym *sh_symtab;
35047- int j;
35048+ unsigned int j;
35049 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35050 continue;
35051 }
35052@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35053 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35054 Elf_Sym *sym, const char *symname))
35055 {
35056- int i;
35057+ unsigned int i;
35058 /* Walk through the relocations */
35059 for (i = 0; i < ehdr.e_shnum; i++) {
35060 char *sym_strtab;
35061 Elf_Sym *sh_symtab;
35062 struct section *sec_applies, *sec_symtab;
35063- int j;
35064+ unsigned int j;
35065 struct section *sec = &secs[i];
35066
35067 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35068@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35069 {
35070 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35071 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35072+ char *sym_strtab = sec->link->link->strtab;
35073+
35074+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35075+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35076+ return 0;
35077+
35078+#ifdef CONFIG_PAX_KERNEXEC
35079+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35080+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35081+ return 0;
35082+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35083+ return 0;
35084+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35085+ return 0;
35086+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35087+ return 0;
35088+#endif
35089
35090 switch (r_type) {
35091 case R_386_NONE:
35092@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35093
35094 static void emit_relocs(int as_text, int use_real_mode)
35095 {
35096- int i;
35097+ unsigned int i;
35098 int (*write_reloc)(uint32_t, FILE *) = write32;
35099 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35100 const char *symname);
35101@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35102 {
35103 regex_init(use_real_mode);
35104 read_ehdr(fp);
35105+ read_phdrs(fp);
35106 read_shdrs(fp);
35107 read_strtabs(fp);
35108 read_symtabs(fp);
35109- read_relocs(fp);
35110+ read_relocs(fp, use_real_mode);
35111 if (ELF_BITS == 64)
35112 percpu_init();
35113 if (show_absolute_syms) {
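The relocs-tool changes add program-header parsing so the 32-bit pass can rebase relocation offsets: for a section inside a PT_LOAD segment, the runtime address differs from the link-time r_offset by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr, which read_relocs() now adds as `base' (CONFIG_PAGE_OFFSET is visible because the patch makes relocs.c include generated/autoconf.h). The lookup, condensed into a standalone sketch:

	#include <elf.h>
	#include <stdint.h>

	/* Find the load bias for a section at file offset sh_offset. */
	static uint32_t load_base(Elf32_Off sh_offset,
				  const Elf32_Phdr *phdr, unsigned int phnum)
	{
		unsigned int j;

		for (j = 0; j < phnum; j++) {
			if (phdr[j].p_type != PT_LOAD)
				continue;
			if (sh_offset < phdr[j].p_offset ||
			    sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
				continue;
			return CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
		}
		return 0;
	}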
35114diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35115index 80ffa5b..a33bd15 100644
35116--- a/arch/x86/um/tls_32.c
35117+++ b/arch/x86/um/tls_32.c
35118@@ -260,7 +260,7 @@ out:
35119 if (unlikely(task == current &&
35120 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35121 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35122- "without flushed TLS.", current->pid);
35123+ "without flushed TLS.", task_pid_nr(current));
35124 }
35125
35126 return 0;
35127diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35128index fd14be1..e3c79c0 100644
35129--- a/arch/x86/vdso/Makefile
35130+++ b/arch/x86/vdso/Makefile
35131@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
35132 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35133 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35134
35135-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
35136+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
35137 GCOV_PROFILE := n
35138
35139 #
35140diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35141index d6bfb87..876ee18 100644
35142--- a/arch/x86/vdso/vdso32-setup.c
35143+++ b/arch/x86/vdso/vdso32-setup.c
35144@@ -25,6 +25,7 @@
35145 #include <asm/tlbflush.h>
35146 #include <asm/vdso.h>
35147 #include <asm/proto.h>
35148+#include <asm/mman.h>
35149
35150 enum {
35151 VDSO_DISABLED = 0,
35152@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
35153 void enable_sep_cpu(void)
35154 {
35155 int cpu = get_cpu();
35156- struct tss_struct *tss = &per_cpu(init_tss, cpu);
35157+ struct tss_struct *tss = init_tss + cpu;
35158
35159 if (!boot_cpu_has(X86_FEATURE_SEP)) {
35160 put_cpu();
35161@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
35162 gate_vma.vm_start = FIXADDR_USER_START;
35163 gate_vma.vm_end = FIXADDR_USER_END;
35164 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35165- gate_vma.vm_page_prot = __P101;
35166+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35167
35168 return 0;
35169 }
35170@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35171 if (compat)
35172 addr = VDSO_HIGH_BASE;
35173 else {
35174- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
35175+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
35176 if (IS_ERR_VALUE(addr)) {
35177 ret = addr;
35178 goto up_fail;
35179 }
35180 }
35181
35182- current->mm->context.vdso = (void *)addr;
35183+ current->mm->context.vdso = addr;
35184
35185 if (compat_uses_vma || !compat) {
35186 /*
35187@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35188 }
35189
35190 current_thread_info()->sysenter_return =
35191- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
35192+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
35193
35194 up_fail:
35195 if (ret)
35196- current->mm->context.vdso = NULL;
35197+ current->mm->context.vdso = 0;
35198
35199 up_write(&mm->mmap_sem);
35200
35201@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
35202
35203 const char *arch_vma_name(struct vm_area_struct *vma)
35204 {
35205- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
35206+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
35207 return "[vdso]";
35208+
35209+#ifdef CONFIG_PAX_SEGMEXEC
35210+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
35211+ return "[vdso]";
35212+#endif
35213+
35214 return NULL;
35215 }
35216
35217@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
35218 * Check to see if the corresponding task was created in compat vdso
35219 * mode.
35220 */
35221- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
35222+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
35223 return &gate_vma;
35224 return NULL;
35225 }
35226diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35227index 431e875..cbb23f3 100644
35228--- a/arch/x86/vdso/vma.c
35229+++ b/arch/x86/vdso/vma.c
35230@@ -16,8 +16,6 @@
35231 #include <asm/vdso.h>
35232 #include <asm/page.h>
35233
35234-unsigned int __read_mostly vdso_enabled = 1;
35235-
35236 extern char vdso_start[], vdso_end[];
35237 extern unsigned short vdso_sync_cpuid;
35238
35239@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
35240 * unaligned here as a result of stack start randomization.
35241 */
35242 addr = PAGE_ALIGN(addr);
35243- addr = align_vdso_addr(addr);
35244
35245 return addr;
35246 }
35247@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
35248 unsigned size)
35249 {
35250 struct mm_struct *mm = current->mm;
35251- unsigned long addr;
35252+ unsigned long addr = 0;
35253 int ret;
35254
35255- if (!vdso_enabled)
35256- return 0;
35257-
35258 down_write(&mm->mmap_sem);
35259+
35260+#ifdef CONFIG_PAX_RANDMMAP
35261+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
35262+#endif
35263+
35264 addr = vdso_addr(mm->start_stack, size);
35265+ addr = align_vdso_addr(addr);
35266 addr = get_unmapped_area(NULL, addr, size, 0, 0);
35267 if (IS_ERR_VALUE(addr)) {
35268 ret = addr;
35269 goto up_fail;
35270 }
35271
35272- current->mm->context.vdso = (void *)addr;
35273+ mm->context.vdso = addr;
35274
35275 ret = install_special_mapping(mm, addr, size,
35276 VM_READ|VM_EXEC|
35277 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
35278 pages);
35279- if (ret) {
35280- current->mm->context.vdso = NULL;
35281- goto up_fail;
35282- }
35283+ if (ret)
35284+ mm->context.vdso = 0;
35285
35286 up_fail:
35287 up_write(&mm->mmap_sem);
35288@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35289 vdsox32_size);
35290 }
35291 #endif
35292-
35293-static __init int vdso_setup(char *s)
35294-{
35295- vdso_enabled = simple_strtoul(s, NULL, 0);
35296- return 0;
35297-}
35298-__setup("vdso=", vdso_setup);
35299diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35300index 1a3c765..3d2e8d1 100644
35301--- a/arch/x86/xen/Kconfig
35302+++ b/arch/x86/xen/Kconfig
35303@@ -9,6 +9,7 @@ config XEN
35304 select XEN_HAVE_PVMMU
35305 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
35306 depends on X86_TSC
35307+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35308 help
35309 This is the Linux Xen port. Enabling this will allow the
35310 kernel to boot in a paravirtualized environment under the
35311diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35312index fa6ade7..73da73a5 100644
35313--- a/arch/x86/xen/enlighten.c
35314+++ b/arch/x86/xen/enlighten.c
35315@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35316
35317 struct shared_info xen_dummy_shared_info;
35318
35319-void *xen_initial_gdt;
35320-
35321 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35322 __read_mostly int xen_have_vector_callback;
35323 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35324@@ -541,8 +539,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35325 {
35326 unsigned long va = dtr->address;
35327 unsigned int size = dtr->size + 1;
35328- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35329- unsigned long frames[pages];
35330+ unsigned long frames[65536 / PAGE_SIZE];
35331 int f;
35332
35333 /*
35334@@ -590,8 +587,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35335 {
35336 unsigned long va = dtr->address;
35337 unsigned int size = dtr->size + 1;
35338- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35339- unsigned long frames[pages];
35340+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35341 int f;
35342
35343 /*
35344@@ -599,7 +595,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35345 * 8-byte entries, or 16 4k pages..
35346 */
35347
35348- BUG_ON(size > 65536);
35349+ BUG_ON(size > GDT_SIZE);
35350 BUG_ON(va & ~PAGE_MASK);
35351
35352 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35353@@ -988,7 +984,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35354 return 0;
35355 }
35356
35357-static void set_xen_basic_apic_ops(void)
35358+static void __init set_xen_basic_apic_ops(void)
35359 {
35360 apic->read = xen_apic_read;
35361 apic->write = xen_apic_write;
35362@@ -1293,30 +1289,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35363 #endif
35364 };
35365
35366-static void xen_reboot(int reason)
35367+static __noreturn void xen_reboot(int reason)
35368 {
35369 struct sched_shutdown r = { .reason = reason };
35370
35371- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35372- BUG();
35373+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35374+ BUG();
35375 }
35376
35377-static void xen_restart(char *msg)
35378+static __noreturn void xen_restart(char *msg)
35379 {
35380 xen_reboot(SHUTDOWN_reboot);
35381 }
35382
35383-static void xen_emergency_restart(void)
35384+static __noreturn void xen_emergency_restart(void)
35385 {
35386 xen_reboot(SHUTDOWN_reboot);
35387 }
35388
35389-static void xen_machine_halt(void)
35390+static __noreturn void xen_machine_halt(void)
35391 {
35392 xen_reboot(SHUTDOWN_poweroff);
35393 }
35394
35395-static void xen_machine_power_off(void)
35396+static __noreturn void xen_machine_power_off(void)
35397 {
35398 if (pm_power_off)
35399 pm_power_off();
35400@@ -1467,7 +1463,17 @@ asmlinkage void __init xen_start_kernel(void)
35401 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35402
35403 /* Work out if we support NX */
35404- x86_configure_nx();
35405+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35406+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35407+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35408+ unsigned l, h;
35409+
35410+ __supported_pte_mask |= _PAGE_NX;
35411+ rdmsr(MSR_EFER, l, h);
35412+ l |= EFER_NX;
35413+ wrmsr(MSR_EFER, l, h);
35414+ }
35415+#endif
35416
35417 xen_setup_features();
35418
35419@@ -1498,13 +1504,6 @@ asmlinkage void __init xen_start_kernel(void)
35420
35421 machine_ops = xen_machine_ops;
35422
35423- /*
35424- * The only reliable way to retain the initial address of the
35425- * percpu gdt_page is to remember it here, so we can go and
35426- * mark it RW later, when the initial percpu area is freed.
35427- */
35428- xen_initial_gdt = &per_cpu(gdt_page, 0);
35429-
35430 xen_smp_init();
35431
35432 #ifdef CONFIG_ACPI_NUMA
35433diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35434index 3c76c3d..7871755 100644
35435--- a/arch/x86/xen/mmu.c
35436+++ b/arch/x86/xen/mmu.c
35437@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35438 return val;
35439 }
35440
35441-static pteval_t pte_pfn_to_mfn(pteval_t val)
35442+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35443 {
35444 if (pteval_present(val)) {
35445 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35446@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35447 /* L3_k[510] -> level2_kernel_pgt
35448 * L3_i[511] -> level2_fixmap_pgt */
35449 convert_pfn_mfn(level3_kernel_pgt);
35450+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35451+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35452+ convert_pfn_mfn(level3_vmemmap_pgt);
35453
35454 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35455 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
35456@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35457 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35458 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35459 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35460+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35461+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35462+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35463 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35464 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35465+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35466 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35467 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35468
35469@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
35470 pv_mmu_ops.set_pud = xen_set_pud;
35471 #if PAGETABLE_LEVELS == 4
35472 pv_mmu_ops.set_pgd = xen_set_pgd;
35473+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35474 #endif
35475
35476 /* This will work as long as patching hasn't happened yet
35477@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35478 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35479 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35480 .set_pgd = xen_set_pgd_hyper,
35481+ .set_pgd_batched = xen_set_pgd_hyper,
35482
35483 .alloc_pud = xen_alloc_pmd_init,
35484 .release_pud = xen_release_pmd_init,
35485diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35486index c36b325..b0f1518 100644
35487--- a/arch/x86/xen/smp.c
35488+++ b/arch/x86/xen/smp.c
35489@@ -274,17 +274,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35490 native_smp_prepare_boot_cpu();
35491
35492 if (xen_pv_domain()) {
35493- /* We've switched to the "real" per-cpu gdt, so make sure the
35494- old memory can be recycled */
35495- make_lowmem_page_readwrite(xen_initial_gdt);
35496-
35497 #ifdef CONFIG_X86_32
35498 /*
35499 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35500 * expects __USER_DS
35501 */
35502- loadsegment(ds, __USER_DS);
35503- loadsegment(es, __USER_DS);
35504+ loadsegment(ds, __KERNEL_DS);
35505+ loadsegment(es, __KERNEL_DS);
35506 #endif
35507
35508 xen_filter_cpu_maps();
35509@@ -364,7 +360,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35510 ctxt->user_regs.ss = __KERNEL_DS;
35511 #ifdef CONFIG_X86_32
35512 ctxt->user_regs.fs = __KERNEL_PERCPU;
35513- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35514+ savesegment(gs, ctxt->user_regs.gs);
35515 #else
35516 ctxt->gs_base_kernel = per_cpu_offset(cpu);
35517 #endif
35518@@ -374,8 +370,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35519
35520 {
35521 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35522- ctxt->user_regs.ds = __USER_DS;
35523- ctxt->user_regs.es = __USER_DS;
35524+ ctxt->user_regs.ds = __KERNEL_DS;
35525+ ctxt->user_regs.es = __KERNEL_DS;
35526
35527 xen_copy_trap_info(ctxt->trap_ctxt);
35528
35529@@ -420,13 +416,12 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35530 int rc;
35531
35532 per_cpu(current_task, cpu) = idle;
35533+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35534 #ifdef CONFIG_X86_32
35535 irq_ctx_init(cpu);
35536 #else
35537 clear_tsk_thread_flag(idle, TIF_FORK);
35538- per_cpu(kernel_stack, cpu) =
35539- (unsigned long)task_stack_page(idle) -
35540- KERNEL_STACK_OFFSET + THREAD_SIZE;
35541+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
35542 #endif
35543 xen_setup_runstate_info(cpu);
35544 xen_setup_timer(cpu);
35545@@ -702,7 +697,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
35546
35547 void __init xen_smp_init(void)
35548 {
35549- smp_ops = xen_smp_ops;
35550+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
35551 xen_fill_possible_map();
35552 }
35553
35554diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
35555index 33ca6e4..0ded929 100644
35556--- a/arch/x86/xen/xen-asm_32.S
35557+++ b/arch/x86/xen/xen-asm_32.S
35558@@ -84,14 +84,14 @@ ENTRY(xen_iret)
35559 ESP_OFFSET=4 # bytes pushed onto stack
35560
35561 /*
35562- * Store vcpu_info pointer for easy access. Do it this way to
35563- * avoid having to reload %fs
35564+ * Store vcpu_info pointer for easy access.
35565 */
35566 #ifdef CONFIG_SMP
35567- GET_THREAD_INFO(%eax)
35568- movl %ss:TI_cpu(%eax), %eax
35569- movl %ss:__per_cpu_offset(,%eax,4), %eax
35570- mov %ss:xen_vcpu(%eax), %eax
35571+ push %fs
35572+ mov $(__KERNEL_PERCPU), %eax
35573+ mov %eax, %fs
35574+ mov PER_CPU_VAR(xen_vcpu), %eax
35575+ pop %fs
35576 #else
35577 movl %ss:xen_vcpu, %eax
35578 #endif
35579diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
35580index 7faed58..ba4427c 100644
35581--- a/arch/x86/xen/xen-head.S
35582+++ b/arch/x86/xen/xen-head.S
35583@@ -19,6 +19,17 @@ ENTRY(startup_xen)
35584 #ifdef CONFIG_X86_32
35585 mov %esi,xen_start_info
35586 mov $init_thread_union+THREAD_SIZE,%esp
35587+#ifdef CONFIG_SMP
35588+ movl $cpu_gdt_table,%edi
35589+ movl $__per_cpu_load,%eax
35590+ movw %ax,__KERNEL_PERCPU + 2(%edi)
35591+ rorl $16,%eax
35592+ movb %al,__KERNEL_PERCPU + 4(%edi)
35593+ movb %ah,__KERNEL_PERCPU + 7(%edi)
35594+ movl $__per_cpu_end - 1,%eax
35595+ subl $__per_cpu_start,%eax
35596+ movw %ax,__KERNEL_PERCPU + 0(%edi)
35597+#endif
35598 #else
35599 mov %rsi,xen_start_info
35600 mov $init_thread_union+THREAD_SIZE,%rsp
35601diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
35602index 95f8c61..611d6e8 100644
35603--- a/arch/x86/xen/xen-ops.h
35604+++ b/arch/x86/xen/xen-ops.h
35605@@ -10,8 +10,6 @@
35606 extern const char xen_hypervisor_callback[];
35607 extern const char xen_failsafe_callback[];
35608
35609-extern void *xen_initial_gdt;
35610-
35611 struct trap_info;
35612 void xen_copy_trap_info(struct trap_info *traps);
35613
35614diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
35615index 525bd3d..ef888b1 100644
35616--- a/arch/xtensa/variants/dc232b/include/variant/core.h
35617+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
35618@@ -119,9 +119,9 @@
35619 ----------------------------------------------------------------------*/
35620
35621 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
35622-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
35623 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
35624 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
35625+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35626
35627 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
35628 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
35629diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
35630index 2f33760..835e50a 100644
35631--- a/arch/xtensa/variants/fsf/include/variant/core.h
35632+++ b/arch/xtensa/variants/fsf/include/variant/core.h
35633@@ -11,6 +11,7 @@
35634 #ifndef _XTENSA_CORE_H
35635 #define _XTENSA_CORE_H
35636
35637+#include <linux/const.h>
35638
35639 /****************************************************************************
35640 Parameters Useful for Any Code, USER or PRIVILEGED
35641@@ -112,9 +113,9 @@
35642 ----------------------------------------------------------------------*/
35643
35644 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35645-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35646 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35647 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35648+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35649
35650 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
35651 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
35652diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
35653index af00795..2bb8105 100644
35654--- a/arch/xtensa/variants/s6000/include/variant/core.h
35655+++ b/arch/xtensa/variants/s6000/include/variant/core.h
35656@@ -11,6 +11,7 @@
35657 #ifndef _XTENSA_CORE_CONFIGURATION_H
35658 #define _XTENSA_CORE_CONFIGURATION_H
35659
35660+#include <linux/const.h>
35661
35662 /****************************************************************************
35663 Parameters Useful for Any Code, USER or PRIVILEGED
35664@@ -118,9 +119,9 @@
35665 ----------------------------------------------------------------------*/
35666
35667 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
35668-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
35669 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
35670 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
35671+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
35672
35673 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
35674 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
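All three xtensa variant headers get the same treatment: XCHAL_DCACHE_LINESIZE is rederived from the log2 width with _AC(1,UL), so the constant is typed UL in C yet still usable from assembly, where a UL suffix would not parse. The _AC() helper from <linux/const.h> works roughly like this:

	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* assembler: drop the suffix */
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)	/* C: paste it back on */
	#endif

	#define XCHAL_DCACHE_LINEWIDTH	5
	#define XCHAL_DCACHE_LINESIZE	(_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH)	/* 32 */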
35675diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
35676index 4e491d9..c8e18e4 100644
35677--- a/block/blk-cgroup.c
35678+++ b/block/blk-cgroup.c
35679@@ -812,7 +812,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
35680 static struct cgroup_subsys_state *
35681 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
35682 {
35683- static atomic64_t id_seq = ATOMIC64_INIT(0);
35684+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
35685 struct blkcg *blkcg;
35686
35687 if (!parent_css) {
35688@@ -826,7 +826,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
35689
35690 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
35691 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
35692- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
35693+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
35694 done:
35695 spin_lock_init(&blkcg->lock);
35696 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
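blkcg's ID sequence becomes atomic64_unchecked_t because PaX's overflow-checked atomics trap on wraparound; a monotonic ID generator is not a reference count, so it is deliberately opted out. The same idea in a self-contained userspace C11 sketch:

	#include <stdatomic.h>
	#include <stdint.h>

	/* Monotonic ID allocator: root is 0, handed-out IDs start at 1.
	 * In the patched kernel this is atomic64_inc_return_unchecked(). */
	static _Atomic uint64_t id_seq;

	static uint64_t new_id(void)
	{
		return atomic_fetch_add(&id_seq, 1) + 1;
	}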
35697diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
35698index 1855bf5..af12b06 100644
35699--- a/block/blk-iopoll.c
35700+++ b/block/blk-iopoll.c
35701@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
35702 }
35703 EXPORT_SYMBOL(blk_iopoll_complete);
35704
35705-static void blk_iopoll_softirq(struct softirq_action *h)
35706+static __latent_entropy void blk_iopoll_softirq(void)
35707 {
35708 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
35709 int rearm = 0, budget = blk_iopoll_budget;
35710diff --git a/block/blk-map.c b/block/blk-map.c
35711index 623e1cd..ca1e109 100644
35712--- a/block/blk-map.c
35713+++ b/block/blk-map.c
35714@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
35715 if (!len || !kbuf)
35716 return -EINVAL;
35717
35718- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
35719+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
35720 if (do_copy)
35721 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
35722 else
35723diff --git a/block/blk-softirq.c b/block/blk-softirq.c
35724index 57790c1..5e988dd 100644
35725--- a/block/blk-softirq.c
35726+++ b/block/blk-softirq.c
35727@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
35728 * Softirq action handler - move entries to local list and loop over them
35729 * while passing them to the queue registered handler.
35730 */
35731-static void blk_done_softirq(struct softirq_action *h)
35732+static __latent_entropy void blk_done_softirq(void)
35733 {
35734 struct list_head *cpu_list, local_list;
35735
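
Both softirq handlers above lose their unused struct softirq_action argument and gain __latent_entropy. With grsecurity's latent_entropy gcc plugin, that marker makes the compiler instrument the function body to mix cheap call-time randomness into a global entropy pool; when the plugin is absent the marker must expand to nothing. A sketch of how such a marker is typically wired up (the attribute spelling follows the kernel's convention and should be treated as an assumption here):

#ifdef LATENT_ENTROPY_PLUGIN
#define __latent_entropy __attribute__((latent_entropy))
#else
#define __latent_entropy         /* plugin absent: marker is a no-op */
#endif

static __latent_entropy void blk_done_softirq_model(void)
{
	/* handler body; the plugin adds the entropy-mixing code itself */
}
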
35736diff --git a/block/bsg.c b/block/bsg.c
35737index 420a5a9..23834aa 100644
35738--- a/block/bsg.c
35739+++ b/block/bsg.c
35740@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
35741 struct sg_io_v4 *hdr, struct bsg_device *bd,
35742 fmode_t has_write_perm)
35743 {
35744+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35745+ unsigned char *cmdptr;
35746+
35747 if (hdr->request_len > BLK_MAX_CDB) {
35748 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
35749 if (!rq->cmd)
35750 return -ENOMEM;
35751- }
35752+ cmdptr = rq->cmd;
35753+ } else
35754+ cmdptr = tmpcmd;
35755
35756- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
35757+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
35758 hdr->request_len))
35759 return -EFAULT;
35760
35761+ if (cmdptr != rq->cmd)
35762+ memcpy(rq->cmd, cmdptr, hdr->request_len);
35763+
35764 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
35765 if (blk_verify_command(rq->cmd, has_write_perm))
35766 return -EPERM;
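
The reshaped copy path above (repeated in block/scsi_ioctl.c below) stages small SCSI CDBs in an on-stack bounce buffer instead of letting copy_from_user() write straight into rq->__cmd, a small array embedded inside struct request; copying from userspace into a fragment of a larger heap object is exactly what PaX's USERCOPY checking would reject. A self-contained sketch of the pattern, with memcpy standing in for copy_from_user():

#include <string.h>

#define BLK_MAX_CDB 16

struct request {
	unsigned char *cmd;                  /* __cmd or a kmalloc'd buffer */
	unsigned char __cmd[BLK_MAX_CDB];    /* inline command block */
};

/* stand-in for copy_from_user(); returns 0 on success */
static int copy_from_user_stub(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static int fill_cmd(struct request *rq, const void *ubuf, unsigned long len)
{
	unsigned char tmpcmd[sizeof(rq->__cmd)];
	unsigned char *cmdptr;

	if (rq->cmd == rq->__cmd && len > sizeof(rq->__cmd))
		return -22;               /* -EINVAL: inline path holds at most BLK_MAX_CDB */

	/* a large CDB already lives in its own heap buffer: copy directly;
	 * a small one shares storage with the request, so bounce it */
	if (rq->cmd != rq->__cmd)
		cmdptr = rq->cmd;
	else
		cmdptr = tmpcmd;

	if (copy_from_user_stub(cmdptr, ubuf, len))
		return -14;               /* -EFAULT */

	if (cmdptr != rq->cmd)
		memcpy(rq->cmd, cmdptr, len); /* commit the bounced copy */
	return 0;
}
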
35767diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
35768index fbd5a67..f24fd95 100644
35769--- a/block/compat_ioctl.c
35770+++ b/block/compat_ioctl.c
35771@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
35772 cgc = compat_alloc_user_space(sizeof(*cgc));
35773 cgc32 = compat_ptr(arg);
35774
35775- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
35776+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
35777 get_user(data, &cgc32->buffer) ||
35778 put_user(compat_ptr(data), &cgc->buffer) ||
35779 copy_in_user(&cgc->buflen, &cgc32->buflen,
35780@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
35781 err |= __get_user(f->spec1, &uf->spec1);
35782 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
35783 err |= __get_user(name, &uf->name);
35784- f->name = compat_ptr(name);
35785+ f->name = (void __force_kernel *)compat_ptr(name);
35786 if (err) {
35787 err = -EFAULT;
35788 goto out;
35789diff --git a/block/genhd.c b/block/genhd.c
35790index 791f419..89f21c4 100644
35791--- a/block/genhd.c
35792+++ b/block/genhd.c
35793@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
35794
35795 /*
35796 * Register device numbers dev..(dev+range-1)
35797- * range must be nonzero
35798+ * Noop if @range is zero.
35799 * The hash chain is sorted on range, so that subranges can override.
35800 */
35801 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
35802 struct kobject *(*probe)(dev_t, int *, void *),
35803 int (*lock)(dev_t, void *), void *data)
35804 {
35805- kobj_map(bdev_map, devt, range, module, probe, lock, data);
35806+ if (range)
35807+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
35808 }
35809
35810 EXPORT_SYMBOL(blk_register_region);
35811
35812+/* undo blk_register_region(), noop if @range is zero */
35813 void blk_unregister_region(dev_t devt, unsigned long range)
35814 {
35815- kobj_unmap(bdev_map, devt, range);
35816+ if (range)
35817+ kobj_unmap(bdev_map, devt, range);
35818 }
35819
35820 EXPORT_SYMBOL(blk_unregister_region);
35821diff --git a/block/partitions/efi.c b/block/partitions/efi.c
35822index dc51f46..d5446a8 100644
35823--- a/block/partitions/efi.c
35824+++ b/block/partitions/efi.c
35825@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
35826 if (!gpt)
35827 return NULL;
35828
35829+ if (!le32_to_cpu(gpt->num_partition_entries))
35830+ return NULL;
35831+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
35832+ if (!pte)
35833+ return NULL;
35834+
35835 count = le32_to_cpu(gpt->num_partition_entries) *
35836 le32_to_cpu(gpt->sizeof_partition_entry);
35837- if (!count)
35838- return NULL;
35839- pte = kmalloc(count, GFP_KERNEL);
35840- if (!pte)
35841- return NULL;
35842-
35843 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
35844 (u8 *) pte, count) < count) {
35845 kfree(pte);
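
The EFI GPT hunk above swaps kmalloc(a * b) for kcalloc(a, b): with attacker-influenced num_partition_entries and sizeof_partition_entry coming off disk, the open-coded multiplication can wrap and yield an undersized allocation, while kcalloc() checks the product before allocating (and zeroes the memory). The userspace analogue of the checked multiply:

#include <stddef.h>
#include <stdlib.h>

/* model of kcalloc(): refuse if n * size overflows, then zero-allocate */
static void *kcalloc_model(size_t n, size_t size)
{
	size_t bytes;
	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;          /* product would wrap: refuse */
	return calloc(1, bytes);
}
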
35846diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
35847index 625e3e4..b5339f9 100644
35848--- a/block/scsi_ioctl.c
35849+++ b/block/scsi_ioctl.c
35850@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
35851 return put_user(0, p);
35852 }
35853
35854-static int sg_get_timeout(struct request_queue *q)
35855+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
35856 {
35857 return jiffies_to_clock_t(q->sg_timeout);
35858 }
35859@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
35860 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
35861 struct sg_io_hdr *hdr, fmode_t mode)
35862 {
35863- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
35864+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35865+ unsigned char *cmdptr;
35866+
35867+ if (rq->cmd != rq->__cmd)
35868+ cmdptr = rq->cmd;
35869+ else
35870+ cmdptr = tmpcmd;
35871+
35872+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
35873 return -EFAULT;
35874+
35875+ if (cmdptr != rq->cmd)
35876+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
35877+
35878 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
35879 return -EPERM;
35880
35881@@ -415,6 +427,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35882 int err;
35883 unsigned int in_len, out_len, bytes, opcode, cmdlen;
35884 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
35885+ unsigned char tmpcmd[sizeof(rq->__cmd)];
35886+ unsigned char *cmdptr;
35887
35888 if (!sic)
35889 return -EINVAL;
35890@@ -448,9 +462,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35891 */
35892 err = -EFAULT;
35893 rq->cmd_len = cmdlen;
35894- if (copy_from_user(rq->cmd, sic->data, cmdlen))
35895+
35896+ if (rq->cmd != rq->__cmd)
35897+ cmdptr = rq->cmd;
35898+ else
35899+ cmdptr = tmpcmd;
35900+
35901+ if (copy_from_user(cmdptr, sic->data, cmdlen))
35902 goto error;
35903
35904+ if (rq->cmd != cmdptr)
35905+ memcpy(rq->cmd, cmdptr, cmdlen);
35906+
35907 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
35908 goto error;
35909
35910diff --git a/crypto/cryptd.c b/crypto/cryptd.c
35911index 7bdd61b..afec999 100644
35912--- a/crypto/cryptd.c
35913+++ b/crypto/cryptd.c
35914@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
35915
35916 struct cryptd_blkcipher_request_ctx {
35917 crypto_completion_t complete;
35918-};
35919+} __no_const;
35920
35921 struct cryptd_hash_ctx {
35922 struct crypto_shash *child;
35923@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
35924
35925 struct cryptd_aead_request_ctx {
35926 crypto_completion_t complete;
35927-};
35928+} __no_const;
35929
35930 static void cryptd_queue_worker(struct work_struct *work);
35931
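
cryptd's request contexts hold a single function pointer, which grsecurity's constify gcc plugin would otherwise move into read-only memory; __no_const opts these runtime-written structures out, while __do_const (used on apei_exec_ins_type, node_attr, and others below) forces a structure the other way, into const data. Both annotations are plugin attributes that must vanish when the plugin is absent; a sketch of the usual wiring (treat the attribute spellings as assumptions drawn from the grsecurity compiler headers):

#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))  /* force the type const */
#define __no_const __attribute__((no_const))  /* exempt from constify */
#else
#define __do_const
#define __no_const
#endif

struct written_at_runtime {
	void (*complete)(void);
} __no_const;                /* stays writable despite holding a fn ptr */
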
35932diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
35933index f8c920c..ab2cb5a 100644
35934--- a/crypto/pcrypt.c
35935+++ b/crypto/pcrypt.c
35936@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
35937 int ret;
35938
35939 pinst->kobj.kset = pcrypt_kset;
35940- ret = kobject_add(&pinst->kobj, NULL, name);
35941+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
35942 if (!ret)
35943 kobject_uevent(&pinst->kobj, KOBJ_ADD);
35944
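
The pcrypt change is a classic format-string fix: kobject_add()'s third parameter begins a printf-style format, so passing name directly would let any '%' it contains be parsed as a conversion. The same bug and fix in two lines of plain C (the same correction appears again for sprintf() in drivers/base/power/sysfs.c further down):

#include <stdio.h>

void show(const char *name)
{
	printf(name);        /* wrong: name is interpreted as a format */
	printf("%s", name);  /* right: name is printed as inert data   */
}
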
35945diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
35946index 15dddc1..b61cf0c 100644
35947--- a/drivers/acpi/acpica/hwxfsleep.c
35948+++ b/drivers/acpi/acpica/hwxfsleep.c
35949@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
35950 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
35951
35952 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
35953- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
35954- acpi_hw_extended_sleep},
35955- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
35956- acpi_hw_extended_wake_prep},
35957- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
35958+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
35959+ .extended_function = acpi_hw_extended_sleep},
35960+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
35961+ .extended_function = acpi_hw_extended_wake_prep},
35962+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
35963+ .extended_function = acpi_hw_extended_wake}
35964 };
35965
35966 /*
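
The acpi_sleep_dispatch hunk converts positional struct initializers to designated ones. Positional initialization silently breaks if the structure's field order ever changes, which matters here because grsecurity's structure-layout randomization may shuffle the fields of a struct composed purely of function pointers at build time; naming the fields keeps the table correct under any layout. In miniature:

struct sleep_funcs_model {
	int (*legacy_function)(unsigned char);
	int (*extended_function)(unsigned char);
};

static int legacy_sleep(unsigned char s)   { return (int)s; }
static int extended_sleep(unsigned char s) { return (int)s + 1; }

static struct sleep_funcs_model dispatch[] = {
	/* positional: breaks silently if the fields are reordered */
	{ legacy_sleep, extended_sleep },
	/* designated: correct under any field order */
	{ .legacy_function = legacy_sleep,
	  .extended_function = extended_sleep },
};
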
35967diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
35968index 21ba34a..cb05966 100644
35969--- a/drivers/acpi/apei/apei-internal.h
35970+++ b/drivers/acpi/apei/apei-internal.h
35971@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
35972 struct apei_exec_ins_type {
35973 u32 flags;
35974 apei_exec_ins_func_t run;
35975-};
35976+} __do_const;
35977
35978 struct apei_exec_context {
35979 u32 ip;
35980diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
35981index a30bc31..b91c4d5 100644
35982--- a/drivers/acpi/apei/ghes.c
35983+++ b/drivers/acpi/apei/ghes.c
35984@@ -498,7 +498,7 @@ static void __ghes_print_estatus(const char *pfx,
35985 const struct acpi_hest_generic *generic,
35986 const struct acpi_generic_status *estatus)
35987 {
35988- static atomic_t seqno;
35989+ static atomic_unchecked_t seqno;
35990 unsigned int curr_seqno;
35991 char pfx_seq[64];
35992
35993@@ -509,7 +509,7 @@ static void __ghes_print_estatus(const char *pfx,
35994 else
35995 pfx = KERN_ERR;
35996 }
35997- curr_seqno = atomic_inc_return(&seqno);
35998+ curr_seqno = atomic_inc_return_unchecked(&seqno);
35999 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36000 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36001 pfx_seq, generic->header.source_id);
36002diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36003index a83e3c6..c3d617f 100644
36004--- a/drivers/acpi/bgrt.c
36005+++ b/drivers/acpi/bgrt.c
36006@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36007 if (!bgrt_image)
36008 return -ENODEV;
36009
36010- bin_attr_image.private = bgrt_image;
36011- bin_attr_image.size = bgrt_image_size;
36012+ pax_open_kernel();
36013+ *(void **)&bin_attr_image.private = bgrt_image;
36014+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36015+ pax_close_kernel();
36016
36017 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36018 if (!bgrt_kobj)
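
bgrt's bin_attr_image is constified elsewhere in this series, so its fields can no longer be assigned directly; the pax_open_kernel()/pax_close_kernel() pair briefly lifts write protection (on x86, by toggling CR0.WP) so the cast-away-const stores can land, and the same bracket reappears in the libata-core and pata_arasan_cf hunks below. A userspace model using page protection, where mprotect() plays the role of the WP toggle; this is an illustration of the idea, not the kernel mechanism itself:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* one read-only page stands in for constified kernel data
	 * (4096 assumes 4 KiB pages) */
	int *ro = mmap(NULL, 4096, PROT_READ,
	               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ro == MAP_FAILED)
		return 1;

	mprotect(ro, 4096, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
	*ro = 42;                                    /* the one-off store  */
	mprotect(ro, 4096, PROT_READ);               /* pax_close_kernel() */

	printf("%d\n", *ro);
	return 0;
}
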
36019diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36020index 078c4f7..410e272 100644
36021--- a/drivers/acpi/blacklist.c
36022+++ b/drivers/acpi/blacklist.c
36023@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
36024 u32 is_critical_error;
36025 };
36026
36027-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36028+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36029
36030 /*
36031 * POLICY: If *anything* doesn't work, put it on the blacklist.
36032@@ -164,7 +164,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36033 return 0;
36034 }
36035
36036-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36037+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36038 {
36039 .callback = dmi_disable_osi_vista,
36040 .ident = "Fujitsu Siemens",
36041diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36042index 12b62f2..dc2aac8 100644
36043--- a/drivers/acpi/custom_method.c
36044+++ b/drivers/acpi/custom_method.c
36045@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36046 struct acpi_table_header table;
36047 acpi_status status;
36048
36049+#ifdef CONFIG_GRKERNSEC_KMEM
36050+ return -EPERM;
36051+#endif
36052+
36053 if (!(*ppos)) {
36054 /* parse the table header to get the table length */
36055 if (count <= sizeof(struct acpi_table_header))
36056diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36057index 644516d..643937e 100644
36058--- a/drivers/acpi/processor_idle.c
36059+++ b/drivers/acpi/processor_idle.c
36060@@ -963,7 +963,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36061 {
36062 int i, count = CPUIDLE_DRIVER_STATE_START;
36063 struct acpi_processor_cx *cx;
36064- struct cpuidle_state *state;
36065+ cpuidle_state_no_const *state;
36066 struct cpuidle_driver *drv = &acpi_idle_driver;
36067
36068 if (!pr->flags.power_setup_done)
36069diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36070index 6dbc3ca..b8b59a0 100644
36071--- a/drivers/acpi/sysfs.c
36072+++ b/drivers/acpi/sysfs.c
36073@@ -425,11 +425,11 @@ static u32 num_counters;
36074 static struct attribute **all_attrs;
36075 static u32 acpi_gpe_count;
36076
36077-static struct attribute_group interrupt_stats_attr_group = {
36078+static attribute_group_no_const interrupt_stats_attr_group = {
36079 .name = "interrupts",
36080 };
36081
36082-static struct kobj_attribute *counter_attrs;
36083+static kobj_attribute_no_const *counter_attrs;
36084
36085 static void delete_gpe_attr_array(void)
36086 {
36087diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36088index c482f8c..c832240 100644
36089--- a/drivers/ata/libahci.c
36090+++ b/drivers/ata/libahci.c
36091@@ -1239,7 +1239,7 @@ int ahci_kick_engine(struct ata_port *ap)
36092 }
36093 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36094
36095-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36096+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36097 struct ata_taskfile *tf, int is_cmd, u16 flags,
36098 unsigned long timeout_msec)
36099 {
36100diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36101index 8cb2522..a815e54 100644
36102--- a/drivers/ata/libata-core.c
36103+++ b/drivers/ata/libata-core.c
36104@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36105 static void ata_dev_xfermask(struct ata_device *dev);
36106 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36107
36108-atomic_t ata_print_id = ATOMIC_INIT(0);
36109+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36110
36111 struct ata_force_param {
36112 const char *name;
36113@@ -4851,7 +4851,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36114 struct ata_port *ap;
36115 unsigned int tag;
36116
36117- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36118+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36119 ap = qc->ap;
36120
36121 qc->flags = 0;
36122@@ -4867,7 +4867,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36123 struct ata_port *ap;
36124 struct ata_link *link;
36125
36126- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36127+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36128 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36129 ap = qc->ap;
36130 link = qc->dev->link;
36131@@ -5986,6 +5986,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36132 return;
36133
36134 spin_lock(&lock);
36135+ pax_open_kernel();
36136
36137 for (cur = ops->inherits; cur; cur = cur->inherits) {
36138 void **inherit = (void **)cur;
36139@@ -5999,8 +6000,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36140 if (IS_ERR(*pp))
36141 *pp = NULL;
36142
36143- ops->inherits = NULL;
36144+ *(struct ata_port_operations **)&ops->inherits = NULL;
36145
36146+ pax_close_kernel();
36147 spin_unlock(&lock);
36148 }
36149
36150@@ -6193,7 +6195,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36151
36152 /* give ports names and add SCSI hosts */
36153 for (i = 0; i < host->n_ports; i++) {
36154- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36155+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36156 host->ports[i]->local_port_no = i + 1;
36157 }
36158
36159diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36160index ef8567d..8bdbd03 100644
36161--- a/drivers/ata/libata-scsi.c
36162+++ b/drivers/ata/libata-scsi.c
36163@@ -4147,7 +4147,7 @@ int ata_sas_port_init(struct ata_port *ap)
36164
36165 if (rc)
36166 return rc;
36167- ap->print_id = atomic_inc_return(&ata_print_id);
36168+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36169 return 0;
36170 }
36171 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36172diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36173index 45b5ab3..98446b8 100644
36174--- a/drivers/ata/libata.h
36175+++ b/drivers/ata/libata.h
36176@@ -53,7 +53,7 @@ enum {
36177 ATA_DNXFER_QUIET = (1 << 31),
36178 };
36179
36180-extern atomic_t ata_print_id;
36181+extern atomic_unchecked_t ata_print_id;
36182 extern int atapi_passthru16;
36183 extern int libata_fua;
36184 extern int libata_noacpi;
36185diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36186index 73492dd..ca2bff5 100644
36187--- a/drivers/ata/pata_arasan_cf.c
36188+++ b/drivers/ata/pata_arasan_cf.c
36189@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36190 /* Handle platform specific quirks */
36191 if (quirk) {
36192 if (quirk & CF_BROKEN_PIO) {
36193- ap->ops->set_piomode = NULL;
36194+ pax_open_kernel();
36195+ *(void **)&ap->ops->set_piomode = NULL;
36196+ pax_close_kernel();
36197 ap->pio_mask = 0;
36198 }
36199 if (quirk & CF_BROKEN_MWDMA)
36200diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36201index f9b983a..887b9d8 100644
36202--- a/drivers/atm/adummy.c
36203+++ b/drivers/atm/adummy.c
36204@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36205 vcc->pop(vcc, skb);
36206 else
36207 dev_kfree_skb_any(skb);
36208- atomic_inc(&vcc->stats->tx);
36209+ atomic_inc_unchecked(&vcc->stats->tx);
36210
36211 return 0;
36212 }
36213diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36214index 62a7607..cc4be104 100644
36215--- a/drivers/atm/ambassador.c
36216+++ b/drivers/atm/ambassador.c
36217@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36218 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36219
36220 // VC layer stats
36221- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36222+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36223
36224 // free the descriptor
36225 kfree (tx_descr);
36226@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36227 dump_skb ("<<<", vc, skb);
36228
36229 // VC layer stats
36230- atomic_inc(&atm_vcc->stats->rx);
36231+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36232 __net_timestamp(skb);
36233 // end of our responsibility
36234 atm_vcc->push (atm_vcc, skb);
36235@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36236 } else {
36237 PRINTK (KERN_INFO, "dropped over-size frame");
36238 // should we count this?
36239- atomic_inc(&atm_vcc->stats->rx_drop);
36240+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36241 }
36242
36243 } else {
36244@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36245 }
36246
36247 if (check_area (skb->data, skb->len)) {
36248- atomic_inc(&atm_vcc->stats->tx_err);
36249+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36250 return -ENOMEM; // ?
36251 }
36252
36253diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36254index 0e3f8f9..765a7a5 100644
36255--- a/drivers/atm/atmtcp.c
36256+++ b/drivers/atm/atmtcp.c
36257@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36258 if (vcc->pop) vcc->pop(vcc,skb);
36259 else dev_kfree_skb(skb);
36260 if (dev_data) return 0;
36261- atomic_inc(&vcc->stats->tx_err);
36262+ atomic_inc_unchecked(&vcc->stats->tx_err);
36263 return -ENOLINK;
36264 }
36265 size = skb->len+sizeof(struct atmtcp_hdr);
36266@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36267 if (!new_skb) {
36268 if (vcc->pop) vcc->pop(vcc,skb);
36269 else dev_kfree_skb(skb);
36270- atomic_inc(&vcc->stats->tx_err);
36271+ atomic_inc_unchecked(&vcc->stats->tx_err);
36272 return -ENOBUFS;
36273 }
36274 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36275@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36276 if (vcc->pop) vcc->pop(vcc,skb);
36277 else dev_kfree_skb(skb);
36278 out_vcc->push(out_vcc,new_skb);
36279- atomic_inc(&vcc->stats->tx);
36280- atomic_inc(&out_vcc->stats->rx);
36281+ atomic_inc_unchecked(&vcc->stats->tx);
36282+ atomic_inc_unchecked(&out_vcc->stats->rx);
36283 return 0;
36284 }
36285
36286@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36287 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
36288 read_unlock(&vcc_sklist_lock);
36289 if (!out_vcc) {
36290- atomic_inc(&vcc->stats->tx_err);
36291+ atomic_inc_unchecked(&vcc->stats->tx_err);
36292 goto done;
36293 }
36294 skb_pull(skb,sizeof(struct atmtcp_hdr));
36295@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36296 __net_timestamp(new_skb);
36297 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36298 out_vcc->push(out_vcc,new_skb);
36299- atomic_inc(&vcc->stats->tx);
36300- atomic_inc(&out_vcc->stats->rx);
36301+ atomic_inc_unchecked(&vcc->stats->tx);
36302+ atomic_inc_unchecked(&out_vcc->stats->rx);
36303 done:
36304 if (vcc->pop) vcc->pop(vcc,skb);
36305 else dev_kfree_skb(skb);
36306diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36307index b1955ba..b179940 100644
36308--- a/drivers/atm/eni.c
36309+++ b/drivers/atm/eni.c
36310@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36311 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36312 vcc->dev->number);
36313 length = 0;
36314- atomic_inc(&vcc->stats->rx_err);
36315+ atomic_inc_unchecked(&vcc->stats->rx_err);
36316 }
36317 else {
36318 length = ATM_CELL_SIZE-1; /* no HEC */
36319@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36320 size);
36321 }
36322 eff = length = 0;
36323- atomic_inc(&vcc->stats->rx_err);
36324+ atomic_inc_unchecked(&vcc->stats->rx_err);
36325 }
36326 else {
36327 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36328@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36329 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36330 vcc->dev->number,vcc->vci,length,size << 2,descr);
36331 length = eff = 0;
36332- atomic_inc(&vcc->stats->rx_err);
36333+ atomic_inc_unchecked(&vcc->stats->rx_err);
36334 }
36335 }
36336 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36337@@ -767,7 +767,7 @@ rx_dequeued++;
36338 vcc->push(vcc,skb);
36339 pushed++;
36340 }
36341- atomic_inc(&vcc->stats->rx);
36342+ atomic_inc_unchecked(&vcc->stats->rx);
36343 }
36344 wake_up(&eni_dev->rx_wait);
36345 }
36346@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
36347 PCI_DMA_TODEVICE);
36348 if (vcc->pop) vcc->pop(vcc,skb);
36349 else dev_kfree_skb_irq(skb);
36350- atomic_inc(&vcc->stats->tx);
36351+ atomic_inc_unchecked(&vcc->stats->tx);
36352 wake_up(&eni_dev->tx_wait);
36353 dma_complete++;
36354 }
36355diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36356index b41c948..a002b17 100644
36357--- a/drivers/atm/firestream.c
36358+++ b/drivers/atm/firestream.c
36359@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36360 }
36361 }
36362
36363- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36364+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36365
36366 fs_dprintk (FS_DEBUG_TXMEM, "i");
36367 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36368@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36369 #endif
36370 skb_put (skb, qe->p1 & 0xffff);
36371 ATM_SKB(skb)->vcc = atm_vcc;
36372- atomic_inc(&atm_vcc->stats->rx);
36373+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36374 __net_timestamp(skb);
36375 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36376 atm_vcc->push (atm_vcc, skb);
36377@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36378 kfree (pe);
36379 }
36380 if (atm_vcc)
36381- atomic_inc(&atm_vcc->stats->rx_drop);
36382+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36383 break;
36384 case 0x1f: /* Reassembly abort: no buffers. */
36385 /* Silently increment error counter. */
36386 if (atm_vcc)
36387- atomic_inc(&atm_vcc->stats->rx_drop);
36388+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36389 break;
36390 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36391 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36392diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36393index 204814e..cede831 100644
36394--- a/drivers/atm/fore200e.c
36395+++ b/drivers/atm/fore200e.c
36396@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36397 #endif
36398 /* check error condition */
36399 if (*entry->status & STATUS_ERROR)
36400- atomic_inc(&vcc->stats->tx_err);
36401+ atomic_inc_unchecked(&vcc->stats->tx_err);
36402 else
36403- atomic_inc(&vcc->stats->tx);
36404+ atomic_inc_unchecked(&vcc->stats->tx);
36405 }
36406 }
36407
36408@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36409 if (skb == NULL) {
36410 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36411
36412- atomic_inc(&vcc->stats->rx_drop);
36413+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36414 return -ENOMEM;
36415 }
36416
36417@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36418
36419 dev_kfree_skb_any(skb);
36420
36421- atomic_inc(&vcc->stats->rx_drop);
36422+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36423 return -ENOMEM;
36424 }
36425
36426 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36427
36428 vcc->push(vcc, skb);
36429- atomic_inc(&vcc->stats->rx);
36430+ atomic_inc_unchecked(&vcc->stats->rx);
36431
36432 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36433
36434@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36435 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36436 fore200e->atm_dev->number,
36437 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36438- atomic_inc(&vcc->stats->rx_err);
36439+ atomic_inc_unchecked(&vcc->stats->rx_err);
36440 }
36441 }
36442
36443@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36444 goto retry_here;
36445 }
36446
36447- atomic_inc(&vcc->stats->tx_err);
36448+ atomic_inc_unchecked(&vcc->stats->tx_err);
36449
36450 fore200e->tx_sat++;
36451 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36452diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36453index 8557adc..3fb5d55 100644
36454--- a/drivers/atm/he.c
36455+++ b/drivers/atm/he.c
36456@@ -1691,7 +1691,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36457
36458 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36459 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36460- atomic_inc(&vcc->stats->rx_drop);
36461+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36462 goto return_host_buffers;
36463 }
36464
36465@@ -1718,7 +1718,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36466 RBRQ_LEN_ERR(he_dev->rbrq_head)
36467 ? "LEN_ERR" : "",
36468 vcc->vpi, vcc->vci);
36469- atomic_inc(&vcc->stats->rx_err);
36470+ atomic_inc_unchecked(&vcc->stats->rx_err);
36471 goto return_host_buffers;
36472 }
36473
36474@@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36475 vcc->push(vcc, skb);
36476 spin_lock(&he_dev->global_lock);
36477
36478- atomic_inc(&vcc->stats->rx);
36479+ atomic_inc_unchecked(&vcc->stats->rx);
36480
36481 return_host_buffers:
36482 ++pdus_assembled;
36483@@ -2096,7 +2096,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36484 tpd->vcc->pop(tpd->vcc, tpd->skb);
36485 else
36486 dev_kfree_skb_any(tpd->skb);
36487- atomic_inc(&tpd->vcc->stats->tx_err);
36488+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36489 }
36490 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36491 return;
36492@@ -2508,7 +2508,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36493 vcc->pop(vcc, skb);
36494 else
36495 dev_kfree_skb_any(skb);
36496- atomic_inc(&vcc->stats->tx_err);
36497+ atomic_inc_unchecked(&vcc->stats->tx_err);
36498 return -EINVAL;
36499 }
36500
36501@@ -2519,7 +2519,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36502 vcc->pop(vcc, skb);
36503 else
36504 dev_kfree_skb_any(skb);
36505- atomic_inc(&vcc->stats->tx_err);
36506+ atomic_inc_unchecked(&vcc->stats->tx_err);
36507 return -EINVAL;
36508 }
36509 #endif
36510@@ -2531,7 +2531,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36511 vcc->pop(vcc, skb);
36512 else
36513 dev_kfree_skb_any(skb);
36514- atomic_inc(&vcc->stats->tx_err);
36515+ atomic_inc_unchecked(&vcc->stats->tx_err);
36516 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36517 return -ENOMEM;
36518 }
36519@@ -2573,7 +2573,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36520 vcc->pop(vcc, skb);
36521 else
36522 dev_kfree_skb_any(skb);
36523- atomic_inc(&vcc->stats->tx_err);
36524+ atomic_inc_unchecked(&vcc->stats->tx_err);
36525 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36526 return -ENOMEM;
36527 }
36528@@ -2604,7 +2604,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36529 __enqueue_tpd(he_dev, tpd, cid);
36530 spin_unlock_irqrestore(&he_dev->global_lock, flags);
36531
36532- atomic_inc(&vcc->stats->tx);
36533+ atomic_inc_unchecked(&vcc->stats->tx);
36534
36535 return 0;
36536 }
36537diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
36538index 1dc0519..1aadaf7 100644
36539--- a/drivers/atm/horizon.c
36540+++ b/drivers/atm/horizon.c
36541@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
36542 {
36543 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
36544 // VC layer stats
36545- atomic_inc(&vcc->stats->rx);
36546+ atomic_inc_unchecked(&vcc->stats->rx);
36547 __net_timestamp(skb);
36548 // end of our responsibility
36549 vcc->push (vcc, skb);
36550@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
36551 dev->tx_iovec = NULL;
36552
36553 // VC layer stats
36554- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36555+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36556
36557 // free the skb
36558 hrz_kfree_skb (skb);
36559diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
36560index 1bdf104..9dc44b1 100644
36561--- a/drivers/atm/idt77252.c
36562+++ b/drivers/atm/idt77252.c
36563@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
36564 else
36565 dev_kfree_skb(skb);
36566
36567- atomic_inc(&vcc->stats->tx);
36568+ atomic_inc_unchecked(&vcc->stats->tx);
36569 }
36570
36571 atomic_dec(&scq->used);
36572@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36573 if ((sb = dev_alloc_skb(64)) == NULL) {
36574 printk("%s: Can't allocate buffers for aal0.\n",
36575 card->name);
36576- atomic_add(i, &vcc->stats->rx_drop);
36577+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36578 break;
36579 }
36580 if (!atm_charge(vcc, sb->truesize)) {
36581 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
36582 card->name);
36583- atomic_add(i - 1, &vcc->stats->rx_drop);
36584+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
36585 dev_kfree_skb(sb);
36586 break;
36587 }
36588@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36589 ATM_SKB(sb)->vcc = vcc;
36590 __net_timestamp(sb);
36591 vcc->push(vcc, sb);
36592- atomic_inc(&vcc->stats->rx);
36593+ atomic_inc_unchecked(&vcc->stats->rx);
36594
36595 cell += ATM_CELL_PAYLOAD;
36596 }
36597@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36598 "(CDC: %08x)\n",
36599 card->name, len, rpp->len, readl(SAR_REG_CDC));
36600 recycle_rx_pool_skb(card, rpp);
36601- atomic_inc(&vcc->stats->rx_err);
36602+ atomic_inc_unchecked(&vcc->stats->rx_err);
36603 return;
36604 }
36605 if (stat & SAR_RSQE_CRC) {
36606 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
36607 recycle_rx_pool_skb(card, rpp);
36608- atomic_inc(&vcc->stats->rx_err);
36609+ atomic_inc_unchecked(&vcc->stats->rx_err);
36610 return;
36611 }
36612 if (skb_queue_len(&rpp->queue) > 1) {
36613@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36614 RXPRINTK("%s: Can't alloc RX skb.\n",
36615 card->name);
36616 recycle_rx_pool_skb(card, rpp);
36617- atomic_inc(&vcc->stats->rx_err);
36618+ atomic_inc_unchecked(&vcc->stats->rx_err);
36619 return;
36620 }
36621 if (!atm_charge(vcc, skb->truesize)) {
36622@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36623 __net_timestamp(skb);
36624
36625 vcc->push(vcc, skb);
36626- atomic_inc(&vcc->stats->rx);
36627+ atomic_inc_unchecked(&vcc->stats->rx);
36628
36629 return;
36630 }
36631@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
36632 __net_timestamp(skb);
36633
36634 vcc->push(vcc, skb);
36635- atomic_inc(&vcc->stats->rx);
36636+ atomic_inc_unchecked(&vcc->stats->rx);
36637
36638 if (skb->truesize > SAR_FB_SIZE_3)
36639 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
36640@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
36641 if (vcc->qos.aal != ATM_AAL0) {
36642 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
36643 card->name, vpi, vci);
36644- atomic_inc(&vcc->stats->rx_drop);
36645+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36646 goto drop;
36647 }
36648
36649 if ((sb = dev_alloc_skb(64)) == NULL) {
36650 printk("%s: Can't allocate buffers for AAL0.\n",
36651 card->name);
36652- atomic_inc(&vcc->stats->rx_err);
36653+ atomic_inc_unchecked(&vcc->stats->rx_err);
36654 goto drop;
36655 }
36656
36657@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
36658 ATM_SKB(sb)->vcc = vcc;
36659 __net_timestamp(sb);
36660 vcc->push(vcc, sb);
36661- atomic_inc(&vcc->stats->rx);
36662+ atomic_inc_unchecked(&vcc->stats->rx);
36663
36664 drop:
36665 skb_pull(queue, 64);
36666@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36667
36668 if (vc == NULL) {
36669 printk("%s: NULL connection in send().\n", card->name);
36670- atomic_inc(&vcc->stats->tx_err);
36671+ atomic_inc_unchecked(&vcc->stats->tx_err);
36672 dev_kfree_skb(skb);
36673 return -EINVAL;
36674 }
36675 if (!test_bit(VCF_TX, &vc->flags)) {
36676 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
36677- atomic_inc(&vcc->stats->tx_err);
36678+ atomic_inc_unchecked(&vcc->stats->tx_err);
36679 dev_kfree_skb(skb);
36680 return -EINVAL;
36681 }
36682@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36683 break;
36684 default:
36685 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
36686- atomic_inc(&vcc->stats->tx_err);
36687+ atomic_inc_unchecked(&vcc->stats->tx_err);
36688 dev_kfree_skb(skb);
36689 return -EINVAL;
36690 }
36691
36692 if (skb_shinfo(skb)->nr_frags != 0) {
36693 printk("%s: No scatter-gather yet.\n", card->name);
36694- atomic_inc(&vcc->stats->tx_err);
36695+ atomic_inc_unchecked(&vcc->stats->tx_err);
36696 dev_kfree_skb(skb);
36697 return -EINVAL;
36698 }
36699@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
36700
36701 err = queue_skb(card, vc, skb, oam);
36702 if (err) {
36703- atomic_inc(&vcc->stats->tx_err);
36704+ atomic_inc_unchecked(&vcc->stats->tx_err);
36705 dev_kfree_skb(skb);
36706 return err;
36707 }
36708@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
36709 skb = dev_alloc_skb(64);
36710 if (!skb) {
36711 printk("%s: Out of memory in send_oam().\n", card->name);
36712- atomic_inc(&vcc->stats->tx_err);
36713+ atomic_inc_unchecked(&vcc->stats->tx_err);
36714 return -ENOMEM;
36715 }
36716 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
36717diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
36718index 4217f29..88f547a 100644
36719--- a/drivers/atm/iphase.c
36720+++ b/drivers/atm/iphase.c
36721@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
36722 status = (u_short) (buf_desc_ptr->desc_mode);
36723 if (status & (RX_CER | RX_PTE | RX_OFL))
36724 {
36725- atomic_inc(&vcc->stats->rx_err);
36726+ atomic_inc_unchecked(&vcc->stats->rx_err);
36727 IF_ERR(printk("IA: bad packet, dropping it");)
36728 if (status & RX_CER) {
36729 IF_ERR(printk(" cause: packet CRC error\n");)
36730@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
36731 len = dma_addr - buf_addr;
36732 if (len > iadev->rx_buf_sz) {
36733 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
36734- atomic_inc(&vcc->stats->rx_err);
36735+ atomic_inc_unchecked(&vcc->stats->rx_err);
36736 goto out_free_desc;
36737 }
36738
36739@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36740 ia_vcc = INPH_IA_VCC(vcc);
36741 if (ia_vcc == NULL)
36742 {
36743- atomic_inc(&vcc->stats->rx_err);
36744+ atomic_inc_unchecked(&vcc->stats->rx_err);
36745 atm_return(vcc, skb->truesize);
36746 dev_kfree_skb_any(skb);
36747 goto INCR_DLE;
36748@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36749 if ((length > iadev->rx_buf_sz) || (length >
36750 (skb->len - sizeof(struct cpcs_trailer))))
36751 {
36752- atomic_inc(&vcc->stats->rx_err);
36753+ atomic_inc_unchecked(&vcc->stats->rx_err);
36754 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
36755 length, skb->len);)
36756 atm_return(vcc, skb->truesize);
36757@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
36758
36759 IF_RX(printk("rx_dle_intr: skb push");)
36760 vcc->push(vcc,skb);
36761- atomic_inc(&vcc->stats->rx);
36762+ atomic_inc_unchecked(&vcc->stats->rx);
36763 iadev->rx_pkt_cnt++;
36764 }
36765 INCR_DLE:
36766@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
36767 {
36768 struct k_sonet_stats *stats;
36769 stats = &PRIV(_ia_dev[board])->sonet_stats;
36770- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
36771- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
36772- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
36773- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
36774- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
36775- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
36776- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
36777- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
36778- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
36779+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
36780+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
36781+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
36782+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
36783+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
36784+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
36785+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
36786+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
36787+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
36788 }
36789 ia_cmds.status = 0;
36790 break;
36791@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36792 if ((desc == 0) || (desc > iadev->num_tx_desc))
36793 {
36794 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
36795- atomic_inc(&vcc->stats->tx);
36796+ atomic_inc_unchecked(&vcc->stats->tx);
36797 if (vcc->pop)
36798 vcc->pop(vcc, skb);
36799 else
36800@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36801 ATM_DESC(skb) = vcc->vci;
36802 skb_queue_tail(&iadev->tx_dma_q, skb);
36803
36804- atomic_inc(&vcc->stats->tx);
36805+ atomic_inc_unchecked(&vcc->stats->tx);
36806 iadev->tx_pkt_cnt++;
36807 /* Increment transaction counter */
36808 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
36809
36810 #if 0
36811 /* add flow control logic */
36812- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
36813+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
36814 if (iavcc->vc_desc_cnt > 10) {
36815 vcc->tx_quota = vcc->tx_quota * 3 / 4;
36816 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
36817diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
36818index fa7d701..1e404c7 100644
36819--- a/drivers/atm/lanai.c
36820+++ b/drivers/atm/lanai.c
36821@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
36822 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
36823 lanai_endtx(lanai, lvcc);
36824 lanai_free_skb(lvcc->tx.atmvcc, skb);
36825- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
36826+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
36827 }
36828
36829 /* Try to fill the buffer - don't call unless there is backlog */
36830@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
36831 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
36832 __net_timestamp(skb);
36833 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
36834- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
36835+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
36836 out:
36837 lvcc->rx.buf.ptr = end;
36838 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
36839@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36840 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
36841 "vcc %d\n", lanai->number, (unsigned int) s, vci);
36842 lanai->stats.service_rxnotaal5++;
36843- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36844+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36845 return 0;
36846 }
36847 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
36848@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36849 int bytes;
36850 read_unlock(&vcc_sklist_lock);
36851 DPRINTK("got trashed rx pdu on vci %d\n", vci);
36852- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36853+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36854 lvcc->stats.x.aal5.service_trash++;
36855 bytes = (SERVICE_GET_END(s) * 16) -
36856 (((unsigned long) lvcc->rx.buf.ptr) -
36857@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36858 }
36859 if (s & SERVICE_STREAM) {
36860 read_unlock(&vcc_sklist_lock);
36861- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36862+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36863 lvcc->stats.x.aal5.service_stream++;
36864 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
36865 "PDU on VCI %d!\n", lanai->number, vci);
36866@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36867 return 0;
36868 }
36869 DPRINTK("got rx crc error on vci %d\n", vci);
36870- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36871+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36872 lvcc->stats.x.aal5.service_rxcrc++;
36873 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
36874 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
36875diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
36876index 5aca5f4..ce3a6b0 100644
36877--- a/drivers/atm/nicstar.c
36878+++ b/drivers/atm/nicstar.c
36879@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36880 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
36881 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
36882 card->index);
36883- atomic_inc(&vcc->stats->tx_err);
36884+ atomic_inc_unchecked(&vcc->stats->tx_err);
36885 dev_kfree_skb_any(skb);
36886 return -EINVAL;
36887 }
36888@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36889 if (!vc->tx) {
36890 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
36891 card->index);
36892- atomic_inc(&vcc->stats->tx_err);
36893+ atomic_inc_unchecked(&vcc->stats->tx_err);
36894 dev_kfree_skb_any(skb);
36895 return -EINVAL;
36896 }
36897@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36898 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
36899 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
36900 card->index);
36901- atomic_inc(&vcc->stats->tx_err);
36902+ atomic_inc_unchecked(&vcc->stats->tx_err);
36903 dev_kfree_skb_any(skb);
36904 return -EINVAL;
36905 }
36906
36907 if (skb_shinfo(skb)->nr_frags != 0) {
36908 printk("nicstar%d: No scatter-gather yet.\n", card->index);
36909- atomic_inc(&vcc->stats->tx_err);
36910+ atomic_inc_unchecked(&vcc->stats->tx_err);
36911 dev_kfree_skb_any(skb);
36912 return -EINVAL;
36913 }
36914@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36915 }
36916
36917 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
36918- atomic_inc(&vcc->stats->tx_err);
36919+ atomic_inc_unchecked(&vcc->stats->tx_err);
36920 dev_kfree_skb_any(skb);
36921 return -EIO;
36922 }
36923- atomic_inc(&vcc->stats->tx);
36924+ atomic_inc_unchecked(&vcc->stats->tx);
36925
36926 return 0;
36927 }
36928@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36929 printk
36930 ("nicstar%d: Can't allocate buffers for aal0.\n",
36931 card->index);
36932- atomic_add(i, &vcc->stats->rx_drop);
36933+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
36934 break;
36935 }
36936 if (!atm_charge(vcc, sb->truesize)) {
36937 RXPRINTK
36938 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
36939 card->index);
36940- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36941+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36942 dev_kfree_skb_any(sb);
36943 break;
36944 }
36945@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36946 ATM_SKB(sb)->vcc = vcc;
36947 __net_timestamp(sb);
36948 vcc->push(vcc, sb);
36949- atomic_inc(&vcc->stats->rx);
36950+ atomic_inc_unchecked(&vcc->stats->rx);
36951 cell += ATM_CELL_PAYLOAD;
36952 }
36953
36954@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36955 if (iovb == NULL) {
36956 printk("nicstar%d: Out of iovec buffers.\n",
36957 card->index);
36958- atomic_inc(&vcc->stats->rx_drop);
36959+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36960 recycle_rx_buf(card, skb);
36961 return;
36962 }
36963@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36964 small or large buffer itself. */
36965 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
36966 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
36967- atomic_inc(&vcc->stats->rx_err);
36968+ atomic_inc_unchecked(&vcc->stats->rx_err);
36969 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36970 NS_MAX_IOVECS);
36971 NS_PRV_IOVCNT(iovb) = 0;
36972@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36973 ("nicstar%d: Expected a small buffer, and this is not one.\n",
36974 card->index);
36975 which_list(card, skb);
36976- atomic_inc(&vcc->stats->rx_err);
36977+ atomic_inc_unchecked(&vcc->stats->rx_err);
36978 recycle_rx_buf(card, skb);
36979 vc->rx_iov = NULL;
36980 recycle_iov_buf(card, iovb);
36981@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36982 ("nicstar%d: Expected a large buffer, and this is not one.\n",
36983 card->index);
36984 which_list(card, skb);
36985- atomic_inc(&vcc->stats->rx_err);
36986+ atomic_inc_unchecked(&vcc->stats->rx_err);
36987 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36988 NS_PRV_IOVCNT(iovb));
36989 vc->rx_iov = NULL;
36990@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36991 printk(" - PDU size mismatch.\n");
36992 else
36993 printk(".\n");
36994- atomic_inc(&vcc->stats->rx_err);
36995+ atomic_inc_unchecked(&vcc->stats->rx_err);
36996 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36997 NS_PRV_IOVCNT(iovb));
36998 vc->rx_iov = NULL;
36999@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37000 /* skb points to a small buffer */
37001 if (!atm_charge(vcc, skb->truesize)) {
37002 push_rxbufs(card, skb);
37003- atomic_inc(&vcc->stats->rx_drop);
37004+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37005 } else {
37006 skb_put(skb, len);
37007 dequeue_sm_buf(card, skb);
37008@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37009 ATM_SKB(skb)->vcc = vcc;
37010 __net_timestamp(skb);
37011 vcc->push(vcc, skb);
37012- atomic_inc(&vcc->stats->rx);
37013+ atomic_inc_unchecked(&vcc->stats->rx);
37014 }
37015 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37016 struct sk_buff *sb;
37017@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37018 if (len <= NS_SMBUFSIZE) {
37019 if (!atm_charge(vcc, sb->truesize)) {
37020 push_rxbufs(card, sb);
37021- atomic_inc(&vcc->stats->rx_drop);
37022+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37023 } else {
37024 skb_put(sb, len);
37025 dequeue_sm_buf(card, sb);
37026@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37027 ATM_SKB(sb)->vcc = vcc;
37028 __net_timestamp(sb);
37029 vcc->push(vcc, sb);
37030- atomic_inc(&vcc->stats->rx);
37031+ atomic_inc_unchecked(&vcc->stats->rx);
37032 }
37033
37034 push_rxbufs(card, skb);
37035@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37036
37037 if (!atm_charge(vcc, skb->truesize)) {
37038 push_rxbufs(card, skb);
37039- atomic_inc(&vcc->stats->rx_drop);
37040+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37041 } else {
37042 dequeue_lg_buf(card, skb);
37043 #ifdef NS_USE_DESTRUCTORS
37044@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37045 ATM_SKB(skb)->vcc = vcc;
37046 __net_timestamp(skb);
37047 vcc->push(vcc, skb);
37048- atomic_inc(&vcc->stats->rx);
37049+ atomic_inc_unchecked(&vcc->stats->rx);
37050 }
37051
37052 push_rxbufs(card, sb);
37053@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37054 printk
37055 ("nicstar%d: Out of huge buffers.\n",
37056 card->index);
37057- atomic_inc(&vcc->stats->rx_drop);
37058+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37059 recycle_iovec_rx_bufs(card,
37060 (struct iovec *)
37061 iovb->data,
37062@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37063 card->hbpool.count++;
37064 } else
37065 dev_kfree_skb_any(hb);
37066- atomic_inc(&vcc->stats->rx_drop);
37067+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37068 } else {
37069 /* Copy the small buffer to the huge buffer */
37070 sb = (struct sk_buff *)iov->iov_base;
37071@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37072 #endif /* NS_USE_DESTRUCTORS */
37073 __net_timestamp(hb);
37074 vcc->push(vcc, hb);
37075- atomic_inc(&vcc->stats->rx);
37076+ atomic_inc_unchecked(&vcc->stats->rx);
37077 }
37078 }
37079
37080diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37081index 32784d1..4a8434a 100644
37082--- a/drivers/atm/solos-pci.c
37083+++ b/drivers/atm/solos-pci.c
37084@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
37085 }
37086 atm_charge(vcc, skb->truesize);
37087 vcc->push(vcc, skb);
37088- atomic_inc(&vcc->stats->rx);
37089+ atomic_inc_unchecked(&vcc->stats->rx);
37090 break;
37091
37092 case PKT_STATUS:
37093@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37094 vcc = SKB_CB(oldskb)->vcc;
37095
37096 if (vcc) {
37097- atomic_inc(&vcc->stats->tx);
37098+ atomic_inc_unchecked(&vcc->stats->tx);
37099 solos_pop(vcc, oldskb);
37100 } else {
37101 dev_kfree_skb_irq(oldskb);
37102diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37103index 0215934..ce9f5b1 100644
37104--- a/drivers/atm/suni.c
37105+++ b/drivers/atm/suni.c
37106@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37107
37108
37109 #define ADD_LIMITED(s,v) \
37110- atomic_add((v),&stats->s); \
37111- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37112+ atomic_add_unchecked((v),&stats->s); \
37113+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37114
37115
37116 static void suni_hz(unsigned long from_timer)
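
suni's ADD_LIMITED (and the uPD98402 variant just below) deliberately lets a statistics counter wrap and then clamps it to INT_MAX, which is why its atomics must be the unchecked flavour: a trapping atomic_add() would fire before the clamp ever ran. The control flow, modelled on plain ints:

#include <limits.h>

static void add_limited(int *s, int v)
{
	/* wrap exactly as atomic_add_unchecked() would; unsigned math
	 * keeps the wrap well defined in C */
	*s = (int)((unsigned int)*s + (unsigned int)v);
	if (*s < 0)
		*s = INT_MAX;   /* clamp after the wrap, as the macro does */
}
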
37117diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37118index 5120a96..e2572bd 100644
37119--- a/drivers/atm/uPD98402.c
37120+++ b/drivers/atm/uPD98402.c
37121@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37122 struct sonet_stats tmp;
37123 int error = 0;
37124
37125- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37126+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37127 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37128 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37129 if (zero && !error) {
37130@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37131
37132
37133 #define ADD_LIMITED(s,v) \
37134- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37135- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37136- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37137+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37138+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37139+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37140
37141
37142 static void stat_event(struct atm_dev *dev)
37143@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37144 if (reason & uPD98402_INT_PFM) stat_event(dev);
37145 if (reason & uPD98402_INT_PCO) {
37146 (void) GET(PCOCR); /* clear interrupt cause */
37147- atomic_add(GET(HECCT),
37148+ atomic_add_unchecked(GET(HECCT),
37149 &PRIV(dev)->sonet_stats.uncorr_hcs);
37150 }
37151 if ((reason & uPD98402_INT_RFO) &&
37152@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37153 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37154 uPD98402_INT_LOS),PIMR); /* enable them */
37155 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37156- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37157- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37158- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37159+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37160+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37161+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37162 return 0;
37163 }
37164
37165diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37166index 969c3c2..9b72956 100644
37167--- a/drivers/atm/zatm.c
37168+++ b/drivers/atm/zatm.c
37169@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37170 }
37171 if (!size) {
37172 dev_kfree_skb_irq(skb);
37173- if (vcc) atomic_inc(&vcc->stats->rx_err);
37174+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37175 continue;
37176 }
37177 if (!atm_charge(vcc,skb->truesize)) {
37178@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37179 skb->len = size;
37180 ATM_SKB(skb)->vcc = vcc;
37181 vcc->push(vcc,skb);
37182- atomic_inc(&vcc->stats->rx);
37183+ atomic_inc_unchecked(&vcc->stats->rx);
37184 }
37185 zout(pos & 0xffff,MTA(mbx));
37186 #if 0 /* probably a stupid idea */
37187@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37188 skb_queue_head(&zatm_vcc->backlog,skb);
37189 break;
37190 }
37191- atomic_inc(&vcc->stats->tx);
37192+ atomic_inc_unchecked(&vcc->stats->tx);
37193 wake_up(&zatm_vcc->tx_wait);
37194 }
37195
37196diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37197index 73f6c29..b0c0e13 100644
37198--- a/drivers/base/bus.c
37199+++ b/drivers/base/bus.c
37200@@ -1115,7 +1115,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37201 return -EINVAL;
37202
37203 mutex_lock(&subsys->p->mutex);
37204- list_add_tail(&sif->node, &subsys->p->interfaces);
37205+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37206 if (sif->add_dev) {
37207 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37208 while ((dev = subsys_dev_iter_next(&iter)))
37209@@ -1140,7 +1140,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37210 subsys = sif->subsys;
37211
37212 mutex_lock(&subsys->p->mutex);
37213- list_del_init(&sif->node);
37214+ pax_list_del_init((struct list_head *)&sif->node);
37215 if (sif->remove_dev) {
37216 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37217 while ((dev = subsys_dev_iter_next(&iter)))
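
subsys_interface nodes live inside structures this series marks read-only, so plain list_add_tail()/list_del_init() would fault on them; the pax_list_* helpers are assumed here to be the ordinary list operations bracketed by pax_open_kernel()/pax_close_kernel(), with the casts shedding the constified type. A self-contained sketch under that assumption:

struct list_head { struct list_head *next, *prev; };

static void pax_open_kernel(void)  { /* kernel: permit writes to r/o data */ }
static void pax_close_kernel(void) { /* kernel: restore write protection  */ }

static void list_add_tail_model(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* assumption: pax_list_add_tail() is the normal insert inside the
 * open/close bracket so nodes in read-only objects can be linked */
static void pax_list_add_tail(struct list_head *entry, struct list_head *head)
{
	pax_open_kernel();
	list_add_tail_model(entry, head);
	pax_close_kernel();
}
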
37218diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37219index 0f38201..6c2b444 100644
37220--- a/drivers/base/devtmpfs.c
37221+++ b/drivers/base/devtmpfs.c
37222@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37223 if (!thread)
37224 return 0;
37225
37226- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37227+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37228 if (err)
37229 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37230 else
37231@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37232 *err = sys_unshare(CLONE_NEWNS);
37233 if (*err)
37234 goto out;
37235- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37236+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37237 if (*err)
37238 goto out;
37239- sys_chdir("/.."); /* will traverse into overmounted root */
37240- sys_chroot(".");
37241+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37242+ sys_chroot((char __force_user *)".");
37243 complete(&setup_done);
37244 while (1) {
37245 spin_lock(&req_lock);
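
devtmpfsd hands kernel-space strings to sys_mount()/sys_chdir()/sys_chroot(), whose parameters are declared __user. The __force_user cast marks that address-space crossing as deliberate for the stricter checker; the drbd_int.h hunk below, which turns (char __user __force *) into (char __force_user *), shows it is shorthand for the sparse pair "__force __user". A hypothetical usage sketch under that assumption:

    /* Assumption, borne out by the drbd_int.h hunk further down: */
    #define __force_user __force __user

    static long mount_devtmpfs_sketch(void)
    {
            /* kernel strings deliberately passed where __user is expected */
            return sys_mount((char __force_user *)"devtmpfs",
                             (char __force_user *)"/",
                             (char __force_user *)"devtmpfs",
                             MS_SILENT, NULL);
    }
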
37246diff --git a/drivers/base/node.c b/drivers/base/node.c
37247index bc9f43b..29703b8 100644
37248--- a/drivers/base/node.c
37249+++ b/drivers/base/node.c
37250@@ -620,7 +620,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37251 struct node_attr {
37252 struct device_attribute attr;
37253 enum node_states state;
37254-};
37255+} __do_const;
37256
37257 static ssize_t show_node_state(struct device *dev,
37258 struct device_attribute *attr, char *buf)
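
__do_const is consumed by grsecurity's constify GCC plugin: every instance of the tagged struct (node_attr embeds function pointers through its device_attribute member) is forced into read-only memory, so the show()/store() hooks cannot be retargeted at runtime. Approximated by hand, without the plugin:

    #include <sys/types.h>

    struct attr_sketch {
            ssize_t (*show)(char *buf);     /* the pointer worth protecting */
            int state;
    };

    /* what the plugin makes implicit: every instance is const */
    static const struct attr_sketch node_state_attrs_sketch[] = {
            { .show = 0, .state = 1 },
    };
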
37259diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37260index bfb8955..4ebff34 100644
37261--- a/drivers/base/power/domain.c
37262+++ b/drivers/base/power/domain.c
37263@@ -1809,9 +1809,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
37264
37265 if (dev->power.subsys_data->domain_data) {
37266 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
37267- gpd_data->ops = (struct gpd_dev_ops){ NULL };
37268+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
37269 if (clear_td)
37270- gpd_data->td = (struct gpd_timing_data){ 0 };
37271+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
37272
37273 if (--gpd_data->refcount == 0) {
37274 dev->power.subsys_data->domain_data = NULL;
37275@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37276 {
37277 struct cpuidle_driver *cpuidle_drv;
37278 struct gpd_cpu_data *cpu_data;
37279- struct cpuidle_state *idle_state;
37280+ cpuidle_state_no_const *idle_state;
37281 int ret = 0;
37282
37283 if (IS_ERR_OR_NULL(genpd) || state < 0)
37284@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37285 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37286 {
37287 struct gpd_cpu_data *cpu_data;
37288- struct cpuidle_state *idle_state;
37289+ cpuidle_state_no_const *idle_state;
37290 int ret = 0;
37291
37292 if (IS_ERR_OR_NULL(genpd))
37293diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37294index 03e089a..0e9560c 100644
37295--- a/drivers/base/power/sysfs.c
37296+++ b/drivers/base/power/sysfs.c
37297@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37298 return -EIO;
37299 }
37300 }
37301- return sprintf(buf, p);
37302+ return sprintf(buf, "%s", p);
37303 }
37304
37305 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
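
sprintf(buf, p) uses a variable as the format string, so any '%' conversion that found its way into p would be interpreted; sprintf(buf, "%s", p) emits p verbatim. The same fix recurs below in intel-rng.c, cdrom.c and mem.c. A self-contained illustration:

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            const char *p = "suspended %x";  /* imagine a '%' sneaking in */

            /* sprintf(buf, p);  would parse %x and read a missing vararg */
            sprintf(buf, "%s", p);           /* copies the text literally */
            printf("%s\n", buf);
            return 0;
    }
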
37306diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37307index 2d56f41..8830f19 100644
37308--- a/drivers/base/power/wakeup.c
37309+++ b/drivers/base/power/wakeup.c
37310@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
37311 * They need to be modified together atomically, so it's better to use one
37312 * atomic variable to hold them both.
37313 */
37314-static atomic_t combined_event_count = ATOMIC_INIT(0);
37315+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37316
37317 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37318 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37319
37320 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37321 {
37322- unsigned int comb = atomic_read(&combined_event_count);
37323+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37324
37325 *cnt = (comb >> IN_PROGRESS_BITS);
37326 *inpr = comb & MAX_IN_PROGRESS;
37327@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37328 ws->start_prevent_time = ws->last_time;
37329
37330 /* Increment the counter of events in progress. */
37331- cec = atomic_inc_return(&combined_event_count);
37332+ cec = atomic_inc_return_unchecked(&combined_event_count);
37333
37334 trace_wakeup_source_activate(ws->name, cec);
37335 }
37336@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37337 * Increment the counter of registered wakeup events and decrement the
37338 * counter of wakeup events in progress simultaneously.
37339 */
37340- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37341+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37342 trace_wakeup_source_deactivate(ws->name, cec);
37343
37344 split_counters(&cnt, &inpr);
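
combined_event_count packs two counters into one word precisely so this pair can be updated with a single atomic op: the low IN_PROGRESS_BITS hold events in flight, the high bits hold registered events. With a 32-bit int, IN_PROGRESS_BITS is 16, and adding MAX_IN_PROGRESS (0xffff) equals "+0x10000 - 1", i.e. one increment of the high counter and one decrement of the low one, which is what wakeup_source_deactivate() relies on. A userspace rendition of the arithmetic:

    #include <assert.h>
    #include <stdio.h>

    #define IN_PROGRESS_BITS (sizeof(int) * 4)            /* 16 */
    #define MAX_IN_PROGRESS  ((1U << IN_PROGRESS_BITS) - 1)

    int main(void)
    {
            unsigned int comb = 0, cnt, inpr;

            comb += 1;                 /* activate: in-progress++ */
            comb += MAX_IN_PROGRESS;   /* deactivate: cnt++, in-progress-- */
            cnt  = comb >> IN_PROGRESS_BITS;
            inpr = comb & MAX_IN_PROGRESS;
            assert(cnt == 1 && inpr == 0);
            printf("cnt=%u inpr=%u\n", cnt, inpr);
            return 0;
    }
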
37345diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37346index e8d11b6..7b1b36f 100644
37347--- a/drivers/base/syscore.c
37348+++ b/drivers/base/syscore.c
37349@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37350 void register_syscore_ops(struct syscore_ops *ops)
37351 {
37352 mutex_lock(&syscore_ops_lock);
37353- list_add_tail(&ops->node, &syscore_ops_list);
37354+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37355 mutex_unlock(&syscore_ops_lock);
37356 }
37357 EXPORT_SYMBOL_GPL(register_syscore_ops);
37358@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37359 void unregister_syscore_ops(struct syscore_ops *ops)
37360 {
37361 mutex_lock(&syscore_ops_lock);
37362- list_del(&ops->node);
37363+ pax_list_del((struct list_head *)&ops->node);
37364 mutex_unlock(&syscore_ops_lock);
37365 }
37366 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37367diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37368index b35fc4f..c902870 100644
37369--- a/drivers/block/cciss.c
37370+++ b/drivers/block/cciss.c
37371@@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
37372 while (!list_empty(&h->reqQ)) {
37373 c = list_entry(h->reqQ.next, CommandList_struct, list);
37374 /* can't do anything if fifo is full */
37375- if ((h->access.fifo_full(h))) {
37376+ if ((h->access->fifo_full(h))) {
37377 dev_warn(&h->pdev->dev, "fifo full\n");
37378 break;
37379 }
37380@@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
37381 h->Qdepth--;
37382
37383 /* Tell the controller execute command */
37384- h->access.submit_command(h, c);
37385+ h->access->submit_command(h, c);
37386
37387 /* Put job onto the completed Q */
37388 addQ(&h->cmpQ, c);
37389@@ -3447,17 +3447,17 @@ startio:
37390
37391 static inline unsigned long get_next_completion(ctlr_info_t *h)
37392 {
37393- return h->access.command_completed(h);
37394+ return h->access->command_completed(h);
37395 }
37396
37397 static inline int interrupt_pending(ctlr_info_t *h)
37398 {
37399- return h->access.intr_pending(h);
37400+ return h->access->intr_pending(h);
37401 }
37402
37403 static inline long interrupt_not_for_us(ctlr_info_t *h)
37404 {
37405- return ((h->access.intr_pending(h) == 0) ||
37406+ return ((h->access->intr_pending(h) == 0) ||
37407 (h->interrupts_enabled == 0));
37408 }
37409
37410@@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
37411 u32 a;
37412
37413 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37414- return h->access.command_completed(h);
37415+ return h->access->command_completed(h);
37416
37417 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37418 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37419@@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37420 trans_support & CFGTBL_Trans_use_short_tags);
37421
37422 /* Change the access methods to the performant access methods */
37423- h->access = SA5_performant_access;
37424+ h->access = &SA5_performant_access;
37425 h->transMethod = CFGTBL_Trans_Performant;
37426
37427 return;
37428@@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37429 if (prod_index < 0)
37430 return -ENODEV;
37431 h->product_name = products[prod_index].product_name;
37432- h->access = *(products[prod_index].access);
37433+ h->access = products[prod_index].access;
37434
37435 if (cciss_board_disabled(h)) {
37436 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37437@@ -5059,7 +5059,7 @@ reinit_after_soft_reset:
37438 }
37439
37440 /* make sure the board interrupts are off */
37441- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37442+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37443 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37444 if (rc)
37445 goto clean2;
37446@@ -5109,7 +5109,7 @@ reinit_after_soft_reset:
37447 * fake ones to scoop up any residual completions.
37448 */
37449 spin_lock_irqsave(&h->lock, flags);
37450- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37451+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37452 spin_unlock_irqrestore(&h->lock, flags);
37453 free_irq(h->intr[h->intr_mode], h);
37454 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37455@@ -5129,9 +5129,9 @@ reinit_after_soft_reset:
37456 dev_info(&h->pdev->dev, "Board READY.\n");
37457 dev_info(&h->pdev->dev,
37458 "Waiting for stale completions to drain.\n");
37459- h->access.set_intr_mask(h, CCISS_INTR_ON);
37460+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37461 msleep(10000);
37462- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37463+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37464
37465 rc = controller_reset_failed(h->cfgtable);
37466 if (rc)
37467@@ -5154,7 +5154,7 @@ reinit_after_soft_reset:
37468 cciss_scsi_setup(h);
37469
37470 /* Turn the interrupts on so we can service requests */
37471- h->access.set_intr_mask(h, CCISS_INTR_ON);
37472+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37473
37474 /* Get the firmware version */
37475 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37476@@ -5226,7 +5226,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37477 kfree(flush_buf);
37478 if (return_code != IO_OK)
37479 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37480- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37481+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37482 free_irq(h->intr[h->intr_mode], h);
37483 }
37484
37485diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37486index 7fda30e..2f27946 100644
37487--- a/drivers/block/cciss.h
37488+++ b/drivers/block/cciss.h
37489@@ -101,7 +101,7 @@ struct ctlr_info
37490 /* information about each logical volume */
37491 drive_info_struct *drv[CISS_MAX_LUN];
37492
37493- struct access_method access;
37494+ struct access_method *access;
37495
37496 /* queue and queue Info */
37497 struct list_head reqQ;
37498@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
37499 }
37500
37501 static struct access_method SA5_access = {
37502- SA5_submit_command,
37503- SA5_intr_mask,
37504- SA5_fifo_full,
37505- SA5_intr_pending,
37506- SA5_completed,
37507+ .submit_command = SA5_submit_command,
37508+ .set_intr_mask = SA5_intr_mask,
37509+ .fifo_full = SA5_fifo_full,
37510+ .intr_pending = SA5_intr_pending,
37511+ .command_completed = SA5_completed,
37512 };
37513
37514 static struct access_method SA5B_access = {
37515- SA5_submit_command,
37516- SA5B_intr_mask,
37517- SA5_fifo_full,
37518- SA5B_intr_pending,
37519- SA5_completed,
37520+ .submit_command = SA5_submit_command,
37521+ .set_intr_mask = SA5B_intr_mask,
37522+ .fifo_full = SA5_fifo_full,
37523+ .intr_pending = SA5B_intr_pending,
37524+ .command_completed = SA5_completed,
37525 };
37526
37527 static struct access_method SA5_performant_access = {
37528- SA5_submit_command,
37529- SA5_performant_intr_mask,
37530- SA5_fifo_full,
37531- SA5_performant_intr_pending,
37532- SA5_performant_completed,
37533+ .submit_command = SA5_submit_command,
37534+ .set_intr_mask = SA5_performant_intr_mask,
37535+ .fifo_full = SA5_fifo_full,
37536+ .intr_pending = SA5_performant_intr_pending,
37537+ .command_completed = SA5_performant_completed,
37538 };
37539
37540 struct board_type {
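
Two related changes run through the cciss/cpqarray/smart hunks: ctlr_info keeps a pointer to its access_method table instead of a by-value copy (so one shared, constifiable table serves every controller), and the tables switch from positional to designated initializers, which stay correct if fields are reordered and are friendlier to the constify plugin. The difference in standalone form:

    #include <stdio.h>

    struct access_sketch {
            void (*submit_command)(void);
            void (*set_intr_mask)(void);
    };

    static void submit(void) { puts("submit"); }
    static void mask(void)   { puts("mask"); }

    /* positional: silently wrong if the struct layout ever changes */
    static const struct access_sketch positional = { submit, mask };
    /* designated: each pointer is tied to its field by name */
    static const struct access_sketch designated = {
            .submit_command = submit,
            .set_intr_mask  = mask,
    };

    int main(void)
    {
            const struct access_sketch *access = &designated; /* pointer, not copy */

            access->submit_command();
            positional.set_intr_mask();
            return 0;
    }
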
37541diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
37542index 2b94403..fd6ad1f 100644
37543--- a/drivers/block/cpqarray.c
37544+++ b/drivers/block/cpqarray.c
37545@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37546 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
37547 goto Enomem4;
37548 }
37549- hba[i]->access.set_intr_mask(hba[i], 0);
37550+ hba[i]->access->set_intr_mask(hba[i], 0);
37551 if (request_irq(hba[i]->intr, do_ida_intr,
37552 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
37553 {
37554@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
37555 add_timer(&hba[i]->timer);
37556
37557 /* Enable IRQ now that spinlock and rate limit timer are set up */
37558- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37559+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
37560
37561 for(j=0; j<NWD; j++) {
37562 struct gendisk *disk = ida_gendisk[i][j];
37563@@ -694,7 +694,7 @@ DBGINFO(
37564 for(i=0; i<NR_PRODUCTS; i++) {
37565 if (board_id == products[i].board_id) {
37566 c->product_name = products[i].product_name;
37567- c->access = *(products[i].access);
37568+ c->access = products[i].access;
37569 break;
37570 }
37571 }
37572@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
37573 hba[ctlr]->intr = intr;
37574 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
37575 hba[ctlr]->product_name = products[j].product_name;
37576- hba[ctlr]->access = *(products[j].access);
37577+ hba[ctlr]->access = products[j].access;
37578 hba[ctlr]->ctlr = ctlr;
37579 hba[ctlr]->board_id = board_id;
37580 hba[ctlr]->pci_dev = NULL; /* not PCI */
37581@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
37582
37583 while((c = h->reqQ) != NULL) {
37584 /* Can't do anything if we're busy */
37585- if (h->access.fifo_full(h) == 0)
37586+ if (h->access->fifo_full(h) == 0)
37587 return;
37588
37589 /* Get the first entry from the request Q */
37590@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
37591 h->Qdepth--;
37592
37593 /* Tell the controller to do our bidding */
37594- h->access.submit_command(h, c);
37595+ h->access->submit_command(h, c);
37596
37597 /* Get onto the completion Q */
37598 addQ(&h->cmpQ, c);
37599@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37600 unsigned long flags;
37601 __u32 a,a1;
37602
37603- istat = h->access.intr_pending(h);
37604+ istat = h->access->intr_pending(h);
37605 /* Is this interrupt for us? */
37606 if (istat == 0)
37607 return IRQ_NONE;
37608@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
37609 */
37610 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
37611 if (istat & FIFO_NOT_EMPTY) {
37612- while((a = h->access.command_completed(h))) {
37613+ while((a = h->access->command_completed(h))) {
37614 a1 = a; a &= ~3;
37615 if ((c = h->cmpQ) == NULL)
37616 {
37617@@ -1448,11 +1448,11 @@ static int sendcmd(
37618 /*
37619 * Disable interrupt
37620 */
37621- info_p->access.set_intr_mask(info_p, 0);
37622+ info_p->access->set_intr_mask(info_p, 0);
37623 /* Make sure there is room in the command FIFO */
37624 /* Actually it should be completely empty at this time. */
37625 for (i = 200000; i > 0; i--) {
37626- temp = info_p->access.fifo_full(info_p);
37627+ temp = info_p->access->fifo_full(info_p);
37628 if (temp != 0) {
37629 break;
37630 }
37631@@ -1465,7 +1465,7 @@ DBG(
37632 /*
37633 * Send the cmd
37634 */
37635- info_p->access.submit_command(info_p, c);
37636+ info_p->access->submit_command(info_p, c);
37637 complete = pollcomplete(ctlr);
37638
37639 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
37640@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
37641 * we check the new geometry. Then turn interrupts back on when
37642 * we're done.
37643 */
37644- host->access.set_intr_mask(host, 0);
37645+ host->access->set_intr_mask(host, 0);
37646 getgeometry(ctlr);
37647- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
37648+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
37649
37650 for(i=0; i<NWD; i++) {
37651 struct gendisk *disk = ida_gendisk[ctlr][i];
37652@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
37653 /* Wait (up to 2 seconds) for a command to complete */
37654
37655 for (i = 200000; i > 0; i--) {
37656- done = hba[ctlr]->access.command_completed(hba[ctlr]);
37657+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
37658 if (done == 0) {
37659 udelay(10); /* a short fixed delay */
37660 } else
37661diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
37662index be73e9d..7fbf140 100644
37663--- a/drivers/block/cpqarray.h
37664+++ b/drivers/block/cpqarray.h
37665@@ -99,7 +99,7 @@ struct ctlr_info {
37666 drv_info_t drv[NWD];
37667 struct proc_dir_entry *proc;
37668
37669- struct access_method access;
37670+ struct access_method *access;
37671
37672 cmdlist_t *reqQ;
37673 cmdlist_t *cmpQ;
37674diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
37675index 0e06f0c..c47b81d 100644
37676--- a/drivers/block/drbd/drbd_int.h
37677+++ b/drivers/block/drbd/drbd_int.h
37678@@ -582,7 +582,7 @@ struct drbd_epoch {
37679 struct drbd_tconn *tconn;
37680 struct list_head list;
37681 unsigned int barrier_nr;
37682- atomic_t epoch_size; /* increased on every request added. */
37683+ atomic_unchecked_t epoch_size; /* increased on every request added. */
37684 atomic_t active; /* increased on every req. added, and dec on every finished. */
37685 unsigned long flags;
37686 };
37687@@ -1022,7 +1022,7 @@ struct drbd_conf {
37688 unsigned int al_tr_number;
37689 int al_tr_cycle;
37690 wait_queue_head_t seq_wait;
37691- atomic_t packet_seq;
37692+ atomic_unchecked_t packet_seq;
37693 unsigned int peer_seq;
37694 spinlock_t peer_seq_lock;
37695 unsigned int minor;
37696@@ -1573,7 +1573,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
37697 char __user *uoptval;
37698 int err;
37699
37700- uoptval = (char __user __force *)optval;
37701+ uoptval = (char __force_user *)optval;
37702
37703 set_fs(KERNEL_DS);
37704 if (level == SOL_SOCKET)
37705diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
37706index 89c497c..9c736ae 100644
37707--- a/drivers/block/drbd/drbd_interval.c
37708+++ b/drivers/block/drbd/drbd_interval.c
37709@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
37710 }
37711
37712 static const struct rb_augment_callbacks augment_callbacks = {
37713- augment_propagate,
37714- augment_copy,
37715- augment_rotate,
37716+ .propagate = augment_propagate,
37717+ .copy = augment_copy,
37718+ .rotate = augment_rotate,
37719 };
37720
37721 /**
37722diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
37723index 9e3818b..7b64c92 100644
37724--- a/drivers/block/drbd/drbd_main.c
37725+++ b/drivers/block/drbd/drbd_main.c
37726@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
37727 p->sector = sector;
37728 p->block_id = block_id;
37729 p->blksize = blksize;
37730- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
37731+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
37732 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
37733 }
37734
37735@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
37736 return -EIO;
37737 p->sector = cpu_to_be64(req->i.sector);
37738 p->block_id = (unsigned long)req;
37739- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
37740+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
37741 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
37742 if (mdev->state.conn >= C_SYNC_SOURCE &&
37743 mdev->state.conn <= C_PAUSED_SYNC_T)
37744@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
37745 {
37746 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
37747
37748- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
37749- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
37750+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
37751+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
37752 kfree(tconn->current_epoch);
37753
37754 idr_destroy(&tconn->volumes);
37755diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
37756index c706d50..5e1b472 100644
37757--- a/drivers/block/drbd/drbd_nl.c
37758+++ b/drivers/block/drbd/drbd_nl.c
37759@@ -3440,7 +3440,7 @@ out:
37760
37761 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
37762 {
37763- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
37764+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
37765 struct sk_buff *msg;
37766 struct drbd_genlmsghdr *d_out;
37767 unsigned seq;
37768@@ -3453,7 +3453,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
37769 return;
37770 }
37771
37772- seq = atomic_inc_return(&drbd_genl_seq);
37773+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
37774 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
37775 if (!msg)
37776 goto failed;
37777diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
37778index 6fa6673..b7f97e9 100644
37779--- a/drivers/block/drbd/drbd_receiver.c
37780+++ b/drivers/block/drbd/drbd_receiver.c
37781@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
37782 {
37783 int err;
37784
37785- atomic_set(&mdev->packet_seq, 0);
37786+ atomic_set_unchecked(&mdev->packet_seq, 0);
37787 mdev->peer_seq = 0;
37788
37789 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
37790@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
37791 do {
37792 next_epoch = NULL;
37793
37794- epoch_size = atomic_read(&epoch->epoch_size);
37795+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
37796
37797 switch (ev & ~EV_CLEANUP) {
37798 case EV_PUT:
37799@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
37800 rv = FE_DESTROYED;
37801 } else {
37802 epoch->flags = 0;
37803- atomic_set(&epoch->epoch_size, 0);
37804+ atomic_set_unchecked(&epoch->epoch_size, 0);
37805 /* atomic_set(&epoch->active, 0); is already zero */
37806 if (rv == FE_STILL_LIVE)
37807 rv = FE_RECYCLED;
37808@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
37809 conn_wait_active_ee_empty(tconn);
37810 drbd_flush(tconn);
37811
37812- if (atomic_read(&tconn->current_epoch->epoch_size)) {
37813+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
37814 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
37815 if (epoch)
37816 break;
37817@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
37818 }
37819
37820 epoch->flags = 0;
37821- atomic_set(&epoch->epoch_size, 0);
37822+ atomic_set_unchecked(&epoch->epoch_size, 0);
37823 atomic_set(&epoch->active, 0);
37824
37825 spin_lock(&tconn->epoch_lock);
37826- if (atomic_read(&tconn->current_epoch->epoch_size)) {
37827+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
37828 list_add(&epoch->list, &tconn->current_epoch->list);
37829 tconn->current_epoch = epoch;
37830 tconn->epochs++;
37831@@ -2163,7 +2163,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37832
37833 err = wait_for_and_update_peer_seq(mdev, peer_seq);
37834 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
37835- atomic_inc(&tconn->current_epoch->epoch_size);
37836+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
37837 err2 = drbd_drain_block(mdev, pi->size);
37838 if (!err)
37839 err = err2;
37840@@ -2197,7 +2197,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37841
37842 spin_lock(&tconn->epoch_lock);
37843 peer_req->epoch = tconn->current_epoch;
37844- atomic_inc(&peer_req->epoch->epoch_size);
37845+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
37846 atomic_inc(&peer_req->epoch->active);
37847 spin_unlock(&tconn->epoch_lock);
37848
37849@@ -4344,7 +4344,7 @@ struct data_cmd {
37850 int expect_payload;
37851 size_t pkt_size;
37852 int (*fn)(struct drbd_tconn *, struct packet_info *);
37853-};
37854+} __do_const;
37855
37856 static struct data_cmd drbd_cmd_handler[] = {
37857 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
37858@@ -4464,7 +4464,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
37859 if (!list_empty(&tconn->current_epoch->list))
37860 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
37861 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
37862- atomic_set(&tconn->current_epoch->epoch_size, 0);
37863+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
37864 tconn->send.seen_any_write_yet = false;
37865
37866 conn_info(tconn, "Connection closed\n");
37867@@ -5220,7 +5220,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
37868 struct asender_cmd {
37869 size_t pkt_size;
37870 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
37871-};
37872+} __do_const;
37873
37874 static struct asender_cmd asender_tbl[] = {
37875 [P_PING] = { 0, got_Ping },
37876diff --git a/drivers/block/loop.c b/drivers/block/loop.c
37877index c8dac73..1800093 100644
37878--- a/drivers/block/loop.c
37879+++ b/drivers/block/loop.c
37880@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
37881
37882 file_start_write(file);
37883 set_fs(get_ds());
37884- bw = file->f_op->write(file, buf, len, &pos);
37885+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
37886 set_fs(old_fs);
37887 file_end_write(file);
37888 if (likely(bw == len))
37889diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
37890index 83a598e..2de5ce3 100644
37891--- a/drivers/block/null_blk.c
37892+++ b/drivers/block/null_blk.c
37893@@ -407,14 +407,24 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
37894 return 0;
37895 }
37896
37897-static struct blk_mq_ops null_mq_ops = {
37898- .queue_rq = null_queue_rq,
37899- .map_queue = blk_mq_map_queue,
37900+static struct blk_mq_ops null_mq_single_ops = {
37901+ .queue_rq = null_queue_rq,
37902+ .map_queue = blk_mq_map_queue,
37903 .init_hctx = null_init_hctx,
37904+ .alloc_hctx = blk_mq_alloc_single_hw_queue,
37905+ .free_hctx = blk_mq_free_single_hw_queue,
37906+};
37907+
37908+static struct blk_mq_ops null_mq_per_node_ops = {
37909+ .queue_rq = null_queue_rq,
37910+ .map_queue = blk_mq_map_queue,
37911+ .init_hctx = null_init_hctx,
37912+ .alloc_hctx = null_alloc_hctx,
37913+ .free_hctx = null_free_hctx,
37914 };
37915
37916 static struct blk_mq_reg null_mq_reg = {
37917- .ops = &null_mq_ops,
37918+ .ops = &null_mq_single_ops,
37919 .queue_depth = 64,
37920 .cmd_size = sizeof(struct nullb_cmd),
37921 .flags = BLK_MQ_F_SHOULD_MERGE,
37922@@ -545,13 +555,8 @@ static int null_add_dev(void)
37923 null_mq_reg.queue_depth = hw_queue_depth;
37924 null_mq_reg.nr_hw_queues = submit_queues;
37925
37926- if (use_per_node_hctx) {
37927- null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
37928- null_mq_reg.ops->free_hctx = null_free_hctx;
37929- } else {
37930- null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
37931- null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
37932- }
37933+ if (use_per_node_hctx)
37934+ null_mq_reg.ops = &null_mq_per_node_ops;
37935
37936 nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
37937 } else if (queue_mode == NULL_Q_BIO) {
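
Instead of patching alloc_hctx/free_hctx into one shared blk_mq_ops at null_add_dev() time — which forces the ops table to stay writable — the hunk defines two complete tables and selects between them with a pointer assignment. The shape of that refactor:

    #include <stdio.h>

    struct ops_sketch {
            int (*alloc_hctx)(void);
            int (*free_hctx)(void);
    };

    static int alloc_single(void)   { return 1; }
    static int free_single(void)    { return 1; }
    static int alloc_per_node(void) { return 2; }
    static int free_per_node(void)  { return 2; }

    /* two complete, read-only tables replace one mutable table */
    static const struct ops_sketch single_ops = {
            .alloc_hctx = alloc_single,
            .free_hctx  = free_single,
    };
    static const struct ops_sketch per_node_ops = {
            .alloc_hctx = alloc_per_node,
            .free_hctx  = free_per_node,
    };

    int main(void)
    {
            int use_per_node_hctx = 1;
            const struct ops_sketch *ops =
                    use_per_node_hctx ? &per_node_ops : &single_ops;

            printf("alloc -> %d\n", ops->alloc_hctx());
            return 0;
    }
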
37938diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
37939index ff8668c..f62167a 100644
37940--- a/drivers/block/pktcdvd.c
37941+++ b/drivers/block/pktcdvd.c
37942@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
37943
37944 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
37945 {
37946- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
37947+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
37948 }
37949
37950 /*
37951@@ -1883,7 +1883,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
37952 return -EROFS;
37953 }
37954 pd->settings.fp = ti.fp;
37955- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
37956+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
37957
37958 if (ti.nwa_v) {
37959 pd->nwa = be32_to_cpu(ti.next_writable);
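
The 1 → 1UL change makes the "size - 1" mask unsigned long before the sector_t cast, keeping the subtraction out of int arithmetic; in grsecurity terms this plausibly marks the wraparound-style mask as intentional for the size_overflow instrumentation (an inference from the pattern, not stated in the patch). The mask value itself is unchanged, as a quick check shows:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t sector_t;      /* assumption: 64-bit sector_t */

    int main(void)
    {
            uint32_t size = 32;              /* packet zone size in sectors */
            sector_t sector = 1000, offset = 8;
            sector_t zone = (sector + offset) & ~(sector_t)(size - 1UL);

            assert(zone == 992);             /* 1008 rounded down to 32 */
            return 0;
    }
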
37960diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
37961index e5565fb..71be10b4 100644
37962--- a/drivers/block/smart1,2.h
37963+++ b/drivers/block/smart1,2.h
37964@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
37965 }
37966
37967 static struct access_method smart4_access = {
37968- smart4_submit_command,
37969- smart4_intr_mask,
37970- smart4_fifo_full,
37971- smart4_intr_pending,
37972- smart4_completed,
37973+ .submit_command = smart4_submit_command,
37974+ .set_intr_mask = smart4_intr_mask,
37975+ .fifo_full = smart4_fifo_full,
37976+ .intr_pending = smart4_intr_pending,
37977+ .command_completed = smart4_completed,
37978 };
37979
37980 /*
37981@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
37982 }
37983
37984 static struct access_method smart2_access = {
37985- smart2_submit_command,
37986- smart2_intr_mask,
37987- smart2_fifo_full,
37988- smart2_intr_pending,
37989- smart2_completed,
37990+ .submit_command = smart2_submit_command,
37991+ .set_intr_mask = smart2_intr_mask,
37992+ .fifo_full = smart2_fifo_full,
37993+ .intr_pending = smart2_intr_pending,
37994+ .command_completed = smart2_completed,
37995 };
37996
37997 /*
37998@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
37999 }
38000
38001 static struct access_method smart2e_access = {
38002- smart2e_submit_command,
38003- smart2e_intr_mask,
38004- smart2e_fifo_full,
38005- smart2e_intr_pending,
38006- smart2e_completed,
38007+ .submit_command = smart2e_submit_command,
38008+ .set_intr_mask = smart2e_intr_mask,
38009+ .fifo_full = smart2e_fifo_full,
38010+ .intr_pending = smart2e_intr_pending,
38011+ .command_completed = smart2e_completed,
38012 };
38013
38014 /*
38015@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38016 }
38017
38018 static struct access_method smart1_access = {
38019- smart1_submit_command,
38020- smart1_intr_mask,
38021- smart1_fifo_full,
38022- smart1_intr_pending,
38023- smart1_completed,
38024+ .submit_command = smart1_submit_command,
38025+ .set_intr_mask = smart1_intr_mask,
38026+ .fifo_full = smart1_fifo_full,
38027+ .intr_pending = smart1_intr_pending,
38028+ .command_completed = smart1_completed,
38029 };
38030diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38031index f038dba..bb74c08 100644
38032--- a/drivers/bluetooth/btwilink.c
38033+++ b/drivers/bluetooth/btwilink.c
38034@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38035
38036 static int bt_ti_probe(struct platform_device *pdev)
38037 {
38038- static struct ti_st *hst;
38039+ struct ti_st *hst;
38040 struct hci_dev *hdev;
38041 int err;
38042
38043diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
38044index b6739cb..962fd35 100644
38045--- a/drivers/bus/arm-cci.c
38046+++ b/drivers/bus/arm-cci.c
38047@@ -979,7 +979,7 @@ static int cci_probe(void)
38048
38049 nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
38050
38051- ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
38052+ ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
38053 if (!ports)
38054 return -ENOMEM;
38055
38056diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38057index 8a3aff7..d7538c2 100644
38058--- a/drivers/cdrom/cdrom.c
38059+++ b/drivers/cdrom/cdrom.c
38060@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38061 ENSURE(reset, CDC_RESET);
38062 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38063 cdi->mc_flags = 0;
38064- cdo->n_minors = 0;
38065 cdi->options = CDO_USE_FFLAGS;
38066
38067 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
38068@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38069 else
38070 cdi->cdda_method = CDDA_OLD;
38071
38072- if (!cdo->generic_packet)
38073- cdo->generic_packet = cdrom_dummy_generic_packet;
38074+ if (!cdo->generic_packet) {
38075+ pax_open_kernel();
38076+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38077+ pax_close_kernel();
38078+ }
38079
38080 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38081 mutex_lock(&cdrom_mutex);
38082@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38083 if (cdi->exit)
38084 cdi->exit(cdi);
38085
38086- cdi->ops->n_minors--;
38087 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38088 }
38089
38090@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38091 */
38092 nr = nframes;
38093 do {
38094- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38095+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38096 if (cgc.buffer)
38097 break;
38098
38099@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38100 struct cdrom_device_info *cdi;
38101 int ret;
38102
38103- ret = scnprintf(info + *pos, max_size - *pos, header);
38104+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38105 if (!ret)
38106 return 1;
38107
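
Two fixes in the cdrom.c hunk: the CDDA read buffer moves from kmalloc() to kzalloc(), so if the drive short-reads, the unwritten tail copied back to userspace holds zeroes rather than stale heap contents, and cdrom_print_info() gains an explicit "%s" format. The infoleak pattern in miniature (kernel-style sketch, not the driver's code):

    /* Zero-fill any buffer whose tail may reach copy_to_user() unwritten. */
    static void *read_frames_sketch(size_t nframes, size_t frame_size)
    {
            /* kmalloc() would return previous heap contents here */
            return kzalloc(nframes * frame_size, GFP_KERNEL);
    }
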
38108diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38109index 5980cb9..6d7bd7e 100644
38110--- a/drivers/cdrom/gdrom.c
38111+++ b/drivers/cdrom/gdrom.c
38112@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38113 .audio_ioctl = gdrom_audio_ioctl,
38114 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38115 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38116- .n_minors = 1,
38117 };
38118
38119 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38120diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38121index fa3243d..8c98297 100644
38122--- a/drivers/char/Kconfig
38123+++ b/drivers/char/Kconfig
38124@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38125
38126 config DEVKMEM
38127 bool "/dev/kmem virtual device support"
38128- default y
38129+ default n
38130+ depends on !GRKERNSEC_KMEM
38131 help
38132 Say Y here if you want to support the /dev/kmem device. The
38133 /dev/kmem device is rarely used, but can be used for certain
38134@@ -576,6 +577,7 @@ config DEVPORT
38135 bool
38136 depends on !M68K
38137 depends on ISA || PCI
38138+ depends on !GRKERNSEC_KMEM
38139 default y
38140
38141 source "drivers/s390/char/Kconfig"
38142diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38143index a48e05b..6bac831 100644
38144--- a/drivers/char/agp/compat_ioctl.c
38145+++ b/drivers/char/agp/compat_ioctl.c
38146@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38147 return -ENOMEM;
38148 }
38149
38150- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38151+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38152 sizeof(*usegment) * ureserve.seg_count)) {
38153 kfree(usegment);
38154 kfree(ksegment);
38155diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38156index 1b19239..b87b143 100644
38157--- a/drivers/char/agp/frontend.c
38158+++ b/drivers/char/agp/frontend.c
38159@@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38160 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38161 return -EFAULT;
38162
38163- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38164+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38165 return -EFAULT;
38166
38167 client = agp_find_client_by_pid(reserve.pid);
38168@@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38169 if (segment == NULL)
38170 return -ENOMEM;
38171
38172- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38173+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38174 sizeof(struct agp_segment) * reserve.seg_count)) {
38175 kfree(segment);
38176 return -EFAULT;
38177diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38178index 4f94375..413694e 100644
38179--- a/drivers/char/genrtc.c
38180+++ b/drivers/char/genrtc.c
38181@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38182 switch (cmd) {
38183
38184 case RTC_PLL_GET:
38185+ memset(&pll, 0, sizeof(pll));
38186 if (get_rtc_pll(&pll))
38187 return -EINVAL;
38188 else
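
The RTC_PLL_GET path fills a struct on the stack and copies it to userspace; the added memset() guarantees that padding bytes, and any field get_rtc_pll() leaves untouched, read back as zero instead of stale stack data. The general ioctl-GET pattern (sketch, with a hypothetical struct):

    /* Zero before fill, then copy out - padding can never leak. */
    struct pll_sketch {
            int m, n;
            char reserved[4];       /* padding/reserved bytes */
    };

    static long get_pll_ioctl_sketch(void __user *argp)
    {
            struct pll_sketch pll;

            memset(&pll, 0, sizeof(pll));
            pll.m = 8;
            pll.n = 25;
            return copy_to_user(argp, &pll, sizeof(pll)) ? -EFAULT : 0;
    }
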
38189diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38190index 5d9c31d..c94ccb5 100644
38191--- a/drivers/char/hpet.c
38192+++ b/drivers/char/hpet.c
38193@@ -578,7 +578,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38194 }
38195
38196 static int
38197-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38198+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38199 struct hpet_info *info)
38200 {
38201 struct hpet_timer __iomem *timer;
38202diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
38203index 86fe45c..c0ea948 100644
38204--- a/drivers/char/hw_random/intel-rng.c
38205+++ b/drivers/char/hw_random/intel-rng.c
38206@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
38207
38208 if (no_fwh_detect)
38209 return -ENODEV;
38210- printk(warning);
38211+ printk("%s", warning);
38212 return -EBUSY;
38213 }
38214
38215diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38216index ec4e10f..f2a763b 100644
38217--- a/drivers/char/ipmi/ipmi_msghandler.c
38218+++ b/drivers/char/ipmi/ipmi_msghandler.c
38219@@ -420,7 +420,7 @@ struct ipmi_smi {
38220 struct proc_dir_entry *proc_dir;
38221 char proc_dir_name[10];
38222
38223- atomic_t stats[IPMI_NUM_STATS];
38224+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38225
38226 /*
38227 * run_to_completion duplicate of smb_info, smi_info
38228@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
38229
38230
38231 #define ipmi_inc_stat(intf, stat) \
38232- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38233+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38234 #define ipmi_get_stat(intf, stat) \
38235- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38236+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38237
38238 static int is_lan_addr(struct ipmi_addr *addr)
38239 {
38240@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38241 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38242 init_waitqueue_head(&intf->waitq);
38243 for (i = 0; i < IPMI_NUM_STATS; i++)
38244- atomic_set(&intf->stats[i], 0);
38245+ atomic_set_unchecked(&intf->stats[i], 0);
38246
38247 intf->proc_dir = NULL;
38248
38249diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38250index 15e4a60..b046093 100644
38251--- a/drivers/char/ipmi/ipmi_si_intf.c
38252+++ b/drivers/char/ipmi/ipmi_si_intf.c
38253@@ -280,7 +280,7 @@ struct smi_info {
38254 unsigned char slave_addr;
38255
38256 /* Counters and things for the proc filesystem. */
38257- atomic_t stats[SI_NUM_STATS];
38258+ atomic_unchecked_t stats[SI_NUM_STATS];
38259
38260 struct task_struct *thread;
38261
38262@@ -289,9 +289,9 @@ struct smi_info {
38263 };
38264
38265 #define smi_inc_stat(smi, stat) \
38266- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38267+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38268 #define smi_get_stat(smi, stat) \
38269- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38270+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38271
38272 #define SI_MAX_PARMS 4
38273
38274@@ -3324,7 +3324,7 @@ static int try_smi_init(struct smi_info *new_smi)
38275 atomic_set(&new_smi->req_events, 0);
38276 new_smi->run_to_completion = 0;
38277 for (i = 0; i < SI_NUM_STATS; i++)
38278- atomic_set(&new_smi->stats[i], 0);
38279+ atomic_set_unchecked(&new_smi->stats[i], 0);
38280
38281 new_smi->interrupt_disabled = 1;
38282 atomic_set(&new_smi->stop_operation, 0);
38283diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38284index f895a8c..2bc9147 100644
38285--- a/drivers/char/mem.c
38286+++ b/drivers/char/mem.c
38287@@ -18,6 +18,7 @@
38288 #include <linux/raw.h>
38289 #include <linux/tty.h>
38290 #include <linux/capability.h>
38291+#include <linux/security.h>
38292 #include <linux/ptrace.h>
38293 #include <linux/device.h>
38294 #include <linux/highmem.h>
38295@@ -37,6 +38,10 @@
38296
38297 #define DEVPORT_MINOR 4
38298
38299+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38300+extern const struct file_operations grsec_fops;
38301+#endif
38302+
38303 static inline unsigned long size_inside_page(unsigned long start,
38304 unsigned long size)
38305 {
38306@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38307
38308 while (cursor < to) {
38309 if (!devmem_is_allowed(pfn)) {
38310+#ifdef CONFIG_GRKERNSEC_KMEM
38311+ gr_handle_mem_readwrite(from, to);
38312+#else
38313 printk(KERN_INFO
38314 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38315 current->comm, from, to);
38316+#endif
38317 return 0;
38318 }
38319 cursor += PAGE_SIZE;
38320@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38321 }
38322 return 1;
38323 }
38324+#elif defined(CONFIG_GRKERNSEC_KMEM)
38325+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38326+{
38327+ return 0;
38328+}
38329 #else
38330 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38331 {
38332@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38333
38334 while (count > 0) {
38335 unsigned long remaining;
38336+ char *temp;
38337
38338 sz = size_inside_page(p, count);
38339
38340@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38341 if (!ptr)
38342 return -EFAULT;
38343
38344- remaining = copy_to_user(buf, ptr, sz);
38345+#ifdef CONFIG_PAX_USERCOPY
38346+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38347+ if (!temp) {
38348+ unxlate_dev_mem_ptr(p, ptr);
38349+ return -ENOMEM;
38350+ }
38351+ memcpy(temp, ptr, sz);
38352+#else
38353+ temp = ptr;
38354+#endif
38355+
38356+ remaining = copy_to_user(buf, temp, sz);
38357+
38358+#ifdef CONFIG_PAX_USERCOPY
38359+ kfree(temp);
38360+#endif
38361+
38362 unxlate_dev_mem_ptr(p, ptr);
38363 if (remaining)
38364 return -EFAULT;
38365@@ -364,9 +395,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38366 size_t count, loff_t *ppos)
38367 {
38368 unsigned long p = *ppos;
38369- ssize_t low_count, read, sz;
38370+ ssize_t low_count, read, sz, err = 0;
38371 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38372- int err = 0;
38373
38374 read = 0;
38375 if (p < (unsigned long) high_memory) {
38376@@ -388,6 +418,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38377 }
38378 #endif
38379 while (low_count > 0) {
38380+ char *temp;
38381+
38382 sz = size_inside_page(p, low_count);
38383
38384 /*
38385@@ -397,7 +429,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38386 */
38387 kbuf = xlate_dev_kmem_ptr((char *)p);
38388
38389- if (copy_to_user(buf, kbuf, sz))
38390+#ifdef CONFIG_PAX_USERCOPY
38391+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38392+ if (!temp)
38393+ return -ENOMEM;
38394+ memcpy(temp, kbuf, sz);
38395+#else
38396+ temp = kbuf;
38397+#endif
38398+
38399+ err = copy_to_user(buf, temp, sz);
38400+
38401+#ifdef CONFIG_PAX_USERCOPY
38402+ kfree(temp);
38403+#endif
38404+
38405+ if (err)
38406 return -EFAULT;
38407 buf += sz;
38408 p += sz;
38409@@ -822,6 +869,9 @@ static const struct memdev {
38410 #ifdef CONFIG_PRINTK
38411 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38412 #endif
38413+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38414+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38415+#endif
38416 };
38417
38418 static int memory_open(struct inode *inode, struct file *filp)
38419@@ -893,7 +943,7 @@ static int __init chr_dev_init(void)
38420 continue;
38421
38422 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38423- NULL, devlist[minor].name);
38424+ NULL, "%s", devlist[minor].name);
38425 }
38426
38427 return tty_init();
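
Under CONFIG_PAX_USERCOPY, read_mem() and read_kmem() stop copying straight from the /dev/mem or kernel mapping: the data is staged through a freshly allocated GFP_USERCOPY buffer, giving the USERCOPY checker a heap object with known bounds as the copy source. The skeleton of the bounce-buffer pattern the hunk adds:

    static ssize_t bounce_to_user_sketch(char __user *buf,
                                         const void *ptr, size_t sz)
    {
            char *temp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);
            ssize_t ret = 0;

            if (!temp)
                    return -ENOMEM;
            memcpy(temp, ptr, sz);            /* stage into a checked object */
            if (copy_to_user(buf, temp, sz))  /* bounds now verifiable */
                    ret = -EFAULT;
            kfree(temp);
            return ret;
    }
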
38428diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38429index 9df78e2..01ba9ae 100644
38430--- a/drivers/char/nvram.c
38431+++ b/drivers/char/nvram.c
38432@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38433
38434 spin_unlock_irq(&rtc_lock);
38435
38436- if (copy_to_user(buf, contents, tmp - contents))
38437+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38438 return -EFAULT;
38439
38440 *ppos = i;
38441diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
38442index d39cca6..8c1e269 100644
38443--- a/drivers/char/pcmcia/synclink_cs.c
38444+++ b/drivers/char/pcmcia/synclink_cs.c
38445@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38446
38447 if (debug_level >= DEBUG_LEVEL_INFO)
38448 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
38449- __FILE__, __LINE__, info->device_name, port->count);
38450+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
38451
38452- WARN_ON(!port->count);
38453+ WARN_ON(!atomic_read(&port->count));
38454
38455 if (tty_port_close_start(port, tty, filp) == 0)
38456 goto cleanup;
38457@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
38458 cleanup:
38459 if (debug_level >= DEBUG_LEVEL_INFO)
38460 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
38461- tty->driver->name, port->count);
38462+ tty->driver->name, atomic_read(&port->count));
38463 }
38464
38465 /* Wait until the transmitter is empty.
38466@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38467
38468 if (debug_level >= DEBUG_LEVEL_INFO)
38469 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
38470- __FILE__, __LINE__, tty->driver->name, port->count);
38471+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
38472
38473 /* If port is closing, signal caller to try again */
38474 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
38475@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
38476 goto cleanup;
38477 }
38478 spin_lock(&port->lock);
38479- port->count++;
38480+ atomic_inc(&port->count);
38481 spin_unlock(&port->lock);
38482 spin_unlock_irqrestore(&info->netlock, flags);
38483
38484- if (port->count == 1) {
38485+ if (atomic_read(&port->count) == 1) {
38486 /* 1st open on this device, init hardware */
38487 retval = startup(info, tty);
38488 if (retval < 0)
38489@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38490 unsigned short new_crctype;
38491
38492 /* return error if TTY interface open */
38493- if (info->port.count)
38494+ if (atomic_read(&info->port.count))
38495 return -EBUSY;
38496
38497 switch (encoding)
38498@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
38499
38500 /* arbitrate between network and tty opens */
38501 spin_lock_irqsave(&info->netlock, flags);
38502- if (info->port.count != 0 || info->netcount != 0) {
38503+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38504 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38505 spin_unlock_irqrestore(&info->netlock, flags);
38506 return -EBUSY;
38507@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38508 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
38509
38510 /* return error if TTY interface open */
38511- if (info->port.count)
38512+ if (atomic_read(&info->port.count))
38513 return -EBUSY;
38514
38515 if (cmd != SIOCWANDEV)
38516diff --git a/drivers/char/random.c b/drivers/char/random.c
38517index 429b75b..a7f4145 100644
38518--- a/drivers/char/random.c
38519+++ b/drivers/char/random.c
38520@@ -270,10 +270,17 @@
38521 /*
38522 * Configuration information
38523 */
38524+#ifdef CONFIG_GRKERNSEC_RANDNET
38525+#define INPUT_POOL_SHIFT 14
38526+#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
38527+#define OUTPUT_POOL_SHIFT 12
38528+#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
38529+#else
38530 #define INPUT_POOL_SHIFT 12
38531 #define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
38532 #define OUTPUT_POOL_SHIFT 10
38533 #define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
38534+#endif
38535 #define SEC_XFER_SIZE 512
38536 #define EXTRACT_SIZE 10
38537
38538@@ -284,9 +291,6 @@
38539 /*
38540 * To allow fractional bits to be tracked, the entropy_count field is
38541 * denominated in units of 1/8th bits.
38542- *
38543- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
38544- * credit_entropy_bits() needs to be 64 bits wide.
38545 */
38546 #define ENTROPY_SHIFT 3
38547 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
38548@@ -361,12 +365,19 @@ static struct poolinfo {
38549 #define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
38550 int tap1, tap2, tap3, tap4, tap5;
38551 } poolinfo_table[] = {
38552+#ifdef CONFIG_GRKERNSEC_RANDNET
38553+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
38554+ { S(512), 411, 308, 208, 104, 1 },
38555+ /* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 -- 105 */
38556+ { S(128), 104, 76, 51, 25, 1 },
38557+#else
38558 /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
38559 /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
38560 { S(128), 104, 76, 51, 25, 1 },
38561 /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
38562 /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
38563 { S(32), 26, 19, 14, 7, 1 },
38564+#endif
38565 #if 0
38566 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
38567 { S(2048), 1638, 1231, 819, 411, 1 },
38568@@ -433,9 +444,9 @@ struct entropy_store {
38569 };
38570
38571 static void push_to_pool(struct work_struct *work);
38572-static __u32 input_pool_data[INPUT_POOL_WORDS];
38573-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
38574-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
38575+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
38576+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38577+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
38578
38579 static struct entropy_store input_pool = {
38580 .poolinfo = &poolinfo_table[0],
38581@@ -524,8 +535,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
38582 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
38583 }
38584
38585- ACCESS_ONCE(r->input_rotate) = input_rotate;
38586- ACCESS_ONCE(r->add_ptr) = i;
38587+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
38588+ ACCESS_ONCE_RW(r->add_ptr) = i;
38589 smp_wmb();
38590
38591 if (out)
38592@@ -632,7 +643,7 @@ retry:
38593 /* The +2 corresponds to the /4 in the denominator */
38594
38595 do {
38596- unsigned int anfrac = min(pnfrac, pool_size/2);
38597+ u64 anfrac = min(pnfrac, pool_size/2);
38598 unsigned int add =
38599 ((pool_size - entropy_count)*anfrac*3) >> s;
38600
38601@@ -1151,7 +1162,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
38602
38603 extract_buf(r, tmp);
38604 i = min_t(int, nbytes, EXTRACT_SIZE);
38605- if (copy_to_user(buf, tmp, i)) {
38606+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
38607 ret = -EFAULT;
38608 break;
38609 }
38610@@ -1507,7 +1518,7 @@ EXPORT_SYMBOL(generate_random_uuid);
38611 #include <linux/sysctl.h>
38612
38613 static int min_read_thresh = 8, min_write_thresh;
38614-static int max_read_thresh = INPUT_POOL_WORDS * 32;
38615+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
38616 static int max_write_thresh = INPUT_POOL_WORDS * 32;
38617 static char sysctl_bootid[16];
38618
38619@@ -1523,7 +1534,7 @@ static char sysctl_bootid[16];
38620 static int proc_do_uuid(struct ctl_table *table, int write,
38621 void __user *buffer, size_t *lenp, loff_t *ppos)
38622 {
38623- struct ctl_table fake_table;
38624+ ctl_table_no_const fake_table;
38625 unsigned char buf[64], tmp_uuid[16], *uuid;
38626
38627 uuid = table->data;
38628@@ -1553,7 +1564,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
38629 static int proc_do_entropy(ctl_table *table, int write,
38630 void __user *buffer, size_t *lenp, loff_t *ppos)
38631 {
38632- ctl_table fake_table;
38633+ ctl_table_no_const fake_table;
38634 int entropy_count;
38635
38636 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
38637diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
38638index 7cc1fe22..b602d6b 100644
38639--- a/drivers/char/sonypi.c
38640+++ b/drivers/char/sonypi.c
38641@@ -54,6 +54,7 @@
38642
38643 #include <asm/uaccess.h>
38644 #include <asm/io.h>
38645+#include <asm/local.h>
38646
38647 #include <linux/sonypi.h>
38648
38649@@ -490,7 +491,7 @@ static struct sonypi_device {
38650 spinlock_t fifo_lock;
38651 wait_queue_head_t fifo_proc_list;
38652 struct fasync_struct *fifo_async;
38653- int open_count;
38654+ local_t open_count;
38655 int model;
38656 struct input_dev *input_jog_dev;
38657 struct input_dev *input_key_dev;
38658@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
38659 static int sonypi_misc_release(struct inode *inode, struct file *file)
38660 {
38661 mutex_lock(&sonypi_device.lock);
38662- sonypi_device.open_count--;
38663+ local_dec(&sonypi_device.open_count);
38664 mutex_unlock(&sonypi_device.lock);
38665 return 0;
38666 }
38667@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
38668 {
38669 mutex_lock(&sonypi_device.lock);
38670 /* Flush input queue on first open */
38671- if (!sonypi_device.open_count)
38672+ if (!local_read(&sonypi_device.open_count))
38673 kfifo_reset(&sonypi_device.fifo);
38674- sonypi_device.open_count++;
38675+ local_inc(&sonypi_device.open_count);
38676 mutex_unlock(&sonypi_device.lock);
38677
38678 return 0;
38679diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
38680index 64420b3..5c40b56 100644
38681--- a/drivers/char/tpm/tpm_acpi.c
38682+++ b/drivers/char/tpm/tpm_acpi.c
38683@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
38684 virt = acpi_os_map_memory(start, len);
38685 if (!virt) {
38686 kfree(log->bios_event_log);
38687+ log->bios_event_log = NULL;
38688 printk("%s: ERROR - Unable to map memory\n", __func__);
38689 return -EIO;
38690 }
38691
38692- memcpy_fromio(log->bios_event_log, virt, len);
38693+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
38694
38695 acpi_os_unmap_memory(virt, len);
38696 return 0;
38697diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
38698index 59f7cb2..bac8b6d 100644
38699--- a/drivers/char/tpm/tpm_eventlog.c
38700+++ b/drivers/char/tpm/tpm_eventlog.c
38701@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
38702 event = addr;
38703
38704 if ((event->event_type == 0 && event->event_size == 0) ||
38705- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
38706+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
38707 return NULL;
38708
38709 return addr;
38710@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
38711 return NULL;
38712
38713 if ((event->event_type == 0 && event->event_size == 0) ||
38714- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
38715+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
38716 return NULL;
38717
38718 (*pos)++;
38719@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
38720 int i;
38721
38722 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
38723- seq_putc(m, data[i]);
38724+ if (!seq_putc(m, data[i]))
38725+ return -EFAULT;
38726
38727 return 0;
38728 }
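
The rewritten checks in tpm_eventlog.c are overflow-safe bounds tests: with an attacker-influenced event_size, "addr + header + event_size >= limit" can wrap and look in-bounds, while "event_size >= limit - addr - header" compares the length against the space actually remaining and cannot wrap that way (addr and the header size are already known to be below limit). Demonstrated with plain unsigned integers:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uintptr_t addr  = UINTPTR_MAX - 64;    /* near the top of memory */
            uintptr_t limit = UINTPTR_MAX - 8;
            uintptr_t hdr = 16, event_size = 1000; /* hostile length field */

            /* unsafe form wraps around and appears to be in bounds */
            assert(addr + hdr + event_size < limit);
            /* safe form compares against the space that is really left */
            assert(event_size >= limit - addr - hdr);   /* rejected */
            return 0;
    }
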
38729diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
38730index feea87c..18aefff 100644
38731--- a/drivers/char/virtio_console.c
38732+++ b/drivers/char/virtio_console.c
38733@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
38734 if (to_user) {
38735 ssize_t ret;
38736
38737- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
38738+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
38739 if (ret)
38740 return -EFAULT;
38741 } else {
38742@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
38743 if (!port_has_data(port) && !port->host_connected)
38744 return 0;
38745
38746- return fill_readbuf(port, ubuf, count, true);
38747+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
38748 }
38749
38750 static int wait_port_writable(struct port *port, bool nonblock)
38751diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
38752index a33f46f..a720eed 100644
38753--- a/drivers/clk/clk-composite.c
38754+++ b/drivers/clk/clk-composite.c
38755@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
38756 struct clk *clk;
38757 struct clk_init_data init;
38758 struct clk_composite *composite;
38759- struct clk_ops *clk_composite_ops;
38760+ clk_ops_no_const *clk_composite_ops;
38761
38762 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
38763 if (!composite) {
38764diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
38765index 81dd31a..ef5c542 100644
38766--- a/drivers/clk/socfpga/clk.c
38767+++ b/drivers/clk/socfpga/clk.c
38768@@ -22,6 +22,7 @@
38769 #include <linux/clk-provider.h>
38770 #include <linux/io.h>
38771 #include <linux/of.h>
38772+#include <asm/pgtable.h>
38773
38774 /* Clock Manager offsets */
38775 #define CLKMGR_CTRL 0x0
38776@@ -152,8 +153,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
38777 streq(clk_name, "periph_pll") ||
38778 streq(clk_name, "sdram_pll")) {
38779 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
38780- clk_pll_ops.enable = clk_gate_ops.enable;
38781- clk_pll_ops.disable = clk_gate_ops.disable;
38782+ pax_open_kernel();
38783+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
38784+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
38785+ pax_close_kernel();
38786 }
38787
38788 clk = clk_register(NULL, &socfpga_clk->hw.hw);
38789@@ -244,7 +247,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
38790 return parent_rate / div;
38791 }
38792
38793-static struct clk_ops gateclk_ops = {
38794+static clk_ops_no_const gateclk_ops __read_only = {
38795 .recalc_rate = socfpga_clk_recalc_rate,
38796 .get_parent = socfpga_clk_get_parent,
38797 .set_parent = socfpga_clk_set_parent,
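
Under PaX/grsecurity, ops tables such as clk_pll_ops above live in read-only memory, and pax_open_kernel()/pax_close_kernel() open a brief write window for the few legitimate writers. A rough userspace analogue of that window, assuming Linux and POSIX mmap()/mprotect() (struct ops and the names are illustrative, error handling trimmed):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops {
        void (*enable)(void);
};

static void real_enable(void)
{
        puts("enabled");
}

int main(void)
{
        long pg = sysconf(_SC_PAGESIZE);
        struct ops *tbl = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (tbl == MAP_FAILED)
                return 1;
        mprotect(tbl, pg, PROT_READ);              /* table is now "const" */

        mprotect(tbl, pg, PROT_READ | PROT_WRITE); /* ~pax_open_kernel()  */
        tbl->enable = real_enable;                 /* the one legal write */
        mprotect(tbl, pg, PROT_READ);              /* ~pax_close_kernel() */

        tbl->enable();
        return 0;
}
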
38798diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
38799index caf41eb..223d27a 100644
38800--- a/drivers/cpufreq/acpi-cpufreq.c
38801+++ b/drivers/cpufreq/acpi-cpufreq.c
38802@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
38803 return sprintf(buf, "%u\n", boost_enabled);
38804 }
38805
38806-static struct global_attr global_boost = __ATTR(boost, 0644,
38807+static global_attr_no_const global_boost = __ATTR(boost, 0644,
38808 show_global_boost,
38809 store_global_boost);
38810
38811@@ -693,8 +693,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
38812 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
38813 per_cpu(acfreq_data, cpu) = data;
38814
38815- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
38816- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
38817+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
38818+ pax_open_kernel();
38819+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
38820+ pax_close_kernel();
38821+ }
38822
38823 result = acpi_processor_register_performance(data->acpi_data, cpu);
38824 if (result)
38825@@ -827,7 +830,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
38826 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
38827 break;
38828 case ACPI_ADR_SPACE_FIXED_HARDWARE:
38829- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
38830+ pax_open_kernel();
38831+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
38832+ pax_close_kernel();
38833 break;
38834 default:
38835 break;
38836diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
38837index 99a443e..8cb6f02 100644
38838--- a/drivers/cpufreq/cpufreq.c
38839+++ b/drivers/cpufreq/cpufreq.c
38840@@ -1878,7 +1878,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
38841 #endif
38842
38843 mutex_lock(&cpufreq_governor_mutex);
38844- list_del(&governor->governor_list);
38845+ pax_list_del(&governor->governor_list);
38846 mutex_unlock(&cpufreq_governor_mutex);
38847 return;
38848 }
38849@@ -2108,7 +2108,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
38850 return NOTIFY_OK;
38851 }
38852
38853-static struct notifier_block __refdata cpufreq_cpu_notifier = {
38854+static struct notifier_block cpufreq_cpu_notifier = {
38855 .notifier_call = cpufreq_cpu_callback,
38856 };
38857
38858@@ -2141,8 +2141,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
38859
38860 pr_debug("trying to register driver %s\n", driver_data->name);
38861
38862- if (driver_data->setpolicy)
38863- driver_data->flags |= CPUFREQ_CONST_LOOPS;
38864+ if (driver_data->setpolicy) {
38865+ pax_open_kernel();
38866+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
38867+ pax_close_kernel();
38868+ }
38869
38870 write_lock_irqsave(&cpufreq_driver_lock, flags);
38871 if (cpufreq_driver) {
38872diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
38873index e6be635..f8a90dc 100644
38874--- a/drivers/cpufreq/cpufreq_governor.c
38875+++ b/drivers/cpufreq/cpufreq_governor.c
38876@@ -187,7 +187,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38877 struct dbs_data *dbs_data;
38878 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
38879 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
38880- struct od_ops *od_ops = NULL;
38881+ const struct od_ops *od_ops = NULL;
38882 struct od_dbs_tuners *od_tuners = NULL;
38883 struct cs_dbs_tuners *cs_tuners = NULL;
38884 struct cpu_dbs_common_info *cpu_cdbs;
38885@@ -253,7 +253,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38886
38887 if ((cdata->governor == GOV_CONSERVATIVE) &&
38888 (!policy->governor->initialized)) {
38889- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38890+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38891
38892 cpufreq_register_notifier(cs_ops->notifier_block,
38893 CPUFREQ_TRANSITION_NOTIFIER);
38894@@ -273,7 +273,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
38895
38896 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
38897 (policy->governor->initialized == 1)) {
38898- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38899+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
38900
38901 cpufreq_unregister_notifier(cs_ops->notifier_block,
38902 CPUFREQ_TRANSITION_NOTIFIER);
38903diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
38904index b5f2b86..daa801b 100644
38905--- a/drivers/cpufreq/cpufreq_governor.h
38906+++ b/drivers/cpufreq/cpufreq_governor.h
38907@@ -205,7 +205,7 @@ struct common_dbs_data {
38908 void (*exit)(struct dbs_data *dbs_data);
38909
38910 /* Governor specific ops, see below */
38911- void *gov_ops;
38912+ const void *gov_ops;
38913 };
38914
38915 /* Governor Per policy data */
38916@@ -225,7 +225,7 @@ struct od_ops {
38917 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
38918 unsigned int freq_next, unsigned int relation);
38919 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
38920-};
38921+} __no_const;
38922
38923 struct cs_ops {
38924 struct notifier_block *notifier_block;
38925diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
38926index 18d4091..434be15 100644
38927--- a/drivers/cpufreq/cpufreq_ondemand.c
38928+++ b/drivers/cpufreq/cpufreq_ondemand.c
38929@@ -521,7 +521,7 @@ static void od_exit(struct dbs_data *dbs_data)
38930
38931 define_get_cpu_dbs_routines(od_cpu_dbs_info);
38932
38933-static struct od_ops od_ops = {
38934+static struct od_ops od_ops __read_only = {
38935 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
38936 .powersave_bias_target = generic_powersave_bias_target,
38937 .freq_increase = dbs_freq_increase,
38938@@ -576,14 +576,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
38939 (struct cpufreq_policy *, unsigned int, unsigned int),
38940 unsigned int powersave_bias)
38941 {
38942- od_ops.powersave_bias_target = f;
38943+ pax_open_kernel();
38944+ *(void **)&od_ops.powersave_bias_target = f;
38945+ pax_close_kernel();
38946 od_set_powersave_bias(powersave_bias);
38947 }
38948 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
38949
38950 void od_unregister_powersave_bias_handler(void)
38951 {
38952- od_ops.powersave_bias_target = generic_powersave_bias_target;
38953+ pax_open_kernel();
38954+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
38955+ pax_close_kernel();
38956 od_set_powersave_bias(0);
38957 }
38958 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
38959diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
38960index 4cf0d28..5830372 100644
38961--- a/drivers/cpufreq/cpufreq_stats.c
38962+++ b/drivers/cpufreq/cpufreq_stats.c
38963@@ -352,7 +352,7 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
38964 }
38965
38966 /* priority=1 so this will get called before cpufreq_remove_dev */
38967-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
38968+static struct notifier_block cpufreq_stat_cpu_notifier = {
38969 .notifier_call = cpufreq_stat_cpu_callback,
38970 .priority = 1,
38971 };
38972diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
38973index b687df8..ae733fc 100644
38974--- a/drivers/cpufreq/intel_pstate.c
38975+++ b/drivers/cpufreq/intel_pstate.c
38976@@ -123,10 +123,10 @@ struct pstate_funcs {
38977 struct cpu_defaults {
38978 struct pstate_adjust_policy pid_policy;
38979 struct pstate_funcs funcs;
38980-};
38981+} __do_const;
38982
38983 static struct pstate_adjust_policy pid_params;
38984-static struct pstate_funcs pstate_funcs;
38985+static struct pstate_funcs *pstate_funcs;
38986
38987 struct perf_limits {
38988 int no_turbo;
38989@@ -517,7 +517,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
38990
38991 cpu->pstate.current_pstate = pstate;
38992
38993- pstate_funcs.set(cpu, pstate);
38994+ pstate_funcs->set(cpu, pstate);
38995 }
38996
38997 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
38998@@ -539,12 +539,12 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
38999 {
39000 sprintf(cpu->name, "Intel 2nd generation core");
39001
39002- cpu->pstate.min_pstate = pstate_funcs.get_min();
39003- cpu->pstate.max_pstate = pstate_funcs.get_max();
39004- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39005+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39006+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39007+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39008
39009- if (pstate_funcs.get_vid)
39010- pstate_funcs.get_vid(cpu);
39011+ if (pstate_funcs->get_vid)
39012+ pstate_funcs->get_vid(cpu);
39013
39014 /*
39015 * goto max pstate so we don't slow up boot if we are built-in if we are
39016@@ -808,9 +808,9 @@ static int intel_pstate_msrs_not_valid(void)
39017 rdmsrl(MSR_IA32_APERF, aperf);
39018 rdmsrl(MSR_IA32_MPERF, mperf);
39019
39020- if (!pstate_funcs.get_max() ||
39021- !pstate_funcs.get_min() ||
39022- !pstate_funcs.get_turbo())
39023+ if (!pstate_funcs->get_max() ||
39024+ !pstate_funcs->get_min() ||
39025+ !pstate_funcs->get_turbo())
39026 return -ENODEV;
39027
39028 rdmsrl(MSR_IA32_APERF, tmp);
39029@@ -824,7 +824,7 @@ static int intel_pstate_msrs_not_valid(void)
39030 return 0;
39031 }
39032
39033-static void copy_pid_params(struct pstate_adjust_policy *policy)
39034+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39035 {
39036 pid_params.sample_rate_ms = policy->sample_rate_ms;
39037 pid_params.p_gain_pct = policy->p_gain_pct;
39038@@ -836,11 +836,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39039
39040 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39041 {
39042- pstate_funcs.get_max = funcs->get_max;
39043- pstate_funcs.get_min = funcs->get_min;
39044- pstate_funcs.get_turbo = funcs->get_turbo;
39045- pstate_funcs.set = funcs->set;
39046- pstate_funcs.get_vid = funcs->get_vid;
39047+ pstate_funcs = funcs;
39048 }
39049
39050 #if IS_ENABLED(CONFIG_ACPI)
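
The intel_pstate change stops copying five callbacks into a writable global struct and instead keeps a single pointer to the (now __do_const) table, shrinking what a memory-corruption attacker could usefully overwrite. A sketch of the pointer-to-const-ops shape, with illustrative names:

/* One writable pointer to a const table replaces a writable struct
 * full of function pointers. */
#include <stdio.h>

struct pstate_funcs {
        int (*get_max)(void);
        int (*get_min)(void);
};

static int core_get_max(void) { return 42; }
static int core_get_min(void) { return 8; }

static const struct pstate_funcs core_funcs = {
        .get_max = core_get_max,
        .get_min = core_get_min,
};

static const struct pstate_funcs *pstate_funcs; /* was: a mutable copy */

int main(void)
{
        pstate_funcs = &core_funcs;             /* was: memberwise copies */
        printf("max=%d min=%d\n",
               pstate_funcs->get_max(), pstate_funcs->get_min());
        return 0;
}
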
39051diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39052index 3d1cba9..0ab21d2 100644
39053--- a/drivers/cpufreq/p4-clockmod.c
39054+++ b/drivers/cpufreq/p4-clockmod.c
39055@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39056 case 0x0F: /* Core Duo */
39057 case 0x16: /* Celeron Core */
39058 case 0x1C: /* Atom */
39059- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39060+ pax_open_kernel();
39061+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39062+ pax_close_kernel();
39063 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39064 case 0x0D: /* Pentium M (Dothan) */
39065- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39066+ pax_open_kernel();
39067+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39068+ pax_close_kernel();
39069 /* fall through */
39070 case 0x09: /* Pentium M (Banias) */
39071 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39072@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39073
39074 /* on P-4s, the TSC runs with constant frequency independent whether
39075 * throttling is active or not. */
39076- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39077+ pax_open_kernel();
39078+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39079+ pax_close_kernel();
39080
39081 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39082 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39083diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39084index 724ffbd..ad83692 100644
39085--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39086+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39087@@ -18,14 +18,12 @@
39088 #include <asm/head.h>
39089 #include <asm/timer.h>
39090
39091-static struct cpufreq_driver *cpufreq_us3_driver;
39092-
39093 struct us3_freq_percpu_info {
39094 struct cpufreq_frequency_table table[4];
39095 };
39096
39097 /* Indexed by cpu number. */
39098-static struct us3_freq_percpu_info *us3_freq_table;
39099+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39100
39101 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39102 * in the Safari config register.
39103@@ -156,14 +154,26 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39104
39105 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39106 {
39107- if (cpufreq_us3_driver) {
39108- cpufreq_frequency_table_put_attr(policy->cpu);
39109- us3_freq_target(policy, 0);
39110- }
39111+ cpufreq_frequency_table_put_attr(policy->cpu);
39112+ us3_freq_target(policy, 0);
39113
39114 return 0;
39115 }
39116
39117+static int __init us3_freq_init(void);
39118+static void __exit us3_freq_exit(void);
39119+
39120+static struct cpufreq_driver cpufreq_us3_driver = {
39121+ .init = us3_freq_cpu_init,
39122+ .verify = cpufreq_generic_frequency_table_verify,
39123+ .target_index = us3_freq_target,
39124+ .get = us3_freq_get,
39125+ .exit = us3_freq_cpu_exit,
39126+ .owner = THIS_MODULE,
39127+ .name = "UltraSPARC-III",
39128+
39129+};
39130+
39131 static int __init us3_freq_init(void)
39132 {
39133 unsigned long manuf, impl, ver;
39134@@ -180,55 +190,15 @@ static int __init us3_freq_init(void)
39135 (impl == CHEETAH_IMPL ||
39136 impl == CHEETAH_PLUS_IMPL ||
39137 impl == JAGUAR_IMPL ||
39138- impl == PANTHER_IMPL)) {
39139- struct cpufreq_driver *driver;
39140-
39141- ret = -ENOMEM;
39142- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39143- if (!driver)
39144- goto err_out;
39145-
39146- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39147- GFP_KERNEL);
39148- if (!us3_freq_table)
39149- goto err_out;
39150-
39151- driver->init = us3_freq_cpu_init;
39152- driver->verify = cpufreq_generic_frequency_table_verify;
39153- driver->target_index = us3_freq_target;
39154- driver->get = us3_freq_get;
39155- driver->exit = us3_freq_cpu_exit;
39156- strcpy(driver->name, "UltraSPARC-III");
39157-
39158- cpufreq_us3_driver = driver;
39159- ret = cpufreq_register_driver(driver);
39160- if (ret)
39161- goto err_out;
39162-
39163- return 0;
39164-
39165-err_out:
39166- if (driver) {
39167- kfree(driver);
39168- cpufreq_us3_driver = NULL;
39169- }
39170- kfree(us3_freq_table);
39171- us3_freq_table = NULL;
39172- return ret;
39173- }
39174+ impl == PANTHER_IMPL))
39175+ return cpufreq_register_driver(&cpufreq_us3_driver);
39176
39177 return -ENODEV;
39178 }
39179
39180 static void __exit us3_freq_exit(void)
39181 {
39182- if (cpufreq_us3_driver) {
39183- cpufreq_unregister_driver(cpufreq_us3_driver);
39184- kfree(cpufreq_us3_driver);
39185- cpufreq_us3_driver = NULL;
39186- kfree(us3_freq_table);
39187- us3_freq_table = NULL;
39188- }
39189+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39190 }
39191
39192 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
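
The sparc-us3 rewrite replaces a kzalloc'd driver object and per-boot table, each needing its own error unwinding, with statically defined objects that are simply registered. A toy version of the simplified shape (struct toy_driver and register_driver() are stand-ins, not the cpufreq API):

#include <stdio.h>

struct toy_driver {
        const char *name;
        int (*init)(void);
};

static int us3_init(void)
{
        return 0;
}

static struct toy_driver us3_driver = {
        .name = "UltraSPARC-III",
        .init = us3_init,
};

static int register_driver(struct toy_driver *d)
{
        /* static object: no allocation, no error unwinding needed */
        printf("registered %s\n", d->name);
        return d->init();
}

int main(void)
{
        return register_driver(&us3_driver);
}
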
39193diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39194index 4e1daca..e707b61 100644
39195--- a/drivers/cpufreq/speedstep-centrino.c
39196+++ b/drivers/cpufreq/speedstep-centrino.c
39197@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39198 !cpu_has(cpu, X86_FEATURE_EST))
39199 return -ENODEV;
39200
39201- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39202- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39203+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39204+ pax_open_kernel();
39205+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39206+ pax_close_kernel();
39207+ }
39208
39209 if (policy->cpu != 0)
39210 return -ENODEV;
39211diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39212index 06dbe7c..c2c8671 100644
39213--- a/drivers/cpuidle/driver.c
39214+++ b/drivers/cpuidle/driver.c
39215@@ -202,7 +202,7 @@ static int poll_idle(struct cpuidle_device *dev,
39216
39217 static void poll_idle_init(struct cpuidle_driver *drv)
39218 {
39219- struct cpuidle_state *state = &drv->states[0];
39220+ cpuidle_state_no_const *state = &drv->states[0];
39221
39222 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39223 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39224diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39225index ca89412..a7b9c49 100644
39226--- a/drivers/cpuidle/governor.c
39227+++ b/drivers/cpuidle/governor.c
39228@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39229 mutex_lock(&cpuidle_lock);
39230 if (__cpuidle_find_governor(gov->name) == NULL) {
39231 ret = 0;
39232- list_add_tail(&gov->governor_list, &cpuidle_governors);
39233+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39234 if (!cpuidle_curr_governor ||
39235 cpuidle_curr_governor->rating < gov->rating)
39236 cpuidle_switch_governor(gov);
39237diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39238index e918b6d..f87ea80 100644
39239--- a/drivers/cpuidle/sysfs.c
39240+++ b/drivers/cpuidle/sysfs.c
39241@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39242 NULL
39243 };
39244
39245-static struct attribute_group cpuidle_attr_group = {
39246+static attribute_group_no_const cpuidle_attr_group = {
39247 .attrs = cpuidle_default_attrs,
39248 .name = "cpuidle",
39249 };
39250diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39251index 12fea3e..1e28f47 100644
39252--- a/drivers/crypto/hifn_795x.c
39253+++ b/drivers/crypto/hifn_795x.c
39254@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39255 MODULE_PARM_DESC(hifn_pll_ref,
39256 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39257
39258-static atomic_t hifn_dev_number;
39259+static atomic_unchecked_t hifn_dev_number;
39260
39261 #define ACRYPTO_OP_DECRYPT 0
39262 #define ACRYPTO_OP_ENCRYPT 1
39263@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39264 goto err_out_disable_pci_device;
39265
39266 snprintf(name, sizeof(name), "hifn%d",
39267- atomic_inc_return(&hifn_dev_number)-1);
39268+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39269
39270 err = pci_request_regions(pdev, name);
39271 if (err)
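
hifn_dev_number only names devices, so wraparound is harmless; atomic_unchecked_t records that fact and exempts the counter from PaX's reference-count overflow checks. A userspace sketch of such an ID-only counter with C11 atomics (next_dev_id() is an illustrative name):

/* Wraparound here just reuses names, it cannot corrupt object
 * lifetimes - which is why the patch opts out of REFCOUNT checking. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint dev_number;

static unsigned int next_dev_id(void)
{
        return atomic_fetch_add(&dev_number, 1);
}

int main(void)
{
        char name[16];

        snprintf(name, sizeof(name), "hifn%u", next_dev_id());
        puts(name);
        return 0;
}
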
39272diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39273index a0b2f7e..1b6f028 100644
39274--- a/drivers/devfreq/devfreq.c
39275+++ b/drivers/devfreq/devfreq.c
39276@@ -607,7 +607,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39277 goto err_out;
39278 }
39279
39280- list_add(&governor->node, &devfreq_governor_list);
39281+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39282
39283 list_for_each_entry(devfreq, &devfreq_list, node) {
39284 int ret = 0;
39285@@ -695,7 +695,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39286 }
39287 }
39288
39289- list_del(&governor->node);
39290+ pax_list_del((struct list_head *)&governor->node);
39291 err_out:
39292 mutex_unlock(&devfreq_list_lock);
39293
39294diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39295index 0d765c0..60b7480 100644
39296--- a/drivers/dma/sh/shdmac.c
39297+++ b/drivers/dma/sh/shdmac.c
39298@@ -511,7 +511,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39299 return ret;
39300 }
39301
39302-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39303+static struct notifier_block sh_dmae_nmi_notifier = {
39304 .notifier_call = sh_dmae_nmi_handler,
39305
39306 /* Run before NMI debug handler and KGDB */
39307diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39308index 1026743..80b081c 100644
39309--- a/drivers/edac/edac_device.c
39310+++ b/drivers/edac/edac_device.c
39311@@ -474,9 +474,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39312 */
39313 int edac_device_alloc_index(void)
39314 {
39315- static atomic_t device_indexes = ATOMIC_INIT(0);
39316+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39317
39318- return atomic_inc_return(&device_indexes) - 1;
39319+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39320 }
39321 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39322
39323diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39324index e5bdf21..b8f9055 100644
39325--- a/drivers/edac/edac_mc_sysfs.c
39326+++ b/drivers/edac/edac_mc_sysfs.c
39327@@ -152,7 +152,7 @@ static const char * const edac_caps[] = {
39328 struct dev_ch_attribute {
39329 struct device_attribute attr;
39330 int channel;
39331-};
39332+} __do_const;
39333
39334 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39335 struct dev_ch_attribute dev_attr_legacy_##_name = \
39336@@ -1009,14 +1009,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39337 }
39338
39339 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39340+ pax_open_kernel();
39341 if (mci->get_sdram_scrub_rate) {
39342- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39343- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39344+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39345+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39346 }
39347 if (mci->set_sdram_scrub_rate) {
39348- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39349- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39350+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39351+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39352 }
39353+ pax_close_kernel();
39354 err = device_create_file(&mci->dev,
39355 &dev_attr_sdram_scrub_rate);
39356 if (err) {
39357diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39358index 2cf44b4d..6dd2dc7 100644
39359--- a/drivers/edac/edac_pci.c
39360+++ b/drivers/edac/edac_pci.c
39361@@ -29,7 +29,7 @@
39362
39363 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39364 static LIST_HEAD(edac_pci_list);
39365-static atomic_t pci_indexes = ATOMIC_INIT(0);
39366+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39367
39368 /*
39369 * edac_pci_alloc_ctl_info
39370@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39371 */
39372 int edac_pci_alloc_index(void)
39373 {
39374- return atomic_inc_return(&pci_indexes) - 1;
39375+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39376 }
39377 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39378
39379diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39380index e8658e4..22746d6 100644
39381--- a/drivers/edac/edac_pci_sysfs.c
39382+++ b/drivers/edac/edac_pci_sysfs.c
39383@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39384 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39385 static int edac_pci_poll_msec = 1000; /* one second workq period */
39386
39387-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39388-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39389+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39390+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39391
39392 static struct kobject *edac_pci_top_main_kobj;
39393 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39394@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
39395 void *value;
39396 ssize_t(*show) (void *, char *);
39397 ssize_t(*store) (void *, const char *, size_t);
39398-};
39399+} __do_const;
39400
39401 /* Set of show/store abstract level functions for PCI Parity object */
39402 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39403@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39404 edac_printk(KERN_CRIT, EDAC_PCI,
39405 "Signaled System Error on %s\n",
39406 pci_name(dev));
39407- atomic_inc(&pci_nonparity_count);
39408+ atomic_inc_unchecked(&pci_nonparity_count);
39409 }
39410
39411 if (status & (PCI_STATUS_PARITY)) {
39412@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39413 "Master Data Parity Error on %s\n",
39414 pci_name(dev));
39415
39416- atomic_inc(&pci_parity_count);
39417+ atomic_inc_unchecked(&pci_parity_count);
39418 }
39419
39420 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39421@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39422 "Detected Parity Error on %s\n",
39423 pci_name(dev));
39424
39425- atomic_inc(&pci_parity_count);
39426+ atomic_inc_unchecked(&pci_parity_count);
39427 }
39428 }
39429
39430@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39431 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
39432 "Signaled System Error on %s\n",
39433 pci_name(dev));
39434- atomic_inc(&pci_nonparity_count);
39435+ atomic_inc_unchecked(&pci_nonparity_count);
39436 }
39437
39438 if (status & (PCI_STATUS_PARITY)) {
39439@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39440 "Master Data Parity Error on "
39441 "%s\n", pci_name(dev));
39442
39443- atomic_inc(&pci_parity_count);
39444+ atomic_inc_unchecked(&pci_parity_count);
39445 }
39446
39447 if (status & (PCI_STATUS_DETECTED_PARITY)) {
39448@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39449 "Detected Parity Error on %s\n",
39450 pci_name(dev));
39451
39452- atomic_inc(&pci_parity_count);
39453+ atomic_inc_unchecked(&pci_parity_count);
39454 }
39455 }
39456 }
39457@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
39458 if (!check_pci_errors)
39459 return;
39460
39461- before_count = atomic_read(&pci_parity_count);
39462+ before_count = atomic_read_unchecked(&pci_parity_count);
39463
39464 /* scan all PCI devices looking for a Parity Error on devices and
39465 * bridges.
39466@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
39467 /* Only if operator has selected panic on PCI Error */
39468 if (edac_pci_get_panic_on_pe()) {
39469 /* If the count is different 'after' from 'before' */
39470- if (before_count != atomic_read(&pci_parity_count))
39471+ if (before_count != atomic_read_unchecked(&pci_parity_count))
39472 panic("EDAC: PCI Parity Error");
39473 }
39474 }
39475diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
39476index 51b7e3a..aa8a3e8 100644
39477--- a/drivers/edac/mce_amd.h
39478+++ b/drivers/edac/mce_amd.h
39479@@ -77,7 +77,7 @@ struct amd_decoder_ops {
39480 bool (*mc0_mce)(u16, u8);
39481 bool (*mc1_mce)(u16, u8);
39482 bool (*mc2_mce)(u16, u8);
39483-};
39484+} __no_const;
39485
39486 void amd_report_gart_errors(bool);
39487 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
39488diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
39489index 57ea7f4..af06b76 100644
39490--- a/drivers/firewire/core-card.c
39491+++ b/drivers/firewire/core-card.c
39492@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
39493 const struct fw_card_driver *driver,
39494 struct device *device)
39495 {
39496- static atomic_t index = ATOMIC_INIT(-1);
39497+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
39498
39499- card->index = atomic_inc_return(&index);
39500+ card->index = atomic_inc_return_unchecked(&index);
39501 card->driver = driver;
39502 card->device = device;
39503 card->current_tlabel = 0;
39504@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
39505
39506 void fw_core_remove_card(struct fw_card *card)
39507 {
39508- struct fw_card_driver dummy_driver = dummy_driver_template;
39509+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
39510
39511 card->driver->update_phy_reg(card, 4,
39512 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
39513diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
39514index 2c6d5e1..a2cca6b 100644
39515--- a/drivers/firewire/core-device.c
39516+++ b/drivers/firewire/core-device.c
39517@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
39518 struct config_rom_attribute {
39519 struct device_attribute attr;
39520 u32 key;
39521-};
39522+} __do_const;
39523
39524 static ssize_t show_immediate(struct device *dev,
39525 struct device_attribute *dattr, char *buf)
39526diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
39527index 0e79951..b180217 100644
39528--- a/drivers/firewire/core-transaction.c
39529+++ b/drivers/firewire/core-transaction.c
39530@@ -38,6 +38,7 @@
39531 #include <linux/timer.h>
39532 #include <linux/types.h>
39533 #include <linux/workqueue.h>
39534+#include <linux/sched.h>
39535
39536 #include <asm/byteorder.h>
39537
39538diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
39539index 515a42c..5ecf3ba 100644
39540--- a/drivers/firewire/core.h
39541+++ b/drivers/firewire/core.h
39542@@ -111,6 +111,7 @@ struct fw_card_driver {
39543
39544 int (*stop_iso)(struct fw_iso_context *ctx);
39545 };
39546+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
39547
39548 void fw_card_initialize(struct fw_card *card,
39549 const struct fw_card_driver *driver, struct device *device);
39550diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
39551index 94a58a0..f5eba42 100644
39552--- a/drivers/firmware/dmi-id.c
39553+++ b/drivers/firmware/dmi-id.c
39554@@ -16,7 +16,7 @@
39555 struct dmi_device_attribute{
39556 struct device_attribute dev_attr;
39557 int field;
39558-};
39559+} __do_const;
39560 #define to_dmi_dev_attr(_dev_attr) \
39561 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
39562
39563diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
39564index c7e81ff..94a7401 100644
39565--- a/drivers/firmware/dmi_scan.c
39566+++ b/drivers/firmware/dmi_scan.c
39567@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
39568 if (buf == NULL)
39569 return -1;
39570
39571- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
39572+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
39573
39574 iounmap(buf);
39575 return 0;
39576diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
39577index 1491dd4..aa910db 100644
39578--- a/drivers/firmware/efi/cper.c
39579+++ b/drivers/firmware/efi/cper.c
39580@@ -41,12 +41,12 @@
39581 */
39582 u64 cper_next_record_id(void)
39583 {
39584- static atomic64_t seq;
39585+ static atomic64_unchecked_t seq;
39586
39587- if (!atomic64_read(&seq))
39588- atomic64_set(&seq, ((u64)get_seconds()) << 32);
39589+ if (!atomic64_read_unchecked(&seq))
39590+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
39591
39592- return atomic64_inc_return(&seq);
39593+ return atomic64_inc_return_unchecked(&seq);
39594 }
39595 EXPORT_SYMBOL_GPL(cper_next_record_id);
39596
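
cper_next_record_id() seeds a 64-bit sequence counter with the current time in the high 32 bits, so record ids stay unique across reboots while the low bits simply increment. A userspace approximation (next_record_id() is an illustrative name; the unsynchronized first-use seeding mirrors the kernel code's structure and is benign):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static _Atomic uint64_t seq;

static uint64_t next_record_id(void)
{
        /* at worst two early callers both store a seed, still unique
         * against previous boots */
        if (atomic_load(&seq) == 0)
                atomic_store(&seq, (uint64_t)time(NULL) << 32);
        return atomic_fetch_add(&seq, 1) + 1;   /* ~atomic64_inc_return() */
}

int main(void)
{
        printf("record id: %016llx\n",
               (unsigned long long)next_record_id());
        return 0;
}
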
39597diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
39598index 2e2fbde..7676c8b 100644
39599--- a/drivers/firmware/efi/efi.c
39600+++ b/drivers/firmware/efi/efi.c
39601@@ -81,14 +81,16 @@ static struct attribute_group efi_subsys_attr_group = {
39602 };
39603
39604 static struct efivars generic_efivars;
39605-static struct efivar_operations generic_ops;
39606+static efivar_operations_no_const generic_ops __read_only;
39607
39608 static int generic_ops_register(void)
39609 {
39610- generic_ops.get_variable = efi.get_variable;
39611- generic_ops.set_variable = efi.set_variable;
39612- generic_ops.get_next_variable = efi.get_next_variable;
39613- generic_ops.query_variable_store = efi_query_variable_store;
39614+ pax_open_kernel();
39615+ *(void **)&generic_ops.get_variable = efi.get_variable;
39616+ *(void **)&generic_ops.set_variable = efi.set_variable;
39617+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
39618+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
39619+ pax_close_kernel();
39620
39621 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
39622 }
39623diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
39624index 3dc2482..7bd2f61 100644
39625--- a/drivers/firmware/efi/efivars.c
39626+++ b/drivers/firmware/efi/efivars.c
39627@@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
39628 static int
39629 create_efivars_bin_attributes(void)
39630 {
39631- struct bin_attribute *attr;
39632+ bin_attribute_no_const *attr;
39633 int error;
39634
39635 /* new_var */
39636diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
39637index 2a90ba6..07f3733 100644
39638--- a/drivers/firmware/google/memconsole.c
39639+++ b/drivers/firmware/google/memconsole.c
39640@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
39641 if (!found_memconsole())
39642 return -ENODEV;
39643
39644- memconsole_bin_attr.size = memconsole_length;
39645+ pax_open_kernel();
39646+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
39647+ pax_close_kernel();
39648
39649 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
39650
39651diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
39652index ec19036..8ffafc2 100644
39653--- a/drivers/gpio/gpio-em.c
39654+++ b/drivers/gpio/gpio-em.c
39655@@ -257,7 +257,7 @@ static int em_gio_probe(struct platform_device *pdev)
39656 struct em_gio_priv *p;
39657 struct resource *io[2], *irq[2];
39658 struct gpio_chip *gpio_chip;
39659- struct irq_chip *irq_chip;
39660+ irq_chip_no_const *irq_chip;
39661 const char *name = dev_name(&pdev->dev);
39662 int ret;
39663
39664diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
39665index 814addb..0937d7f 100644
39666--- a/drivers/gpio/gpio-ich.c
39667+++ b/drivers/gpio/gpio-ich.c
39668@@ -71,7 +71,7 @@ struct ichx_desc {
39669 /* Some chipsets have quirks, let these use their own request/get */
39670 int (*request)(struct gpio_chip *chip, unsigned offset);
39671 int (*get)(struct gpio_chip *chip, unsigned offset);
39672-};
39673+} __do_const;
39674
39675 static struct {
39676 spinlock_t lock;
39677diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
39678index 8b7e719..dc089dc 100644
39679--- a/drivers/gpio/gpio-rcar.c
39680+++ b/drivers/gpio/gpio-rcar.c
39681@@ -316,7 +316,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
39682 struct gpio_rcar_priv *p;
39683 struct resource *io, *irq;
39684 struct gpio_chip *gpio_chip;
39685- struct irq_chip *irq_chip;
39686+ irq_chip_no_const *irq_chip;
39687 const char *name = dev_name(&pdev->dev);
39688 int ret;
39689
39690diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
39691index 9902732..64b62dd 100644
39692--- a/drivers/gpio/gpio-vr41xx.c
39693+++ b/drivers/gpio/gpio-vr41xx.c
39694@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
39695 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
39696 maskl, pendl, maskh, pendh);
39697
39698- atomic_inc(&irq_err_count);
39699+ atomic_inc_unchecked(&irq_err_count);
39700
39701 return -EINVAL;
39702 }
39703diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
39704index d6cf77c..2842146 100644
39705--- a/drivers/gpu/drm/drm_crtc.c
39706+++ b/drivers/gpu/drm/drm_crtc.c
39707@@ -3102,7 +3102,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
39708 goto done;
39709 }
39710
39711- if (copy_to_user(&enum_ptr[copied].name,
39712+ if (copy_to_user(enum_ptr[copied].name,
39713 &prop_enum->name, DRM_PROP_NAME_LEN)) {
39714 ret = -EFAULT;
39715 goto done;
39716diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
39717index 01361ab..891e821 100644
39718--- a/drivers/gpu/drm/drm_crtc_helper.c
39719+++ b/drivers/gpu/drm/drm_crtc_helper.c
39720@@ -338,7 +338,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
39721 struct drm_crtc *tmp;
39722 int crtc_mask = 1;
39723
39724- WARN(!crtc, "checking null crtc?\n");
39725+ BUG_ON(!crtc);
39726
39727 dev = crtc->dev;
39728
39729diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
39730index d9137e4..69b73a0 100644
39731--- a/drivers/gpu/drm/drm_drv.c
39732+++ b/drivers/gpu/drm/drm_drv.c
39733@@ -233,7 +233,7 @@ module_exit(drm_core_exit);
39734 /**
39735 * Copy and IOCTL return string to user space
39736 */
39737-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
39738+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
39739 {
39740 int len;
39741
39742@@ -303,7 +303,7 @@ long drm_ioctl(struct file *filp,
39743 struct drm_file *file_priv = filp->private_data;
39744 struct drm_device *dev;
39745 const struct drm_ioctl_desc *ioctl = NULL;
39746- drm_ioctl_t *func;
39747+ drm_ioctl_no_const_t func;
39748 unsigned int nr = DRM_IOCTL_NR(cmd);
39749 int retcode = -EINVAL;
39750 char stack_kdata[128];
39751diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
39752index c5b929c..8a3b8be 100644
39753--- a/drivers/gpu/drm/drm_fops.c
39754+++ b/drivers/gpu/drm/drm_fops.c
39755@@ -97,7 +97,7 @@ int drm_open(struct inode *inode, struct file *filp)
39756 if (drm_device_is_unplugged(dev))
39757 return -ENODEV;
39758
39759- if (!dev->open_count++)
39760+ if (local_inc_return(&dev->open_count) == 1)
39761 need_setup = 1;
39762 mutex_lock(&dev->struct_mutex);
39763 old_imapping = inode->i_mapping;
39764@@ -127,7 +127,7 @@ err_undo:
39765 iput(container_of(dev->dev_mapping, struct inode, i_data));
39766 dev->dev_mapping = old_mapping;
39767 mutex_unlock(&dev->struct_mutex);
39768- dev->open_count--;
39769+ local_dec(&dev->open_count);
39770 return retcode;
39771 }
39772 EXPORT_SYMBOL(drm_open);
39773@@ -467,7 +467,7 @@ int drm_release(struct inode *inode, struct file *filp)
39774
39775 mutex_lock(&drm_global_mutex);
39776
39777- DRM_DEBUG("open_count = %d\n", dev->open_count);
39778+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
39779
39780 if (dev->driver->preclose)
39781 dev->driver->preclose(dev, file_priv);
39782@@ -476,10 +476,10 @@ int drm_release(struct inode *inode, struct file *filp)
39783 * Begin inline drm_release
39784 */
39785
39786- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
39787+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
39788 task_pid_nr(current),
39789 (long)old_encode_dev(file_priv->minor->device),
39790- dev->open_count);
39791+ local_read(&dev->open_count));
39792
39793 /* Release any auth tokens that might point to this file_priv,
39794 (do that under the drm_global_mutex) */
39795@@ -577,7 +577,7 @@ int drm_release(struct inode *inode, struct file *filp)
39796 * End inline drm_release
39797 */
39798
39799- if (!--dev->open_count) {
39800+ if (local_dec_and_test(&dev->open_count)) {
39801 if (atomic_read(&dev->ioctl_count)) {
39802 DRM_ERROR("Device busy: %d\n",
39803 atomic_read(&dev->ioctl_count));
39804diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
39805index 3d2e91c..d31c4c9 100644
39806--- a/drivers/gpu/drm/drm_global.c
39807+++ b/drivers/gpu/drm/drm_global.c
39808@@ -36,7 +36,7 @@
39809 struct drm_global_item {
39810 struct mutex mutex;
39811 void *object;
39812- int refcount;
39813+ atomic_t refcount;
39814 };
39815
39816 static struct drm_global_item glob[DRM_GLOBAL_NUM];
39817@@ -49,7 +49,7 @@ void drm_global_init(void)
39818 struct drm_global_item *item = &glob[i];
39819 mutex_init(&item->mutex);
39820 item->object = NULL;
39821- item->refcount = 0;
39822+ atomic_set(&item->refcount, 0);
39823 }
39824 }
39825
39826@@ -59,7 +59,7 @@ void drm_global_release(void)
39827 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
39828 struct drm_global_item *item = &glob[i];
39829 BUG_ON(item->object != NULL);
39830- BUG_ON(item->refcount != 0);
39831+ BUG_ON(atomic_read(&item->refcount) != 0);
39832 }
39833 }
39834
39835@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
39836 struct drm_global_item *item = &glob[ref->global_type];
39837
39838 mutex_lock(&item->mutex);
39839- if (item->refcount == 0) {
39840+ if (atomic_read(&item->refcount) == 0) {
39841 item->object = kzalloc(ref->size, GFP_KERNEL);
39842 if (unlikely(item->object == NULL)) {
39843 ret = -ENOMEM;
39844@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
39845 goto out_err;
39846
39847 }
39848- ++item->refcount;
39849+ atomic_inc(&item->refcount);
39850 ref->object = item->object;
39851 mutex_unlock(&item->mutex);
39852 return 0;
39853@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
39854 struct drm_global_item *item = &glob[ref->global_type];
39855
39856 mutex_lock(&item->mutex);
39857- BUG_ON(item->refcount == 0);
39858+ BUG_ON(atomic_read(&item->refcount) == 0);
39859 BUG_ON(ref->object != item->object);
39860- if (--item->refcount == 0) {
39861+ if (atomic_dec_and_test(&item->refcount)) {
39862 ref->release(ref);
39863 item->object = NULL;
39864 }
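
The drm_global hunk converts the item refcount from a mutex-protected int to atomic_t and releases the object on atomic_dec_and_test(). The same dec-and-test pattern with C11 atomics, where struct item and release() are hypothetical stand-ins for the DRM global item:

#include <stdatomic.h>
#include <stdio.h>

struct item {
        atomic_int refcount;
};

static void release(struct item *it)
{
        (void)it;
        puts("released");
}

static void item_ref(struct item *it)
{
        atomic_fetch_add(&it->refcount, 1);
}

static void item_unref(struct item *it)
{
        /* fetch_sub returns the old value: old == 1 means we were last */
        if (atomic_fetch_sub(&it->refcount, 1) == 1)
                release(it);
}

int main(void)
{
        struct item it = { .refcount = 1 };

        item_ref(&it);
        item_unref(&it);
        item_unref(&it);        /* prints "released" here */
        return 0;
}
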
39865diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
39866index 7d5a152..d7186da 100644
39867--- a/drivers/gpu/drm/drm_info.c
39868+++ b/drivers/gpu/drm/drm_info.c
39869@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
39870 struct drm_local_map *map;
39871 struct drm_map_list *r_list;
39872
39873- /* Hardcoded from _DRM_FRAME_BUFFER,
39874- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
39875- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
39876- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
39877+ static const char * const types[] = {
39878+ [_DRM_FRAME_BUFFER] = "FB",
39879+ [_DRM_REGISTERS] = "REG",
39880+ [_DRM_SHM] = "SHM",
39881+ [_DRM_AGP] = "AGP",
39882+ [_DRM_SCATTER_GATHER] = "SG",
39883+ [_DRM_CONSISTENT] = "PCI",
39884+ [_DRM_GEM] = "GEM" };
39885 const char *type;
39886 int i;
39887
39888@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
39889 map = r_list->map;
39890 if (!map)
39891 continue;
39892- if (map->type < 0 || map->type > 5)
39893+ if (map->type >= ARRAY_SIZE(types))
39894 type = "??";
39895 else
39896 type = types[map->type];
39897@@ -257,7 +261,11 @@ int drm_vma_info(struct seq_file *m, void *data)
39898 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
39899 vma->vm_flags & VM_LOCKED ? 'l' : '-',
39900 vma->vm_flags & VM_IO ? 'i' : '-',
39901+#ifdef CONFIG_GRKERNSEC_HIDESYM
39902+ 0);
39903+#else
39904 vma->vm_pgoff);
39905+#endif
39906
39907 #if defined(__i386__)
39908 pgprot = pgprot_val(vma->vm_page_prot);
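
The drm_info change replaces the positional string table and the hard-coded "type > 5" bound with designated initializers and an ARRAY_SIZE() check, so the table cannot drift out of sync with the map types. A self-contained version of that shape (the enum values here are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_GEM };

static const char * const types[] = {
        [MAP_FB]  = "FB",
        [MAP_REG] = "REG",
        [MAP_SHM] = "SHM",
        [MAP_GEM] = "GEM",
};

static const char *type_name(unsigned int t)
{
        /* bound derived from the table itself, not a magic number */
        return t < ARRAY_SIZE(types) ? types[t] : "??";
}

int main(void)
{
        printf("%s %s\n", type_name(MAP_GEM), type_name(99));
        return 0;
}
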
39909diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
39910index 2f4c4343..dd12cd2 100644
39911--- a/drivers/gpu/drm/drm_ioc32.c
39912+++ b/drivers/gpu/drm/drm_ioc32.c
39913@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
39914 request = compat_alloc_user_space(nbytes);
39915 if (!access_ok(VERIFY_WRITE, request, nbytes))
39916 return -EFAULT;
39917- list = (struct drm_buf_desc *) (request + 1);
39918+ list = (struct drm_buf_desc __user *) (request + 1);
39919
39920 if (__put_user(count, &request->count)
39921 || __put_user(list, &request->list))
39922@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
39923 request = compat_alloc_user_space(nbytes);
39924 if (!access_ok(VERIFY_WRITE, request, nbytes))
39925 return -EFAULT;
39926- list = (struct drm_buf_pub *) (request + 1);
39927+ list = (struct drm_buf_pub __user *) (request + 1);
39928
39929 if (__put_user(count, &request->count)
39930 || __put_user(list, &request->list))
39931@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
39932 return 0;
39933 }
39934
39935-drm_ioctl_compat_t *drm_compat_ioctls[] = {
39936+drm_ioctl_compat_t drm_compat_ioctls[] = {
39937 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
39938 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
39939 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
39940@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
39941 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39942 {
39943 unsigned int nr = DRM_IOCTL_NR(cmd);
39944- drm_ioctl_compat_t *fn;
39945 int ret;
39946
39947 /* Assume that ioctls without an explicit compat routine will just
39948@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39949 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
39950 return drm_ioctl(filp, cmd, arg);
39951
39952- fn = drm_compat_ioctls[nr];
39953-
39954- if (fn != NULL)
39955- ret = (*fn) (filp, cmd, arg);
39956+ if (drm_compat_ioctls[nr] != NULL)
39957+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
39958 else
39959 ret = drm_ioctl(filp, cmd, arg);
39960
39961diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
39962index 66dd3a0..3bed6c4 100644
39963--- a/drivers/gpu/drm/drm_stub.c
39964+++ b/drivers/gpu/drm/drm_stub.c
39965@@ -403,7 +403,7 @@ void drm_unplug_dev(struct drm_device *dev)
39966
39967 drm_device_set_unplugged(dev);
39968
39969- if (dev->open_count == 0) {
39970+ if (local_read(&dev->open_count) == 0) {
39971 drm_put_dev(dev);
39972 }
39973 mutex_unlock(&drm_global_mutex);
39974diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
39975index c22c309..ae758c3 100644
39976--- a/drivers/gpu/drm/drm_sysfs.c
39977+++ b/drivers/gpu/drm/drm_sysfs.c
39978@@ -505,7 +505,7 @@ static void drm_sysfs_release(struct device *dev)
39979 */
39980 int drm_sysfs_device_add(struct drm_minor *minor)
39981 {
39982- char *minor_str;
39983+ const char *minor_str;
39984 int r;
39985
39986 if (minor->type == DRM_MINOR_CONTROL)
39987diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
39988index d4d16ed..8fb0b51 100644
39989--- a/drivers/gpu/drm/i810/i810_drv.h
39990+++ b/drivers/gpu/drm/i810/i810_drv.h
39991@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
39992 int page_flipping;
39993
39994 wait_queue_head_t irq_queue;
39995- atomic_t irq_received;
39996- atomic_t irq_emitted;
39997+ atomic_unchecked_t irq_received;
39998+ atomic_unchecked_t irq_emitted;
39999
40000 int front_offset;
40001 } drm_i810_private_t;
40002diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
40003index 6ed45a9..eb6dc41 100644
40004--- a/drivers/gpu/drm/i915/i915_debugfs.c
40005+++ b/drivers/gpu/drm/i915/i915_debugfs.c
40006@@ -702,7 +702,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
40007 I915_READ(GTIMR));
40008 }
40009 seq_printf(m, "Interrupts received: %d\n",
40010- atomic_read(&dev_priv->irq_received));
40011+ atomic_read_unchecked(&dev_priv->irq_received));
40012 for_each_ring(ring, dev_priv, i) {
40013 if (INTEL_INFO(dev)->gen >= 6) {
40014 seq_printf(m,
40015diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40016index e02266a..e3411aa 100644
40017--- a/drivers/gpu/drm/i915/i915_dma.c
40018+++ b/drivers/gpu/drm/i915/i915_dma.c
40019@@ -1271,7 +1271,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40020 bool can_switch;
40021
40022 spin_lock(&dev->count_lock);
40023- can_switch = (dev->open_count == 0);
40024+ can_switch = (local_read(&dev->open_count) == 0);
40025 spin_unlock(&dev->count_lock);
40026 return can_switch;
40027 }
40028diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
40029index 221ac62..f56acc8 100644
40030--- a/drivers/gpu/drm/i915/i915_drv.h
40031+++ b/drivers/gpu/drm/i915/i915_drv.h
40032@@ -1326,7 +1326,7 @@ typedef struct drm_i915_private {
40033 drm_dma_handle_t *status_page_dmah;
40034 struct resource mch_res;
40035
40036- atomic_t irq_received;
40037+ atomic_unchecked_t irq_received;
40038
40039 /* protects the irq masks */
40040 spinlock_t irq_lock;
40041diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40042index a3ba9a8..ee52ddd 100644
40043--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40044+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40045@@ -861,9 +861,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40046
40047 static int
40048 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
40049- int count)
40050+ unsigned int count)
40051 {
40052- int i;
40053+ unsigned int i;
40054 unsigned relocs_total = 0;
40055 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40056
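
validate_exec_list() now takes the count and index as unsigned: a huge userspace-supplied count reinterpreted as a signed int can look negative, make the validation loop exit immediately, and still be used later. A toy demonstration of the comparison difference (the int conversion is implementation-defined but negative on common ABIs, which is the point of the fix):

#include <stdio.h>

int main(void)
{
        unsigned int user_count = 0x80000000u;  /* attacker-chosen */
        int signed_count = (int)user_count;

        /* with a signed index, "0 < count" is already false: the
         * validation loop body never runs, yet count is used later */
        printf("signed loop would run:   %s\n",
               0 < signed_count ? "yes" : "no");
        printf("unsigned loop would run: %s\n",
               0u < user_count ? "yes" : "no");
        return 0;
}
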
40057diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40058index 3c59584..500f2e9 100644
40059--- a/drivers/gpu/drm/i915/i915_ioc32.c
40060+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40061@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40062 (unsigned long)request);
40063 }
40064
40065-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40066+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40067 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40068 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40069 [DRM_I915_GETPARAM] = compat_i915_getparam,
40070@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40071 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40072 {
40073 unsigned int nr = DRM_IOCTL_NR(cmd);
40074- drm_ioctl_compat_t *fn = NULL;
40075 int ret;
40076
40077 if (nr < DRM_COMMAND_BASE)
40078 return drm_compat_ioctl(filp, cmd, arg);
40079
40080- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
40081- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40082-
40083- if (fn != NULL)
40084+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
40085+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40086 ret = (*fn) (filp, cmd, arg);
40087- else
40088+ } else
40089 ret = drm_ioctl(filp, cmd, arg);
40090
40091 return ret;
40092diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
40093index a209177..842a89a 100644
40094--- a/drivers/gpu/drm/i915/i915_irq.c
40095+++ b/drivers/gpu/drm/i915/i915_irq.c
40096@@ -1419,7 +1419,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
40097 int pipe;
40098 u32 pipe_stats[I915_MAX_PIPES];
40099
40100- atomic_inc(&dev_priv->irq_received);
40101+ atomic_inc_unchecked(&dev_priv->irq_received);
40102
40103 while (true) {
40104 iir = I915_READ(VLV_IIR);
40105@@ -1729,7 +1729,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
40106 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
40107 irqreturn_t ret = IRQ_NONE;
40108
40109- atomic_inc(&dev_priv->irq_received);
40110+ atomic_inc_unchecked(&dev_priv->irq_received);
40111
40112 /* We get interrupts on unclaimed registers, so check for this before we
40113 * do any I915_{READ,WRITE}. */
40114@@ -1799,7 +1799,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
40115 uint32_t tmp = 0;
40116 enum pipe pipe;
40117
40118- atomic_inc(&dev_priv->irq_received);
40119+ atomic_inc_unchecked(&dev_priv->irq_received);
40120
40121 master_ctl = I915_READ(GEN8_MASTER_IRQ);
40122 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
40123@@ -2623,7 +2623,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
40124 {
40125 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40126
40127- atomic_set(&dev_priv->irq_received, 0);
40128+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40129
40130 I915_WRITE(HWSTAM, 0xeffe);
40131
40132@@ -2641,7 +2641,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
40133 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40134 int pipe;
40135
40136- atomic_set(&dev_priv->irq_received, 0);
40137+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40138
40139 /* VLV magic */
40140 I915_WRITE(VLV_IMR, 0);
40141@@ -2672,7 +2672,7 @@ static void gen8_irq_preinstall(struct drm_device *dev)
40142 struct drm_i915_private *dev_priv = dev->dev_private;
40143 int pipe;
40144
40145- atomic_set(&dev_priv->irq_received, 0);
40146+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40147
40148 I915_WRITE(GEN8_MASTER_IRQ, 0);
40149 POSTING_READ(GEN8_MASTER_IRQ);
40150@@ -2998,7 +2998,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
40151 if (!dev_priv)
40152 return;
40153
40154- atomic_set(&dev_priv->irq_received, 0);
40155+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40156
40157 I915_WRITE(GEN8_MASTER_IRQ, 0);
40158
40159@@ -3092,7 +3092,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
40160 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40161 int pipe;
40162
40163- atomic_set(&dev_priv->irq_received, 0);
40164+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40165
40166 for_each_pipe(pipe)
40167 I915_WRITE(PIPESTAT(pipe), 0);
40168@@ -3178,7 +3178,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
40169 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
40170 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
40171
40172- atomic_inc(&dev_priv->irq_received);
40173+ atomic_inc_unchecked(&dev_priv->irq_received);
40174
40175 iir = I915_READ16(IIR);
40176 if (iir == 0)
40177@@ -3253,7 +3253,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
40178 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40179 int pipe;
40180
40181- atomic_set(&dev_priv->irq_received, 0);
40182+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40183
40184 if (I915_HAS_HOTPLUG(dev)) {
40185 I915_WRITE(PORT_HOTPLUG_EN, 0);
40186@@ -3360,7 +3360,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
40187 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
40188 int pipe, ret = IRQ_NONE;
40189
40190- atomic_inc(&dev_priv->irq_received);
40191+ atomic_inc_unchecked(&dev_priv->irq_received);
40192
40193 iir = I915_READ(IIR);
40194 do {
40195@@ -3487,7 +3487,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
40196 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
40197 int pipe;
40198
40199- atomic_set(&dev_priv->irq_received, 0);
40200+ atomic_set_unchecked(&dev_priv->irq_received, 0);
40201
40202 I915_WRITE(PORT_HOTPLUG_EN, 0);
40203 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
40204@@ -3603,7 +3603,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
40205 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
40206 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
40207
40208- atomic_inc(&dev_priv->irq_received);
40209+ atomic_inc_unchecked(&dev_priv->irq_received);
40210
40211 iir = I915_READ(IIR);
40212
40213diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40214index 3c5ff7a..ae759ca 100644
40215--- a/drivers/gpu/drm/i915/intel_display.c
40216+++ b/drivers/gpu/drm/i915/intel_display.c
40217@@ -10506,13 +10506,13 @@ struct intel_quirk {
40218 int subsystem_vendor;
40219 int subsystem_device;
40220 void (*hook)(struct drm_device *dev);
40221-};
40222+} __do_const;
40223
40224 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40225 struct intel_dmi_quirk {
40226 void (*hook)(struct drm_device *dev);
40227 const struct dmi_system_id (*dmi_id_list)[];
40228-};
40229+} __do_const;
40230
40231 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40232 {
40233@@ -10520,18 +10520,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40234 return 1;
40235 }
40236
40237-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40238+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40239 {
40240- .dmi_id_list = &(const struct dmi_system_id[]) {
40241- {
40242- .callback = intel_dmi_reverse_brightness,
40243- .ident = "NCR Corporation",
40244- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40245- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40246- },
40247- },
40248- { } /* terminating entry */
40249+ .callback = intel_dmi_reverse_brightness,
40250+ .ident = "NCR Corporation",
40251+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40252+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40253 },
40254+ },
40255+ { } /* terminating entry */
40256+};
40257+
40258+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40259+ {
40260+ .dmi_id_list = &intel_dmi_quirks_table,
40261 .hook = quirk_invert_brightness,
40262 },
40263 };
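Annotation: two related hardening changes in intel_display.c. The quirk structures, which consist only of IDs and a function pointer, are tagged __do_const so the constify GCC plugin shipped with this patch can place their instances in read-only memory; and the DMI match table is hoisted out of an anonymous compound literal into the named file-scope array intel_dmi_quirks_table, since a named static const object can be constified while an in-place compound literal cannot. A hedged sketch of the marker (outside the plugin build it expands to nothing):

    #ifndef __do_const
    #define __do_const   /* real definition: constify plugin attribute */
    #endif

    struct quirk_like {
            int vendor;
            void (*hook)(void *dev);
    } __do_const;        /* all instances become read-only */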
40264diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40265index ca4bc54..ee598a2 100644
40266--- a/drivers/gpu/drm/mga/mga_drv.h
40267+++ b/drivers/gpu/drm/mga/mga_drv.h
40268@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
40269 u32 clear_cmd;
40270 u32 maccess;
40271
40272- atomic_t vbl_received; /**< Number of vblanks received. */
40273+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40274 wait_queue_head_t fence_queue;
40275- atomic_t last_fence_retired;
40276+ atomic_unchecked_t last_fence_retired;
40277 u32 next_fence_to_post;
40278
40279 unsigned int fb_cpp;
40280diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40281index 709e90d..89a1c0d 100644
40282--- a/drivers/gpu/drm/mga/mga_ioc32.c
40283+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40284@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40285 return 0;
40286 }
40287
40288-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40289+drm_ioctl_compat_t mga_compat_ioctls[] = {
40290 [DRM_MGA_INIT] = compat_mga_init,
40291 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40292 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40293@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40294 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40295 {
40296 unsigned int nr = DRM_IOCTL_NR(cmd);
40297- drm_ioctl_compat_t *fn = NULL;
40298 int ret;
40299
40300 if (nr < DRM_COMMAND_BASE)
40301 return drm_compat_ioctl(filp, cmd, arg);
40302
40303- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
40304- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40305-
40306- if (fn != NULL)
40307+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
40308+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40309 ret = (*fn) (filp, cmd, arg);
40310- else
40311+ } else
40312 ret = drm_ioctl(filp, cmd, arg);
40313
40314 return ret;
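Annotation: the mga compat-ioctl table changes from an array of pointers to an array of drm_ioctl_compat_t objects. This relies on a typedef flip made elsewhere in this patch, where drm_ioctl_compat_t itself becomes a pointer type; with that, the table is an ordinary array of function pointers that the constify plugin can make read-only, and the handler lookup moves inside the bounds check so fn is never read from an unvalidated index. Sketch of the assumed typedef (the actual change is in the DRM headers earlier in the patch):

    struct file;    /* illustration only */
    typedef int (*drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
                                      unsigned long arg);

The same rewrite is applied to the r128 and radeon compat-ioctl tables below.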
40315diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40316index 2b0ceb8..517e99e 100644
40317--- a/drivers/gpu/drm/mga/mga_irq.c
40318+++ b/drivers/gpu/drm/mga/mga_irq.c
40319@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40320 if (crtc != 0)
40321 return 0;
40322
40323- return atomic_read(&dev_priv->vbl_received);
40324+ return atomic_read_unchecked(&dev_priv->vbl_received);
40325 }
40326
40327
40328@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
40329 /* VBLANK interrupt */
40330 if (status & MGA_VLINEPEN) {
40331 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40332- atomic_inc(&dev_priv->vbl_received);
40333+ atomic_inc_unchecked(&dev_priv->vbl_received);
40334 drm_handle_vblank(dev, 0);
40335 handled = 1;
40336 }
40337@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
40338 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40339 MGA_WRITE(MGA_PRIMEND, prim_end);
40340
40341- atomic_inc(&dev_priv->last_fence_retired);
40342+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40343 DRM_WAKEUP(&dev_priv->fence_queue);
40344 handled = 1;
40345 }
40346@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40347 * using fences.
40348 */
40349 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
40350- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40351+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40352 - *sequence) <= (1 << 23)));
40353
40354 *sequence = cur_fence;
40355diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40356index 4c3feaa..26391ce 100644
40357--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40358+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40359@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40360 struct bit_table {
40361 const char id;
40362 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40363-};
40364+} __no_const;
40365
40366 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40367
40368diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40369index 4b0fb6c..67667a9 100644
40370--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40371+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40372@@ -96,7 +96,6 @@ struct nouveau_drm {
40373 struct drm_global_reference mem_global_ref;
40374 struct ttm_bo_global_ref bo_global_ref;
40375 struct ttm_bo_device bdev;
40376- atomic_t validate_sequence;
40377 int (*move)(struct nouveau_channel *,
40378 struct ttm_buffer_object *,
40379 struct ttm_mem_reg *, struct ttm_mem_reg *);
40380diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40381index c1a7e5a..38b8539 100644
40382--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40383+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40384@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40385 unsigned long arg)
40386 {
40387 unsigned int nr = DRM_IOCTL_NR(cmd);
40388- drm_ioctl_compat_t *fn = NULL;
40389+ drm_ioctl_compat_t fn = NULL;
40390 int ret;
40391
40392 if (nr < DRM_COMMAND_BASE)
40393diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40394index 19e3757..ad16478 100644
40395--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40396+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40397@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40398 }
40399
40400 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40401- nouveau_vram_manager_init,
40402- nouveau_vram_manager_fini,
40403- nouveau_vram_manager_new,
40404- nouveau_vram_manager_del,
40405- nouveau_vram_manager_debug
40406+ .init = nouveau_vram_manager_init,
40407+ .takedown = nouveau_vram_manager_fini,
40408+ .get_node = nouveau_vram_manager_new,
40409+ .put_node = nouveau_vram_manager_del,
40410+ .debug = nouveau_vram_manager_debug
40411 };
40412
40413 static int
40414@@ -198,11 +198,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40415 }
40416
40417 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40418- nouveau_gart_manager_init,
40419- nouveau_gart_manager_fini,
40420- nouveau_gart_manager_new,
40421- nouveau_gart_manager_del,
40422- nouveau_gart_manager_debug
40423+ .init = nouveau_gart_manager_init,
40424+ .takedown = nouveau_gart_manager_fini,
40425+ .get_node = nouveau_gart_manager_new,
40426+ .put_node = nouveau_gart_manager_del,
40427+ .debug = nouveau_gart_manager_debug
40428 };
40429
40430 #include <core/subdev/vm/nv04.h>
40431@@ -270,11 +270,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40432 }
40433
40434 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40435- nv04_gart_manager_init,
40436- nv04_gart_manager_fini,
40437- nv04_gart_manager_new,
40438- nv04_gart_manager_del,
40439- nv04_gart_manager_debug
40440+ .init = nv04_gart_manager_init,
40441+ .takedown = nv04_gart_manager_fini,
40442+ .get_node = nv04_gart_manager_new,
40443+ .put_node = nv04_gart_manager_del,
40444+ .debug = nv04_gart_manager_debug
40445 };
40446
40447 int
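Annotation: the three TTM memory-manager ops tables above switch from positional to designated initializers. Positional initialization silently misassigns members if the structure layout ever changes, and it is incompatible with the randomize_layout plugin this patch adds, which may reorder struct fields at build time. A self-contained sketch of the difference:

    struct mem_func {
            int  (*init)(void *man);
            int  (*takedown)(void *man);
            void (*debug)(void *man, const char *prefix);
    };

    static int  my_init(void *man)     { (void)man; return 0; }
    static int  my_takedown(void *man) { (void)man; return 0; }
    static void my_debug(void *man, const char *p) { (void)man; (void)p; }

    /* positional: breaks silently if fields are reordered
     * static const struct mem_func f = { my_init, my_takedown, my_debug };
     */

    /* designated: robust against reordering, used throughout this patch */
    static const struct mem_func f = {
            .init     = my_init,
            .takedown = my_takedown,
            .debug    = my_debug,
    };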
40448diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40449index 81638d7..2e45854 100644
40450--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40451+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40452@@ -65,7 +65,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40453 bool can_switch;
40454
40455 spin_lock(&dev->count_lock);
40456- can_switch = (dev->open_count == 0);
40457+ can_switch = (local_read(&dev->open_count) == 0);
40458 spin_unlock(&dev->count_lock);
40459 return can_switch;
40460 }
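Annotation: the open_count read goes through local_read() because elsewhere in this patch struct drm_device::open_count is converted from a plain int to a local_t, a CPU-local atomic counter type; every reader must then use the accessor rather than touching the field directly. The same conversion appears again below in radeon_device.c.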
40461diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40462index eb89653..613cf71 100644
40463--- a/drivers/gpu/drm/qxl/qxl_cmd.c
40464+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40465@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40466 int ret;
40467
40468 mutex_lock(&qdev->async_io_mutex);
40469- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40470+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40471 if (qdev->last_sent_io_cmd > irq_num) {
40472 if (intr)
40473 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40474- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40475+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40476 else
40477 ret = wait_event_timeout(qdev->io_cmd_event,
40478- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40479+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40480 /* 0 is timeout, just bail the "hw" has gone away */
40481 if (ret <= 0)
40482 goto out;
40483- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40484+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40485 }
40486 outb(val, addr);
40487 qdev->last_sent_io_cmd = irq_num + 1;
40488 if (intr)
40489 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40490- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40491+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40492 else
40493 ret = wait_event_timeout(qdev->io_cmd_event,
40494- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40495+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40496 out:
40497 if (ret > 0)
40498 ret = 0;
40499diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40500index c3c2bbd..bc3c0fb 100644
40501--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40502+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40503@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40504 struct drm_info_node *node = (struct drm_info_node *) m->private;
40505 struct qxl_device *qdev = node->minor->dev->dev_private;
40506
40507- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40508- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40509- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40510- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40511+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40512+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40513+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40514+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40515 seq_printf(m, "%d\n", qdev->irq_received_error);
40516 return 0;
40517 }
40518diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40519index 7bda32f..dd98fc5 100644
40520--- a/drivers/gpu/drm/qxl/qxl_drv.h
40521+++ b/drivers/gpu/drm/qxl/qxl_drv.h
40522@@ -290,10 +290,10 @@ struct qxl_device {
40523 unsigned int last_sent_io_cmd;
40524
40525 /* interrupt handling */
40526- atomic_t irq_received;
40527- atomic_t irq_received_display;
40528- atomic_t irq_received_cursor;
40529- atomic_t irq_received_io_cmd;
40530+ atomic_unchecked_t irq_received;
40531+ atomic_unchecked_t irq_received_display;
40532+ atomic_unchecked_t irq_received_cursor;
40533+ atomic_unchecked_t irq_received_io_cmd;
40534 unsigned irq_received_error;
40535 wait_queue_head_t display_event;
40536 wait_queue_head_t cursor_event;
40537diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
40538index 7b95c75..9cffb4f 100644
40539--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
40540+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
40541@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40542
40543 /* TODO copy slow path code from i915 */
40544 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
40545- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
40546+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
40547
40548 {
40549 struct qxl_drawable *draw = fb_cmd;
40550@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
40551 struct drm_qxl_reloc reloc;
40552
40553 if (DRM_COPY_FROM_USER(&reloc,
40554- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
40555+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
40556 sizeof(reloc))) {
40557 ret = -EFAULT;
40558 goto out_free_bos;
40559@@ -297,7 +297,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
40560 struct drm_qxl_command *commands =
40561 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
40562
40563- if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
40564+ if (DRM_COPY_FROM_USER(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
40565 sizeof(user_cmd)))
40566 return -EFAULT;
40567
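Annotation: the qxl ioctl path gains __force_user casts. These are sparse address-space annotations: the PaX patch strengthens the __user/__kernel separation checks, and pointers that arrive as plain integers from userspace (cmd->command, cmd->relocs, execbuffer->commands) must be explicitly re-blessed as user pointers before being handed to the copy routines. A sketch of the assumed definitions, following the kernel's sparse conventions:

    #ifdef __CHECKER__
    # define __user       __attribute__((noderef, address_space(1)))
    # define __force      __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    /* added by this patch: a deliberate cast into the user address space */
    #define __force_user  __force __user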
40568diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
40569index 21393dc..329f3a9 100644
40570--- a/drivers/gpu/drm/qxl/qxl_irq.c
40571+++ b/drivers/gpu/drm/qxl/qxl_irq.c
40572@@ -33,19 +33,19 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
40573
40574 pending = xchg(&qdev->ram_header->int_pending, 0);
40575
40576- atomic_inc(&qdev->irq_received);
40577+ atomic_inc_unchecked(&qdev->irq_received);
40578
40579 if (pending & QXL_INTERRUPT_DISPLAY) {
40580- atomic_inc(&qdev->irq_received_display);
40581+ atomic_inc_unchecked(&qdev->irq_received_display);
40582 wake_up_all(&qdev->display_event);
40583 qxl_queue_garbage_collect(qdev, false);
40584 }
40585 if (pending & QXL_INTERRUPT_CURSOR) {
40586- atomic_inc(&qdev->irq_received_cursor);
40587+ atomic_inc_unchecked(&qdev->irq_received_cursor);
40588 wake_up_all(&qdev->cursor_event);
40589 }
40590 if (pending & QXL_INTERRUPT_IO_CMD) {
40591- atomic_inc(&qdev->irq_received_io_cmd);
40592+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
40593 wake_up_all(&qdev->io_cmd_event);
40594 }
40595 if (pending & QXL_INTERRUPT_ERROR) {
40596@@ -82,10 +82,10 @@ int qxl_irq_init(struct qxl_device *qdev)
40597 init_waitqueue_head(&qdev->io_cmd_event);
40598 INIT_WORK(&qdev->client_monitors_config_work,
40599 qxl_client_monitors_config_work_func);
40600- atomic_set(&qdev->irq_received, 0);
40601- atomic_set(&qdev->irq_received_display, 0);
40602- atomic_set(&qdev->irq_received_cursor, 0);
40603- atomic_set(&qdev->irq_received_io_cmd, 0);
40604+ atomic_set_unchecked(&qdev->irq_received, 0);
40605+ atomic_set_unchecked(&qdev->irq_received_display, 0);
40606+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
40607+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
40608 qdev->irq_received_error = 0;
40609 ret = drm_irq_install(qdev->ddev);
40610 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
40611diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
40612index c7e7e65..7dddd4d 100644
40613--- a/drivers/gpu/drm/qxl/qxl_ttm.c
40614+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
40615@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
40616 }
40617 }
40618
40619-static struct vm_operations_struct qxl_ttm_vm_ops;
40620+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
40621 static const struct vm_operations_struct *ttm_vm_ops;
40622
40623 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40624@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
40625 return r;
40626 if (unlikely(ttm_vm_ops == NULL)) {
40627 ttm_vm_ops = vma->vm_ops;
40628+ pax_open_kernel();
40629 qxl_ttm_vm_ops = *ttm_vm_ops;
40630 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
40631+ pax_close_kernel();
40632 }
40633 vma->vm_ops = &qxl_ttm_vm_ops;
40634 return 0;
40635@@ -560,25 +562,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
40636 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
40637 {
40638 #if defined(CONFIG_DEBUG_FS)
40639- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
40640- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
40641- unsigned i;
40642+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
40643+ {
40644+ .name = "qxl_mem_mm",
40645+ .show = &qxl_mm_dump_table,
40646+ },
40647+ {
40648+ .name = "qxl_surf_mm",
40649+ .show = &qxl_mm_dump_table,
40650+ }
40651+ };
40652
40653- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
40654- if (i == 0)
40655- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
40656- else
40657- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
40658- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
40659- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
40660- qxl_mem_types_list[i].driver_features = 0;
40661- if (i == 0)
40662- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40663- else
40664- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40665+ pax_open_kernel();
40666+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
40667+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
40668+ pax_close_kernel();
40669
40670- }
40671- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
40672+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
40673 #else
40674 return 0;
40675 #endif
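Annotation: qxl_ttm.c shows the standard pattern this patch uses for data that is almost always read-only. qxl_ttm_vm_ops is declared __read_only (placed in a write-protected section), and its one-time runtime initialization is bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86, by toggling CR0.WP). Likewise the debugfs table is now initialized statically, so only the two .data pointers need the open/close window. A hedged sketch of the pattern, assuming the patch-provided primitives:

    static int setting __read_only;     /* write-protected after boot */

    static void set_once(int v)
    {
            pax_open_kernel();          /* allow kernel writes */
            *(int *)&setting = v;
            pax_close_kernel();         /* re-protect */
    }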
40676diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
40677index c451257..0ad2134 100644
40678--- a/drivers/gpu/drm/r128/r128_cce.c
40679+++ b/drivers/gpu/drm/r128/r128_cce.c
40680@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
40681
40682 /* GH: Simple idle check.
40683 */
40684- atomic_set(&dev_priv->idle_count, 0);
40685+ atomic_set_unchecked(&dev_priv->idle_count, 0);
40686
40687 /* We don't support anything other than bus-mastering ring mode,
40688 * but the ring can be in either AGP or PCI space for the ring
40689diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
40690index 56eb5e3..c4ec43d 100644
40691--- a/drivers/gpu/drm/r128/r128_drv.h
40692+++ b/drivers/gpu/drm/r128/r128_drv.h
40693@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
40694 int is_pci;
40695 unsigned long cce_buffers_offset;
40696
40697- atomic_t idle_count;
40698+ atomic_unchecked_t idle_count;
40699
40700 int page_flipping;
40701 int current_page;
40702 u32 crtc_offset;
40703 u32 crtc_offset_cntl;
40704
40705- atomic_t vbl_received;
40706+ atomic_unchecked_t vbl_received;
40707
40708 u32 color_fmt;
40709 unsigned int front_offset;
40710diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
40711index a954c54..9cc595c 100644
40712--- a/drivers/gpu/drm/r128/r128_ioc32.c
40713+++ b/drivers/gpu/drm/r128/r128_ioc32.c
40714@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
40715 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
40716 }
40717
40718-drm_ioctl_compat_t *r128_compat_ioctls[] = {
40719+drm_ioctl_compat_t r128_compat_ioctls[] = {
40720 [DRM_R128_INIT] = compat_r128_init,
40721 [DRM_R128_DEPTH] = compat_r128_depth,
40722 [DRM_R128_STIPPLE] = compat_r128_stipple,
40723@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
40724 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40725 {
40726 unsigned int nr = DRM_IOCTL_NR(cmd);
40727- drm_ioctl_compat_t *fn = NULL;
40728 int ret;
40729
40730 if (nr < DRM_COMMAND_BASE)
40731 return drm_compat_ioctl(filp, cmd, arg);
40732
40733- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
40734- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
40735-
40736- if (fn != NULL)
40737+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
40738+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
40739 ret = (*fn) (filp, cmd, arg);
40740- else
40741+ } else
40742 ret = drm_ioctl(filp, cmd, arg);
40743
40744 return ret;
40745diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
40746index 2ea4f09..d391371 100644
40747--- a/drivers/gpu/drm/r128/r128_irq.c
40748+++ b/drivers/gpu/drm/r128/r128_irq.c
40749@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
40750 if (crtc != 0)
40751 return 0;
40752
40753- return atomic_read(&dev_priv->vbl_received);
40754+ return atomic_read_unchecked(&dev_priv->vbl_received);
40755 }
40756
40757 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
40758@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
40759 /* VBLANK interrupt */
40760 if (status & R128_CRTC_VBLANK_INT) {
40761 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
40762- atomic_inc(&dev_priv->vbl_received);
40763+ atomic_inc_unchecked(&dev_priv->vbl_received);
40764 drm_handle_vblank(dev, 0);
40765 return IRQ_HANDLED;
40766 }
40767diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
40768index 01dd9ae..6352f04 100644
40769--- a/drivers/gpu/drm/r128/r128_state.c
40770+++ b/drivers/gpu/drm/r128/r128_state.c
40771@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
40772
40773 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
40774 {
40775- if (atomic_read(&dev_priv->idle_count) == 0)
40776+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
40777 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
40778 else
40779- atomic_set(&dev_priv->idle_count, 0);
40780+ atomic_set_unchecked(&dev_priv->idle_count, 0);
40781 }
40782
40783 #endif
40784diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
40785index af85299..ed9ac8d 100644
40786--- a/drivers/gpu/drm/radeon/mkregtable.c
40787+++ b/drivers/gpu/drm/radeon/mkregtable.c
40788@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
40789 regex_t mask_rex;
40790 regmatch_t match[4];
40791 char buf[1024];
40792- size_t end;
40793+ long end;
40794 int len;
40795 int done = 0;
40796 int r;
40797 unsigned o;
40798 struct offset *offset;
40799 char last_reg_s[10];
40800- int last_reg;
40801+ unsigned long last_reg;
40802
40803 if (regcomp
40804 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
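Annotation: in the host-side mkregtable generator, end widens from size_t to long and last_reg from int to unsigned long. These are hedged defensive changes against unsigned wraparound and truncation in the parser's offset arithmetic: a signed type keeps decrement loops from wrapping past zero, and the wider unsigned type keeps large register offsets from truncating. An illustration of the underlying hazard:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            size_t end = strlen("");    /* 0 */
            end -= 1;                   /* wraps to SIZE_MAX, not -1 */
            printf("%zu\n", end);

            long send = 0;
            send -= 1;                  /* stays -1; loop guards behave */
            printf("%ld\n", send);
            return 0;
    }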
40805diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
40806index 39b033b..6efc056 100644
40807--- a/drivers/gpu/drm/radeon/radeon_device.c
40808+++ b/drivers/gpu/drm/radeon/radeon_device.c
40809@@ -1120,7 +1120,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
40810 bool can_switch;
40811
40812 spin_lock(&dev->count_lock);
40813- can_switch = (dev->open_count == 0);
40814+ can_switch = (local_read(&dev->open_count) == 0);
40815 spin_unlock(&dev->count_lock);
40816 return can_switch;
40817 }
40818diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
40819index 00e0d44..08381a4 100644
40820--- a/drivers/gpu/drm/radeon/radeon_drv.h
40821+++ b/drivers/gpu/drm/radeon/radeon_drv.h
40822@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
40823
40824 /* SW interrupt */
40825 wait_queue_head_t swi_queue;
40826- atomic_t swi_emitted;
40827+ atomic_unchecked_t swi_emitted;
40828 int vblank_crtc;
40829 uint32_t irq_enable_reg;
40830 uint32_t r500_disp_irq_reg;
40831diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
40832index bdb0f93..5ff558f 100644
40833--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
40834+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
40835@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
40836 request = compat_alloc_user_space(sizeof(*request));
40837 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
40838 || __put_user(req32.param, &request->param)
40839- || __put_user((void __user *)(unsigned long)req32.value,
40840+ || __put_user((unsigned long)req32.value,
40841 &request->value))
40842 return -EFAULT;
40843
40844@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
40845 #define compat_radeon_cp_setparam NULL
40846 #endif /* X86_64 || IA64 */
40847
40848-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
40849+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
40850 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
40851 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
40852 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
40853@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
40854 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40855 {
40856 unsigned int nr = DRM_IOCTL_NR(cmd);
40857- drm_ioctl_compat_t *fn = NULL;
40858 int ret;
40859
40860 if (nr < DRM_COMMAND_BASE)
40861 return drm_compat_ioctl(filp, cmd, arg);
40862
40863- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
40864- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
40865-
40866- if (fn != NULL)
40867+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
40868+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
40869 ret = (*fn) (filp, cmd, arg);
40870- else
40871+ } else
40872 ret = drm_ioctl(filp, cmd, arg);
40873
40874 return ret;
40875diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
40876index 8d68e97..9dcfed8 100644
40877--- a/drivers/gpu/drm/radeon/radeon_irq.c
40878+++ b/drivers/gpu/drm/radeon/radeon_irq.c
40879@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
40880 unsigned int ret;
40881 RING_LOCALS;
40882
40883- atomic_inc(&dev_priv->swi_emitted);
40884- ret = atomic_read(&dev_priv->swi_emitted);
40885+ atomic_inc_unchecked(&dev_priv->swi_emitted);
40886+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
40887
40888 BEGIN_RING(4);
40889 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
40890@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
40891 drm_radeon_private_t *dev_priv =
40892 (drm_radeon_private_t *) dev->dev_private;
40893
40894- atomic_set(&dev_priv->swi_emitted, 0);
40895+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
40896 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
40897
40898 dev->max_vblank_count = 0x001fffff;
40899diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
40900index 4d20910..6726b6d 100644
40901--- a/drivers/gpu/drm/radeon/radeon_state.c
40902+++ b/drivers/gpu/drm/radeon/radeon_state.c
40903@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
40904 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
40905 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
40906
40907- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
40908+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
40909 sarea_priv->nbox * sizeof(depth_boxes[0])))
40910 return -EFAULT;
40911
40912@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
40913 {
40914 drm_radeon_private_t *dev_priv = dev->dev_private;
40915 drm_radeon_getparam_t *param = data;
40916- int value;
40917+ int value = 0;
40918
40919 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
40920
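Annotation: two defensive fixes in radeon_state.c. The clear ioctl re-checks sarea_priv->nbox at the point of use, because the SAREA is a shared, writable mapping and userspace could re-grow nbox between the clamp and the copy (a classic time-of-check/time-of-use window). And getparam initializes value to 0 so that any switch arm that fails to set it cannot leak stale kernel stack bytes back to userspace. A sketch of the infoleak pattern the zero-initialization closes:

    int get_param(unsigned int param, int *out)
    {
            int value;                  /* the fix: int value = 0; */
            switch (param) {
            case 0:
                    value = 42;
                    break;
            default:
                    break;              /* value left uninitialized */
            }
            *out = value;               /* stale stack data escapes */
            return 0;
    }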
40921diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
40922index 84323c9..cf07baf 100644
40923--- a/drivers/gpu/drm/radeon/radeon_ttm.c
40924+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
40925@@ -787,7 +787,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
40926 man->size = size >> PAGE_SHIFT;
40927 }
40928
40929-static struct vm_operations_struct radeon_ttm_vm_ops;
40930+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
40931 static const struct vm_operations_struct *ttm_vm_ops = NULL;
40932
40933 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
40934@@ -828,8 +828,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
40935 }
40936 if (unlikely(ttm_vm_ops == NULL)) {
40937 ttm_vm_ops = vma->vm_ops;
40938+ pax_open_kernel();
40939 radeon_ttm_vm_ops = *ttm_vm_ops;
40940 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
40941+ pax_close_kernel();
40942 }
40943 vma->vm_ops = &radeon_ttm_vm_ops;
40944 return 0;
40945@@ -858,38 +860,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
40946 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
40947 {
40948 #if defined(CONFIG_DEBUG_FS)
40949- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
40950- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
40951- unsigned i;
40952+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
40953+ {
40954+ .name = "radeon_vram_mm",
40955+ .show = &radeon_mm_dump_table,
40956+ },
40957+ {
40958+ .name = "radeon_gtt_mm",
40959+ .show = &radeon_mm_dump_table,
40960+ },
40961+ {
40962+ .name = "ttm_page_pool",
40963+ .show = &ttm_page_alloc_debugfs,
40964+ },
40965+ {
40966+ .name = "ttm_dma_page_pool",
40967+ .show = &ttm_dma_page_alloc_debugfs,
40968+ },
40969+ };
40970+ unsigned i = RADEON_DEBUGFS_MEM_TYPES + 1;
40971
40972- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
40973- if (i == 0)
40974- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
40975- else
40976- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
40977- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
40978- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
40979- radeon_mem_types_list[i].driver_features = 0;
40980- if (i == 0)
40981- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
40982- else
40983- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
40984-
40985- }
40986- /* Add ttm page pool to debugfs */
40987- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
40988- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
40989- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
40990- radeon_mem_types_list[i].driver_features = 0;
40991- radeon_mem_types_list[i++].data = NULL;
40992+ pax_open_kernel();
40993+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
40994+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
40995+ pax_close_kernel();
40996 #ifdef CONFIG_SWIOTLB
40997- if (swiotlb_nr_tbl()) {
40998- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
40999- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
41000- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
41001- radeon_mem_types_list[i].driver_features = 0;
41002- radeon_mem_types_list[i++].data = NULL;
41003- }
41004+ if (swiotlb_nr_tbl())
41005+ i++;
41006 #endif
41007 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
41008
41009diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41010index ae1cb31..5b5b6b7c 100644
41011--- a/drivers/gpu/drm/tegra/dc.c
41012+++ b/drivers/gpu/drm/tegra/dc.c
41013@@ -1064,7 +1064,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41014 }
41015
41016 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41017- dc->debugfs_files[i].data = dc;
41018+ *(void **)&dc->debugfs_files[i].data = dc;
41019
41020 err = drm_debugfs_create_files(dc->debugfs_files,
41021 ARRAY_SIZE(debugfs_files),
41022diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41023index 0cd9bc2..9759be4 100644
41024--- a/drivers/gpu/drm/tegra/hdmi.c
41025+++ b/drivers/gpu/drm/tegra/hdmi.c
41026@@ -57,7 +57,7 @@ struct tegra_hdmi {
41027 bool stereo;
41028 bool dvi;
41029
41030- struct drm_info_list *debugfs_files;
41031+ drm_info_list_no_const *debugfs_files;
41032 struct drm_minor *minor;
41033 struct dentry *debugfs;
41034 };
41035diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41036index c58eba33..83c2728 100644
41037--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41038+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41039@@ -141,10 +141,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41040 }
41041
41042 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41043- ttm_bo_man_init,
41044- ttm_bo_man_takedown,
41045- ttm_bo_man_get_node,
41046- ttm_bo_man_put_node,
41047- ttm_bo_man_debug
41048+ .init = ttm_bo_man_init,
41049+ .takedown = ttm_bo_man_takedown,
41050+ .get_node = ttm_bo_man_get_node,
41051+ .put_node = ttm_bo_man_put_node,
41052+ .debug = ttm_bo_man_debug
41053 };
41054 EXPORT_SYMBOL(ttm_bo_manager_func);
41055diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41056index dbc2def..0a9f710 100644
41057--- a/drivers/gpu/drm/ttm/ttm_memory.c
41058+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41059@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41060 zone->glob = glob;
41061 glob->zone_kernel = zone;
41062 ret = kobject_init_and_add(
41063- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41064+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41065 if (unlikely(ret != 0)) {
41066 kobject_put(&zone->kobj);
41067 return ret;
41068@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41069 zone->glob = glob;
41070 glob->zone_dma32 = zone;
41071 ret = kobject_init_and_add(
41072- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41073+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41074 if (unlikely(ret != 0)) {
41075 kobject_put(&zone->kobj);
41076 return ret;
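Annotation: both TTM zone registrations now pass "%s", zone->name instead of zone->name itself. kobject_init_and_add() takes a printf-style format, so handing it variable data directly would interpret any '%' sequence in the name, a format-string bug. The general rule, in miniature:

    #include <stdio.h>

    void log_name(const char *name)
    {
            /* printf(name);        bad: '%' in name is interpreted */
            printf("%s", name);  /* good: name is treated as data  */
    }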
41077diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41078index 863bef9..cba15cf 100644
41079--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41080+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41081@@ -391,9 +391,9 @@ out:
41082 static unsigned long
41083 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41084 {
41085- static atomic_t start_pool = ATOMIC_INIT(0);
41086+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
41087 unsigned i;
41088- unsigned pool_offset = atomic_add_return(1, &start_pool);
41089+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
41090 struct ttm_page_pool *pool;
41091 int shrink_pages = sc->nr_to_scan;
41092 unsigned long freed = 0;
41093diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41094index 97e9d61..bf23c461 100644
41095--- a/drivers/gpu/drm/udl/udl_fb.c
41096+++ b/drivers/gpu/drm/udl/udl_fb.c
41097@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41098 fb_deferred_io_cleanup(info);
41099 kfree(info->fbdefio);
41100 info->fbdefio = NULL;
41101- info->fbops->fb_mmap = udl_fb_mmap;
41102 }
41103
41104 pr_warn("released /dev/fb%d user=%d count=%d\n",
41105diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41106index a811ef2..ff99b05 100644
41107--- a/drivers/gpu/drm/via/via_drv.h
41108+++ b/drivers/gpu/drm/via/via_drv.h
41109@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
41110 typedef uint32_t maskarray_t[5];
41111
41112 typedef struct drm_via_irq {
41113- atomic_t irq_received;
41114+ atomic_unchecked_t irq_received;
41115 uint32_t pending_mask;
41116 uint32_t enable_mask;
41117 wait_queue_head_t irq_queue;
41118@@ -75,7 +75,7 @@ typedef struct drm_via_private {
41119 struct timeval last_vblank;
41120 int last_vblank_valid;
41121 unsigned usec_per_vblank;
41122- atomic_t vbl_received;
41123+ atomic_unchecked_t vbl_received;
41124 drm_via_state_t hc_state;
41125 char pci_buf[VIA_PCI_BUF_SIZE];
41126 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41127diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41128index ac98964..5dbf512 100644
41129--- a/drivers/gpu/drm/via/via_irq.c
41130+++ b/drivers/gpu/drm/via/via_irq.c
41131@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41132 if (crtc != 0)
41133 return 0;
41134
41135- return atomic_read(&dev_priv->vbl_received);
41136+ return atomic_read_unchecked(&dev_priv->vbl_received);
41137 }
41138
41139 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
41140@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
41141
41142 status = VIA_READ(VIA_REG_INTERRUPT);
41143 if (status & VIA_IRQ_VBLANK_PENDING) {
41144- atomic_inc(&dev_priv->vbl_received);
41145- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41146+ atomic_inc_unchecked(&dev_priv->vbl_received);
41147+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41148 do_gettimeofday(&cur_vblank);
41149 if (dev_priv->last_vblank_valid) {
41150 dev_priv->usec_per_vblank =
41151@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
41152 dev_priv->last_vblank = cur_vblank;
41153 dev_priv->last_vblank_valid = 1;
41154 }
41155- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41156+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41157 DRM_DEBUG("US per vblank is: %u\n",
41158 dev_priv->usec_per_vblank);
41159 }
41160@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
41161
41162 for (i = 0; i < dev_priv->num_irqs; ++i) {
41163 if (status & cur_irq->pending_mask) {
41164- atomic_inc(&cur_irq->irq_received);
41165+ atomic_inc_unchecked(&cur_irq->irq_received);
41166 DRM_WAKEUP(&cur_irq->irq_queue);
41167 handled = 1;
41168 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41169@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41170 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
41171 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41172 masks[irq][4]));
41173- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41174+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41175 } else {
41176 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
41177 (((cur_irq_sequence =
41178- atomic_read(&cur_irq->irq_received)) -
41179+ atomic_read_unchecked(&cur_irq->irq_received)) -
41180 *sequence) <= (1 << 23)));
41181 }
41182 *sequence = cur_irq_sequence;
41183@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41184 }
41185
41186 for (i = 0; i < dev_priv->num_irqs; ++i) {
41187- atomic_set(&cur_irq->irq_received, 0);
41188+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41189 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41190 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41191 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
41192@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41193 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41194 case VIA_IRQ_RELATIVE:
41195 irqwait->request.sequence +=
41196- atomic_read(&cur_irq->irq_received);
41197+ atomic_read_unchecked(&cur_irq->irq_received);
41198 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41199 case VIA_IRQ_ABSOLUTE:
41200 break;
41201diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41202index c0b73b9..f6f7f34 100644
41203--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41204+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41205@@ -341,7 +341,7 @@ struct vmw_private {
41206 * Fencing and IRQs.
41207 */
41208
41209- atomic_t marker_seq;
41210+ atomic_unchecked_t marker_seq;
41211 wait_queue_head_t fence_queue;
41212 wait_queue_head_t fifo_queue;
41213 int fence_queue_waiters; /* Protected by hw_mutex */
41214diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41215index 3eb1486..0a47ee9 100644
41216--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41217+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41218@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41219 (unsigned int) min,
41220 (unsigned int) fifo->capabilities);
41221
41222- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41223+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41224 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41225 vmw_marker_queue_init(&fifo->marker_queue);
41226 return vmw_fifo_send_fence(dev_priv, &dummy);
41227@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41228 if (reserveable)
41229 iowrite32(bytes, fifo_mem +
41230 SVGA_FIFO_RESERVED);
41231- return fifo_mem + (next_cmd >> 2);
41232+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41233 } else {
41234 need_bounce = true;
41235 }
41236@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41237
41238 fm = vmw_fifo_reserve(dev_priv, bytes);
41239 if (unlikely(fm == NULL)) {
41240- *seqno = atomic_read(&dev_priv->marker_seq);
41241+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41242 ret = -ENOMEM;
41243 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41244 false, 3*HZ);
41245@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41246 }
41247
41248 do {
41249- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41250+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41251 } while (*seqno == 0);
41252
41253 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41254diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41255index c5c054a..46f0548 100644
41256--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41257+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41258@@ -153,9 +153,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41259 }
41260
41261 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41262- vmw_gmrid_man_init,
41263- vmw_gmrid_man_takedown,
41264- vmw_gmrid_man_get_node,
41265- vmw_gmrid_man_put_node,
41266- vmw_gmrid_man_debug
41267+ .init = vmw_gmrid_man_init,
41268+ .takedown = vmw_gmrid_man_takedown,
41269+ .get_node = vmw_gmrid_man_get_node,
41270+ .put_node = vmw_gmrid_man_put_node,
41271+ .debug = vmw_gmrid_man_debug
41272 };
41273diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41274index 45d5b5a..f3f5e4e 100644
41275--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41276+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41277@@ -141,7 +141,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41278 int ret;
41279
41280 num_clips = arg->num_clips;
41281- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41282+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41283
41284 if (unlikely(num_clips == 0))
41285 return 0;
41286@@ -225,7 +225,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41287 int ret;
41288
41289 num_clips = arg->num_clips;
41290- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41291+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41292
41293 if (unlikely(num_clips == 0))
41294 return 0;
41295diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41296index 4640adb..e1384ed 100644
41297--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41298+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41299@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41300 * emitted. Then the fence is stale and signaled.
41301 */
41302
41303- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41304+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41305 > VMW_FENCE_WRAP);
41306
41307 return ret;
41308@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41309
41310 if (fifo_idle)
41311 down_read(&fifo_state->rwsem);
41312- signal_seq = atomic_read(&dev_priv->marker_seq);
41313+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41314 ret = 0;
41315
41316 for (;;) {
41317diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41318index 8a8725c2..afed796 100644
41319--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41320+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41321@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41322 while (!vmw_lag_lt(queue, us)) {
41323 spin_lock(&queue->lock);
41324 if (list_empty(&queue->head))
41325- seqno = atomic_read(&dev_priv->marker_seq);
41326+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41327 else {
41328 marker = list_first_entry(&queue->head,
41329 struct vmw_marker, head);
41330diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41331index ec0ae2d..dc0780b 100644
41332--- a/drivers/gpu/vga/vga_switcheroo.c
41333+++ b/drivers/gpu/vga/vga_switcheroo.c
41334@@ -643,7 +643,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41335
41336 /* this version is for the case where the power switch is separate
41337 to the device being powered down. */
41338-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41339+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41340 {
41341 /* copy over all the bus versions */
41342 if (dev->bus && dev->bus->pm) {
41343@@ -688,7 +688,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41344 return ret;
41345 }
41346
41347-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41348+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41349 {
41350 /* copy over all the bus versions */
41351 if (dev->bus && dev->bus->pm) {
41352diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41353index 253fe23..0dfec5f 100644
41354--- a/drivers/hid/hid-core.c
41355+++ b/drivers/hid/hid-core.c
41356@@ -2416,7 +2416,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41357
41358 int hid_add_device(struct hid_device *hdev)
41359 {
41360- static atomic_t id = ATOMIC_INIT(0);
41361+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41362 int ret;
41363
41364 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41365@@ -2450,7 +2450,7 @@ int hid_add_device(struct hid_device *hdev)
41366 /* XXX hack, any other cleaner solution after the driver core
41367 * is converted to allow more than 20 bytes as the device name? */
41368 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41369- hdev->vendor, hdev->product, atomic_inc_return(&id));
41370+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41371
41372 hid_debug_register(hdev, dev_name(&hdev->dev));
41373 ret = device_add(&hdev->dev);
41374diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41375index c13fb5b..55a3802 100644
41376--- a/drivers/hid/hid-wiimote-debug.c
41377+++ b/drivers/hid/hid-wiimote-debug.c
41378@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41379 else if (size == 0)
41380 return -EIO;
41381
41382- if (copy_to_user(u, buf, size))
41383+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41384 return -EFAULT;
41385
41386 *off += size;
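Annotation: the wiimote eeprom read copies from a fixed on-stack buffer; clamping size against sizeof(buf) before copy_to_user() ensures a caller-supplied length can never walk the copy off the end of the stack buffer, independent of how size was computed above.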
41387diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
41388index cedc6da..2c3da2a 100644
41389--- a/drivers/hid/uhid.c
41390+++ b/drivers/hid/uhid.c
41391@@ -47,7 +47,7 @@ struct uhid_device {
41392 struct mutex report_lock;
41393 wait_queue_head_t report_wait;
41394 atomic_t report_done;
41395- atomic_t report_id;
41396+ atomic_unchecked_t report_id;
41397 struct uhid_event report_buf;
41398 };
41399
41400@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
41401
41402 spin_lock_irqsave(&uhid->qlock, flags);
41403 ev->type = UHID_FEATURE;
41404- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
41405+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
41406 ev->u.feature.rnum = rnum;
41407 ev->u.feature.rtype = report_type;
41408
41409@@ -446,7 +446,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
41410 spin_lock_irqsave(&uhid->qlock, flags);
41411
41412 /* id for old report; drop it silently */
41413- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
41414+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
41415 goto unlock;
41416 if (atomic_read(&uhid->report_done))
41417 goto unlock;
41418diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41419index cea623c..73011b0 100644
41420--- a/drivers/hv/channel.c
41421+++ b/drivers/hv/channel.c
41422@@ -362,8 +362,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41423 int ret = 0;
41424 int t;
41425
41426- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41427- atomic_inc(&vmbus_connection.next_gpadl_handle);
41428+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41429+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41430
41431 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41432 if (ret)
41433diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41434index f0c5e07..399256e 100644
41435--- a/drivers/hv/hv.c
41436+++ b/drivers/hv/hv.c
41437@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41438 u64 output_address = (output) ? virt_to_phys(output) : 0;
41439 u32 output_address_hi = output_address >> 32;
41440 u32 output_address_lo = output_address & 0xFFFFFFFF;
41441- void *hypercall_page = hv_context.hypercall_page;
41442+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
41443
41444 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
41445 "=a"(hv_status_lo) : "d" (control_hi),
41446diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
41447index 7e17a54..a50a33d 100644
41448--- a/drivers/hv/hv_balloon.c
41449+++ b/drivers/hv/hv_balloon.c
41450@@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
41451
41452 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
41453 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
41454-static atomic_t trans_id = ATOMIC_INIT(0);
41455+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
41456
41457 static int dm_ring_size = (5 * PAGE_SIZE);
41458
41459@@ -886,7 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
41460 pr_info("Memory hot add failed\n");
41461
41462 dm->state = DM_INITIALIZED;
41463- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41464+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41465 vmbus_sendpacket(dm->dev->channel, &resp,
41466 sizeof(struct dm_hot_add_response),
41467 (unsigned long)NULL,
41468@@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem_device *dm)
41469 memset(&status, 0, sizeof(struct dm_status));
41470 status.hdr.type = DM_STATUS_REPORT;
41471 status.hdr.size = sizeof(struct dm_status);
41472- status.hdr.trans_id = atomic_inc_return(&trans_id);
41473+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41474
41475 /*
41476 * The host expects the guest to report free memory.
41477@@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem_device *dm)
41478 * send the status. This can happen if we were interrupted
41479 * after we picked our transaction ID.
41480 */
41481- if (status.hdr.trans_id != atomic_read(&trans_id))
41482+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
41483 return;
41484
41485 vmbus_sendpacket(dm->dev->channel, &status,
41486@@ -1108,7 +1108,7 @@ static void balloon_up(struct work_struct *dummy)
41487 */
41488
41489 do {
41490- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
41491+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41492 ret = vmbus_sendpacket(dm_device.dev->channel,
41493 bl_resp,
41494 bl_resp->hdr.size,
41495@@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
41496
41497 memset(&resp, 0, sizeof(struct dm_unballoon_response));
41498 resp.hdr.type = DM_UNBALLOON_RESPONSE;
41499- resp.hdr.trans_id = atomic_inc_return(&trans_id);
41500+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41501 resp.hdr.size = sizeof(struct dm_unballoon_response);
41502
41503 vmbus_sendpacket(dm_device.dev->channel, &resp,
41504@@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynmem_device *dm,
41505 memset(&version_req, 0, sizeof(struct dm_version_request));
41506 version_req.hdr.type = DM_VERSION_REQUEST;
41507 version_req.hdr.size = sizeof(struct dm_version_request);
41508- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41509+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41510 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
41511 version_req.is_last_attempt = 1;
41512
41513@@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_device *dev,
41514 memset(&version_req, 0, sizeof(struct dm_version_request));
41515 version_req.hdr.type = DM_VERSION_REQUEST;
41516 version_req.hdr.size = sizeof(struct dm_version_request);
41517- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
41518+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41519 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
41520 version_req.is_last_attempt = 0;
41521
41522@@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
41523 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
41524 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
41525 cap_msg.hdr.size = sizeof(struct dm_capabilities);
41526- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
41527+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
41528
41529 cap_msg.caps.cap_bits.balloon = 1;
41530 cap_msg.caps.cap_bits.hot_add = 1;
41531diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
41532index e055176..c22ff1f 100644
41533--- a/drivers/hv/hyperv_vmbus.h
41534+++ b/drivers/hv/hyperv_vmbus.h
41535@@ -602,7 +602,7 @@ enum vmbus_connect_state {
41536 struct vmbus_connection {
41537 enum vmbus_connect_state conn_state;
41538
41539- atomic_t next_gpadl_handle;
41540+ atomic_unchecked_t next_gpadl_handle;
41541
41542 /*
41543 * Represents channel interrupts. Each bit position represents a
41544diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
41545index 48aad4f..c768fb2 100644
41546--- a/drivers/hv/vmbus_drv.c
41547+++ b/drivers/hv/vmbus_drv.c
41548@@ -846,10 +846,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
41549 {
41550 int ret = 0;
41551
41552- static atomic_t device_num = ATOMIC_INIT(0);
41553+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41554
41555 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
41556- atomic_inc_return(&device_num));
41557+ atomic_inc_return_unchecked(&device_num));
41558
41559 child_device_obj->device.bus = &hv_bus;
41560 child_device_obj->device.parent = &hv_acpi_dev->dev;
41561diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
41562index 6a34f7f..aa4c3a6 100644
41563--- a/drivers/hwmon/acpi_power_meter.c
41564+++ b/drivers/hwmon/acpi_power_meter.c
41565@@ -117,7 +117,7 @@ struct sensor_template {
41566 struct device_attribute *devattr,
41567 const char *buf, size_t count);
41568 int index;
41569-};
41570+} __do_const;
41571
41572 /* Averaging interval */
41573 static int update_avg_interval(struct acpi_power_meter_resource *resource)
41574@@ -632,7 +632,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
41575 struct sensor_template *attrs)
41576 {
41577 struct device *dev = &resource->acpi_dev->dev;
41578- struct sensor_device_attribute *sensors =
41579+ sensor_device_attribute_no_const *sensors =
41580 &resource->sensors[resource->num_sensors];
41581 int res = 0;
41582
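
The __do_const annotation and the sensor_device_attribute_no_const type above belong to grsecurity's constification plugin: structures consisting of function pointers are forced into read-only memory, and a matching *_no_const typedef is kept for the few instances that genuinely are assembled at run time, such as sysfs attributes built per device. A rough userspace model of the idea; the names below are hypothetical, not the kernel's:

    #include <stdio.h>
    #include <sys/types.h>

    struct dev_attr {
        const char *name;
        ssize_t (*show)(char *buf);
    };  /* with the plugin, instances of this default to const */

    typedef struct dev_attr dev_attr_no_const;  /* writable escape hatch */

    static ssize_t show_temp(char *buf)
    {
        return (ssize_t)sprintf(buf, "42\n");
    }

    /* read-only after link, like most ops/attribute tables */
    static const struct dev_attr static_attr = { "temp", show_temp };

    /* runtime-built attribute: the case the _no_const typedef exists for */
    static void init_attr(dev_attr_no_const *a, const char *name)
    {
        a->name = name;
        a->show = show_temp;
    }

    int main(void)
    {
        dev_attr_no_const dyn;
        char buf[16];

        init_attr(&dyn, "temp1_input");
        dyn.show(buf);
        printf("%s: %s", static_attr.name, buf);
        printf("%s: %s", dyn.name, buf);
        return 0;
    }
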
41583diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
41584index 3288f13..71cfb4e 100644
41585--- a/drivers/hwmon/applesmc.c
41586+++ b/drivers/hwmon/applesmc.c
41587@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
41588 {
41589 struct applesmc_node_group *grp;
41590 struct applesmc_dev_attr *node;
41591- struct attribute *attr;
41592+ attribute_no_const *attr;
41593 int ret, i;
41594
41595 for (grp = groups; grp->format; grp++) {
41596diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
41597index dafc63c..4abb96c 100644
41598--- a/drivers/hwmon/asus_atk0110.c
41599+++ b/drivers/hwmon/asus_atk0110.c
41600@@ -151,10 +151,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
41601 struct atk_sensor_data {
41602 struct list_head list;
41603 struct atk_data *data;
41604- struct device_attribute label_attr;
41605- struct device_attribute input_attr;
41606- struct device_attribute limit1_attr;
41607- struct device_attribute limit2_attr;
41608+ device_attribute_no_const label_attr;
41609+ device_attribute_no_const input_attr;
41610+ device_attribute_no_const limit1_attr;
41611+ device_attribute_no_const limit2_attr;
41612 char label_attr_name[ATTR_NAME_SIZE];
41613 char input_attr_name[ATTR_NAME_SIZE];
41614 char limit1_attr_name[ATTR_NAME_SIZE];
41615@@ -274,7 +274,7 @@ static ssize_t atk_name_show(struct device *dev,
41616 static struct device_attribute atk_name_attr =
41617 __ATTR(name, 0444, atk_name_show, NULL);
41618
41619-static void atk_init_attribute(struct device_attribute *attr, char *name,
41620+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
41621 sysfs_show_func show)
41622 {
41623 sysfs_attr_init(&attr->attr);
41624diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
41625index 9425098..7646cc5 100644
41626--- a/drivers/hwmon/coretemp.c
41627+++ b/drivers/hwmon/coretemp.c
41628@@ -797,7 +797,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
41629 return NOTIFY_OK;
41630 }
41631
41632-static struct notifier_block coretemp_cpu_notifier __refdata = {
41633+static struct notifier_block coretemp_cpu_notifier = {
41634 .notifier_call = coretemp_cpu_callback,
41635 };
41636
41637diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
41638index 632f1dc..57e6a58 100644
41639--- a/drivers/hwmon/ibmaem.c
41640+++ b/drivers/hwmon/ibmaem.c
41641@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
41642 struct aem_rw_sensor_template *rw)
41643 {
41644 struct device *dev = &data->pdev->dev;
41645- struct sensor_device_attribute *sensors = data->sensors;
41646+ sensor_device_attribute_no_const *sensors = data->sensors;
41647 int err;
41648
41649 /* Set up read-only sensors */
41650diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
41651index 708081b..fe2d4ab 100644
41652--- a/drivers/hwmon/iio_hwmon.c
41653+++ b/drivers/hwmon/iio_hwmon.c
41654@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
41655 {
41656 struct device *dev = &pdev->dev;
41657 struct iio_hwmon_state *st;
41658- struct sensor_device_attribute *a;
41659+ sensor_device_attribute_no_const *a;
41660 int ret, i;
41661 int in_i = 1, temp_i = 1, curr_i = 1;
41662 enum iio_chan_type type;
41663diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
41664index cf811c1..4c17110 100644
41665--- a/drivers/hwmon/nct6775.c
41666+++ b/drivers/hwmon/nct6775.c
41667@@ -944,10 +944,10 @@ static struct attribute_group *
41668 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
41669 int repeat)
41670 {
41671- struct attribute_group *group;
41672+ attribute_group_no_const *group;
41673 struct sensor_device_attr_u *su;
41674- struct sensor_device_attribute *a;
41675- struct sensor_device_attribute_2 *a2;
41676+ sensor_device_attribute_no_const *a;
41677+ sensor_device_attribute_2_no_const *a2;
41678 struct attribute **attrs;
41679 struct sensor_device_template **t;
41680 int i, count;
41681diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
41682index 3cbf66e..8c5cc2a 100644
41683--- a/drivers/hwmon/pmbus/pmbus_core.c
41684+++ b/drivers/hwmon/pmbus/pmbus_core.c
41685@@ -782,7 +782,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
41686 return 0;
41687 }
41688
41689-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
41690+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
41691 const char *name,
41692 umode_t mode,
41693 ssize_t (*show)(struct device *dev,
41694@@ -799,7 +799,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
41695 dev_attr->store = store;
41696 }
41697
41698-static void pmbus_attr_init(struct sensor_device_attribute *a,
41699+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
41700 const char *name,
41701 umode_t mode,
41702 ssize_t (*show)(struct device *dev,
41703@@ -821,7 +821,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
41704 u16 reg, u8 mask)
41705 {
41706 struct pmbus_boolean *boolean;
41707- struct sensor_device_attribute *a;
41708+ sensor_device_attribute_no_const *a;
41709
41710 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
41711 if (!boolean)
41712@@ -846,7 +846,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
41713 bool update, bool readonly)
41714 {
41715 struct pmbus_sensor *sensor;
41716- struct device_attribute *a;
41717+ device_attribute_no_const *a;
41718
41719 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
41720 if (!sensor)
41721@@ -877,7 +877,7 @@ static int pmbus_add_label(struct pmbus_data *data,
41722 const char *lstring, int index)
41723 {
41724 struct pmbus_label *label;
41725- struct device_attribute *a;
41726+ device_attribute_no_const *a;
41727
41728 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
41729 if (!label)
41730diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
41731index 97cd45a..ac54d8b 100644
41732--- a/drivers/hwmon/sht15.c
41733+++ b/drivers/hwmon/sht15.c
41734@@ -169,7 +169,7 @@ struct sht15_data {
41735 int supply_uv;
41736 bool supply_uv_valid;
41737 struct work_struct update_supply_work;
41738- atomic_t interrupt_handled;
41739+ atomic_unchecked_t interrupt_handled;
41740 };
41741
41742 /**
41743@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
41744 ret = gpio_direction_input(data->pdata->gpio_data);
41745 if (ret)
41746 return ret;
41747- atomic_set(&data->interrupt_handled, 0);
41748+ atomic_set_unchecked(&data->interrupt_handled, 0);
41749
41750 enable_irq(gpio_to_irq(data->pdata->gpio_data));
41751 if (gpio_get_value(data->pdata->gpio_data) == 0) {
41752 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
41753 /* Only relevant if the interrupt hasn't occurred. */
41754- if (!atomic_read(&data->interrupt_handled))
41755+ if (!atomic_read_unchecked(&data->interrupt_handled))
41756 schedule_work(&data->read_work);
41757 }
41758 ret = wait_event_timeout(data->wait_queue,
41759@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
41760
41761 /* First disable the interrupt */
41762 disable_irq_nosync(irq);
41763- atomic_inc(&data->interrupt_handled);
41764+ atomic_inc_unchecked(&data->interrupt_handled);
41765 /* Then schedule a reading work struct */
41766 if (data->state != SHT15_READING_NOTHING)
41767 schedule_work(&data->read_work);
41768@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
41769 * If not, then start the interrupt again - care here as could
41770 * have gone low in meantime so verify it hasn't!
41771 */
41772- atomic_set(&data->interrupt_handled, 0);
41773+ atomic_set_unchecked(&data->interrupt_handled, 0);
41774 enable_irq(gpio_to_irq(data->pdata->gpio_data));
41775 /* If still not occurred or another handler was scheduled */
41776 if (gpio_get_value(data->pdata->gpio_data)
41777- || atomic_read(&data->interrupt_handled))
41778+ || atomic_read_unchecked(&data->interrupt_handled))
41779 return;
41780 }
41781
41782diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
41783index 38944e9..ae9e5ed 100644
41784--- a/drivers/hwmon/via-cputemp.c
41785+++ b/drivers/hwmon/via-cputemp.c
41786@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
41787 return NOTIFY_OK;
41788 }
41789
41790-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
41791+static struct notifier_block via_cputemp_cpu_notifier = {
41792 .notifier_call = via_cputemp_cpu_callback,
41793 };
41794
41795diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
41796index 07f01ac..d79ad3d 100644
41797--- a/drivers/i2c/busses/i2c-amd756-s4882.c
41798+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
41799@@ -43,7 +43,7 @@
41800 extern struct i2c_adapter amd756_smbus;
41801
41802 static struct i2c_adapter *s4882_adapter;
41803-static struct i2c_algorithm *s4882_algo;
41804+static i2c_algorithm_no_const *s4882_algo;
41805
41806 /* Wrapper access functions for multiplexed SMBus */
41807 static DEFINE_MUTEX(amd756_lock);
41808diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
41809index 721f7eb..0fd2a09 100644
41810--- a/drivers/i2c/busses/i2c-diolan-u2c.c
41811+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
41812@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
41813 /* usb layer */
41814
41815 /* Send command to device, and get response. */
41816-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
41817+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
41818 {
41819 int ret = 0;
41820 int actual;
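
__intentional_overflow(-1), applied here to diolan_usb_transfer and later to several mthca functions, is an annotation for the size_overflow GCC plugin, which instruments arithmetic feeding size calculations and kills the task when a result wraps; the marker (with -1 apparently denoting the return value) exempts expressions whose overflow is expected, such as values folded with negative errno codes. What the instrumentation amounts to can be sketched in userspace with a compiler builtin; checked_size_mul is a hypothetical helper, not a kernel API:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static size_t checked_size_mul(size_t a, size_t b)
    {
        size_t r;

        if (__builtin_mul_overflow(a, b, &r)) {
            fprintf(stderr, "size overflow detected\n");
            abort();    /* the plugin would report and kill the task */
        }
        return r;
    }

    int main(void)
    {
        printf("%zu\n", checked_size_mul(16, 4096)); /* fine */
        checked_size_mul(SIZE_MAX / 2, 3);           /* trips the check */
        return 0;
    }
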
41821diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
41822index 2ca268d..c6acbdf 100644
41823--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
41824+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
41825@@ -41,7 +41,7 @@
41826 extern struct i2c_adapter *nforce2_smbus;
41827
41828 static struct i2c_adapter *s4985_adapter;
41829-static struct i2c_algorithm *s4985_algo;
41830+static i2c_algorithm_no_const *s4985_algo;
41831
41832 /* Wrapper access functions for multiplexed SMBus */
41833 static DEFINE_MUTEX(nforce2_lock);
41834diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
41835index 80b47e8..1a6040d9 100644
41836--- a/drivers/i2c/i2c-dev.c
41837+++ b/drivers/i2c/i2c-dev.c
41838@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
41839 break;
41840 }
41841
41842- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
41843+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
41844 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
41845 if (IS_ERR(rdwr_pa[i].buf)) {
41846 res = PTR_ERR(rdwr_pa[i].buf);
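
The __force_user cast above replaces a bare __user cast: grsecurity splits sparse's __force into per-address-space variants, so a cast still states which address space the pointer belongs to rather than switching checking off entirely. A compile-only sketch of the annotations, which expand to nothing outside a sparse run; the struct and function names are illustrative:

    #ifdef __CHECKER__
    # define __user        __attribute__((noderef, address_space(1)))
    # define __force_user  __attribute__((force, address_space(1)))
    #else
    # define __user
    # define __force_user
    #endif

    struct i2c_msg_buf { unsigned char *buf; };  /* field lost its __user */

    static const unsigned char __user *user_view(struct i2c_msg_buf *m)
    {
        /* name the target address space instead of disabling checking */
        return (const unsigned char __force_user *)m->buf;
    }

    int main(void)
    {
        struct i2c_msg_buf m = { 0 };
        (void)user_view(&m);
        return 0;
    }
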
41847diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
41848index 0b510ba..4fbb5085 100644
41849--- a/drivers/ide/ide-cd.c
41850+++ b/drivers/ide/ide-cd.c
41851@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
41852 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
41853 if ((unsigned long)buf & alignment
41854 || blk_rq_bytes(rq) & q->dma_pad_mask
41855- || object_is_on_stack(buf))
41856+ || object_starts_on_stack(buf))
41857 drive->dma = 0;
41858 }
41859 }
41860diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
41861index 18f72e3..3722327 100644
41862--- a/drivers/iio/industrialio-core.c
41863+++ b/drivers/iio/industrialio-core.c
41864@@ -521,7 +521,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
41865 }
41866
41867 static
41868-int __iio_device_attr_init(struct device_attribute *dev_attr,
41869+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
41870 const char *postfix,
41871 struct iio_chan_spec const *chan,
41872 ssize_t (*readfunc)(struct device *dev,
41873diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
41874index f2ef7ef..743d02f 100644
41875--- a/drivers/infiniband/core/cm.c
41876+++ b/drivers/infiniband/core/cm.c
41877@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
41878
41879 struct cm_counter_group {
41880 struct kobject obj;
41881- atomic_long_t counter[CM_ATTR_COUNT];
41882+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
41883 };
41884
41885 struct cm_counter_attribute {
41886@@ -1392,7 +1392,7 @@ static void cm_dup_req_handler(struct cm_work *work,
41887 struct ib_mad_send_buf *msg = NULL;
41888 int ret;
41889
41890- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41891+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41892 counter[CM_REQ_COUNTER]);
41893
41894 /* Quick state check to discard duplicate REQs. */
41895@@ -1776,7 +1776,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
41896 if (!cm_id_priv)
41897 return;
41898
41899- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41900+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41901 counter[CM_REP_COUNTER]);
41902 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
41903 if (ret)
41904@@ -1943,7 +1943,7 @@ static int cm_rtu_handler(struct cm_work *work)
41905 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
41906 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
41907 spin_unlock_irq(&cm_id_priv->lock);
41908- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41909+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41910 counter[CM_RTU_COUNTER]);
41911 goto out;
41912 }
41913@@ -2126,7 +2126,7 @@ static int cm_dreq_handler(struct cm_work *work)
41914 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
41915 dreq_msg->local_comm_id);
41916 if (!cm_id_priv) {
41917- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41918+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41919 counter[CM_DREQ_COUNTER]);
41920 cm_issue_drep(work->port, work->mad_recv_wc);
41921 return -EINVAL;
41922@@ -2151,7 +2151,7 @@ static int cm_dreq_handler(struct cm_work *work)
41923 case IB_CM_MRA_REP_RCVD:
41924 break;
41925 case IB_CM_TIMEWAIT:
41926- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41927+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41928 counter[CM_DREQ_COUNTER]);
41929 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
41930 goto unlock;
41931@@ -2165,7 +2165,7 @@ static int cm_dreq_handler(struct cm_work *work)
41932 cm_free_msg(msg);
41933 goto deref;
41934 case IB_CM_DREQ_RCVD:
41935- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41936+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41937 counter[CM_DREQ_COUNTER]);
41938 goto unlock;
41939 default:
41940@@ -2532,7 +2532,7 @@ static int cm_mra_handler(struct cm_work *work)
41941 ib_modify_mad(cm_id_priv->av.port->mad_agent,
41942 cm_id_priv->msg, timeout)) {
41943 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
41944- atomic_long_inc(&work->port->
41945+ atomic_long_inc_unchecked(&work->port->
41946 counter_group[CM_RECV_DUPLICATES].
41947 counter[CM_MRA_COUNTER]);
41948 goto out;
41949@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
41950 break;
41951 case IB_CM_MRA_REQ_RCVD:
41952 case IB_CM_MRA_REP_RCVD:
41953- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41954+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41955 counter[CM_MRA_COUNTER]);
41956 /* fall through */
41957 default:
41958@@ -2703,7 +2703,7 @@ static int cm_lap_handler(struct cm_work *work)
41959 case IB_CM_LAP_IDLE:
41960 break;
41961 case IB_CM_MRA_LAP_SENT:
41962- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41963+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41964 counter[CM_LAP_COUNTER]);
41965 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
41966 goto unlock;
41967@@ -2719,7 +2719,7 @@ static int cm_lap_handler(struct cm_work *work)
41968 cm_free_msg(msg);
41969 goto deref;
41970 case IB_CM_LAP_RCVD:
41971- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41972+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41973 counter[CM_LAP_COUNTER]);
41974 goto unlock;
41975 default:
41976@@ -3003,7 +3003,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
41977 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
41978 if (cur_cm_id_priv) {
41979 spin_unlock_irq(&cm.lock);
41980- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
41981+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
41982 counter[CM_SIDR_REQ_COUNTER]);
41983 goto out; /* Duplicate message. */
41984 }
41985@@ -3215,10 +3215,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
41986 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
41987 msg->retries = 1;
41988
41989- atomic_long_add(1 + msg->retries,
41990+ atomic_long_add_unchecked(1 + msg->retries,
41991 &port->counter_group[CM_XMIT].counter[attr_index]);
41992 if (msg->retries)
41993- atomic_long_add(msg->retries,
41994+ atomic_long_add_unchecked(msg->retries,
41995 &port->counter_group[CM_XMIT_RETRIES].
41996 counter[attr_index]);
41997
41998@@ -3428,7 +3428,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
41999 }
42000
42001 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42002- atomic_long_inc(&port->counter_group[CM_RECV].
42003+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42004 counter[attr_id - CM_ATTR_ID_OFFSET]);
42005
42006 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42007@@ -3633,7 +3633,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42008 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42009
42010 return sprintf(buf, "%ld\n",
42011- atomic_long_read(&group->counter[cm_attr->index]));
42012+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42013 }
42014
42015 static const struct sysfs_ops cm_counter_ops = {
42016diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42017index 9f5ad7c..588cd84 100644
42018--- a/drivers/infiniband/core/fmr_pool.c
42019+++ b/drivers/infiniband/core/fmr_pool.c
42020@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42021
42022 struct task_struct *thread;
42023
42024- atomic_t req_ser;
42025- atomic_t flush_ser;
42026+ atomic_unchecked_t req_ser;
42027+ atomic_unchecked_t flush_ser;
42028
42029 wait_queue_head_t force_wait;
42030 };
42031@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42032 struct ib_fmr_pool *pool = pool_ptr;
42033
42034 do {
42035- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42036+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42037 ib_fmr_batch_release(pool);
42038
42039- atomic_inc(&pool->flush_ser);
42040+ atomic_inc_unchecked(&pool->flush_ser);
42041 wake_up_interruptible(&pool->force_wait);
42042
42043 if (pool->flush_function)
42044@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42045 }
42046
42047 set_current_state(TASK_INTERRUPTIBLE);
42048- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42049+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42050 !kthread_should_stop())
42051 schedule();
42052 __set_current_state(TASK_RUNNING);
42053@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42054 pool->dirty_watermark = params->dirty_watermark;
42055 pool->dirty_len = 0;
42056 spin_lock_init(&pool->pool_lock);
42057- atomic_set(&pool->req_ser, 0);
42058- atomic_set(&pool->flush_ser, 0);
42059+ atomic_set_unchecked(&pool->req_ser, 0);
42060+ atomic_set_unchecked(&pool->flush_ser, 0);
42061 init_waitqueue_head(&pool->force_wait);
42062
42063 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42064@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42065 }
42066 spin_unlock_irq(&pool->pool_lock);
42067
42068- serial = atomic_inc_return(&pool->req_ser);
42069+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42070 wake_up_process(pool->thread);
42071
42072 if (wait_event_interruptible(pool->force_wait,
42073- atomic_read(&pool->flush_ser) - serial >= 0))
42074+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42075 return -EINTR;
42076
42077 return 0;
42078@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42079 } else {
42080 list_add_tail(&fmr->list, &pool->dirty_list);
42081 if (++pool->dirty_len >= pool->dirty_watermark) {
42082- atomic_inc(&pool->req_ser);
42083+ atomic_inc_unchecked(&pool->req_ser);
42084 wake_up_process(pool->thread);
42085 }
42086 }
42087diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42088index 84e4500..2c9beeb 100644
42089--- a/drivers/infiniband/hw/cxgb4/mem.c
42090+++ b/drivers/infiniband/hw/cxgb4/mem.c
42091@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42092 int err;
42093 struct fw_ri_tpte tpt;
42094 u32 stag_idx;
42095- static atomic_t key;
42096+ static atomic_unchecked_t key;
42097
42098 if (c4iw_fatal_error(rdev))
42099 return -EIO;
42100@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42101 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42102 rdev->stats.stag.max = rdev->stats.stag.cur;
42103 mutex_unlock(&rdev->stats.lock);
42104- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42105+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42106 }
42107 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42108 __func__, stag_state, type, pdid, stag_idx);
42109diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
42110index 644c2c7..ecf0879 100644
42111--- a/drivers/infiniband/hw/ipath/ipath_dma.c
42112+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
42113@@ -176,17 +176,17 @@ static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
42114 }
42115
42116 struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
42117- ipath_mapping_error,
42118- ipath_dma_map_single,
42119- ipath_dma_unmap_single,
42120- ipath_dma_map_page,
42121- ipath_dma_unmap_page,
42122- ipath_map_sg,
42123- ipath_unmap_sg,
42124- ipath_sg_dma_address,
42125- ipath_sg_dma_len,
42126- ipath_sync_single_for_cpu,
42127- ipath_sync_single_for_device,
42128- ipath_dma_alloc_coherent,
42129- ipath_dma_free_coherent
42130+ .mapping_error = ipath_mapping_error,
42131+ .map_single = ipath_dma_map_single,
42132+ .unmap_single = ipath_dma_unmap_single,
42133+ .map_page = ipath_dma_map_page,
42134+ .unmap_page = ipath_dma_unmap_page,
42135+ .map_sg = ipath_map_sg,
42136+ .unmap_sg = ipath_unmap_sg,
42137+ .dma_address = ipath_sg_dma_address,
42138+ .dma_len = ipath_sg_dma_len,
42139+ .sync_single_for_cpu = ipath_sync_single_for_cpu,
42140+ .sync_single_for_device = ipath_sync_single_for_device,
42141+ .alloc_coherent = ipath_dma_alloc_coherent,
42142+ .free_coherent = ipath_dma_free_coherent
42143 };
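
The ipath_dma_mapping_ops rewrite above, like the nes_cm_api table further down, converts positional initializers to designated ones. This matters under the patch's structure layout randomization (randstruct) plugin: once field order can be shuffled at build time, only designated initializers keep binding each function to the right slot. A toy illustration:

    #include <stdio.h>

    struct ops {
        int (*open)(void);
        int (*close)(void);
    };

    static int my_open(void)  { return 1; }
    static int my_close(void) { return 0; }

    /* fragile: meaning depends on the declaration order of the fields */
    static struct ops positional = { my_open, my_close };

    /* robust: survives any build-time reordering of struct ops */
    static struct ops designated = {
        .open  = my_open,
        .close = my_close,
    };

    int main(void)
    {
        printf("%d %d %d\n", positional.open(),
               designated.open(), designated.close());
        return 0;
    }
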
42144diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42145index 79b3dbc..96e5fcc 100644
42146--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42147+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42148@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42149 struct ib_atomic_eth *ateth;
42150 struct ipath_ack_entry *e;
42151 u64 vaddr;
42152- atomic64_t *maddr;
42153+ atomic64_unchecked_t *maddr;
42154 u64 sdata;
42155 u32 rkey;
42156 u8 next;
42157@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42158 IB_ACCESS_REMOTE_ATOMIC)))
42159 goto nack_acc_unlck;
42160 /* Perform atomic OP and save result. */
42161- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42162+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42163 sdata = be64_to_cpu(ateth->swap_data);
42164 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42165 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42166- (u64) atomic64_add_return(sdata, maddr) - sdata :
42167+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42168 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42169 be64_to_cpu(ateth->compare_data),
42170 sdata);
42171diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42172index 1f95bba..9530f87 100644
42173--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42174+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42175@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42176 unsigned long flags;
42177 struct ib_wc wc;
42178 u64 sdata;
42179- atomic64_t *maddr;
42180+ atomic64_unchecked_t *maddr;
42181 enum ib_wc_status send_status;
42182
42183 /*
42184@@ -382,11 +382,11 @@ again:
42185 IB_ACCESS_REMOTE_ATOMIC)))
42186 goto acc_err;
42187 /* Perform atomic OP and save result. */
42188- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42189+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42190 sdata = wqe->wr.wr.atomic.compare_add;
42191 *(u64 *) sqp->s_sge.sge.vaddr =
42192 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42193- (u64) atomic64_add_return(sdata, maddr) - sdata :
42194+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42195 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42196 sdata, wqe->wr.wr.atomic.swap);
42197 goto send_comp;
42198diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42199index f2a3f48..673ec79 100644
42200--- a/drivers/infiniband/hw/mlx4/mad.c
42201+++ b/drivers/infiniband/hw/mlx4/mad.c
42202@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42203
42204 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42205 {
42206- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42207+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42208 cpu_to_be64(0xff00000000000000LL);
42209 }
42210
42211diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42212index 25b2cdf..099ff97 100644
42213--- a/drivers/infiniband/hw/mlx4/mcg.c
42214+++ b/drivers/infiniband/hw/mlx4/mcg.c
42215@@ -1040,7 +1040,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42216 {
42217 char name[20];
42218
42219- atomic_set(&ctx->tid, 0);
42220+ atomic_set_unchecked(&ctx->tid, 0);
42221 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42222 ctx->mcg_wq = create_singlethread_workqueue(name);
42223 if (!ctx->mcg_wq)
42224diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42225index 036b663..c9a8c73 100644
42226--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42227+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42228@@ -404,7 +404,7 @@ struct mlx4_ib_demux_ctx {
42229 struct list_head mcg_mgid0_list;
42230 struct workqueue_struct *mcg_wq;
42231 struct mlx4_ib_demux_pv_ctx **tun;
42232- atomic_t tid;
42233+ atomic_unchecked_t tid;
42234 int flushing; /* flushing the work queue */
42235 };
42236
42237diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42238index 9d3e5c1..6f166df 100644
42239--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42240+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42241@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42242 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42243 }
42244
42245-int mthca_QUERY_FW(struct mthca_dev *dev)
42246+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42247 {
42248 struct mthca_mailbox *mailbox;
42249 u32 *outbox;
42250@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42251 CMD_TIME_CLASS_B);
42252 }
42253
42254-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42255+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42256 int num_mtt)
42257 {
42258 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42259@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42260 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42261 }
42262
42263-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42264+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42265 int eq_num)
42266 {
42267 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42268@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42269 CMD_TIME_CLASS_B);
42270 }
42271
42272-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42273+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42274 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42275 void *in_mad, void *response_mad)
42276 {
42277diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42278index 87897b9..7e79542 100644
42279--- a/drivers/infiniband/hw/mthca/mthca_main.c
42280+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42281@@ -692,7 +692,7 @@ err_close:
42282 return err;
42283 }
42284
42285-static int mthca_setup_hca(struct mthca_dev *dev)
42286+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42287 {
42288 int err;
42289
42290diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42291index ed9a989..6aa5dc2 100644
42292--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42293+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42294@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42295 * through the bitmaps)
42296 */
42297
42298-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42299+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42300 {
42301 int o;
42302 int m;
42303@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42304 return key;
42305 }
42306
42307-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42308+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42309 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42310 {
42311 struct mthca_mailbox *mailbox;
42312@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42313 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42314 }
42315
42316-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42317+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42318 u64 *buffer_list, int buffer_size_shift,
42319 int list_len, u64 iova, u64 total_size,
42320 u32 access, struct mthca_mr *mr)
42321diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42322index 5b71d43..35a9e14 100644
42323--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42324+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42325@@ -763,7 +763,7 @@ unlock:
42326 return 0;
42327 }
42328
42329-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42330+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42331 {
42332 struct mthca_dev *dev = to_mdev(ibcq->device);
42333 struct mthca_cq *cq = to_mcq(ibcq);
42334diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42335index 4291410..d2ab1fb 100644
42336--- a/drivers/infiniband/hw/nes/nes.c
42337+++ b/drivers/infiniband/hw/nes/nes.c
42338@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42339 LIST_HEAD(nes_adapter_list);
42340 static LIST_HEAD(nes_dev_list);
42341
42342-atomic_t qps_destroyed;
42343+atomic_unchecked_t qps_destroyed;
42344
42345 static unsigned int ee_flsh_adapter;
42346 static unsigned int sysfs_nonidx_addr;
42347@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42348 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42349 struct nes_adapter *nesadapter = nesdev->nesadapter;
42350
42351- atomic_inc(&qps_destroyed);
42352+ atomic_inc_unchecked(&qps_destroyed);
42353
42354 /* Free the control structures */
42355
42356diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42357index 33cc589..3bd6538 100644
42358--- a/drivers/infiniband/hw/nes/nes.h
42359+++ b/drivers/infiniband/hw/nes/nes.h
42360@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
42361 extern unsigned int wqm_quanta;
42362 extern struct list_head nes_adapter_list;
42363
42364-extern atomic_t cm_connects;
42365-extern atomic_t cm_accepts;
42366-extern atomic_t cm_disconnects;
42367-extern atomic_t cm_closes;
42368-extern atomic_t cm_connecteds;
42369-extern atomic_t cm_connect_reqs;
42370-extern atomic_t cm_rejects;
42371-extern atomic_t mod_qp_timouts;
42372-extern atomic_t qps_created;
42373-extern atomic_t qps_destroyed;
42374-extern atomic_t sw_qps_destroyed;
42375+extern atomic_unchecked_t cm_connects;
42376+extern atomic_unchecked_t cm_accepts;
42377+extern atomic_unchecked_t cm_disconnects;
42378+extern atomic_unchecked_t cm_closes;
42379+extern atomic_unchecked_t cm_connecteds;
42380+extern atomic_unchecked_t cm_connect_reqs;
42381+extern atomic_unchecked_t cm_rejects;
42382+extern atomic_unchecked_t mod_qp_timouts;
42383+extern atomic_unchecked_t qps_created;
42384+extern atomic_unchecked_t qps_destroyed;
42385+extern atomic_unchecked_t sw_qps_destroyed;
42386 extern u32 mh_detected;
42387 extern u32 mh_pauses_sent;
42388 extern u32 cm_packets_sent;
42389@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
42390 extern u32 cm_packets_received;
42391 extern u32 cm_packets_dropped;
42392 extern u32 cm_packets_retrans;
42393-extern atomic_t cm_listens_created;
42394-extern atomic_t cm_listens_destroyed;
42395+extern atomic_unchecked_t cm_listens_created;
42396+extern atomic_unchecked_t cm_listens_destroyed;
42397 extern u32 cm_backlog_drops;
42398-extern atomic_t cm_loopbacks;
42399-extern atomic_t cm_nodes_created;
42400-extern atomic_t cm_nodes_destroyed;
42401-extern atomic_t cm_accel_dropped_pkts;
42402-extern atomic_t cm_resets_recvd;
42403-extern atomic_t pau_qps_created;
42404-extern atomic_t pau_qps_destroyed;
42405+extern atomic_unchecked_t cm_loopbacks;
42406+extern atomic_unchecked_t cm_nodes_created;
42407+extern atomic_unchecked_t cm_nodes_destroyed;
42408+extern atomic_unchecked_t cm_accel_dropped_pkts;
42409+extern atomic_unchecked_t cm_resets_recvd;
42410+extern atomic_unchecked_t pau_qps_created;
42411+extern atomic_unchecked_t pau_qps_destroyed;
42412
42413 extern u32 int_mod_timer_init;
42414 extern u32 int_mod_cq_depth_256;
42415diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42416index 6b29249..461d143 100644
42417--- a/drivers/infiniband/hw/nes/nes_cm.c
42418+++ b/drivers/infiniband/hw/nes/nes_cm.c
42419@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
42420 u32 cm_packets_retrans;
42421 u32 cm_packets_created;
42422 u32 cm_packets_received;
42423-atomic_t cm_listens_created;
42424-atomic_t cm_listens_destroyed;
42425+atomic_unchecked_t cm_listens_created;
42426+atomic_unchecked_t cm_listens_destroyed;
42427 u32 cm_backlog_drops;
42428-atomic_t cm_loopbacks;
42429-atomic_t cm_nodes_created;
42430-atomic_t cm_nodes_destroyed;
42431-atomic_t cm_accel_dropped_pkts;
42432-atomic_t cm_resets_recvd;
42433+atomic_unchecked_t cm_loopbacks;
42434+atomic_unchecked_t cm_nodes_created;
42435+atomic_unchecked_t cm_nodes_destroyed;
42436+atomic_unchecked_t cm_accel_dropped_pkts;
42437+atomic_unchecked_t cm_resets_recvd;
42438
42439 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
42440 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
42441@@ -133,28 +133,28 @@ static void print_core(struct nes_cm_core *core);
42442 /* instance of function pointers for client API */
42443 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
42444 static struct nes_cm_ops nes_cm_api = {
42445- mini_cm_accelerated,
42446- mini_cm_listen,
42447- mini_cm_del_listen,
42448- mini_cm_connect,
42449- mini_cm_close,
42450- mini_cm_accept,
42451- mini_cm_reject,
42452- mini_cm_recv_pkt,
42453- mini_cm_dealloc_core,
42454- mini_cm_get,
42455- mini_cm_set
42456+ .accelerated = mini_cm_accelerated,
42457+ .listen = mini_cm_listen,
42458+ .stop_listener = mini_cm_del_listen,
42459+ .connect = mini_cm_connect,
42460+ .close = mini_cm_close,
42461+ .accept = mini_cm_accept,
42462+ .reject = mini_cm_reject,
42463+ .recv_pkt = mini_cm_recv_pkt,
42464+ .destroy_cm_core = mini_cm_dealloc_core,
42465+ .get = mini_cm_get,
42466+ .set = mini_cm_set
42467 };
42468
42469 static struct nes_cm_core *g_cm_core;
42470
42471-atomic_t cm_connects;
42472-atomic_t cm_accepts;
42473-atomic_t cm_disconnects;
42474-atomic_t cm_closes;
42475-atomic_t cm_connecteds;
42476-atomic_t cm_connect_reqs;
42477-atomic_t cm_rejects;
42478+atomic_unchecked_t cm_connects;
42479+atomic_unchecked_t cm_accepts;
42480+atomic_unchecked_t cm_disconnects;
42481+atomic_unchecked_t cm_closes;
42482+atomic_unchecked_t cm_connecteds;
42483+atomic_unchecked_t cm_connect_reqs;
42484+atomic_unchecked_t cm_rejects;
42485
42486 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
42487 {
42488@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
42489 kfree(listener);
42490 listener = NULL;
42491 ret = 0;
42492- atomic_inc(&cm_listens_destroyed);
42493+ atomic_inc_unchecked(&cm_listens_destroyed);
42494 } else {
42495 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
42496 }
42497@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
42498 cm_node->rem_mac);
42499
42500 add_hte_node(cm_core, cm_node);
42501- atomic_inc(&cm_nodes_created);
42502+ atomic_inc_unchecked(&cm_nodes_created);
42503
42504 return cm_node;
42505 }
42506@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
42507 }
42508
42509 atomic_dec(&cm_core->node_cnt);
42510- atomic_inc(&cm_nodes_destroyed);
42511+ atomic_inc_unchecked(&cm_nodes_destroyed);
42512 nesqp = cm_node->nesqp;
42513 if (nesqp) {
42514 nesqp->cm_node = NULL;
42515@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
42516
42517 static void drop_packet(struct sk_buff *skb)
42518 {
42519- atomic_inc(&cm_accel_dropped_pkts);
42520+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42521 dev_kfree_skb_any(skb);
42522 }
42523
42524@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
42525 {
42526
42527 int reset = 0; /* whether to send reset in case of err.. */
42528- atomic_inc(&cm_resets_recvd);
42529+ atomic_inc_unchecked(&cm_resets_recvd);
42530 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
42531 " refcnt=%d\n", cm_node, cm_node->state,
42532 atomic_read(&cm_node->ref_count));
42533@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
42534 rem_ref_cm_node(cm_node->cm_core, cm_node);
42535 return NULL;
42536 }
42537- atomic_inc(&cm_loopbacks);
42538+ atomic_inc_unchecked(&cm_loopbacks);
42539 loopbackremotenode->loopbackpartner = cm_node;
42540 loopbackremotenode->tcp_cntxt.rcv_wscale =
42541 NES_CM_DEFAULT_RCV_WND_SCALE;
42542@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
42543 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
42544 else {
42545 rem_ref_cm_node(cm_core, cm_node);
42546- atomic_inc(&cm_accel_dropped_pkts);
42547+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
42548 dev_kfree_skb_any(skb);
42549 }
42550 break;
42551@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42552
42553 if ((cm_id) && (cm_id->event_handler)) {
42554 if (issue_disconn) {
42555- atomic_inc(&cm_disconnects);
42556+ atomic_inc_unchecked(&cm_disconnects);
42557 cm_event.event = IW_CM_EVENT_DISCONNECT;
42558 cm_event.status = disconn_status;
42559 cm_event.local_addr = cm_id->local_addr;
42560@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
42561 }
42562
42563 if (issue_close) {
42564- atomic_inc(&cm_closes);
42565+ atomic_inc_unchecked(&cm_closes);
42566 nes_disconnect(nesqp, 1);
42567
42568 cm_id->provider_data = nesqp;
42569@@ -3035,7 +3035,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42570
42571 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
42572 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
42573- atomic_inc(&cm_accepts);
42574+ atomic_inc_unchecked(&cm_accepts);
42575
42576 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
42577 netdev_refcnt_read(nesvnic->netdev));
42578@@ -3224,7 +3224,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
42579 struct nes_cm_core *cm_core;
42580 u8 *start_buff;
42581
42582- atomic_inc(&cm_rejects);
42583+ atomic_inc_unchecked(&cm_rejects);
42584 cm_node = (struct nes_cm_node *)cm_id->provider_data;
42585 loopback = cm_node->loopbackpartner;
42586 cm_core = cm_node->cm_core;
42587@@ -3286,7 +3286,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
42588 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
42589 ntohs(laddr->sin_port));
42590
42591- atomic_inc(&cm_connects);
42592+ atomic_inc_unchecked(&cm_connects);
42593 nesqp->active_conn = 1;
42594
42595 /* cache the cm_id in the qp */
42596@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
42597 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
42598 return err;
42599 }
42600- atomic_inc(&cm_listens_created);
42601+ atomic_inc_unchecked(&cm_listens_created);
42602 }
42603
42604 cm_id->add_ref(cm_id);
42605@@ -3505,7 +3505,7 @@ static void cm_event_connected(struct nes_cm_event *event)
42606
42607 if (nesqp->destroyed)
42608 return;
42609- atomic_inc(&cm_connecteds);
42610+ atomic_inc_unchecked(&cm_connecteds);
42611 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
42612 " local port 0x%04X. jiffies = %lu.\n",
42613 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
42614@@ -3686,7 +3686,7 @@ static void cm_event_reset(struct nes_cm_event *event)
42615
42616 cm_id->add_ref(cm_id);
42617 ret = cm_id->event_handler(cm_id, &cm_event);
42618- atomic_inc(&cm_closes);
42619+ atomic_inc_unchecked(&cm_closes);
42620 cm_event.event = IW_CM_EVENT_CLOSE;
42621 cm_event.status = 0;
42622 cm_event.provider_data = cm_id->provider_data;
42623@@ -3726,7 +3726,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
42624 return;
42625 cm_id = cm_node->cm_id;
42626
42627- atomic_inc(&cm_connect_reqs);
42628+ atomic_inc_unchecked(&cm_connect_reqs);
42629 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42630 cm_node, cm_id, jiffies);
42631
42632@@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
42633 return;
42634 cm_id = cm_node->cm_id;
42635
42636- atomic_inc(&cm_connect_reqs);
42637+ atomic_inc_unchecked(&cm_connect_reqs);
42638 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
42639 cm_node, cm_id, jiffies);
42640
42641diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
42642index 4166452..fc952c3 100644
42643--- a/drivers/infiniband/hw/nes/nes_mgt.c
42644+++ b/drivers/infiniband/hw/nes/nes_mgt.c
42645@@ -40,8 +40,8 @@
42646 #include "nes.h"
42647 #include "nes_mgt.h"
42648
42649-atomic_t pau_qps_created;
42650-atomic_t pau_qps_destroyed;
42651+atomic_unchecked_t pau_qps_created;
42652+atomic_unchecked_t pau_qps_destroyed;
42653
42654 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
42655 {
42656@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
42657 {
42658 struct sk_buff *skb;
42659 unsigned long flags;
42660- atomic_inc(&pau_qps_destroyed);
42661+ atomic_inc_unchecked(&pau_qps_destroyed);
42662
42663 /* Free packets that have not yet been forwarded */
42664 /* Lock is acquired by skb_dequeue when removing the skb */
42665@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
42666 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
42667 skb_queue_head_init(&nesqp->pau_list);
42668 spin_lock_init(&nesqp->pau_lock);
42669- atomic_inc(&pau_qps_created);
42670+ atomic_inc_unchecked(&pau_qps_created);
42671 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
42672 }
42673
42674diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
42675index 49eb511..a774366 100644
42676--- a/drivers/infiniband/hw/nes/nes_nic.c
42677+++ b/drivers/infiniband/hw/nes/nes_nic.c
42678@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
42679 target_stat_values[++index] = mh_detected;
42680 target_stat_values[++index] = mh_pauses_sent;
42681 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
42682- target_stat_values[++index] = atomic_read(&cm_connects);
42683- target_stat_values[++index] = atomic_read(&cm_accepts);
42684- target_stat_values[++index] = atomic_read(&cm_disconnects);
42685- target_stat_values[++index] = atomic_read(&cm_connecteds);
42686- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
42687- target_stat_values[++index] = atomic_read(&cm_rejects);
42688- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
42689- target_stat_values[++index] = atomic_read(&qps_created);
42690- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
42691- target_stat_values[++index] = atomic_read(&qps_destroyed);
42692- target_stat_values[++index] = atomic_read(&cm_closes);
42693+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
42694+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
42695+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
42696+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
42697+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
42698+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
42699+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
42700+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
42701+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
42702+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
42703+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
42704 target_stat_values[++index] = cm_packets_sent;
42705 target_stat_values[++index] = cm_packets_bounced;
42706 target_stat_values[++index] = cm_packets_created;
42707 target_stat_values[++index] = cm_packets_received;
42708 target_stat_values[++index] = cm_packets_dropped;
42709 target_stat_values[++index] = cm_packets_retrans;
42710- target_stat_values[++index] = atomic_read(&cm_listens_created);
42711- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
42712+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
42713+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
42714 target_stat_values[++index] = cm_backlog_drops;
42715- target_stat_values[++index] = atomic_read(&cm_loopbacks);
42716- target_stat_values[++index] = atomic_read(&cm_nodes_created);
42717- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
42718- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
42719- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
42720+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
42721+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
42722+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
42723+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
42724+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
42725 target_stat_values[++index] = nesadapter->free_4kpbl;
42726 target_stat_values[++index] = nesadapter->free_256pbl;
42727 target_stat_values[++index] = int_mod_timer_init;
42728 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
42729 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
42730 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
42731- target_stat_values[++index] = atomic_read(&pau_qps_created);
42732- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
42733+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
42734+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
42735 }
42736
42737 /**
42738diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
42739index 8308e36..ae0d3b5 100644
42740--- a/drivers/infiniband/hw/nes/nes_verbs.c
42741+++ b/drivers/infiniband/hw/nes/nes_verbs.c
42742@@ -46,9 +46,9 @@
42743
42744 #include <rdma/ib_umem.h>
42745
42746-atomic_t mod_qp_timouts;
42747-atomic_t qps_created;
42748-atomic_t sw_qps_destroyed;
42749+atomic_unchecked_t mod_qp_timouts;
42750+atomic_unchecked_t qps_created;
42751+atomic_unchecked_t sw_qps_destroyed;
42752
42753 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
42754
42755@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
42756 if (init_attr->create_flags)
42757 return ERR_PTR(-EINVAL);
42758
42759- atomic_inc(&qps_created);
42760+ atomic_inc_unchecked(&qps_created);
42761 switch (init_attr->qp_type) {
42762 case IB_QPT_RC:
42763 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
42764@@ -1466,7 +1466,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
42765 struct iw_cm_event cm_event;
42766 int ret = 0;
42767
42768- atomic_inc(&sw_qps_destroyed);
42769+ atomic_inc_unchecked(&sw_qps_destroyed);
42770 nesqp->destroyed = 1;
42771
42772 /* Blow away the connection if it exists. */
42773diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
42774index 1946101..09766d2 100644
42775--- a/drivers/infiniband/hw/qib/qib.h
42776+++ b/drivers/infiniband/hw/qib/qib.h
42777@@ -52,6 +52,7 @@
42778 #include <linux/kref.h>
42779 #include <linux/sched.h>
42780 #include <linux/kthread.h>
42781+#include <linux/slab.h>
42782
42783 #include "qib_common.h"
42784 #include "qib_verbs.h"
42785diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
42786index 24c41ba..102d71f 100644
42787--- a/drivers/input/gameport/gameport.c
42788+++ b/drivers/input/gameport/gameport.c
42789@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
42790 */
42791 static void gameport_init_port(struct gameport *gameport)
42792 {
42793- static atomic_t gameport_no = ATOMIC_INIT(0);
42794+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
42795
42796 __module_get(THIS_MODULE);
42797
42798 mutex_init(&gameport->drv_mutex);
42799 device_initialize(&gameport->dev);
42800 dev_set_name(&gameport->dev, "gameport%lu",
42801- (unsigned long)atomic_inc_return(&gameport_no) - 1);
42802+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
42803 gameport->dev.bus = &gameport_bus;
42804 gameport->dev.release = gameport_release_port;
42805 if (gameport->parent)
42806diff --git a/drivers/input/input.c b/drivers/input/input.c
42807index d2965e4..f52b7d7 100644
42808--- a/drivers/input/input.c
42809+++ b/drivers/input/input.c
42810@@ -1734,7 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
42811 */
42812 struct input_dev *input_allocate_device(void)
42813 {
42814- static atomic_t input_no = ATOMIC_INIT(0);
42815+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
42816 struct input_dev *dev;
42817
42818 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
42819@@ -1749,7 +1749,7 @@ struct input_dev *input_allocate_device(void)
42820 INIT_LIST_HEAD(&dev->node);
42821
42822 dev_set_name(&dev->dev, "input%ld",
42823- (unsigned long) atomic_inc_return(&input_no) - 1);
42824+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
42825
42826 __module_get(THIS_MODULE);
42827 }
42828diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
42829index 04c69af..5f92d00 100644
42830--- a/drivers/input/joystick/sidewinder.c
42831+++ b/drivers/input/joystick/sidewinder.c
42832@@ -30,6 +30,7 @@
42833 #include <linux/kernel.h>
42834 #include <linux/module.h>
42835 #include <linux/slab.h>
42836+#include <linux/sched.h>
42837 #include <linux/init.h>
42838 #include <linux/input.h>
42839 #include <linux/gameport.h>
42840diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
42841index 75e3b10..fb390fd 100644
42842--- a/drivers/input/joystick/xpad.c
42843+++ b/drivers/input/joystick/xpad.c
42844@@ -736,7 +736,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
42845
42846 static int xpad_led_probe(struct usb_xpad *xpad)
42847 {
42848- static atomic_t led_seq = ATOMIC_INIT(0);
42849+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
42850 long led_no;
42851 struct xpad_led *led;
42852 struct led_classdev *led_cdev;
42853@@ -749,7 +749,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
42854 if (!led)
42855 return -ENOMEM;
42856
42857- led_no = (long)atomic_inc_return(&led_seq) - 1;
42858+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
42859
42860 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
42861 led->xpad = xpad;
42862diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
42863index e204f26..8459f15 100644
42864--- a/drivers/input/misc/ims-pcu.c
42865+++ b/drivers/input/misc/ims-pcu.c
42866@@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
42867
42868 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
42869 {
42870- static atomic_t device_no = ATOMIC_INIT(0);
42871+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
42872
42873 const struct ims_pcu_device_info *info;
42874 u8 device_id;
42875@@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
42876 }
42877
42878 /* Device appears to be operable, complete initialization */
42879- pcu->device_no = atomic_inc_return(&device_no) - 1;
42880+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
42881
42882 error = ims_pcu_setup_backlight(pcu);
42883 if (error)
42884diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
42885index 2f0b39d..7370f13 100644
42886--- a/drivers/input/mouse/psmouse.h
42887+++ b/drivers/input/mouse/psmouse.h
42888@@ -116,7 +116,7 @@ struct psmouse_attribute {
42889 ssize_t (*set)(struct psmouse *psmouse, void *data,
42890 const char *buf, size_t count);
42891 bool protect;
42892-};
42893+} __do_const;
42894 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
42895
42896 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
42897diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
42898index 4c842c3..590b0bf 100644
42899--- a/drivers/input/mousedev.c
42900+++ b/drivers/input/mousedev.c
42901@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
42902
42903 spin_unlock_irq(&client->packet_lock);
42904
42905- if (copy_to_user(buffer, data, count))
42906+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
42907 return -EFAULT;
42908
42909 return count;
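
Unlike the mechanical type conversions around it, the mousedev hunk is a behavioral fix: count is caller-controlled and data is a small fixed buffer filled under packet_lock, so the added bound keeps copy_to_user() from reading past it. The pattern, sketched in userspace with memcpy standing in for copy_to_user() and a made-up read_packet helper:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static long read_packet(char *user_buf, size_t count)
    {
        char data[8] = "packet";       /* fixed-size kernel-side buffer */

        if (count > sizeof(data))      /* refuse instead of over-reading */
            return -EFAULT;
        memcpy(user_buf, data, count); /* copy_to_user() in the driver */
        return (long)count;
    }

    int main(void)
    {
        char buf[64];

        printf("ok: %ld\n", read_packet(buf, 6));
        printf("rejected: %ld\n", read_packet(buf, 64));
        return 0;
    }
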
42910diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
42911index 8f4c4ab..5fc8a45 100644
42912--- a/drivers/input/serio/serio.c
42913+++ b/drivers/input/serio/serio.c
42914@@ -505,7 +505,7 @@ static void serio_release_port(struct device *dev)
42915 */
42916 static void serio_init_port(struct serio *serio)
42917 {
42918- static atomic_t serio_no = ATOMIC_INIT(0);
42919+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
42920
42921 __module_get(THIS_MODULE);
42922
42923@@ -516,7 +516,7 @@ static void serio_init_port(struct serio *serio)
42924 mutex_init(&serio->drv_mutex);
42925 device_initialize(&serio->dev);
42926 dev_set_name(&serio->dev, "serio%ld",
42927- (long)atomic_inc_return(&serio_no) - 1);
42928+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
42929 serio->dev.bus = &serio_bus;
42930 serio->dev.release = serio_release_port;
42931 serio->dev.groups = serio_device_attr_groups;
42932diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
42933index 59df2e7..8f1cafb 100644
42934--- a/drivers/input/serio/serio_raw.c
42935+++ b/drivers/input/serio/serio_raw.c
42936@@ -293,7 +293,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
42937
42938 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
42939 {
42940- static atomic_t serio_raw_no = ATOMIC_INIT(0);
42941+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
42942 struct serio_raw *serio_raw;
42943 int err;
42944
42945@@ -304,7 +304,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
42946 }
42947
42948 snprintf(serio_raw->name, sizeof(serio_raw->name),
42949- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
42950+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
42951 kref_init(&serio_raw->kref);
42952 INIT_LIST_HEAD(&serio_raw->client_list);
42953 init_waitqueue_head(&serio_raw->wait);
42954diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
42955index e5555fc..937986d 100644
42956--- a/drivers/iommu/iommu.c
42957+++ b/drivers/iommu/iommu.c
42958@@ -588,7 +588,7 @@ static struct notifier_block iommu_bus_nb = {
42959 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
42960 {
42961 bus_register_notifier(bus, &iommu_bus_nb);
42962- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
42963+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
42964 }
42965
42966 /**
42967diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
42968index 39f81ae..2660096 100644
42969--- a/drivers/iommu/irq_remapping.c
42970+++ b/drivers/iommu/irq_remapping.c
42971@@ -356,7 +356,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
42972 void panic_if_irq_remap(const char *msg)
42973 {
42974 if (irq_remapping_enabled)
42975- panic(msg);
42976+ panic("%s", msg);
42977 }
42978
42979 static void ir_ack_apic_edge(struct irq_data *data)
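panic(msg) passes caller-influenced text as the format string; any `%` conversion in msg would make panic() consume varargs that were never supplied. Routing it through a constant "%s" format, as the hunk does, prints the text verbatim. The same hazard in miniature:

#include <stdio.h>

int main(void)
{
        /* In the kernel case this string reaches panic() from callers. */
        const char *msg = "irq remap failed for %s";

        /* printf(msg);  -- BAD: %s makes printf read a missing vararg */
        printf("%s\n", msg);    /* GOOD: msg is plain data, not a format */
        return 0;
}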
42980@@ -377,10 +377,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
42981
42982 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
42983 {
42984- chip->irq_print_chip = ir_print_prefix;
42985- chip->irq_ack = ir_ack_apic_edge;
42986- chip->irq_eoi = ir_ack_apic_level;
42987- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
42988+ pax_open_kernel();
42989+ *(void **)&chip->irq_print_chip = ir_print_prefix;
42990+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
42991+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
42992+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
42993+ pax_close_kernel();
42994 }
42995
42996 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
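With constify/KERNEXEC the irq_chip lives in read-only memory, so its callbacks cannot be assigned directly; the second hunk opens a temporary write window with pax_open_kernel()/pax_close_kernel() (on x86 these toggle CR0.WP) and stores through a void-pointer cast so the plugin accepts the write. A structural sketch with hypothetical stubs, not buildable against a vanilla tree:

/* Hypothetical stubs: the real pax_open_kernel()/pax_close_kernel()
 * exist only in PaX kernels. */
static void pax_open_kernel(void)  { /* briefly allow .rodata writes */ }
static void pax_close_kernel(void) { /* restore write protection */ }

struct chip_like {
        void (*irq_ack)(int irq);
};

static void demo_ack(int irq) { (void)irq; }

static void patch_chip(struct chip_like *chip)
{
        pax_open_kernel();
        /* Store through void ** because the object sits in (effectively
         * const) read-only memory in a constified kernel. */
        *(void **)&chip->irq_ack = (void *)demo_ack;
        pax_close_kernel();
}

The irq-gic.c hunk below attacks the same problem from the other side: chips that legitimately need runtime mutation are retyped as irq_chip_no_const so the plugin leaves them writable.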
42997diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
42998index 341c601..e5f407e 100644
42999--- a/drivers/irqchip/irq-gic.c
43000+++ b/drivers/irqchip/irq-gic.c
43001@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43002 * Supported arch specific GIC irq extension.
43003 * Default make them NULL.
43004 */
43005-struct irq_chip gic_arch_extn = {
43006+irq_chip_no_const gic_arch_extn = {
43007 .irq_eoi = NULL,
43008 .irq_mask = NULL,
43009 .irq_unmask = NULL,
43010@@ -332,7 +332,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43011 chained_irq_exit(chip, desc);
43012 }
43013
43014-static struct irq_chip gic_chip = {
43015+static irq_chip_no_const gic_chip __read_only = {
43016 .name = "GIC",
43017 .irq_mask = gic_mask_irq,
43018 .irq_unmask = gic_unmask_irq,
43019diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43020index ac6f72b..81150f2 100644
43021--- a/drivers/isdn/capi/capi.c
43022+++ b/drivers/isdn/capi/capi.c
43023@@ -81,8 +81,8 @@ struct capiminor {
43024
43025 struct capi20_appl *ap;
43026 u32 ncci;
43027- atomic_t datahandle;
43028- atomic_t msgid;
43029+ atomic_unchecked_t datahandle;
43030+ atomic_unchecked_t msgid;
43031
43032 struct tty_port port;
43033 int ttyinstop;
43034@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43035 capimsg_setu16(s, 2, mp->ap->applid);
43036 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43037 capimsg_setu8 (s, 5, CAPI_RESP);
43038- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43039+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43040 capimsg_setu32(s, 8, mp->ncci);
43041 capimsg_setu16(s, 12, datahandle);
43042 }
43043@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43044 mp->outbytes -= len;
43045 spin_unlock_bh(&mp->outlock);
43046
43047- datahandle = atomic_inc_return(&mp->datahandle);
43048+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43049 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43050 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43051 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43052 capimsg_setu16(skb->data, 2, mp->ap->applid);
43053 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43054 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43055- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43056+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43057 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43058 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43059 capimsg_setu16(skb->data, 16, len); /* Data length */
43060diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43061index c44950d..10ac276 100644
43062--- a/drivers/isdn/gigaset/bas-gigaset.c
43063+++ b/drivers/isdn/gigaset/bas-gigaset.c
43064@@ -2564,22 +2564,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43065
43066
43067 static const struct gigaset_ops gigops = {
43068- gigaset_write_cmd,
43069- gigaset_write_room,
43070- gigaset_chars_in_buffer,
43071- gigaset_brkchars,
43072- gigaset_init_bchannel,
43073- gigaset_close_bchannel,
43074- gigaset_initbcshw,
43075- gigaset_freebcshw,
43076- gigaset_reinitbcshw,
43077- gigaset_initcshw,
43078- gigaset_freecshw,
43079- gigaset_set_modem_ctrl,
43080- gigaset_baud_rate,
43081- gigaset_set_line_ctrl,
43082- gigaset_isoc_send_skb,
43083- gigaset_isoc_input,
43084+ .write_cmd = gigaset_write_cmd,
43085+ .write_room = gigaset_write_room,
43086+ .chars_in_buffer = gigaset_chars_in_buffer,
43087+ .brkchars = gigaset_brkchars,
43088+ .init_bchannel = gigaset_init_bchannel,
43089+ .close_bchannel = gigaset_close_bchannel,
43090+ .initbcshw = gigaset_initbcshw,
43091+ .freebcshw = gigaset_freebcshw,
43092+ .reinitbcshw = gigaset_reinitbcshw,
43093+ .initcshw = gigaset_initcshw,
43094+ .freecshw = gigaset_freecshw,
43095+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43096+ .baud_rate = gigaset_baud_rate,
43097+ .set_line_ctrl = gigaset_set_line_ctrl,
43098+ .send_skb = gigaset_isoc_send_skb,
43099+ .handle_input = gigaset_isoc_input,
43100 };
43101
43102 /* bas_gigaset_init
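Converting the gigops table from positional to designated initializers is mechanical but necessary once GRKERNSEC_RANDSTRUCT may reorder the members of gigaset_ops: named initializers bind each function to its field regardless of layout, where positional ones would silently wire callbacks to the wrong slots. Compare the two forms on a reduced ops struct:

#include <stddef.h>

struct demo_ops {
        int  (*write_cmd)(void *cs);
        int  (*write_room)(void *cs);
        int  (*send_skb)(void *bcs, const char *buf, size_t len);
};

static int demo_write_cmd(void *cs)  { (void)cs; return 0; }
static int demo_write_room(void *cs) { (void)cs; return 64; }
static int demo_send_skb(void *bcs, const char *buf, size_t len)
{ (void)bcs; (void)buf; return (int)len; }

/* Positional: breaks silently if the struct layout is randomized. */
/* static const struct demo_ops ops =
 *        { demo_write_cmd, demo_write_room, demo_send_skb }; */

/* Designated: immune to member reordering. */
static const struct demo_ops ops = {
        .write_cmd  = demo_write_cmd,
        .write_room = demo_write_room,
        .send_skb   = demo_send_skb,
};

The ser-gigaset, usb-gigaset, isdn_concap and isdn_x25iface tables further down receive the same conversion.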
43103diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43104index 600c79b..3752bab 100644
43105--- a/drivers/isdn/gigaset/interface.c
43106+++ b/drivers/isdn/gigaset/interface.c
43107@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43108 }
43109 tty->driver_data = cs;
43110
43111- ++cs->port.count;
43112+ atomic_inc(&cs->port.count);
43113
43114- if (cs->port.count == 1) {
43115+ if (atomic_read(&cs->port.count) == 1) {
43116 tty_port_tty_set(&cs->port, tty);
43117 cs->port.low_latency = 1;
43118 }
43119@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43120
43121 if (!cs->connected)
43122 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43123- else if (!cs->port.count)
43124+ else if (!atomic_read(&cs->port.count))
43125 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43126- else if (!--cs->port.count)
43127+ else if (!atomic_dec_return(&cs->port.count))
43128 tty_port_tty_set(&cs->port, NULL);
43129
43130 mutex_unlock(&cs->mutex);
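cs->port.count is a plain int updated from open, close and hangup paths; elsewhere this patch changes struct tty_port itself to hold an atomic_t, so every driver touching the field (here, and isdn_tty below) switches to atomic_inc/atomic_dec_return and can no longer lose an update to a concurrent open or close. The difference, as a runnable sketch with GCC builtins standing in for the kernel helpers:

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

static void atomic_inc(atomic_t *v)
{
        __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}

static int atomic_dec_return(atomic_t *v)
{
        return __atomic_sub_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}

static int atomic_read(const atomic_t *v)
{
        return __atomic_load_n(&v->counter, __ATOMIC_SEQ_CST);
}

int main(void)
{
        atomic_t open_count = { 0 };

        atomic_inc(&open_count);                /* if_open() */
        if (atomic_read(&open_count) == 1)
                puts("first opener: attach tty");
        if (!atomic_dec_return(&open_count))    /* if_close() */
                puts("last closer: detach tty");
        return 0;
}

A bare `count++`/`--count` pair is a load-modify-store that two contexts can interleave; the atomic RMW cannot be torn.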
43131diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43132index 8c91fd5..14f13ce 100644
43133--- a/drivers/isdn/gigaset/ser-gigaset.c
43134+++ b/drivers/isdn/gigaset/ser-gigaset.c
43135@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43136 }
43137
43138 static const struct gigaset_ops ops = {
43139- gigaset_write_cmd,
43140- gigaset_write_room,
43141- gigaset_chars_in_buffer,
43142- gigaset_brkchars,
43143- gigaset_init_bchannel,
43144- gigaset_close_bchannel,
43145- gigaset_initbcshw,
43146- gigaset_freebcshw,
43147- gigaset_reinitbcshw,
43148- gigaset_initcshw,
43149- gigaset_freecshw,
43150- gigaset_set_modem_ctrl,
43151- gigaset_baud_rate,
43152- gigaset_set_line_ctrl,
43153- gigaset_m10x_send_skb, /* asyncdata.c */
43154- gigaset_m10x_input, /* asyncdata.c */
43155+ .write_cmd = gigaset_write_cmd,
43156+ .write_room = gigaset_write_room,
43157+ .chars_in_buffer = gigaset_chars_in_buffer,
43158+ .brkchars = gigaset_brkchars,
43159+ .init_bchannel = gigaset_init_bchannel,
43160+ .close_bchannel = gigaset_close_bchannel,
43161+ .initbcshw = gigaset_initbcshw,
43162+ .freebcshw = gigaset_freebcshw,
43163+ .reinitbcshw = gigaset_reinitbcshw,
43164+ .initcshw = gigaset_initcshw,
43165+ .freecshw = gigaset_freecshw,
43166+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43167+ .baud_rate = gigaset_baud_rate,
43168+ .set_line_ctrl = gigaset_set_line_ctrl,
43169+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43170+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43171 };
43172
43173
43174diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43175index d0a41cb..b953e50 100644
43176--- a/drivers/isdn/gigaset/usb-gigaset.c
43177+++ b/drivers/isdn/gigaset/usb-gigaset.c
43178@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43179 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43180 memcpy(cs->hw.usb->bchars, buf, 6);
43181 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43182- 0, 0, &buf, 6, 2000);
43183+ 0, 0, buf, 6, 2000);
43184 }
43185
43186 static void gigaset_freebcshw(struct bc_state *bcs)
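This hunk is a plain bug fix rather than hardening: `buf` is declared `const unsigned char buf[6]`, which as a parameter decays to a pointer, so `&buf` was the address of the pointer variable itself and usb_control_msg() shipped six bytes of stack instead of the break-character array. A small demonstration of the decay:

#include <stdio.h>

static void show(const unsigned char buf[6])
{
        /* As a parameter, buf is really `const unsigned char *buf`. */
        printf("buf  = %p (the data)\n", (const void *)buf);
        printf("&buf = %p (the pointer variable itself!)\n",
               (const void *)&buf);
}

int main(void)
{
        unsigned char chars[6] = { 1, 2, 3, 4, 5, 6 };
        show(chars);    /* passing &buf onward would send stack bytes */
        return 0;
}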
43187@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43188 }
43189
43190 static const struct gigaset_ops ops = {
43191- gigaset_write_cmd,
43192- gigaset_write_room,
43193- gigaset_chars_in_buffer,
43194- gigaset_brkchars,
43195- gigaset_init_bchannel,
43196- gigaset_close_bchannel,
43197- gigaset_initbcshw,
43198- gigaset_freebcshw,
43199- gigaset_reinitbcshw,
43200- gigaset_initcshw,
43201- gigaset_freecshw,
43202- gigaset_set_modem_ctrl,
43203- gigaset_baud_rate,
43204- gigaset_set_line_ctrl,
43205- gigaset_m10x_send_skb,
43206- gigaset_m10x_input,
43207+ .write_cmd = gigaset_write_cmd,
43208+ .write_room = gigaset_write_room,
43209+ .chars_in_buffer = gigaset_chars_in_buffer,
43210+ .brkchars = gigaset_brkchars,
43211+ .init_bchannel = gigaset_init_bchannel,
43212+ .close_bchannel = gigaset_close_bchannel,
43213+ .initbcshw = gigaset_initbcshw,
43214+ .freebcshw = gigaset_freebcshw,
43215+ .reinitbcshw = gigaset_reinitbcshw,
43216+ .initcshw = gigaset_initcshw,
43217+ .freecshw = gigaset_freecshw,
43218+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43219+ .baud_rate = gigaset_baud_rate,
43220+ .set_line_ctrl = gigaset_set_line_ctrl,
43221+ .send_skb = gigaset_m10x_send_skb,
43222+ .handle_input = gigaset_m10x_input,
43223 };
43224
43225 /*
43226diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43227index 4d9b195..455075c 100644
43228--- a/drivers/isdn/hardware/avm/b1.c
43229+++ b/drivers/isdn/hardware/avm/b1.c
43230@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43231 }
43232 if (left) {
43233 if (t4file->user) {
43234- if (copy_from_user(buf, dp, left))
43235+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43236 return -EFAULT;
43237 } else {
43238 memcpy(buf, dp, left);
43239@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43240 }
43241 if (left) {
43242 if (config->user) {
43243- if (copy_from_user(buf, dp, left))
43244+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43245 return -EFAULT;
43246 } else {
43247 memcpy(buf, dp, left);
43248diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43249index 9bb12ba..d4262f7 100644
43250--- a/drivers/isdn/i4l/isdn_common.c
43251+++ b/drivers/isdn/i4l/isdn_common.c
43252@@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43253 } else
43254 return -EINVAL;
43255 case IIOCDBGVAR:
43256+ if (!capable(CAP_SYS_RAWIO))
43257+ return -EPERM;
43258 if (arg) {
43259 if (copy_to_user(argp, &dev, sizeof(ulong)))
43260 return -EFAULT;
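IIOCDBGVAR copies the kernel address of the global `dev` structure to userspace, which defeats KASLR for any process allowed to open the device; the added capable(CAP_SYS_RAWIO) check confines the leak to processes that could already read raw memory. The general gating shape, as a kernel-style sketch (capable() and copy_to_user() are real kernel APIs; DEMO_DEBUG_GETPTR and some_global are hypothetical):

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;

        switch (cmd) {
        case DEMO_DEBUG_GETPTR:                 /* hypothetical command */
                if (!capable(CAP_SYS_RAWIO))    /* gate the address leak */
                        return -EPERM;
                if (copy_to_user(argp, &some_global, sizeof(unsigned long)))
                        return -EFAULT;
                return 0;
        default:
                return -ENOTTY;
        }
}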
43261diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43262index 91d5730..336523e 100644
43263--- a/drivers/isdn/i4l/isdn_concap.c
43264+++ b/drivers/isdn/i4l/isdn_concap.c
43265@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43266 }
43267
43268 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43269- &isdn_concap_dl_data_req,
43270- &isdn_concap_dl_connect_req,
43271- &isdn_concap_dl_disconn_req
43272+ .data_req = &isdn_concap_dl_data_req,
43273+ .connect_req = &isdn_concap_dl_connect_req,
43274+ .disconn_req = &isdn_concap_dl_disconn_req
43275 };
43276
43277 /* The following should better go into a dedicated source file such that
43278diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43279index 3c5f249..5fac4d0 100644
43280--- a/drivers/isdn/i4l/isdn_tty.c
43281+++ b/drivers/isdn/i4l/isdn_tty.c
43282@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43283
43284 #ifdef ISDN_DEBUG_MODEM_OPEN
43285 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43286- port->count);
43287+ atomic_read(&port->count));
43288 #endif
43289- port->count++;
43290+ atomic_inc(&port->count);
43291 port->tty = tty;
43292 /*
43293 * Start up serial port
43294@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43295 #endif
43296 return;
43297 }
43298- if ((tty->count == 1) && (port->count != 1)) {
43299+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43300 /*
43301 * Uh, oh. tty->count is 1, which means that the tty
43302 * structure will be freed. Info->count should always
43303@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43304 * serial port won't be shutdown.
43305 */
43306 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43307- "info->count is %d\n", port->count);
43308- port->count = 1;
43309+ "info->count is %d\n", atomic_read(&port->count));
43310+ atomic_set(&port->count, 1);
43311 }
43312- if (--port->count < 0) {
43313+ if (atomic_dec_return(&port->count) < 0) {
43314 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43315- info->line, port->count);
43316- port->count = 0;
43317+ info->line, atomic_read(&port->count));
43318+ atomic_set(&port->count, 0);
43319 }
43320- if (port->count) {
43321+ if (atomic_read(&port->count)) {
43322 #ifdef ISDN_DEBUG_MODEM_OPEN
43323 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43324 #endif
43325@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43326 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43327 return;
43328 isdn_tty_shutdown(info);
43329- port->count = 0;
43330+ atomic_set(&port->count, 0);
43331 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43332 port->tty = NULL;
43333 wake_up_interruptible(&port->open_wait);
43334@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43335 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43336 modem_info *info = &dev->mdm.info[i];
43337
43338- if (info->port.count == 0)
43339+ if (atomic_read(&info->port.count) == 0)
43340 continue;
43341 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43342 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
43343diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43344index e2d4e58..40cd045 100644
43345--- a/drivers/isdn/i4l/isdn_x25iface.c
43346+++ b/drivers/isdn/i4l/isdn_x25iface.c
43347@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43348
43349
43350 static struct concap_proto_ops ix25_pops = {
43351- &isdn_x25iface_proto_new,
43352- &isdn_x25iface_proto_del,
43353- &isdn_x25iface_proto_restart,
43354- &isdn_x25iface_proto_close,
43355- &isdn_x25iface_xmit,
43356- &isdn_x25iface_receive,
43357- &isdn_x25iface_connect_ind,
43358- &isdn_x25iface_disconn_ind
43359+ .proto_new = &isdn_x25iface_proto_new,
43360+ .proto_del = &isdn_x25iface_proto_del,
43361+ .restart = &isdn_x25iface_proto_restart,
43362+ .close = &isdn_x25iface_proto_close,
43363+ .encap_and_xmit = &isdn_x25iface_xmit,
43364+ .data_ind = &isdn_x25iface_receive,
43365+ .connect_ind = &isdn_x25iface_connect_ind,
43366+ .disconn_ind = &isdn_x25iface_disconn_ind
43367 };
43368
43369 /* error message helper function */
43370diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
43371index 53d487f..f020f41 100644
43372--- a/drivers/isdn/icn/icn.c
43373+++ b/drivers/isdn/icn/icn.c
43374@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
43375 if (count > len)
43376 count = len;
43377 if (user) {
43378- if (copy_from_user(msg, buf, count))
43379+ if (count > sizeof msg || copy_from_user(msg, buf, count))
43380 return -EFAULT;
43381 } else
43382 memcpy(msg, buf, count);
43383diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
43384index a4f05c5..1433bc5 100644
43385--- a/drivers/isdn/mISDN/dsp_cmx.c
43386+++ b/drivers/isdn/mISDN/dsp_cmx.c
43387@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
43388 static u16 dsp_count; /* last sample count */
43389 static int dsp_count_valid; /* if we have last sample count */
43390
43391-void
43392+void __intentional_overflow(-1)
43393 dsp_cmx_send(void *arg)
43394 {
43395 struct dsp_conf *conf;
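__intentional_overflow(-1) is an annotation for the size_overflow gcc plugin, which otherwise instruments integer expressions and kills the task when one overflows; dsp_cmx_send() does modular arithmetic on 16-bit sample counters, so its overflow is by design and the whole function is whitelisted. Placement of the annotation, with the macro stubbed so the fragment stands alone:

/* Stub: the real attribute is interpreted by the size_overflow plugin;
 * the -1 argument whitelists the entire function. */
#define __intentional_overflow(...)

/* 16-bit sample clocks are meant to wrap modulo 65536: */
void __intentional_overflow(-1) sample_clock_tick(unsigned short *count)
{
        *count += 1;    /* 65535 -> 0 is correct behaviour here */
}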
43396diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
43397index d93e245..e7ece6b 100644
43398--- a/drivers/leds/leds-clevo-mail.c
43399+++ b/drivers/leds/leds-clevo-mail.c
43400@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
43401 * detected as working, but in reality it is not) as low as
43402 * possible.
43403 */
43404-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
43405+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
43406 {
43407 .callback = clevo_mail_led_dmi_callback,
43408 .ident = "Clevo D410J",
43409diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
43410index 5b8f938..b73d657 100644
43411--- a/drivers/leds/leds-ss4200.c
43412+++ b/drivers/leds/leds-ss4200.c
43413@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
43414 * detected as working, but in reality it is not) as low as
43415 * possible.
43416 */
43417-static struct dmi_system_id nas_led_whitelist[] __initdata = {
43418+static struct dmi_system_id nas_led_whitelist[] __initconst = {
43419 {
43420 .callback = ss4200_led_dmi_callback,
43421 .ident = "Intel SS4200-E",
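Both DMI tables (this one and leds-clevo-mail above) move from __initdata to __initconst: they are only read during probing, so they can be const and live in the read-only init section, which removes one writable table of function pointers (each entry carries a .callback) while still being discarded after boot. The section mechanics, with the kernel's attribute approximated:

/* Approximation of <linux/init.h>; the real macro also participates in
 * init-section discarding. */
#define __initconst __attribute__((__section__(".init.rodata")))

struct dmi_match {
        int (*callback)(const void *id);
        const char *ident;
};

static int demo_callback(const void *id) { (void)id; return 1; }

/* const + .init.rodata: never writable, reclaimed after boot. */
static const struct dmi_match nas_led_whitelist[] __initconst = {
        { .callback = demo_callback, .ident = "Intel SS4200-E" },
        { }
};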
43422diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
43423index 0bf1e4e..b4bf44e 100644
43424--- a/drivers/lguest/core.c
43425+++ b/drivers/lguest/core.c
43426@@ -97,9 +97,17 @@ static __init int map_switcher(void)
43427 * The end address needs +1 because __get_vm_area allocates an
43428 * extra guard page, so we need space for that.
43429 */
43430+
43431+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
43432+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43433+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
43434+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43435+#else
43436 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
43437 VM_ALLOC, switcher_addr, switcher_addr
43438 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
43439+#endif
43440+
43441 if (!switcher_vma) {
43442 err = -ENOMEM;
43443 printk("lguest: could not map switcher pages high\n");
43444@@ -124,7 +132,7 @@ static __init int map_switcher(void)
43445 * Now the Switcher is mapped at the right address, we can't fail!
43446 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
43447 */
43448- memcpy(switcher_vma->addr, start_switcher_text,
43449+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
43450 end_switcher_text - start_switcher_text);
43451
43452 printk(KERN_INFO "lguest: mapped switcher at %p\n",
43453diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
43454index bfb39bb..08a603b 100644
43455--- a/drivers/lguest/page_tables.c
43456+++ b/drivers/lguest/page_tables.c
43457@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
43458 /*:*/
43459
43460 #ifdef CONFIG_X86_PAE
43461-static void release_pmd(pmd_t *spmd)
43462+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
43463 {
43464 /* If the entry's not present, there's nothing to release. */
43465 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
43466diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
43467index 922a1ac..9dd0c2a 100644
43468--- a/drivers/lguest/x86/core.c
43469+++ b/drivers/lguest/x86/core.c
43470@@ -59,7 +59,7 @@ static struct {
43471 /* Offset from where switcher.S was compiled to where we've copied it */
43472 static unsigned long switcher_offset(void)
43473 {
43474- return switcher_addr - (unsigned long)start_switcher_text;
43475+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
43476 }
43477
43478 /* This cpu's struct lguest_pages (after the Switcher text page) */
43479@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
43480 * These copies are pretty cheap, so we do them unconditionally: */
43481 /* Save the current Host top-level page directory.
43482 */
43483+
43484+#ifdef CONFIG_PAX_PER_CPU_PGD
43485+ pages->state.host_cr3 = read_cr3();
43486+#else
43487 pages->state.host_cr3 = __pa(current->mm->pgd);
43488+#endif
43489+
43490 /*
43491 * Set up the Guest's page tables to see this CPU's pages (and no
43492 * other CPU's pages).
43493@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
43494 * compiled-in switcher code and the high-mapped copy we just made.
43495 */
43496 for (i = 0; i < IDT_ENTRIES; i++)
43497- default_idt_entries[i] += switcher_offset();
43498+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
43499
43500 /*
43501 * Set up the Switcher's per-cpu areas.
43502@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
43503 * it will be undisturbed when we switch. To change %cs and jump we
43504 * need this structure to feed to Intel's "lcall" instruction.
43505 */
43506- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
43507+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
43508 lguest_entry.segment = LGUEST_CS;
43509
43510 /*
43511diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
43512index 40634b0..4f5855e 100644
43513--- a/drivers/lguest/x86/switcher_32.S
43514+++ b/drivers/lguest/x86/switcher_32.S
43515@@ -87,6 +87,7 @@
43516 #include <asm/page.h>
43517 #include <asm/segment.h>
43518 #include <asm/lguest.h>
43519+#include <asm/processor-flags.h>
43520
43521 // We mark the start of the code to copy
43522 // It's placed in .text tho it's never run here
43523@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
43524 // Changes type when we load it: damn Intel!
43525 // For after we switch over our page tables
43526 // That entry will be read-only: we'd crash.
43527+
43528+#ifdef CONFIG_PAX_KERNEXEC
43529+ mov %cr0, %edx
43530+ xor $X86_CR0_WP, %edx
43531+ mov %edx, %cr0
43532+#endif
43533+
43534 movl $(GDT_ENTRY_TSS*8), %edx
43535 ltr %dx
43536
43537@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
43538 // Let's clear it again for our return.
43539 // The GDT descriptor of the Host
43540 // Points to the table after two "size" bytes
43541- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
43542+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
43543 // Clear "used" from type field (byte 5, bit 2)
43544- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
43545+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
43546+
43547+#ifdef CONFIG_PAX_KERNEXEC
43548+ mov %cr0, %eax
43549+ xor $X86_CR0_WP, %eax
43550+ mov %eax, %cr0
43551+#endif
43552
43553 // Once our page table's switched, the Guest is live!
43554 // The Host fades as we run this final step.
43555@@ -295,13 +309,12 @@ deliver_to_host:
43556 // I consulted gcc, and it gave
43557 // These instructions, which I gladly credit:
43558 leal (%edx,%ebx,8), %eax
43559- movzwl (%eax),%edx
43560- movl 4(%eax), %eax
43561- xorw %ax, %ax
43562- orl %eax, %edx
43563+ movl 4(%eax), %edx
43564+ movw (%eax), %dx
43565 // Now the address of the handler's in %edx
43566 // We call it now: its "iret" drops us home.
43567- jmp *%edx
43568+ ljmp $__KERNEL_CS, $1f
43569+1: jmp *%edx
43570
43571 // Every interrupt can come to us here
43572 // But we must truly tell each apart.
43573diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
43574index 9762f1b..3e79734 100644
43575--- a/drivers/md/bcache/closure.h
43576+++ b/drivers/md/bcache/closure.h
43577@@ -483,7 +483,7 @@ static inline void closure_queue(struct closure *cl)
43578 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
43579 struct workqueue_struct *wq)
43580 {
43581- BUG_ON(object_is_on_stack(cl));
43582+ BUG_ON(object_starts_on_stack(cl));
43583 closure_set_ip(cl);
43584 cl->fn = fn;
43585 cl->wq = wq;
43586diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
43587index 12dc29b..1596277 100644
43588--- a/drivers/md/bitmap.c
43589+++ b/drivers/md/bitmap.c
43590@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
43591 chunk_kb ? "KB" : "B");
43592 if (bitmap->storage.file) {
43593 seq_printf(seq, ", file: ");
43594- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
43595+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
43596 }
43597
43598 seq_printf(seq, "\n");
43599diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
43600index 5152142..623d141 100644
43601--- a/drivers/md/dm-ioctl.c
43602+++ b/drivers/md/dm-ioctl.c
43603@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
43604 cmd == DM_LIST_VERSIONS_CMD)
43605 return 0;
43606
43607- if ((cmd == DM_DEV_CREATE_CMD)) {
43608+ if (cmd == DM_DEV_CREATE_CMD) {
43609 if (!*param->name) {
43610 DMWARN("name not supplied when creating device");
43611 return -EINVAL;
43612diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
43613index 9584443..9fc9ac9 100644
43614--- a/drivers/md/dm-raid1.c
43615+++ b/drivers/md/dm-raid1.c
43616@@ -40,7 +40,7 @@ enum dm_raid1_error {
43617
43618 struct mirror {
43619 struct mirror_set *ms;
43620- atomic_t error_count;
43621+ atomic_unchecked_t error_count;
43622 unsigned long error_type;
43623 struct dm_dev *dev;
43624 sector_t offset;
43625@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
43626 struct mirror *m;
43627
43628 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
43629- if (!atomic_read(&m->error_count))
43630+ if (!atomic_read_unchecked(&m->error_count))
43631 return m;
43632
43633 return NULL;
43634@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
43635 * simple way to tell if a device has encountered
43636 * errors.
43637 */
43638- atomic_inc(&m->error_count);
43639+ atomic_inc_unchecked(&m->error_count);
43640
43641 if (test_and_set_bit(error_type, &m->error_type))
43642 return;
43643@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
43644 struct mirror *m = get_default_mirror(ms);
43645
43646 do {
43647- if (likely(!atomic_read(&m->error_count)))
43648+ if (likely(!atomic_read_unchecked(&m->error_count)))
43649 return m;
43650
43651 if (m-- == ms->mirror)
43652@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
43653 {
43654 struct mirror *default_mirror = get_default_mirror(m->ms);
43655
43656- return !atomic_read(&default_mirror->error_count);
43657+ return !atomic_read_unchecked(&default_mirror->error_count);
43658 }
43659
43660 static int mirror_available(struct mirror_set *ms, struct bio *bio)
43661@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
43662 */
43663 if (likely(region_in_sync(ms, region, 1)))
43664 m = choose_mirror(ms, bio->bi_sector);
43665- else if (m && atomic_read(&m->error_count))
43666+ else if (m && atomic_read_unchecked(&m->error_count))
43667 m = NULL;
43668
43669 if (likely(m))
43670@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
43671 }
43672
43673 ms->mirror[mirror].ms = ms;
43674- atomic_set(&(ms->mirror[mirror].error_count), 0);
43675+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
43676 ms->mirror[mirror].error_type = 0;
43677 ms->mirror[mirror].offset = offset;
43678
43679@@ -1339,7 +1339,7 @@ static void mirror_resume(struct dm_target *ti)
43680 */
43681 static char device_status_char(struct mirror *m)
43682 {
43683- if (!atomic_read(&(m->error_count)))
43684+ if (!atomic_read_unchecked(&(m->error_count)))
43685 return 'A';
43686
43687 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
43688diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
43689index 28a9012..9c0f6a5 100644
43690--- a/drivers/md/dm-stats.c
43691+++ b/drivers/md/dm-stats.c
43692@@ -382,7 +382,7 @@ do_sync_free:
43693 synchronize_rcu_expedited();
43694 dm_stat_free(&s->rcu_head);
43695 } else {
43696- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
43697+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
43698 call_rcu(&s->rcu_head, dm_stat_free);
43699 }
43700 return 0;
43701@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
43702 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
43703 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
43704 ));
43705- ACCESS_ONCE(last->last_sector) = end_sector;
43706- ACCESS_ONCE(last->last_rw) = bi_rw;
43707+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
43708+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
43709 }
43710
43711 rcu_read_lock();
43712diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
43713index 73c1712..7347292 100644
43714--- a/drivers/md/dm-stripe.c
43715+++ b/drivers/md/dm-stripe.c
43716@@ -21,7 +21,7 @@ struct stripe {
43717 struct dm_dev *dev;
43718 sector_t physical_start;
43719
43720- atomic_t error_count;
43721+ atomic_unchecked_t error_count;
43722 };
43723
43724 struct stripe_c {
43725@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
43726 kfree(sc);
43727 return r;
43728 }
43729- atomic_set(&(sc->stripe[i].error_count), 0);
43730+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
43731 }
43732
43733 ti->private = sc;
43734@@ -327,7 +327,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
43735 DMEMIT("%d ", sc->stripes);
43736 for (i = 0; i < sc->stripes; i++) {
43737 DMEMIT("%s ", sc->stripe[i].dev->name);
43738- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
43739+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
43740 'D' : 'A';
43741 }
43742 buffer[i] = '\0';
43743@@ -372,8 +372,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
43744 */
43745 for (i = 0; i < sc->stripes; i++)
43746 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
43747- atomic_inc(&(sc->stripe[i].error_count));
43748- if (atomic_read(&(sc->stripe[i].error_count)) <
43749+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
43750+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
43751 DM_IO_ERROR_THRESHOLD)
43752 schedule_work(&sc->trigger_event);
43753 }
43754diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
43755index 3ba6a38..b0fa9b0 100644
43756--- a/drivers/md/dm-table.c
43757+++ b/drivers/md/dm-table.c
43758@@ -291,7 +291,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
43759 static int open_dev(struct dm_dev_internal *d, dev_t dev,
43760 struct mapped_device *md)
43761 {
43762- static char *_claim_ptr = "I belong to device-mapper";
43763+ static char _claim_ptr[] = "I belong to device-mapper";
43764 struct block_device *bdev;
43765
43766 int r;
43767@@ -359,7 +359,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
43768 if (!dev_size)
43769 return 0;
43770
43771- if ((start >= dev_size) || (start + len > dev_size)) {
43772+ if ((start >= dev_size) || (len > dev_size - start)) {
43773 DMWARN("%s: %s too small for target: "
43774 "start=%llu, len=%llu, dev_size=%llu",
43775 dm_device_name(ti->table->md), bdevname(bdev, b),
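The rewritten range check in the second hunk avoids integer overflow: `start + len` can wrap in sector_t arithmetic and compare as small, letting an out-of-range target slip through, whereas `len > dev_size - start` is overflow-free because the preceding `start >= dev_size` test guarantees the subtraction cannot underflow. A runnable demonstration:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;

/* Overflow-free range check, mirroring the dm-table fix. */
static int area_is_invalid(sector_t start, sector_t len, sector_t dev_size)
{
        /* start >= dev_size is tested first, so dev_size - start
         * below cannot underflow. */
        return (start >= dev_size) || (len > dev_size - start);
}

int main(void)
{
        /* Here start + len wraps to 3 and would wrongly pass the old
         * `start + len > dev_size` test: */
        printf("%d\n", area_is_invalid(8, UINT64_MAX - 4, 1024)); /* 1 */
        printf("%d\n", area_is_invalid(8, 16, 1024));             /* 0 */
        return 0;
}

The first hunk in the same diff is related hardening: `static char *_claim_ptr = "..."` kept a writable pointer to the literal, while `static char _claim_ptr[]` makes the object itself the storage.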
43776diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
43777index 3bb4506..56e20cc 100644
43778--- a/drivers/md/dm-thin-metadata.c
43779+++ b/drivers/md/dm-thin-metadata.c
43780@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
43781 {
43782 pmd->info.tm = pmd->tm;
43783 pmd->info.levels = 2;
43784- pmd->info.value_type.context = pmd->data_sm;
43785+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
43786 pmd->info.value_type.size = sizeof(__le64);
43787 pmd->info.value_type.inc = data_block_inc;
43788 pmd->info.value_type.dec = data_block_dec;
43789@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
43790
43791 pmd->bl_info.tm = pmd->tm;
43792 pmd->bl_info.levels = 1;
43793- pmd->bl_info.value_type.context = pmd->data_sm;
43794+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
43795 pmd->bl_info.value_type.size = sizeof(__le64);
43796 pmd->bl_info.value_type.inc = data_block_inc;
43797 pmd->bl_info.value_type.dec = data_block_dec;
43798diff --git a/drivers/md/dm.c b/drivers/md/dm.c
43799index b49c762..c9503cf 100644
43800--- a/drivers/md/dm.c
43801+++ b/drivers/md/dm.c
43802@@ -185,9 +185,9 @@ struct mapped_device {
43803 /*
43804 * Event handling.
43805 */
43806- atomic_t event_nr;
43807+ atomic_unchecked_t event_nr;
43808 wait_queue_head_t eventq;
43809- atomic_t uevent_seq;
43810+ atomic_unchecked_t uevent_seq;
43811 struct list_head uevent_list;
43812 spinlock_t uevent_lock; /* Protect access to uevent_list */
43813
43814@@ -2021,8 +2021,8 @@ static struct mapped_device *alloc_dev(int minor)
43815 spin_lock_init(&md->deferred_lock);
43816 atomic_set(&md->holders, 1);
43817 atomic_set(&md->open_count, 0);
43818- atomic_set(&md->event_nr, 0);
43819- atomic_set(&md->uevent_seq, 0);
43820+ atomic_set_unchecked(&md->event_nr, 0);
43821+ atomic_set_unchecked(&md->uevent_seq, 0);
43822 INIT_LIST_HEAD(&md->uevent_list);
43823 spin_lock_init(&md->uevent_lock);
43824
43825@@ -2176,7 +2176,7 @@ static void event_callback(void *context)
43826
43827 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
43828
43829- atomic_inc(&md->event_nr);
43830+ atomic_inc_unchecked(&md->event_nr);
43831 wake_up(&md->eventq);
43832 }
43833
43834@@ -2869,18 +2869,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
43835
43836 uint32_t dm_next_uevent_seq(struct mapped_device *md)
43837 {
43838- return atomic_add_return(1, &md->uevent_seq);
43839+ return atomic_add_return_unchecked(1, &md->uevent_seq);
43840 }
43841
43842 uint32_t dm_get_event_nr(struct mapped_device *md)
43843 {
43844- return atomic_read(&md->event_nr);
43845+ return atomic_read_unchecked(&md->event_nr);
43846 }
43847
43848 int dm_wait_event(struct mapped_device *md, int event_nr)
43849 {
43850 return wait_event_interruptible(md->eventq,
43851- (event_nr != atomic_read(&md->event_nr)));
43852+ (event_nr != atomic_read_unchecked(&md->event_nr)));
43853 }
43854
43855 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
43856diff --git a/drivers/md/md.c b/drivers/md/md.c
43857index 369d919..ba7049c 100644
43858--- a/drivers/md/md.c
43859+++ b/drivers/md/md.c
43860@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
43861 * start build, activate spare
43862 */
43863 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
43864-static atomic_t md_event_count;
43865+static atomic_unchecked_t md_event_count;
43866 void md_new_event(struct mddev *mddev)
43867 {
43868- atomic_inc(&md_event_count);
43869+ atomic_inc_unchecked(&md_event_count);
43870 wake_up(&md_event_waiters);
43871 }
43872 EXPORT_SYMBOL_GPL(md_new_event);
43873@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
43874 */
43875 static void md_new_event_inintr(struct mddev *mddev)
43876 {
43877- atomic_inc(&md_event_count);
43878+ atomic_inc_unchecked(&md_event_count);
43879 wake_up(&md_event_waiters);
43880 }
43881
43882@@ -1463,7 +1463,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
43883 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
43884 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
43885 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
43886- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
43887+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
43888
43889 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
43890 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
43891@@ -1710,7 +1710,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
43892 else
43893 sb->resync_offset = cpu_to_le64(0);
43894
43895- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
43896+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
43897
43898 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
43899 sb->size = cpu_to_le64(mddev->dev_sectors);
43900@@ -2715,7 +2715,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
43901 static ssize_t
43902 errors_show(struct md_rdev *rdev, char *page)
43903 {
43904- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
43905+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
43906 }
43907
43908 static ssize_t
43909@@ -2724,7 +2724,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
43910 char *e;
43911 unsigned long n = simple_strtoul(buf, &e, 10);
43912 if (*buf && (*e == 0 || *e == '\n')) {
43913- atomic_set(&rdev->corrected_errors, n);
43914+ atomic_set_unchecked(&rdev->corrected_errors, n);
43915 return len;
43916 }
43917 return -EINVAL;
43918@@ -3173,8 +3173,8 @@ int md_rdev_init(struct md_rdev *rdev)
43919 rdev->sb_loaded = 0;
43920 rdev->bb_page = NULL;
43921 atomic_set(&rdev->nr_pending, 0);
43922- atomic_set(&rdev->read_errors, 0);
43923- atomic_set(&rdev->corrected_errors, 0);
43924+ atomic_set_unchecked(&rdev->read_errors, 0);
43925+ atomic_set_unchecked(&rdev->corrected_errors, 0);
43926
43927 INIT_LIST_HEAD(&rdev->same_set);
43928 init_waitqueue_head(&rdev->blocked_wait);
43929@@ -7038,7 +7038,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
43930
43931 spin_unlock(&pers_lock);
43932 seq_printf(seq, "\n");
43933- seq->poll_event = atomic_read(&md_event_count);
43934+ seq->poll_event = atomic_read_unchecked(&md_event_count);
43935 return 0;
43936 }
43937 if (v == (void*)2) {
43938@@ -7141,7 +7141,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
43939 return error;
43940
43941 seq = file->private_data;
43942- seq->poll_event = atomic_read(&md_event_count);
43943+ seq->poll_event = atomic_read_unchecked(&md_event_count);
43944 return error;
43945 }
43946
43947@@ -7155,7 +7155,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
43948 /* always allow read */
43949 mask = POLLIN | POLLRDNORM;
43950
43951- if (seq->poll_event != atomic_read(&md_event_count))
43952+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
43953 mask |= POLLERR | POLLPRI;
43954 return mask;
43955 }
43956@@ -7199,7 +7199,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
43957 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
43958 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
43959 (int)part_stat_read(&disk->part0, sectors[1]) -
43960- atomic_read(&disk->sync_io);
43961+ atomic_read_unchecked(&disk->sync_io);
43962 /* sync IO will cause sync_io to increase before the disk_stats
43963 * as sync_io is counted when a request starts, and
43964 * disk_stats is counted when it completes.
43965diff --git a/drivers/md/md.h b/drivers/md/md.h
43966index 0095ec8..c89277a 100644
43967--- a/drivers/md/md.h
43968+++ b/drivers/md/md.h
43969@@ -94,13 +94,13 @@ struct md_rdev {
43970 * only maintained for arrays that
43971 * support hot removal
43972 */
43973- atomic_t read_errors; /* number of consecutive read errors that
43974+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
43975 * we have tried to ignore.
43976 */
43977 struct timespec last_read_error; /* monotonic time since our
43978 * last read error
43979 */
43980- atomic_t corrected_errors; /* number of corrected read errors,
43981+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
43982 * for reporting to userspace and storing
43983 * in superblock.
43984 */
43985@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
43986
43987 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
43988 {
43989- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
43990+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
43991 }
43992
43993 struct md_personality
43994diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
43995index 3e6d115..ffecdeb 100644
43996--- a/drivers/md/persistent-data/dm-space-map.h
43997+++ b/drivers/md/persistent-data/dm-space-map.h
43998@@ -71,6 +71,7 @@ struct dm_space_map {
43999 dm_sm_threshold_fn fn,
44000 void *context);
44001 };
44002+typedef struct dm_space_map __no_const dm_space_map_no_const;
44003
44004 /*----------------------------------------------------------------*/
44005
44006diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44007index 63b2e8d..225f16b 100644
44008--- a/drivers/md/raid1.c
44009+++ b/drivers/md/raid1.c
44010@@ -1921,7 +1921,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44011 if (r1_sync_page_io(rdev, sect, s,
44012 bio->bi_io_vec[idx].bv_page,
44013 READ) != 0)
44014- atomic_add(s, &rdev->corrected_errors);
44015+ atomic_add_unchecked(s, &rdev->corrected_errors);
44016 }
44017 sectors -= s;
44018 sect += s;
44019@@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44020 test_bit(In_sync, &rdev->flags)) {
44021 if (r1_sync_page_io(rdev, sect, s,
44022 conf->tmppage, READ)) {
44023- atomic_add(s, &rdev->corrected_errors);
44024+ atomic_add_unchecked(s, &rdev->corrected_errors);
44025 printk(KERN_INFO
44026 "md/raid1:%s: read error corrected "
44027 "(%d sectors at %llu on %s)\n",
44028diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44029index 06eeb99..770613e 100644
44030--- a/drivers/md/raid10.c
44031+++ b/drivers/md/raid10.c
44032@@ -1963,7 +1963,7 @@ static void end_sync_read(struct bio *bio, int error)
44033 /* The write handler will notice the lack of
44034 * R10BIO_Uptodate and record any errors etc
44035 */
44036- atomic_add(r10_bio->sectors,
44037+ atomic_add_unchecked(r10_bio->sectors,
44038 &conf->mirrors[d].rdev->corrected_errors);
44039
44040 /* for reconstruct, we always reschedule after a read.
44041@@ -2321,7 +2321,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44042 {
44043 struct timespec cur_time_mon;
44044 unsigned long hours_since_last;
44045- unsigned int read_errors = atomic_read(&rdev->read_errors);
44046+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44047
44048 ktime_get_ts(&cur_time_mon);
44049
44050@@ -2343,9 +2343,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44051 * overflowing the shift of read_errors by hours_since_last.
44052 */
44053 if (hours_since_last >= 8 * sizeof(read_errors))
44054- atomic_set(&rdev->read_errors, 0);
44055+ atomic_set_unchecked(&rdev->read_errors, 0);
44056 else
44057- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44058+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44059 }
44060
44061 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44062@@ -2399,8 +2399,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44063 return;
44064
44065 check_decay_read_errors(mddev, rdev);
44066- atomic_inc(&rdev->read_errors);
44067- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44068+ atomic_inc_unchecked(&rdev->read_errors);
44069+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44070 char b[BDEVNAME_SIZE];
44071 bdevname(rdev->bdev, b);
44072
44073@@ -2408,7 +2408,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44074 "md/raid10:%s: %s: Raid device exceeded "
44075 "read_error threshold [cur %d:max %d]\n",
44076 mdname(mddev), b,
44077- atomic_read(&rdev->read_errors), max_read_errors);
44078+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44079 printk(KERN_NOTICE
44080 "md/raid10:%s: %s: Failing raid device\n",
44081 mdname(mddev), b);
44082@@ -2563,7 +2563,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44083 sect +
44084 choose_data_offset(r10_bio, rdev)),
44085 bdevname(rdev->bdev, b));
44086- atomic_add(s, &rdev->corrected_errors);
44087+ atomic_add_unchecked(s, &rdev->corrected_errors);
44088 }
44089
44090 rdev_dec_pending(rdev, mddev);
44091diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44092index 48cdec8..c7726b1 100644
44093--- a/drivers/md/raid5.c
44094+++ b/drivers/md/raid5.c
44095@@ -1991,21 +1991,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44096 mdname(conf->mddev), STRIPE_SECTORS,
44097 (unsigned long long)s,
44098 bdevname(rdev->bdev, b));
44099- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44100+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44101 clear_bit(R5_ReadError, &sh->dev[i].flags);
44102 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44103 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44104 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44105
44106- if (atomic_read(&rdev->read_errors))
44107- atomic_set(&rdev->read_errors, 0);
44108+ if (atomic_read_unchecked(&rdev->read_errors))
44109+ atomic_set_unchecked(&rdev->read_errors, 0);
44110 } else {
44111 const char *bdn = bdevname(rdev->bdev, b);
44112 int retry = 0;
44113 int set_bad = 0;
44114
44115 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44116- atomic_inc(&rdev->read_errors);
44117+ atomic_inc_unchecked(&rdev->read_errors);
44118 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44119 printk_ratelimited(
44120 KERN_WARNING
44121@@ -2033,7 +2033,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44122 mdname(conf->mddev),
44123 (unsigned long long)s,
44124 bdn);
44125- } else if (atomic_read(&rdev->read_errors)
44126+ } else if (atomic_read_unchecked(&rdev->read_errors)
44127 > conf->max_nr_stripes)
44128 printk(KERN_WARNING
44129 "md/raid:%s: Too many read errors, failing device %s.\n",
44130diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44131index 983db75..ef9248c 100644
44132--- a/drivers/media/dvb-core/dvbdev.c
44133+++ b/drivers/media/dvb-core/dvbdev.c
44134@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44135 const struct dvb_device *template, void *priv, int type)
44136 {
44137 struct dvb_device *dvbdev;
44138- struct file_operations *dvbdevfops;
44139+ file_operations_no_const *dvbdevfops;
44140 struct device *clsdev;
44141 int minor;
44142 int id;
44143diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44144index 9b6c3bb..baeb5c7 100644
44145--- a/drivers/media/dvb-frontends/dib3000.h
44146+++ b/drivers/media/dvb-frontends/dib3000.h
44147@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44148 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44149 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44150 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44151-};
44152+} __no_const;
44153
44154 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44155 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44156diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44157index ed8cb90..5ef7f79 100644
44158--- a/drivers/media/pci/cx88/cx88-video.c
44159+++ b/drivers/media/pci/cx88/cx88-video.c
44160@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44161
44162 /* ------------------------------------------------------------------ */
44163
44164-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44165-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44166-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44167+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44168+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44169+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44170
44171 module_param_array(video_nr, int, NULL, 0444);
44172 module_param_array(vbi_nr, int, NULL, 0444);
44173diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44174index 802642d..5534900 100644
44175--- a/drivers/media/pci/ivtv/ivtv-driver.c
44176+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44177@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44178 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44179
44180 /* ivtv instance counter */
44181-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44182+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44183
44184 /* Parameter declarations */
44185 static int cardtype[IVTV_MAX_CARDS];
44186diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
44187index dfd0a21..6bbb465 100644
44188--- a/drivers/media/platform/omap/omap_vout.c
44189+++ b/drivers/media/platform/omap/omap_vout.c
44190@@ -63,7 +63,6 @@ enum omap_vout_channels {
44191 OMAP_VIDEO2,
44192 };
44193
44194-static struct videobuf_queue_ops video_vbq_ops;
44195 /* Variables configurable through module params*/
44196 static u32 video1_numbuffers = 3;
44197 static u32 video2_numbuffers = 3;
44198@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
44199 {
44200 struct videobuf_queue *q;
44201 struct omap_vout_device *vout = NULL;
44202+ static struct videobuf_queue_ops video_vbq_ops = {
44203+ .buf_setup = omap_vout_buffer_setup,
44204+ .buf_prepare = omap_vout_buffer_prepare,
44205+ .buf_release = omap_vout_buffer_release,
44206+ .buf_queue = omap_vout_buffer_queue,
44207+ };
44208
44209 vout = video_drvdata(file);
44210 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
44211@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
44212 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
44213
44214 q = &vout->vbq;
44215- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
44216- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
44217- video_vbq_ops.buf_release = omap_vout_buffer_release;
44218- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
44219 spin_lock_init(&vout->vbq_lock);
44220
44221 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
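Instead of a file-scope, writable videobuf_queue_ops that omap_vout_open() filled in field by field, the table becomes a fully initialized function-local static: there is no longer a window where it is half-populated, and a constified kernel can place it in read-only memory. The before/after shape on a reduced struct:

/* Before: writable global, populated at runtime (overwritable). */
/* After: one statically initialized table, eligible for .rodata. */
struct queue_ops {
        int (*buf_setup)(void *q);
        int (*buf_queue)(void *q);
};

static int demo_setup(void *q) { (void)q; return 0; }
static int demo_queue(void *q) { (void)q; return 0; }

static const struct queue_ops *get_ops(void)
{
        static const struct queue_ops ops = {
                .buf_setup = demo_setup,
                .buf_queue = demo_queue,
        };
        return &ops;
}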
44222diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
44223index fb2acc5..a2fcbdc4 100644
44224--- a/drivers/media/platform/s5p-tv/mixer.h
44225+++ b/drivers/media/platform/s5p-tv/mixer.h
44226@@ -156,7 +156,7 @@ struct mxr_layer {
44227 /** layer index (unique identifier) */
44228 int idx;
44229 /** callbacks for layer methods */
44230- struct mxr_layer_ops ops;
44231+ struct mxr_layer_ops *ops;
44232 /** format array */
44233 const struct mxr_format **fmt_array;
44234 /** size of format array */
44235diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44236index 74344c7..a39e70e 100644
44237--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44238+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
44239@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
44240 {
44241 struct mxr_layer *layer;
44242 int ret;
44243- struct mxr_layer_ops ops = {
44244+ static struct mxr_layer_ops ops = {
44245 .release = mxr_graph_layer_release,
44246 .buffer_set = mxr_graph_buffer_set,
44247 .stream_set = mxr_graph_stream_set,
44248diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
44249index b713403..53cb5ad 100644
44250--- a/drivers/media/platform/s5p-tv/mixer_reg.c
44251+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
44252@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
44253 layer->update_buf = next;
44254 }
44255
44256- layer->ops.buffer_set(layer, layer->update_buf);
44257+ layer->ops->buffer_set(layer, layer->update_buf);
44258
44259 if (done && done != layer->shadow_buf)
44260 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
44261diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
44262index 81b97db..b089ccd 100644
44263--- a/drivers/media/platform/s5p-tv/mixer_video.c
44264+++ b/drivers/media/platform/s5p-tv/mixer_video.c
44265@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
44266 layer->geo.src.height = layer->geo.src.full_height;
44267
44268 mxr_geometry_dump(mdev, &layer->geo);
44269- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44270+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44271 mxr_geometry_dump(mdev, &layer->geo);
44272 }
44273
44274@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
44275 layer->geo.dst.full_width = mbus_fmt.width;
44276 layer->geo.dst.full_height = mbus_fmt.height;
44277 layer->geo.dst.field = mbus_fmt.field;
44278- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44279+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
44280
44281 mxr_geometry_dump(mdev, &layer->geo);
44282 }
44283@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
44284 /* set source size to highest accepted value */
44285 geo->src.full_width = max(geo->dst.full_width, pix->width);
44286 geo->src.full_height = max(geo->dst.full_height, pix->height);
44287- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44288+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44289 mxr_geometry_dump(mdev, &layer->geo);
44290 /* set cropping to total visible screen */
44291 geo->src.width = pix->width;
44292@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
44293 geo->src.x_offset = 0;
44294 geo->src.y_offset = 0;
44295 /* assure consistency of geometry */
44296- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44297+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
44298 mxr_geometry_dump(mdev, &layer->geo);
44299 /* set full size to lowest possible value */
44300 geo->src.full_width = 0;
44301 geo->src.full_height = 0;
44302- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44303+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
44304 mxr_geometry_dump(mdev, &layer->geo);
44305
44306 /* returning results */
44307@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
44308 target->width = s->r.width;
44309 target->height = s->r.height;
44310
44311- layer->ops.fix_geometry(layer, stage, s->flags);
44312+ layer->ops->fix_geometry(layer, stage, s->flags);
44313
44314 /* retrieve update selection rectangle */
44315 res.left = target->x_offset;
44316@@ -955,13 +955,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
44317 mxr_output_get(mdev);
44318
44319 mxr_layer_update_output(layer);
44320- layer->ops.format_set(layer);
44321+ layer->ops->format_set(layer);
44322 /* enabling layer in hardware */
44323 spin_lock_irqsave(&layer->enq_slock, flags);
44324 layer->state = MXR_LAYER_STREAMING;
44325 spin_unlock_irqrestore(&layer->enq_slock, flags);
44326
44327- layer->ops.stream_set(layer, MXR_ENABLE);
44328+ layer->ops->stream_set(layer, MXR_ENABLE);
44329 mxr_streamer_get(mdev);
44330
44331 return 0;
44332@@ -1031,7 +1031,7 @@ static int stop_streaming(struct vb2_queue *vq)
44333 spin_unlock_irqrestore(&layer->enq_slock, flags);
44334
44335 /* disabling layer in hardware */
44336- layer->ops.stream_set(layer, MXR_DISABLE);
44337+ layer->ops->stream_set(layer, MXR_DISABLE);
44338 /* remove one streamer */
44339 mxr_streamer_put(mdev);
44340 /* allow changes in output configuration */
44341@@ -1070,8 +1070,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
44342
44343 void mxr_layer_release(struct mxr_layer *layer)
44344 {
44345- if (layer->ops.release)
44346- layer->ops.release(layer);
44347+ if (layer->ops->release)
44348+ layer->ops->release(layer);
44349 }
44350
44351 void mxr_base_layer_release(struct mxr_layer *layer)
44352@@ -1097,7 +1097,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
44353
44354 layer->mdev = mdev;
44355 layer->idx = idx;
44356- layer->ops = *ops;
44357+ layer->ops = ops;
44358
44359 spin_lock_init(&layer->enq_slock);
44360 INIT_LIST_HEAD(&layer->enq_list);
44361diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44362index c9388c4..ce71ece 100644
44363--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44364+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
44365@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
44366 {
44367 struct mxr_layer *layer;
44368 int ret;
44369- struct mxr_layer_ops ops = {
44370+ static struct mxr_layer_ops ops = {
44371 .release = mxr_vp_layer_release,
44372 .buffer_set = mxr_vp_buffer_set,
44373 .stream_set = mxr_vp_stream_set,
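
The four s5p-tv hunks above are one constification change: each mixer layer used to embed a writable copy of its mxr_layer_ops table (layer->ops = *ops), which is why the ops structs lived on the stack of the create functions. Making the tables static and storing only a pointer (layer->ops = ops, with every call site switched from "." to "->") lets grsecurity's constify plugin keep the function-pointer tables in read-only memory, where a kernel write primitive can no longer redirect them. A minimal userspace sketch of the before/after shape, using invented "widget" names (the matching pointer-type change to struct mxr_layer lives in mixer.h, outside this excerpt):

    #include <stdio.h>

    struct widget;

    struct widget_ops {
            void (*start)(struct widget *w);
    };

    struct widget {
            const struct widget_ops *ops;   /* was: struct widget_ops ops; */
    };

    static void widget_start(struct widget *w)
    {
            (void)w;
            puts("start");
    }

    /* One shared, effectively read-only table instead of a per-instance
     * writable copy on each layer. */
    static const struct widget_ops widget_default_ops = {
            .start = widget_start,
    };

    int main(void)
    {
            struct widget w = { .ops = &widget_default_ops };

            w.ops->start(&w);               /* was: w.ops.start(&w) */
            return 0;
    }
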
44374diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
44375index 2d4e73b..8b4d5b6 100644
44376--- a/drivers/media/platform/vivi.c
44377+++ b/drivers/media/platform/vivi.c
44378@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
44379 MODULE_LICENSE("Dual BSD/GPL");
44380 MODULE_VERSION(VIVI_VERSION);
44381
44382-static unsigned video_nr = -1;
44383-module_param(video_nr, uint, 0644);
44384+static int video_nr = -1;
44385+module_param(video_nr, int, 0644);
44386 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
44387
44388 static unsigned n_devs = 1;
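
The vivi hunk fixes a signedness mismatch: video_nr uses -1 as its "autodetect" sentinel, but was declared unsigned and exported with module_param(..., uint, ...), so the stored value was really 4294967295 and sysfs displayed it that way. Declaring the variable int and matching the param type keeps the storage and the documented -1 in agreement. A two-line illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int as_uint = -1;      /* stored as 4294967295 */
            int as_int = -1;                /* the sentinel the driver means */

            printf("%u vs %d\n", as_uint, as_int);
            return 0;
    }
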
44389diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
44390index 545c04c..a14bded 100644
44391--- a/drivers/media/radio/radio-cadet.c
44392+++ b/drivers/media/radio/radio-cadet.c
44393@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
44394 unsigned char readbuf[RDS_BUFFER];
44395 int i = 0;
44396
44397+ if (count > RDS_BUFFER)
44398+ return -EFAULT;
44399 mutex_lock(&dev->lock);
44400 if (dev->rdsstat == 0)
44401 cadet_start_rds(dev);
44402@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
44403 while (i < count && dev->rdsin != dev->rdsout)
44404 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
44405
44406- if (i && copy_to_user(data, readbuf, i))
44407+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
44408 i = -EFAULT;
44409 unlock:
44410 mutex_unlock(&dev->lock);
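
The radio-cadet change hardens cadet_read() against a stack overflow: i indexes the fixed unsigned char readbuf[RDS_BUFFER], but is bounded only by the caller-controlled count and by ring occupancy, which the interrupt handler can keep topping up, so a count larger than RDS_BUFFER could walk the copy loop past the end of the buffer. The patch rejects oversized requests up front and re-checks i before copy_to_user(). A userspace sketch of the same shape, with memcpy() standing in for copy_to_user():

    #include <errno.h>
    #include <string.h>
    #include <sys/types.h>

    #define RDS_BUFFER 256

    static ssize_t rds_read_model(char *dst, size_t count,
                                  const unsigned char *ring, size_t avail)
    {
            unsigned char readbuf[RDS_BUFFER];
            size_t i;

            if (count > sizeof(readbuf))
                    return -EFAULT;    /* as in the patch; -EINVAL is
                                        * arguably the better errno */

            for (i = 0; i < count && i < avail; i++)
                    readbuf[i] = ring[i];

            if (i > sizeof(readbuf))   /* belt-and-braces, as in the patch */
                    return -EFAULT;

            memcpy(dst, readbuf, i);   /* the copy_to_user() stand-in */
            return (ssize_t)i;
    }

    int main(void)
    {
            static const unsigned char ring[8] = "RDSdata";
            char out[8];

            return rds_read_model(out, sizeof(out), ring, sizeof(ring)) < 0;
    }
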
44411diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
44412index 5236035..c622c74 100644
44413--- a/drivers/media/radio/radio-maxiradio.c
44414+++ b/drivers/media/radio/radio-maxiradio.c
44415@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
44416 /* TEA5757 pin mappings */
44417 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
44418
44419-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
44420+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
44421
44422 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
44423 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
44424diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
44425index 050b3bb..79f62b9 100644
44426--- a/drivers/media/radio/radio-shark.c
44427+++ b/drivers/media/radio/radio-shark.c
44428@@ -79,7 +79,7 @@ struct shark_device {
44429 u32 last_val;
44430 };
44431
44432-static atomic_t shark_instance = ATOMIC_INIT(0);
44433+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
44434
44435 static void shark_write_val(struct snd_tea575x *tea, u32 val)
44436 {
44437diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
44438index 8654e0d..0608a64 100644
44439--- a/drivers/media/radio/radio-shark2.c
44440+++ b/drivers/media/radio/radio-shark2.c
44441@@ -74,7 +74,7 @@ struct shark_device {
44442 u8 *transfer_buffer;
44443 };
44444
44445-static atomic_t shark_instance = ATOMIC_INIT(0);
44446+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
44447
44448 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
44449 {
44450diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
44451index 2fd9009..278cc1e 100644
44452--- a/drivers/media/radio/radio-si476x.c
44453+++ b/drivers/media/radio/radio-si476x.c
44454@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
44455 struct si476x_radio *radio;
44456 struct v4l2_ctrl *ctrl;
44457
44458- static atomic_t instance = ATOMIC_INIT(0);
44459+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
44460
44461 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
44462 if (!radio)
44463diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
44464index 46da365..3ba4206 100644
44465--- a/drivers/media/rc/rc-main.c
44466+++ b/drivers/media/rc/rc-main.c
44467@@ -1065,7 +1065,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
44468 int rc_register_device(struct rc_dev *dev)
44469 {
44470 static bool raw_init = false; /* raw decoders loaded? */
44471- static atomic_t devno = ATOMIC_INIT(0);
44472+ static atomic_unchecked_t devno = ATOMIC_INIT(0);
44473 struct rc_map *rc_map;
44474 const char *path;
44475 int rc;
44476@@ -1096,7 +1096,7 @@ int rc_register_device(struct rc_dev *dev)
44477 */
44478 mutex_lock(&dev->lock);
44479
44480- dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
44481+ dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
44482 dev_set_name(&dev->dev, "rc%ld", dev->devno);
44483 dev_set_drvdata(&dev->dev, dev);
44484 rc = device_add(&dev->dev);
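
maxiradio, shark, shark2, si476x and rc-main all get the same treatment: their instance counters move from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, plain atomic_t operations are instrumented to detect overflow, the classic reference-count-overflow exploit primitive; counters whose wraparound is harmless — here, monotonically increasing device numbering — are switched to the *_unchecked variants so they keep ordinary modular arithmetic and never trip the detector. The same conversion recurs throughout this section (v4l2-device instance naming, the i2o context-list counter, lis3lv02d's interrupt count, and wholesale in the sgi-gru statistics structs), always for event or instance counters rather than true refcounts. A userspace model of the distinction — the in-kernel response to an overflow is a saturation report, not the abort() used here:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int atomic_inc_return_checked(int *v)
    {
            if (*v == INT_MAX)
                    abort();        /* models the REFCOUNT overflow trap */
            return ++*v;
    }

    static int atomic_inc_return_unchecked(int *v)
    {
            return ++*v;            /* plain wraparound, fine for numbering */
    }

    int main(void)
    {
            int instance = 0;

            printf("radio%d\n", atomic_inc_return_unchecked(&instance) - 1);
            printf("radio%d\n", atomic_inc_return_checked(&instance) - 1);
            return 0;
    }
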
44485diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
44486index 20e345d..da56fe4 100644
44487--- a/drivers/media/usb/dvb-usb/cxusb.c
44488+++ b/drivers/media/usb/dvb-usb/cxusb.c
44489@@ -1101,7 +1101,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
44490
44491 struct dib0700_adapter_state {
44492 int (*set_param_save) (struct dvb_frontend *);
44493-};
44494+} __no_const;
44495
44496 static int dib7070_set_param_override(struct dvb_frontend *fe)
44497 {
44498diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
44499index c1a63b2..dbcbfb6 100644
44500--- a/drivers/media/usb/dvb-usb/dw2102.c
44501+++ b/drivers/media/usb/dvb-usb/dw2102.c
44502@@ -121,7 +121,7 @@ struct su3000_state {
44503
44504 struct s6x0_state {
44505 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
44506-};
44507+} __no_const;
44508
44509 /* debug */
44510 static int dvb_usb_dw2102_debug;
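
The __no_const annotation on the cxusb and dw2102 state structs is the escape hatch for the same constify plugin: a struct whose members are all function pointers would normally be forced const, but these two exist precisely to save a callback at run time (set_param_save, old_set_voltage), so they must stay writable. The marker shows up again later on cxgb3's l2t_skb_cb and sgi-xp's xpc_interface; its inverse, __do_const, forces constification of mixed tables such as dw_mmc's dw_mci_drv_data. A sketch of the intent, with __no_const modeled as a no-op macro since the real definition comes from the grsecurity compiler-plugin headers:

    /* __no_const is a grsecurity marker; an empty stand-in suffices here. */
    #define __no_const

    struct dvb_frontend;

    /* An all-function-pointer struct would be constified by default; the
     * marker exempts it because this member is written at run time to
     * remember the original hook before it is overridden. */
    struct adapter_state {
            int (*set_param_save)(struct dvb_frontend *fe);
    } __no_const;

    int main(void)
    {
            return 0;
    }
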
44511diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
44512index 8f7a6a4..eb0e1d4 100644
44513--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
44514+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
44515@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
44516 __u32 reserved;
44517 };
44518
44519-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
44520+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
44521 enum v4l2_memory memory)
44522 {
44523 void __user *up_pln;
44524@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
44525 return 0;
44526 }
44527
44528-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
44529+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
44530 enum v4l2_memory memory)
44531 {
44532 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
44533@@ -425,7 +425,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
44534 * by passing a very big num_planes value */
44535 uplane = compat_alloc_user_space(num_planes *
44536 sizeof(struct v4l2_plane));
44537- kp->m.planes = uplane;
44538+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
44539
44540 while (--num_planes >= 0) {
44541 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
44542@@ -496,7 +496,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
44543 if (num_planes == 0)
44544 return 0;
44545
44546- uplane = kp->m.planes;
44547+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
44548 if (get_user(p, &up->m.planes))
44549 return -EFAULT;
44550 uplane32 = compat_ptr(p);
44551@@ -550,7 +550,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
44552 get_user(kp->capability, &up->capability) ||
44553 get_user(kp->flags, &up->flags))
44554 return -EFAULT;
44555- kp->base = compat_ptr(tmp);
44556+ kp->base = (void __force_kernel *)compat_ptr(tmp);
44557 get_v4l2_pix_format(&kp->fmt, &up->fmt);
44558 return 0;
44559 }
44560@@ -656,7 +656,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
44561 n * sizeof(struct v4l2_ext_control32)))
44562 return -EFAULT;
44563 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
44564- kp->controls = kcontrols;
44565+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
44566 while (--n >= 0) {
44567 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
44568 return -EFAULT;
44569@@ -678,7 +678,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
44570 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
44571 {
44572 struct v4l2_ext_control32 __user *ucontrols;
44573- struct v4l2_ext_control __user *kcontrols = kp->controls;
44574+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
44575 int n = kp->count;
44576 compat_caddr_t p;
44577
44578@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
44579 put_user(kp->start_block, &up->start_block) ||
44580 put_user(kp->blocks, &up->blocks) ||
44581 put_user(tmp, &up->edid) ||
44582- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
44583+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
44584 return -EFAULT;
44585 return 0;
44586 }
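
Three distinct fixes hide in the compat-ioctl hunks. First, get/put_v4l2_plane32() really operate on user memory handed out by compat_alloc_user_space(), so their parameters gain the sparse __user annotation to match the copy_in_user() calls inside. Second, the __force_kernel/__force_user casts document the deliberate address-space puns where a kernel struct field (kp->m.planes, kp->controls, kp->base) temporarily carries a user-space pointer. Third, and the only behavioral change, put_v4l2_subdev_edid32() had its copy_to_user() arguments reversed, naming the kernel-resident kp->reserved as the destination. copy_to_user() follows memcpy() ordering — user destination first, kernel source second — as this userspace model shows:

    #include <string.h>

    /* Model of copy_to_user(to_user, from_kernel, n): destination first,
     * source second, 0 on success. */
    static int copy_to_user_model(void *to_user, const void *from_kernel,
                                  size_t n)
    {
            memcpy(to_user, from_kernel, n);
            return 0;
    }

    int main(void)
    {
            char kernel_reserved[4] = { 1, 2, 3, 4 };
            char user_reserved[4];

            /* The buggy call had these first two arguments swapped. */
            return copy_to_user_model(user_reserved, kernel_reserved,
                                      sizeof(kernel_reserved));
    }
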
44587diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
44588index fb46790..ae1f8fa 100644
44589--- a/drivers/media/v4l2-core/v4l2-ctrls.c
44590+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
44591@@ -1396,8 +1396,8 @@ static int validate_new(const struct v4l2_ctrl *ctrl,
44592 return 0;
44593
44594 case V4L2_CTRL_TYPE_STRING:
44595- len = strlen(c->string);
44596- if (len < ctrl->minimum)
44597+ len = strlen_user(c->string);
44598+ if (!len || len < ctrl->minimum)
44599 return -ERANGE;
44600 if ((len - ctrl->minimum) % ctrl->step)
44601 return -ERANGE;
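
In validate_new(), c->string still holds a pointer supplied from user space, so calling plain strlen() on it can fault or read through a hostile pointer; strlen_user() performs the access with fault handling and returns 0 on a bad pointer, which the added !len test turns into -ERANGE. One semantic wrinkle worth knowing: strlen_user() counts the terminating NUL, so len comes back one larger than strlen() would have returned. A model of those two properties:

    #include <stdio.h>

    /* NULL stands in for the faulting user pointer. */
    static size_t strlen_user_model(const char *s)
    {
            size_t n = 0;

            if (!s)
                    return 0;       /* bad pointer, not empty string */
            while (s[n])
                    n++;
            return n + 1;           /* terminating NUL included */
    }

    int main(void)
    {
            printf("%zu %zu\n", strlen_user_model("abc"),
                   strlen_user_model(NULL));
            return 0;
    }
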
44602diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
44603index 02d1b63..5fd6b16 100644
44604--- a/drivers/media/v4l2-core/v4l2-device.c
44605+++ b/drivers/media/v4l2-core/v4l2-device.c
44606@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
44607 EXPORT_SYMBOL_GPL(v4l2_device_put);
44608
44609 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
44610- atomic_t *instance)
44611+ atomic_unchecked_t *instance)
44612 {
44613- int num = atomic_inc_return(instance) - 1;
44614+ int num = atomic_inc_return_unchecked(instance) - 1;
44615 int len = strlen(basename);
44616
44617 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
44618diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
44619index 68e6b5e..8eb2aec 100644
44620--- a/drivers/media/v4l2-core/v4l2-ioctl.c
44621+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
44622@@ -1939,7 +1939,8 @@ struct v4l2_ioctl_info {
44623 struct file *file, void *fh, void *p);
44624 } u;
44625 void (*debug)(const void *arg, bool write_only);
44626-};
44627+} __do_const;
44628+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
44629
44630 /* This control needs a priority check */
44631 #define INFO_FL_PRIO (1 << 0)
44632@@ -2120,7 +2121,7 @@ static long __video_do_ioctl(struct file *file,
44633 struct video_device *vfd = video_devdata(file);
44634 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
44635 bool write_only = false;
44636- struct v4l2_ioctl_info default_info;
44637+ v4l2_ioctl_info_no_const default_info;
44638 const struct v4l2_ioctl_info *info;
44639 void *fh = file->private_data;
44640 struct v4l2_fh *vfh = NULL;
44641@@ -2194,7 +2195,7 @@ done:
44642 }
44643
44644 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44645- void * __user *user_ptr, void ***kernel_ptr)
44646+ void __user **user_ptr, void ***kernel_ptr)
44647 {
44648 int ret = 0;
44649
44650@@ -2210,7 +2211,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44651 ret = -EINVAL;
44652 break;
44653 }
44654- *user_ptr = (void __user *)buf->m.planes;
44655+ *user_ptr = (void __force_user *)buf->m.planes;
44656 *kernel_ptr = (void *)&buf->m.planes;
44657 *array_size = sizeof(struct v4l2_plane) * buf->length;
44658 ret = 1;
44659@@ -2245,7 +2246,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
44660 ret = -EINVAL;
44661 break;
44662 }
44663- *user_ptr = (void __user *)ctrls->controls;
44664+ *user_ptr = (void __force_user *)ctrls->controls;
44665 *kernel_ptr = (void *)&ctrls->controls;
44666 *array_size = sizeof(struct v4l2_ext_control)
44667 * ctrls->count;
44668@@ -2340,7 +2341,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
44669 err = -ENOTTY;
44670
44671 if (has_array_args) {
44672- *kernel_ptr = user_ptr;
44673+ *kernel_ptr = (void __force_kernel *)user_ptr;
44674 if (copy_to_user(user_ptr, mbuf, array_size))
44675 err = -EFAULT;
44676 goto out_array_args;
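
The ioctl-table hunks combine three ideas. __do_const asks the constify plugin to force the v4l2_ioctl_info descriptor table const even though it mixes function pointers with data, while the v4l2_ioctl_info_no_const typedef supplies the one writable flavor needed for the stack-local default_info that __video_do_ioctl() fills in at run time. Separately, "void * __user *" is corrected to "void __user **": the sparse annotation binds like a cv-qualifier in a declarator, so the original said "user-resident pointer to a kernel void *", where what is meant is "kernel pointer to a variable holding a user-space pointer". The const analogy:

    int main(void)
    {
            char c = 'y';

            const char *ptr_to_const = "x";  /* pointer to const char       */
            char * const const_ptr = &c;     /* const pointer to plain char */

            (void)ptr_to_const;
            (void)const_ptr;
            return 0;
    }
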
44677diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
44678index 767ff4d..c69d259 100644
44679--- a/drivers/message/fusion/mptbase.c
44680+++ b/drivers/message/fusion/mptbase.c
44681@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
44682 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
44683 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
44684
44685+#ifdef CONFIG_GRKERNSEC_HIDESYM
44686+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
44687+#else
44688 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
44689 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
44690+#endif
44691+
44692 /*
44693 * Rounding UP to nearest 4-kB boundary here...
44694 */
44695@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
44696 ioc->facts.GlobalCredits);
44697
44698 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
44699+#ifdef CONFIG_GRKERNSEC_HIDESYM
44700+ NULL, NULL);
44701+#else
44702 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
44703+#endif
44704 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
44705 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
44706 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
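
The mptbase hunks are a GRKERNSEC_HIDESYM information-leak fix: the /proc IOC summary printed raw kernel virtual and DMA addresses, handing an attacker the heap layout that address-space randomization is supposed to hide. With the option enabled, the same lines print NULL instead. A compile-time switch of this shape — CONFIG_GRKERNSEC_HIDESYM is the real kconfig symbol, and the local buffer stands in for ioc->req_frames:

    #include <stdio.h>

    static void show_frames(const void *req_frames)
    {
    #ifdef CONFIG_GRKERNSEC_HIDESYM
            printf("RequestFrames @ %p\n", (void *)0);
    #else
            printf("RequestFrames @ %p\n", req_frames);
    #endif
    }

    int main(void)
    {
            char frames[16];

            show_frames(frames);
            return 0;
    }
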
44707diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
44708index dd239bd..689c4f7 100644
44709--- a/drivers/message/fusion/mptsas.c
44710+++ b/drivers/message/fusion/mptsas.c
44711@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
44712 return 0;
44713 }
44714
44715+static inline void
44716+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
44717+{
44718+ if (phy_info->port_details) {
44719+ phy_info->port_details->rphy = rphy;
44720+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
44721+ ioc->name, rphy));
44722+ }
44723+
44724+ if (rphy) {
44725+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
44726+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
44727+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
44728+ ioc->name, rphy, rphy->dev.release));
44729+ }
44730+}
44731+
44732 /* no mutex */
44733 static void
44734 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
44735@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
44736 return NULL;
44737 }
44738
44739-static inline void
44740-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
44741-{
44742- if (phy_info->port_details) {
44743- phy_info->port_details->rphy = rphy;
44744- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
44745- ioc->name, rphy));
44746- }
44747-
44748- if (rphy) {
44749- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
44750- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
44751- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
44752- ioc->name, rphy, rphy->dev.release));
44753- }
44754-}
44755-
44756 static inline struct sas_port *
44757 mptsas_get_port(struct mptsas_phyinfo *phy_info)
44758 {
44759diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
44760index 727819c..ad74694 100644
44761--- a/drivers/message/fusion/mptscsih.c
44762+++ b/drivers/message/fusion/mptscsih.c
44763@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
44764
44765 h = shost_priv(SChost);
44766
44767- if (h) {
44768- if (h->info_kbuf == NULL)
44769- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
44770- return h->info_kbuf;
44771- h->info_kbuf[0] = '\0';
44772+ if (!h)
44773+ return NULL;
44774
44775- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
44776- h->info_kbuf[size-1] = '\0';
44777- }
44778+ if (h->info_kbuf == NULL)
44779+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
44780+ return h->info_kbuf;
44781+ h->info_kbuf[0] = '\0';
44782+
44783+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
44784+ h->info_kbuf[size-1] = '\0';
44785
44786 return h->info_kbuf;
44787 }
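
Beyond the restyling, the mptscsih_info() hunk fixes a latent NULL dereference: the old code guarded the body with "if (h)" but then unconditionally executed the trailing "return h->info_kbuf;", so a NULL host-private pointer would have crashed on the return path. The rewritten shape returns NULL first and keeps the main path at a single indent level:

    #include <stddef.h>

    static const char *info_model(const char *cached)
    {
            if (!cached)
                    return NULL;    /* old code fell through to a deref here */

            /* main path, previously nested inside "if (h) { ... }" */
            return cached;
    }

    int main(void)
    {
            return info_model(NULL) != NULL || info_model("ok") == NULL;
    }
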
44788diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
44789index b7d87cd..3fb36da 100644
44790--- a/drivers/message/i2o/i2o_proc.c
44791+++ b/drivers/message/i2o/i2o_proc.c
44792@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
44793 "Array Controller Device"
44794 };
44795
44796-static char *chtostr(char *tmp, u8 *chars, int n)
44797-{
44798- tmp[0] = 0;
44799- return strncat(tmp, (char *)chars, n);
44800-}
44801-
44802 static int i2o_report_query_status(struct seq_file *seq, int block_status,
44803 char *group)
44804 {
44805@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
44806 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
44807 {
44808 struct i2o_controller *c = (struct i2o_controller *)seq->private;
44809- static u32 work32[5];
44810- static u8 *work8 = (u8 *) work32;
44811- static u16 *work16 = (u16 *) work32;
44812+ u32 work32[5];
44813+ u8 *work8 = (u8 *) work32;
44814+ u16 *work16 = (u16 *) work32;
44815 int token;
44816 u32 hwcap;
44817
44818@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
44819 } *result;
44820
44821 i2o_exec_execute_ddm_table ddm_table;
44822- char tmp[28 + 1];
44823
44824 result = kmalloc(sizeof(*result), GFP_KERNEL);
44825 if (!result)
44826@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
44827
44828 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
44829 seq_printf(seq, "%-#8x", ddm_table.module_id);
44830- seq_printf(seq, "%-29s",
44831- chtostr(tmp, ddm_table.module_name_version, 28));
44832+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
44833 seq_printf(seq, "%9d ", ddm_table.data_size);
44834 seq_printf(seq, "%8d", ddm_table.code_size);
44835
44836@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
44837
44838 i2o_driver_result_table *result;
44839 i2o_driver_store_table *dst;
44840- char tmp[28 + 1];
44841
44842 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
44843 if (result == NULL)
44844@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
44845
44846 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
44847 seq_printf(seq, "%-#8x", dst->module_id);
44848- seq_printf(seq, "%-29s",
44849- chtostr(tmp, dst->module_name_version, 28));
44850- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
44851+ seq_printf(seq, "%-.28s", dst->module_name_version);
44852+ seq_printf(seq, "%-.8s", dst->date);
44853 seq_printf(seq, "%8d ", dst->module_size);
44854 seq_printf(seq, "%8d ", dst->mpb_size);
44855 seq_printf(seq, "0x%04x", dst->module_flags);
44856@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
44857 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
44858 {
44859 struct i2o_device *d = (struct i2o_device *)seq->private;
44860- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
44861+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
44862 // == (allow) 512d bytes (max)
44863- static u16 *work16 = (u16 *) work32;
44864+ u16 *work16 = (u16 *) work32;
44865 int token;
44866- char tmp[16 + 1];
44867
44868 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
44869
44870@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
44871 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
44872 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
44873 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
44874- seq_printf(seq, "Vendor info : %s\n",
44875- chtostr(tmp, (u8 *) (work32 + 2), 16));
44876- seq_printf(seq, "Product info : %s\n",
44877- chtostr(tmp, (u8 *) (work32 + 6), 16));
44878- seq_printf(seq, "Description : %s\n",
44879- chtostr(tmp, (u8 *) (work32 + 10), 16));
44880- seq_printf(seq, "Product rev. : %s\n",
44881- chtostr(tmp, (u8 *) (work32 + 14), 8));
44882+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
44883+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
44884+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
44885+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
44886
44887 seq_printf(seq, "Serial number : ");
44888 print_serial_number(seq, (u8 *) (work32 + 16),
44889@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
44890 u8 pad[256]; // allow up to 256 byte (max) serial number
44891 } result;
44892
44893- char tmp[24 + 1];
44894-
44895 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
44896
44897 if (token < 0) {
44898@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
44899 }
44900
44901 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
44902- seq_printf(seq, "Module name : %s\n",
44903- chtostr(tmp, result.module_name, 24));
44904- seq_printf(seq, "Module revision : %s\n",
44905- chtostr(tmp, result.module_rev, 8));
44906+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
44907+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
44908
44909 seq_printf(seq, "Serial number : ");
44910 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
44911@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44912 u8 instance_number[4];
44913 } result;
44914
44915- char tmp[64 + 1];
44916-
44917 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
44918
44919 if (token < 0) {
44920@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44921 return 0;
44922 }
44923
44924- seq_printf(seq, "Device name : %s\n",
44925- chtostr(tmp, result.device_name, 64));
44926- seq_printf(seq, "Service name : %s\n",
44927- chtostr(tmp, result.service_name, 64));
44928- seq_printf(seq, "Physical name : %s\n",
44929- chtostr(tmp, result.physical_location, 64));
44930- seq_printf(seq, "Instance number : %s\n",
44931- chtostr(tmp, result.instance_number, 4));
44932+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
44933+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
44934+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
44935+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
44936
44937 return 0;
44938 }
44939@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
44940 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
44941 {
44942 struct i2o_device *d = (struct i2o_device *)seq->private;
44943- static u32 work32[12];
44944- static u16 *work16 = (u16 *) work32;
44945- static u8 *work8 = (u8 *) work32;
44946+ u32 work32[12];
44947+ u16 *work16 = (u16 *) work32;
44948+ u8 *work8 = (u8 *) work32;
44949 int token;
44950
44951 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
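
Two recurring fixes run through the i2o_proc hunks. The chtostr() helper, which strncat()'ed fixed-width firmware fields into a temporary, is replaced by printf precision specifiers ("%.28s" and friends): a precision caps how many bytes the formatter may read, so fields that are not NUL-terminated can be printed directly with no bounce buffer. And the static scratch buffers (work32/work16/work8) become ordinary automatics, because a function-local static is a single shared instance — two processes reading the proc file concurrently would have raced on it. The precision behavior in isolation:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char module_name_version[28];

            /* Deliberately no terminating NUL: the ".28" precision bounds
             * the read, which is what made chtostr() removable. */
            memset(module_name_version, 'x', sizeof(module_name_version));

            printf("%-.28s\n", module_name_version);
            return 0;
    }
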
44952diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
44953index a8c08f3..155fe3d 100644
44954--- a/drivers/message/i2o/iop.c
44955+++ b/drivers/message/i2o/iop.c
44956@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
44957
44958 spin_lock_irqsave(&c->context_list_lock, flags);
44959
44960- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
44961- atomic_inc(&c->context_list_counter);
44962+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
44963+ atomic_inc_unchecked(&c->context_list_counter);
44964
44965- entry->context = atomic_read(&c->context_list_counter);
44966+ entry->context = atomic_read_unchecked(&c->context_list_counter);
44967
44968 list_add(&entry->list, &c->context_list);
44969
44970@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
44971
44972 #if BITS_PER_LONG == 64
44973 spin_lock_init(&c->context_list_lock);
44974- atomic_set(&c->context_list_counter, 0);
44975+ atomic_set_unchecked(&c->context_list_counter, 0);
44976 INIT_LIST_HEAD(&c->context_list);
44977 #endif
44978
44979diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
44980index e33e385..28dfd23 100644
44981--- a/drivers/mfd/ab8500-debugfs.c
44982+++ b/drivers/mfd/ab8500-debugfs.c
44983@@ -100,7 +100,7 @@ static int irq_last;
44984 static u32 *irq_count;
44985 static int num_irqs;
44986
44987-static struct device_attribute **dev_attr;
44988+static device_attribute_no_const **dev_attr;
44989 static char **event_name;
44990
44991 static u8 avg_sample = SAMPLE_16;
44992diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
44993index fcbb2e9..2635e11 100644
44994--- a/drivers/mfd/janz-cmodio.c
44995+++ b/drivers/mfd/janz-cmodio.c
44996@@ -13,6 +13,7 @@
44997
44998 #include <linux/kernel.h>
44999 #include <linux/module.h>
45000+#include <linux/slab.h>
45001 #include <linux/init.h>
45002 #include <linux/pci.h>
45003 #include <linux/interrupt.h>
45004diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
45005index 176aa26..27811b2 100644
45006--- a/drivers/mfd/max8925-i2c.c
45007+++ b/drivers/mfd/max8925-i2c.c
45008@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
45009 const struct i2c_device_id *id)
45010 {
45011 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
45012- static struct max8925_chip *chip;
45013+ struct max8925_chip *chip;
45014 struct device_node *node = client->dev.of_node;
45015
45016 if (node && !pdata) {
45017diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
45018index c0f608e..286f8ec 100644
45019--- a/drivers/mfd/tps65910.c
45020+++ b/drivers/mfd/tps65910.c
45021@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
45022 struct tps65910_platform_data *pdata)
45023 {
45024 int ret = 0;
45025- static struct regmap_irq_chip *tps6591x_irqs_chip;
45026+ struct regmap_irq_chip *tps6591x_irqs_chip;
45027
45028 if (!irq) {
45029 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
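
max8925_probe() and tps65910_irq_init() both lose a stray "static" on a local variable. A function-local static is one shared instance for the whole kernel, so with two chips probed, the second probe would silently reuse and clobber the first one's pointer; plain automatics give each call its own storage. The cfi_cmdset_0020 hunk further down makes the same fix to a writev buffer pointer. The aliasing in miniature:

    #include <stdio.h>

    static int *get_slot(void)
    {
            static int slot;    /* one instance shared by every call */
            return &slot;
    }

    int main(void)
    {
            /* Both calls hand back the same address. */
            printf("%p == %p\n", (void *)get_slot(), (void *)get_slot());
            return 0;
    }
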
45030diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
45031index 9aa6d1e..1631bfc 100644
45032--- a/drivers/mfd/twl4030-irq.c
45033+++ b/drivers/mfd/twl4030-irq.c
45034@@ -35,6 +35,7 @@
45035 #include <linux/of.h>
45036 #include <linux/irqdomain.h>
45037 #include <linux/i2c/twl.h>
45038+#include <asm/pgtable.h>
45039
45040 #include "twl-core.h"
45041
45042@@ -726,10 +727,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
45043 * Install an irq handler for each of the SIH modules;
45044 * clone dummy irq_chip since PIH can't *do* anything
45045 */
45046- twl4030_irq_chip = dummy_irq_chip;
45047- twl4030_irq_chip.name = "twl4030";
45048+ pax_open_kernel();
45049+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
45050+ *(const char **)&twl4030_irq_chip.name = "twl4030";
45051
45052- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
45053+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
45054+ pax_close_kernel();
45055
45056 for (i = irq_base; i < irq_end; i++) {
45057 irq_set_chip_and_handler(i, &twl4030_irq_chip,
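
twl4030_init_irq() used to build its irq_chip by plain struct assignment at run time; with KERNEXEC/constify those chip structures live in read-only memory, so the writes are rewritten as explicit memcpy/field stores bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection for the current CPU (on x86, roughly by toggling CR0.WP). The "*(const char **)&twl4030_irq_chip.name = ..." idiom is part of the same pattern: storing through the field's address sidesteps the const qualifier inside the sanctioned window. The c2port, sunxi_sid, mmci and sdhci hunks further down wrap their one-off writes the same way. A userspace model using mprotect() for the open/close pair:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesz = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            strcpy(p, "dummy");
            mprotect(p, pagesz, PROT_READ);        /* normally read-only */

            mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
            memcpy(p, "twl4030", 8);                     /* sanctioned write   */
            mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() */

            printf("%s\n", p);
            return munmap(p, pagesz);
    }
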
45058diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
45059index 464419b..64bae8d 100644
45060--- a/drivers/misc/c2port/core.c
45061+++ b/drivers/misc/c2port/core.c
45062@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
45063 goto error_idr_alloc;
45064 c2dev->id = ret;
45065
45066- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
45067+ pax_open_kernel();
45068+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
45069+ pax_close_kernel();
45070
45071 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
45072 "c2port%d", c2dev->id);
45073diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
45074index 9c34e57..b981cda 100644
45075--- a/drivers/misc/eeprom/sunxi_sid.c
45076+++ b/drivers/misc/eeprom/sunxi_sid.c
45077@@ -127,7 +127,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
45078
45079 platform_set_drvdata(pdev, sid_data);
45080
45081- sid_bin_attr.size = sid_data->keysize;
45082+ pax_open_kernel();
45083+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
45084+ pax_close_kernel();
45085 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
45086 return -ENODEV;
45087
45088diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
45089index 36f5d52..32311c3 100644
45090--- a/drivers/misc/kgdbts.c
45091+++ b/drivers/misc/kgdbts.c
45092@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
45093 char before[BREAK_INSTR_SIZE];
45094 char after[BREAK_INSTR_SIZE];
45095
45096- probe_kernel_read(before, (char *)kgdbts_break_test,
45097+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
45098 BREAK_INSTR_SIZE);
45099 init_simple_test();
45100 ts.tst = plant_and_detach_test;
45101@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
45102 /* Activate test with initial breakpoint */
45103 if (!is_early)
45104 kgdb_breakpoint();
45105- probe_kernel_read(after, (char *)kgdbts_break_test,
45106+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
45107 BREAK_INSTR_SIZE);
45108 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
45109 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
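
kgdbts reads back the instruction bytes at kgdbts_break_test to verify that kgdb restored them. Under PaX KERNEXEC on i386 the kernel text is accessed through a different mapping than the one the symbol's address names, so the address is first translated with ktla_ktva() ("kernel text linear address to kernel text virtual address"); without it the comparison would read the wrong bytes and report phantom corruption. A purely illustrative model — the real translation comes from the KERNEXEC segment setup, and the delta below is invented:

    #include <stdio.h>

    #define KERNEXEC_DELTA 0x10000000UL     /* hypothetical offset */
    #define ktla_ktva(addr) ((unsigned long)(addr) + KERNEXEC_DELTA)

    static const unsigned char break_test[1] = { 0xcc };

    int main(void)
    {
            printf("read breakpoint bytes via %#lx\n", ktla_ktva(break_test));
            return 0;
    }
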
45110diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
45111index 036effe..b3a6336 100644
45112--- a/drivers/misc/lis3lv02d/lis3lv02d.c
45113+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
45114@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
45115 * the lid is closed. This leads to interrupts as soon as a little move
45116 * is done.
45117 */
45118- atomic_inc(&lis3->count);
45119+ atomic_inc_unchecked(&lis3->count);
45120
45121 wake_up_interruptible(&lis3->misc_wait);
45122 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
45123@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
45124 if (lis3->pm_dev)
45125 pm_runtime_get_sync(lis3->pm_dev);
45126
45127- atomic_set(&lis3->count, 0);
45128+ atomic_set_unchecked(&lis3->count, 0);
45129 return 0;
45130 }
45131
45132@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
45133 add_wait_queue(&lis3->misc_wait, &wait);
45134 while (true) {
45135 set_current_state(TASK_INTERRUPTIBLE);
45136- data = atomic_xchg(&lis3->count, 0);
45137+ data = atomic_xchg_unchecked(&lis3->count, 0);
45138 if (data)
45139 break;
45140
45141@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
45142 struct lis3lv02d, miscdev);
45143
45144 poll_wait(file, &lis3->misc_wait, wait);
45145- if (atomic_read(&lis3->count))
45146+ if (atomic_read_unchecked(&lis3->count))
45147 return POLLIN | POLLRDNORM;
45148 return 0;
45149 }
45150diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
45151index c439c82..1f20f57 100644
45152--- a/drivers/misc/lis3lv02d/lis3lv02d.h
45153+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
45154@@ -297,7 +297,7 @@ struct lis3lv02d {
45155 struct input_polled_dev *idev; /* input device */
45156 struct platform_device *pdev; /* platform device */
45157 struct regulator_bulk_data regulators[2];
45158- atomic_t count; /* interrupt count after last read */
45159+ atomic_unchecked_t count; /* interrupt count after last read */
45160 union axis_conversion ac; /* hw -> logical axis */
45161 int mapped_btns[3];
45162
45163diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
45164index 2f30bad..c4c13d0 100644
45165--- a/drivers/misc/sgi-gru/gruhandles.c
45166+++ b/drivers/misc/sgi-gru/gruhandles.c
45167@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
45168 unsigned long nsec;
45169
45170 nsec = CLKS2NSEC(clks);
45171- atomic_long_inc(&mcs_op_statistics[op].count);
45172- atomic_long_add(nsec, &mcs_op_statistics[op].total);
45173+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
45174+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
45175 if (mcs_op_statistics[op].max < nsec)
45176 mcs_op_statistics[op].max = nsec;
45177 }
45178diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
45179index 4f76359..cdfcb2e 100644
45180--- a/drivers/misc/sgi-gru/gruprocfs.c
45181+++ b/drivers/misc/sgi-gru/gruprocfs.c
45182@@ -32,9 +32,9 @@
45183
45184 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
45185
45186-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
45187+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
45188 {
45189- unsigned long val = atomic_long_read(v);
45190+ unsigned long val = atomic_long_read_unchecked(v);
45191
45192 seq_printf(s, "%16lu %s\n", val, id);
45193 }
45194@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
45195
45196 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
45197 for (op = 0; op < mcsop_last; op++) {
45198- count = atomic_long_read(&mcs_op_statistics[op].count);
45199- total = atomic_long_read(&mcs_op_statistics[op].total);
45200+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
45201+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
45202 max = mcs_op_statistics[op].max;
45203 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
45204 count ? total / count : 0, max);
45205diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
45206index 5c3ce24..4915ccb 100644
45207--- a/drivers/misc/sgi-gru/grutables.h
45208+++ b/drivers/misc/sgi-gru/grutables.h
45209@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
45210 * GRU statistics.
45211 */
45212 struct gru_stats_s {
45213- atomic_long_t vdata_alloc;
45214- atomic_long_t vdata_free;
45215- atomic_long_t gts_alloc;
45216- atomic_long_t gts_free;
45217- atomic_long_t gms_alloc;
45218- atomic_long_t gms_free;
45219- atomic_long_t gts_double_allocate;
45220- atomic_long_t assign_context;
45221- atomic_long_t assign_context_failed;
45222- atomic_long_t free_context;
45223- atomic_long_t load_user_context;
45224- atomic_long_t load_kernel_context;
45225- atomic_long_t lock_kernel_context;
45226- atomic_long_t unlock_kernel_context;
45227- atomic_long_t steal_user_context;
45228- atomic_long_t steal_kernel_context;
45229- atomic_long_t steal_context_failed;
45230- atomic_long_t nopfn;
45231- atomic_long_t asid_new;
45232- atomic_long_t asid_next;
45233- atomic_long_t asid_wrap;
45234- atomic_long_t asid_reuse;
45235- atomic_long_t intr;
45236- atomic_long_t intr_cbr;
45237- atomic_long_t intr_tfh;
45238- atomic_long_t intr_spurious;
45239- atomic_long_t intr_mm_lock_failed;
45240- atomic_long_t call_os;
45241- atomic_long_t call_os_wait_queue;
45242- atomic_long_t user_flush_tlb;
45243- atomic_long_t user_unload_context;
45244- atomic_long_t user_exception;
45245- atomic_long_t set_context_option;
45246- atomic_long_t check_context_retarget_intr;
45247- atomic_long_t check_context_unload;
45248- atomic_long_t tlb_dropin;
45249- atomic_long_t tlb_preload_page;
45250- atomic_long_t tlb_dropin_fail_no_asid;
45251- atomic_long_t tlb_dropin_fail_upm;
45252- atomic_long_t tlb_dropin_fail_invalid;
45253- atomic_long_t tlb_dropin_fail_range_active;
45254- atomic_long_t tlb_dropin_fail_idle;
45255- atomic_long_t tlb_dropin_fail_fmm;
45256- atomic_long_t tlb_dropin_fail_no_exception;
45257- atomic_long_t tfh_stale_on_fault;
45258- atomic_long_t mmu_invalidate_range;
45259- atomic_long_t mmu_invalidate_page;
45260- atomic_long_t flush_tlb;
45261- atomic_long_t flush_tlb_gru;
45262- atomic_long_t flush_tlb_gru_tgh;
45263- atomic_long_t flush_tlb_gru_zero_asid;
45264+ atomic_long_unchecked_t vdata_alloc;
45265+ atomic_long_unchecked_t vdata_free;
45266+ atomic_long_unchecked_t gts_alloc;
45267+ atomic_long_unchecked_t gts_free;
45268+ atomic_long_unchecked_t gms_alloc;
45269+ atomic_long_unchecked_t gms_free;
45270+ atomic_long_unchecked_t gts_double_allocate;
45271+ atomic_long_unchecked_t assign_context;
45272+ atomic_long_unchecked_t assign_context_failed;
45273+ atomic_long_unchecked_t free_context;
45274+ atomic_long_unchecked_t load_user_context;
45275+ atomic_long_unchecked_t load_kernel_context;
45276+ atomic_long_unchecked_t lock_kernel_context;
45277+ atomic_long_unchecked_t unlock_kernel_context;
45278+ atomic_long_unchecked_t steal_user_context;
45279+ atomic_long_unchecked_t steal_kernel_context;
45280+ atomic_long_unchecked_t steal_context_failed;
45281+ atomic_long_unchecked_t nopfn;
45282+ atomic_long_unchecked_t asid_new;
45283+ atomic_long_unchecked_t asid_next;
45284+ atomic_long_unchecked_t asid_wrap;
45285+ atomic_long_unchecked_t asid_reuse;
45286+ atomic_long_unchecked_t intr;
45287+ atomic_long_unchecked_t intr_cbr;
45288+ atomic_long_unchecked_t intr_tfh;
45289+ atomic_long_unchecked_t intr_spurious;
45290+ atomic_long_unchecked_t intr_mm_lock_failed;
45291+ atomic_long_unchecked_t call_os;
45292+ atomic_long_unchecked_t call_os_wait_queue;
45293+ atomic_long_unchecked_t user_flush_tlb;
45294+ atomic_long_unchecked_t user_unload_context;
45295+ atomic_long_unchecked_t user_exception;
45296+ atomic_long_unchecked_t set_context_option;
45297+ atomic_long_unchecked_t check_context_retarget_intr;
45298+ atomic_long_unchecked_t check_context_unload;
45299+ atomic_long_unchecked_t tlb_dropin;
45300+ atomic_long_unchecked_t tlb_preload_page;
45301+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
45302+ atomic_long_unchecked_t tlb_dropin_fail_upm;
45303+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
45304+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
45305+ atomic_long_unchecked_t tlb_dropin_fail_idle;
45306+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
45307+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
45308+ atomic_long_unchecked_t tfh_stale_on_fault;
45309+ atomic_long_unchecked_t mmu_invalidate_range;
45310+ atomic_long_unchecked_t mmu_invalidate_page;
45311+ atomic_long_unchecked_t flush_tlb;
45312+ atomic_long_unchecked_t flush_tlb_gru;
45313+ atomic_long_unchecked_t flush_tlb_gru_tgh;
45314+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
45315
45316- atomic_long_t copy_gpa;
45317- atomic_long_t read_gpa;
45318+ atomic_long_unchecked_t copy_gpa;
45319+ atomic_long_unchecked_t read_gpa;
45320
45321- atomic_long_t mesq_receive;
45322- atomic_long_t mesq_receive_none;
45323- atomic_long_t mesq_send;
45324- atomic_long_t mesq_send_failed;
45325- atomic_long_t mesq_noop;
45326- atomic_long_t mesq_send_unexpected_error;
45327- atomic_long_t mesq_send_lb_overflow;
45328- atomic_long_t mesq_send_qlimit_reached;
45329- atomic_long_t mesq_send_amo_nacked;
45330- atomic_long_t mesq_send_put_nacked;
45331- atomic_long_t mesq_page_overflow;
45332- atomic_long_t mesq_qf_locked;
45333- atomic_long_t mesq_qf_noop_not_full;
45334- atomic_long_t mesq_qf_switch_head_failed;
45335- atomic_long_t mesq_qf_unexpected_error;
45336- atomic_long_t mesq_noop_unexpected_error;
45337- atomic_long_t mesq_noop_lb_overflow;
45338- atomic_long_t mesq_noop_qlimit_reached;
45339- atomic_long_t mesq_noop_amo_nacked;
45340- atomic_long_t mesq_noop_put_nacked;
45341- atomic_long_t mesq_noop_page_overflow;
45342+ atomic_long_unchecked_t mesq_receive;
45343+ atomic_long_unchecked_t mesq_receive_none;
45344+ atomic_long_unchecked_t mesq_send;
45345+ atomic_long_unchecked_t mesq_send_failed;
45346+ atomic_long_unchecked_t mesq_noop;
45347+ atomic_long_unchecked_t mesq_send_unexpected_error;
45348+ atomic_long_unchecked_t mesq_send_lb_overflow;
45349+ atomic_long_unchecked_t mesq_send_qlimit_reached;
45350+ atomic_long_unchecked_t mesq_send_amo_nacked;
45351+ atomic_long_unchecked_t mesq_send_put_nacked;
45352+ atomic_long_unchecked_t mesq_page_overflow;
45353+ atomic_long_unchecked_t mesq_qf_locked;
45354+ atomic_long_unchecked_t mesq_qf_noop_not_full;
45355+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
45356+ atomic_long_unchecked_t mesq_qf_unexpected_error;
45357+ atomic_long_unchecked_t mesq_noop_unexpected_error;
45358+ atomic_long_unchecked_t mesq_noop_lb_overflow;
45359+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
45360+ atomic_long_unchecked_t mesq_noop_amo_nacked;
45361+ atomic_long_unchecked_t mesq_noop_put_nacked;
45362+ atomic_long_unchecked_t mesq_noop_page_overflow;
45363
45364 };
45365
45366@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
45367 tghop_invalidate, mcsop_last};
45368
45369 struct mcs_op_statistic {
45370- atomic_long_t count;
45371- atomic_long_t total;
45372+ atomic_long_unchecked_t count;
45373+ atomic_long_unchecked_t total;
45374 unsigned long max;
45375 };
45376
45377@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
45378
45379 #define STAT(id) do { \
45380 if (gru_options & OPT_STATS) \
45381- atomic_long_inc(&gru_stats.id); \
45382+ atomic_long_inc_unchecked(&gru_stats.id); \
45383 } while (0)
45384
45385 #ifdef CONFIG_SGI_GRU_DEBUG
45386diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
45387index c862cd4..0d176fe 100644
45388--- a/drivers/misc/sgi-xp/xp.h
45389+++ b/drivers/misc/sgi-xp/xp.h
45390@@ -288,7 +288,7 @@ struct xpc_interface {
45391 xpc_notify_func, void *);
45392 void (*received) (short, int, void *);
45393 enum xp_retval (*partid_to_nasids) (short, void *);
45394-};
45395+} __no_const;
45396
45397 extern struct xpc_interface xpc_interface;
45398
45399diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
45400index 01be66d..e3a0c7e 100644
45401--- a/drivers/misc/sgi-xp/xp_main.c
45402+++ b/drivers/misc/sgi-xp/xp_main.c
45403@@ -78,13 +78,13 @@ xpc_notloaded(void)
45404 }
45405
45406 struct xpc_interface xpc_interface = {
45407- (void (*)(int))xpc_notloaded,
45408- (void (*)(int))xpc_notloaded,
45409- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
45410- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
45411+ .connect = (void (*)(int))xpc_notloaded,
45412+ .disconnect = (void (*)(int))xpc_notloaded,
45413+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
45414+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
45415 void *))xpc_notloaded,
45416- (void (*)(short, int, void *))xpc_notloaded,
45417- (enum xp_retval(*)(short, void *))xpc_notloaded
45418+ .received = (void (*)(short, int, void *))xpc_notloaded,
45419+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
45420 };
45421 EXPORT_SYMBOL_GPL(xpc_interface);
45422
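
xp_main.c's stub table moves from positional to designated initializers, and bna_enet.c's bfa_ioc_cbfn gets the same treatment further down. Positional initialization of an ops struct silently misbinds every entry if a member is ever added or reordered; naming the fields (.connect =, .send_notify =, ...) makes the table robust and self-documenting. The difference in miniature:

    #include <stdio.h>

    struct iface_ops {
            void (*connect)(int partid);
            void (*disconnect)(int partid);
    };

    static void not_loaded(int partid)
    {
            (void)partid;
    }

    /* Reordering the members above would not disturb this table. */
    static struct iface_ops xpc_iface = {
            .connect    = not_loaded,
            .disconnect = not_loaded,
    };

    int main(void)
    {
            xpc_iface.connect(0);
            puts("ok");
            return 0;
    }
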
45423diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
45424index b94d5f7..7f494c5 100644
45425--- a/drivers/misc/sgi-xp/xpc.h
45426+++ b/drivers/misc/sgi-xp/xpc.h
45427@@ -835,6 +835,7 @@ struct xpc_arch_operations {
45428 void (*received_payload) (struct xpc_channel *, void *);
45429 void (*notify_senders_of_disconnect) (struct xpc_channel *);
45430 };
45431+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
45432
45433 /* struct xpc_partition act_state values (for XPC HB) */
45434
45435@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
45436 /* found in xpc_main.c */
45437 extern struct device *xpc_part;
45438 extern struct device *xpc_chan;
45439-extern struct xpc_arch_operations xpc_arch_ops;
45440+extern xpc_arch_operations_no_const xpc_arch_ops;
45441 extern int xpc_disengage_timelimit;
45442 extern int xpc_disengage_timedout;
45443 extern int xpc_activate_IRQ_rcvd;
45444diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
45445index 82dc574..8539ab2 100644
45446--- a/drivers/misc/sgi-xp/xpc_main.c
45447+++ b/drivers/misc/sgi-xp/xpc_main.c
45448@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
45449 .notifier_call = xpc_system_die,
45450 };
45451
45452-struct xpc_arch_operations xpc_arch_ops;
45453+xpc_arch_operations_no_const xpc_arch_ops;
45454
45455 /*
45456 * Timer function to enforce the timelimit on the partition disengage.
45457@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
45458
45459 if (((die_args->trapnr == X86_TRAP_MF) ||
45460 (die_args->trapnr == X86_TRAP_XF)) &&
45461- !user_mode_vm(die_args->regs))
45462+ !user_mode(die_args->regs))
45463 xpc_die_deactivate();
45464
45465 break;
45466diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
45467index 7b5424f..ed1d6ac 100644
45468--- a/drivers/mmc/card/block.c
45469+++ b/drivers/mmc/card/block.c
45470@@ -575,7 +575,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
45471 if (idata->ic.postsleep_min_us)
45472 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
45473
45474- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
45475+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
45476 err = -EFAULT;
45477 goto cmd_rel_host;
45478 }
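
In mmc_blk_ioctl_cmd() the fix is type hygiene rather than behavior: response is an array, so &(ic_ptr->response) and ic_ptr->response evaluate to the same address, but the former has pointer-to-array type while the copy means to target the first element. Dropping the & makes the destination type say what the copy does:

    #include <stdio.h>

    int main(void)
    {
            unsigned int response[4];

            /* Same address, different static type:
             * response  decays to unsigned int *
             * &response has type unsigned int (*)[4] */
            printf("%p == %p\n", (void *)response, (void *)&response);
            return 0;
    }
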
45479diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
45480index 357bbc5..3e049c1 100644
45481--- a/drivers/mmc/card/queue.c
45482+++ b/drivers/mmc/card/queue.c
45483@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
45484 struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
45485
45486 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
45487- limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
45488+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
45489
45490 mq->card = card;
45491 mq->queue = blk_init_queue(mmc_request_fn, lock);
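
The mmc queue hunk is a classic 32-bit shift-overflow fix: dma_max_pfn() returns an unsigned long, and on a 32-bit machine shifting a large PFN left by PAGE_SHIFT wraps before the result is widened into the u64 limit. Casting first makes the shift happen in 64 bits. Demonstrated with an illustrative PFN:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint32_t max_pfn = 0xFFFFF0u;   /* illustrative PFN */

            uint32_t truncated = max_pfn << PAGE_SHIFT;           /* wraps   */
            uint64_t widened = (uint64_t)max_pfn << PAGE_SHIFT;   /* the fix */

            printf("%#x vs %#llx\n", truncated,
                   (unsigned long long)widened);
            return 0;
    }
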
45492diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
45493index e5b5eeb..7bf2212 100644
45494--- a/drivers/mmc/core/mmc_ops.c
45495+++ b/drivers/mmc/core/mmc_ops.c
45496@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
45497 void *data_buf;
45498 int is_on_stack;
45499
45500- is_on_stack = object_is_on_stack(buf);
45501+ is_on_stack = object_starts_on_stack(buf);
45502 if (is_on_stack) {
45503 /*
45504 * dma onto stack is unsafe/nonportable, but callers to this
45505diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
45506index 6bf24ab..13d0293b 100644
45507--- a/drivers/mmc/host/dw_mmc.h
45508+++ b/drivers/mmc/host/dw_mmc.h
45509@@ -258,5 +258,5 @@ struct dw_mci_drv_data {
45510 int (*parse_dt)(struct dw_mci *host);
45511 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
45512 struct dw_mci_tuning_data *tuning_data);
45513-};
45514+} __do_const;
45515 #endif /* _DW_MMC_H_ */
45516diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
45517index f320579..7b7ebac 100644
45518--- a/drivers/mmc/host/mmci.c
45519+++ b/drivers/mmc/host/mmci.c
45520@@ -1504,7 +1504,9 @@ static int mmci_probe(struct amba_device *dev,
45521 }
45522
45523 if (variant->busy_detect) {
45524- mmci_ops.card_busy = mmci_card_busy;
45525+ pax_open_kernel();
45526+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
45527+ pax_close_kernel();
45528 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
45529 }
45530
45531diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
45532index 1dcaf8a..025af25 100644
45533--- a/drivers/mmc/host/sdhci-esdhc-imx.c
45534+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
45535@@ -1009,9 +1009,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
45536 host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
45537 }
45538
45539- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
45540- sdhci_esdhc_ops.platform_execute_tuning =
45541+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
45542+ pax_open_kernel();
45543+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
45544 esdhc_executing_tuning;
45545+ pax_close_kernel();
45546+ }
45547 boarddata = &imx_data->boarddata;
45548 if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) {
45549 if (!host->mmc->parent->platform_data) {
45550diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
45551index 6debda9..2ba7427 100644
45552--- a/drivers/mmc/host/sdhci-s3c.c
45553+++ b/drivers/mmc/host/sdhci-s3c.c
45554@@ -668,9 +668,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
45555 * we can use overriding functions instead of default.
45556 */
45557 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
45558- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
45559- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
45560- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
45561+ pax_open_kernel();
45562+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
45563+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
45564+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
45565+ pax_close_kernel();
45566 }
45567
45568 /* It supports additional host capabilities if needed */
45569diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
45570index 096993f..f02c23b 100644
45571--- a/drivers/mtd/chips/cfi_cmdset_0020.c
45572+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
45573@@ -669,7 +669,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
45574 size_t totlen = 0, thislen;
45575 int ret = 0;
45576 size_t buflen = 0;
45577- static char *buffer;
45578+ char *buffer;
45579
45580 if (!ECCBUF_SIZE) {
45581 /* We should fall back to a general writev implementation.
45582diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
45583index 370b9dd..1a1176b 100644
45584--- a/drivers/mtd/nand/denali.c
45585+++ b/drivers/mtd/nand/denali.c
45586@@ -24,6 +24,7 @@
45587 #include <linux/slab.h>
45588 #include <linux/mtd/mtd.h>
45589 #include <linux/module.h>
45590+#include <linux/slab.h>
45591
45592 #include "denali.h"
45593
45594diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
45595index 51b9d6a..52af9a7 100644
45596--- a/drivers/mtd/nftlmount.c
45597+++ b/drivers/mtd/nftlmount.c
45598@@ -24,6 +24,7 @@
45599 #include <asm/errno.h>
45600 #include <linux/delay.h>
45601 #include <linux/slab.h>
45602+#include <linux/sched.h>
45603 #include <linux/mtd/mtd.h>
45604 #include <linux/mtd/nand.h>
45605 #include <linux/mtd/nftl.h>
45606diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
45607index 4b8e895..6b3c498 100644
45608--- a/drivers/mtd/sm_ftl.c
45609+++ b/drivers/mtd/sm_ftl.c
45610@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
45611 #define SM_CIS_VENDOR_OFFSET 0x59
45612 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
45613 {
45614- struct attribute_group *attr_group;
45615+ attribute_group_no_const *attr_group;
45616 struct attribute **attributes;
45617 struct sm_sysfs_attribute *vendor_attribute;
45618
45619diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
45620index 4b8c58b..a200546 100644
45621--- a/drivers/net/bonding/bond_main.c
45622+++ b/drivers/net/bonding/bond_main.c
45623@@ -4527,6 +4527,7 @@ static void __exit bonding_exit(void)
45624
45625 bond_netlink_fini();
45626 unregister_pernet_subsys(&bond_net_ops);
45627+ rtnl_link_unregister(&bond_link_ops);
45628
45629 #ifdef CONFIG_NET_POLL_CONTROLLER
45630 /*
45631diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
45632index 40e7b1c..6a70fff 100644
45633--- a/drivers/net/bonding/bond_netlink.c
45634+++ b/drivers/net/bonding/bond_netlink.c
45635@@ -102,7 +102,7 @@ nla_put_failure:
45636 return -EMSGSIZE;
45637 }
45638
45639-struct rtnl_link_ops bond_link_ops __read_mostly = {
45640+struct rtnl_link_ops bond_link_ops = {
45641 .kind = "bond",
45642 .priv_size = sizeof(struct bonding),
45643 .setup = bond_setup,
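
Two independent bonding changes sit here: bonding_exit() gains an rtnl_link_unregister() call, presumably balancing the registration done at init so the link ops are torn down on module unload, and bond_link_ops loses its __read_mostly placement. The latter is most plausibly a constify side effect — __read_mostly is a section attribute, and once the plugin wants to move the now effectively const ops structure into read-only data, a competing explicit section placement would provoke a section conflict. Roughly:

    /* __read_mostly is a section placement, sketched with its real kernel
     * definition. An object the constify plugin forces into a read-only
     * section cannot also carry this attribute. */
    #define __read_mostly __attribute__((section(".data..read_mostly")))

    struct link_ops_model { int kind; } bond_ops /* __read_mostly */ = { 1 };

    int main(void)
    {
            return bond_ops.kind - 1;
    }
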
45644diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
45645index 3c06947..fd0e5de 100644
45646--- a/drivers/net/can/Kconfig
45647+++ b/drivers/net/can/Kconfig
45648@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
45649
45650 config CAN_FLEXCAN
45651 tristate "Support for Freescale FLEXCAN based chips"
45652- depends on ARM || PPC
45653+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
45654 ---help---
45655 Say Y here if you want to support for Freescale FlexCAN.
45656
45657diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
45658index 36fa577..a158806 100644
45659--- a/drivers/net/ethernet/8390/ax88796.c
45660+++ b/drivers/net/ethernet/8390/ax88796.c
45661@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
45662 if (ax->plat->reg_offsets)
45663 ei_local->reg_offset = ax->plat->reg_offsets;
45664 else {
45665+ resource_size_t _mem_size = mem_size;
45666+ do_div(_mem_size, 0x18);
45667 ei_local->reg_offset = ax->reg_offsets;
45668 for (ret = 0; ret < 0x18; ret++)
45669- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
45670+ ax->reg_offsets[ret] = _mem_size * ret;
45671 }
45672
45673 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
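
ax_probe() divided a resource_size_t by 0x18 with the "/" operator; resource_size_t can be 64-bit even on 32-bit ARM (LPAE), and a 64-bit division there would need a libgcc helper the kernel does not link. The fix routes it through do_div(), the kernel's 64-by-32 division primitive, and hoists it out of the loop so the per-register stride is computed once. A userspace model of the helper's contract — the real do_div() is a macro that takes the dividend as an lvalue and leaves the quotient in it:

    #include <stdint.h>
    #include <stdio.h>

    /* 64-by-32 divide: quotient left in *n, remainder returned. */
    static uint32_t do_div_model(uint64_t *n, uint32_t base)
    {
            uint32_t rem = (uint32_t)(*n % base);

            *n /= base;     /* fine in userspace; the kernel avoids this */
            return rem;
    }

    int main(void)
    {
            uint64_t mem_size = 0x180;      /* illustrative region size */

            do_div_model(&mem_size, 0x18);
            printf("per-register stride: %#llx\n",
                   (unsigned long long)mem_size);
            return 0;
    }
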
45674diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45675index 41f3ca5a..1ee5364 100644
45676--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45677+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
45678@@ -1139,7 +1139,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
45679 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
45680 {
45681 /* RX_MODE controlling object */
45682- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
45683+ bnx2x_init_rx_mode_obj(bp);
45684
45685 /* multicast configuration controlling object */
45686 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
45687diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45688index 18438a5..c923b8e 100644
45689--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45690+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
45691@@ -2591,15 +2591,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
45692 return rc;
45693 }
45694
45695-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
45696- struct bnx2x_rx_mode_obj *o)
45697+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
45698 {
45699 if (CHIP_IS_E1x(bp)) {
45700- o->wait_comp = bnx2x_empty_rx_mode_wait;
45701- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
45702+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
45703+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
45704 } else {
45705- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
45706- o->config_rx_mode = bnx2x_set_rx_mode_e2;
45707+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
45708+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
45709 }
45710 }
45711
45712diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45713index 6a53c15..6e7d1e7 100644
45714--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45715+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
45716@@ -1332,8 +1332,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
45717
45718 /********************* RX MODE ****************/
45719
45720-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
45721- struct bnx2x_rx_mode_obj *o);
45722+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
45723
45724 /**
45725 * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
45726diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
45727index cf9917b..c658558 100644
45728--- a/drivers/net/ethernet/broadcom/tg3.h
45729+++ b/drivers/net/ethernet/broadcom/tg3.h
45730@@ -150,6 +150,7 @@
45731 #define CHIPREV_ID_5750_A0 0x4000
45732 #define CHIPREV_ID_5750_A1 0x4001
45733 #define CHIPREV_ID_5750_A3 0x4003
45734+#define CHIPREV_ID_5750_C1 0x4201
45735 #define CHIPREV_ID_5750_C2 0x4202
45736 #define CHIPREV_ID_5752_A0_HW 0x5000
45737 #define CHIPREV_ID_5752_A0 0x6000
45738diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
45739index 3ca77fa..fcc015f 100644
45740--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
45741+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
45742@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
45743 }
45744
45745 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
45746- bna_cb_ioceth_enable,
45747- bna_cb_ioceth_disable,
45748- bna_cb_ioceth_hbfail,
45749- bna_cb_ioceth_reset
45750+ .enable_cbfn = bna_cb_ioceth_enable,
45751+ .disable_cbfn = bna_cb_ioceth_disable,
45752+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
45753+ .reset_cbfn = bna_cb_ioceth_reset
45754 };
45755
45756 static void bna_attr_init(struct bna_ioceth *ioceth)
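
/*
 * Sketch of the designated-initializer conversion applied throughout this
 * patch (structure and names hypothetical): binding members by name keeps
 * working if fields are ever reordered, and unnamed callbacks stay NULL
 * instead of every pointer silently shifting one slot.
 */
struct demo_cbfn {
	void (*enable_cbfn)(void *cbarg);
	void (*disable_cbfn)(void *cbarg);
	void (*reset_cbfn)(void *cbarg);
};

static void demo_enable(void *cbarg) { }

static struct demo_cbfn demo_cbs = {
	.enable_cbfn = demo_enable,
	/* .disable_cbfn and .reset_cbfn remain NULL */
};
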
45757diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45758index 8cffcdf..aadf043 100644
45759--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45760+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
45761@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
45762 */
45763 struct l2t_skb_cb {
45764 arp_failure_handler_func arp_failure_handler;
45765-};
45766+} __no_const;
45767
45768 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
45769
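
/*
 * Context for __no_const (a grsecurity/PaX marker whose definition lives
 * in this patch's compiler.h changes, not in upstream C): the constify
 * GCC plugin makes structures consisting only of function pointers
 * implicitly const, so a type whose instances must stay writable -- here
 * the handler is assigned per-skb at runtime -- opts out explicitly:
 */
struct demo_handler {
	void (*handle)(void *ctx);
} __no_const;			/* instances stay assignable under constify */
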
45770diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45771index fff02ed..d421412 100644
45772--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45773+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
45774@@ -2120,7 +2120,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
45775
45776 int i;
45777 struct adapter *ap = netdev2adap(dev);
45778- static const unsigned int *reg_ranges;
45779+ const unsigned int *reg_ranges;
45780 int arr_size = 0, buf_size = 0;
45781
45782 if (is_t4(ap->params.chip)) {
45783diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
45784index c05b66d..ed69872 100644
45785--- a/drivers/net/ethernet/dec/tulip/de4x5.c
45786+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
45787@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
45788 for (i=0; i<ETH_ALEN; i++) {
45789 tmp.addr[i] = dev->dev_addr[i];
45790 }
45791- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
45792+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
45793 break;
45794
45795 case DE4X5_SET_HWADDR: /* Set the hardware address */
45796@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
45797 spin_lock_irqsave(&lp->lock, flags);
45798 memcpy(&statbuf, &lp->pktStats, ioc->len);
45799 spin_unlock_irqrestore(&lp->lock, flags);
45800- if (copy_to_user(ioc->data, &statbuf, ioc->len))
45801+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
45802 return -EFAULT;
45803 break;
45804 }
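
/*
 * Sketch of the length check added above (hypothetical helper): ioc->len
 * is user-controlled, so it must be clamped against the kernel buffer
 * before copy_to_user(), otherwise adjacent stack memory leaks to
 * userspace.
 */
#include <linux/uaccess.h>

static int demo_copy_capped(void __user *dst, const void *src,
			    size_t user_len, size_t buf_len)
{
	if (user_len > buf_len)
		return -EFAULT;
	return copy_to_user(dst, src, user_len) ? -EFAULT : 0;
}
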
45805diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
45806index a37039d..a51d7e8 100644
45807--- a/drivers/net/ethernet/emulex/benet/be_main.c
45808+++ b/drivers/net/ethernet/emulex/benet/be_main.c
45809@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
45810
45811 if (wrapped)
45812 newacc += 65536;
45813- ACCESS_ONCE(*acc) = newacc;
45814+ ACCESS_ONCE_RW(*acc) = newacc;
45815 }
45816
45817 static void populate_erx_stats(struct be_adapter *adapter,
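
/*
 * Note on the ACCESS_ONCE_RW conversions throughout this patch: the patch
 * const-qualifies ACCESS_ONCE() so it compiles only for reads, and every
 * intentional write is switched to the _RW variant. Roughly (the
 * authoritative definitions are in the patch's compiler.h hunks):
 *
 *	#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
 *	#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))
 */
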
45818diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
45819index 212f44b..fb69959 100644
45820--- a/drivers/net/ethernet/faraday/ftgmac100.c
45821+++ b/drivers/net/ethernet/faraday/ftgmac100.c
45822@@ -31,6 +31,8 @@
45823 #include <linux/netdevice.h>
45824 #include <linux/phy.h>
45825 #include <linux/platform_device.h>
45826+#include <linux/interrupt.h>
45827+#include <linux/irqreturn.h>
45828 #include <net/ip.h>
45829
45830 #include "ftgmac100.h"
45831diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
45832index 8be5b40..081bc1b 100644
45833--- a/drivers/net/ethernet/faraday/ftmac100.c
45834+++ b/drivers/net/ethernet/faraday/ftmac100.c
45835@@ -31,6 +31,8 @@
45836 #include <linux/module.h>
45837 #include <linux/netdevice.h>
45838 #include <linux/platform_device.h>
45839+#include <linux/interrupt.h>
45840+#include <linux/irqreturn.h>
45841
45842 #include "ftmac100.h"
45843
45844diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45845index 5184e2a..acb28c3 100644
45846--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45847+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
45848@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
45849 }
45850
45851 /* update the base incval used to calculate frequency adjustment */
45852- ACCESS_ONCE(adapter->base_incval) = incval;
45853+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
45854 smp_mb();
45855
45856 /* need lock to prevent incorrect read while modifying cyclecounter */
45857diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
45858index fbe5363..266b4e3 100644
45859--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
45860+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
45861@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
45862 struct __vxge_hw_fifo *fifo;
45863 struct vxge_hw_fifo_config *config;
45864 u32 txdl_size, txdl_per_memblock;
45865- struct vxge_hw_mempool_cbs fifo_mp_callback;
45866+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
45867+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
45868+ };
45869+
45870 struct __vxge_hw_virtualpath *vpath;
45871
45872 if ((vp == NULL) || (attr == NULL)) {
45873@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
45874 goto exit;
45875 }
45876
45877- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
45878-
45879 fifo->mempool =
45880 __vxge_hw_mempool_create(vpath->hldev,
45881 fifo->config->memblock_size,
45882diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45883index 918e18d..4ca3650 100644
45884--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45885+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
45886@@ -2086,7 +2086,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
45887 adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
45888 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
45889 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
45890- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
45891+ pax_open_kernel();
45892+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
45893+ pax_close_kernel();
45894 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45895 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
45896 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
45897diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45898index 734d286..b017bf5 100644
45899--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45900+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
45901@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
45902 case QLCNIC_NON_PRIV_FUNC:
45903 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
45904 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45905- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
45906+ pax_open_kernel();
45907+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
45908+ pax_close_kernel();
45909 break;
45910 case QLCNIC_PRIV_FUNC:
45911 ahw->op_mode = QLCNIC_PRIV_FUNC;
45912 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
45913- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
45914+ pax_open_kernel();
45915+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
45916+ pax_close_kernel();
45917 break;
45918 case QLCNIC_MGMT_FUNC:
45919 ahw->op_mode = QLCNIC_MGMT_FUNC;
45920 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
45921- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
45922+ pax_open_kernel();
45923+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
45924+ pax_close_kernel();
45925 break;
45926 default:
45927 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
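
/*
 * Sketch of the pax_open_kernel() pattern used above (callback name
 * hypothetical): with the ops structure constified and living in
 * read-only memory, a runtime update briefly lifts write protection
 * (e.g. via CR0.WP on x86) and casts away the implied const:
 */
pax_open_kernel();
*(void **)&nic_ops->init_driver = demo_init_driver;
pax_close_kernel();
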
45928diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45929index 7763962..c3499a7 100644
45930--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45931+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
45932@@ -1108,7 +1108,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
45933 struct qlcnic_dump_entry *entry;
45934 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
45935 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
45936- static const struct qlcnic_dump_operations *fw_dump_ops;
45937+ const struct qlcnic_dump_operations *fw_dump_ops;
45938 struct device *dev = &adapter->pdev->dev;
45939 struct qlcnic_hardware_context *ahw;
45940 void *temp_buffer;
45941diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
45942index c737f0e..32b8682 100644
45943--- a/drivers/net/ethernet/realtek/r8169.c
45944+++ b/drivers/net/ethernet/realtek/r8169.c
45945@@ -759,22 +759,22 @@ struct rtl8169_private {
45946 struct mdio_ops {
45947 void (*write)(struct rtl8169_private *, int, int);
45948 int (*read)(struct rtl8169_private *, int);
45949- } mdio_ops;
45950+ } __no_const mdio_ops;
45951
45952 struct pll_power_ops {
45953 void (*down)(struct rtl8169_private *);
45954 void (*up)(struct rtl8169_private *);
45955- } pll_power_ops;
45956+ } __no_const pll_power_ops;
45957
45958 struct jumbo_ops {
45959 void (*enable)(struct rtl8169_private *);
45960 void (*disable)(struct rtl8169_private *);
45961- } jumbo_ops;
45962+ } __no_const jumbo_ops;
45963
45964 struct csi_ops {
45965 void (*write)(struct rtl8169_private *, int, int);
45966 u32 (*read)(struct rtl8169_private *, int);
45967- } csi_ops;
45968+ } __no_const csi_ops;
45969
45970 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
45971 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
45972diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
45973index a124103..59c74f8 100644
45974--- a/drivers/net/ethernet/sfc/ptp.c
45975+++ b/drivers/net/ethernet/sfc/ptp.c
45976@@ -541,7 +541,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
45977 ptp->start.dma_addr);
45978
45979 /* Clear flag that signals MC ready */
45980- ACCESS_ONCE(*start) = 0;
45981+ ACCESS_ONCE_RW(*start) = 0;
45982 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
45983 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
45984 EFX_BUG_ON_PARANOID(rc);
45985diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
45986index 50617c5..b13724c 100644
45987--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
45988+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
45989@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
45990
45991 writel(value, ioaddr + MMC_CNTRL);
45992
45993- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
45994- MMC_CNTRL, value);
45995+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
45996+// MMC_CNTRL, value);
45997 }
45998
45999 /* To mask all interrupts.*/
46000diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
46001index e6fe0d8..2b7d752 100644
46002--- a/drivers/net/hyperv/hyperv_net.h
46003+++ b/drivers/net/hyperv/hyperv_net.h
46004@@ -101,7 +101,7 @@ struct rndis_device {
46005
46006 enum rndis_device_state state;
46007 bool link_state;
46008- atomic_t new_req_id;
46009+ atomic_unchecked_t new_req_id;
46010
46011 spinlock_t request_lock;
46012 struct list_head req_list;
46013diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
46014index 0775f0a..d4fb316 100644
46015--- a/drivers/net/hyperv/rndis_filter.c
46016+++ b/drivers/net/hyperv/rndis_filter.c
46017@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
46018 * template
46019 */
46020 set = &rndis_msg->msg.set_req;
46021- set->req_id = atomic_inc_return(&dev->new_req_id);
46022+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
46023
46024 /* Add to the request list */
46025 spin_lock_irqsave(&dev->request_lock, flags);
46026@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
46027
46028 /* Setup the rndis set */
46029 halt = &request->request_msg.msg.halt_req;
46030- halt->req_id = atomic_inc_return(&dev->new_req_id);
46031+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
46032
46033 /* Ignore return since this msg is optional. */
46034 rndis_filter_send_request(dev, request);
46035diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
46036index bf0d55e..82bcfbd1 100644
46037--- a/drivers/net/ieee802154/fakehard.c
46038+++ b/drivers/net/ieee802154/fakehard.c
46039@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
46040 phy->transmit_power = 0xbf;
46041
46042 dev->netdev_ops = &fake_ops;
46043- dev->ml_priv = &fake_mlme;
46044+ dev->ml_priv = (void *)&fake_mlme;
46045
46046 priv = netdev_priv(dev);
46047 priv->phy = phy;
46048diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
46049index d7e2907..1f8bfee 100644
46050--- a/drivers/net/macvlan.c
46051+++ b/drivers/net/macvlan.c
46052@@ -993,13 +993,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
46053 int macvlan_link_register(struct rtnl_link_ops *ops)
46054 {
46055 /* common fields */
46056- ops->priv_size = sizeof(struct macvlan_dev);
46057- ops->validate = macvlan_validate;
46058- ops->maxtype = IFLA_MACVLAN_MAX;
46059- ops->policy = macvlan_policy;
46060- ops->changelink = macvlan_changelink;
46061- ops->get_size = macvlan_get_size;
46062- ops->fill_info = macvlan_fill_info;
46063+ pax_open_kernel();
46064+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
46065+ *(void **)&ops->validate = macvlan_validate;
46066+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
46067+ *(const void **)&ops->policy = macvlan_policy;
46068+ *(void **)&ops->changelink = macvlan_changelink;
46069+ *(void **)&ops->get_size = macvlan_get_size;
46070+ *(void **)&ops->fill_info = macvlan_fill_info;
46071+ pax_close_kernel();
46072
46073 return rtnl_link_register(ops);
46074 };
46075@@ -1054,7 +1056,7 @@ static int macvlan_device_event(struct notifier_block *unused,
46076 return NOTIFY_DONE;
46077 }
46078
46079-static struct notifier_block macvlan_notifier_block __read_mostly = {
46080+static struct notifier_block macvlan_notifier_block = {
46081 .notifier_call = macvlan_device_event,
46082 };
46083
46084diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
46085index 2a89da0..c17fe1d 100644
46086--- a/drivers/net/macvtap.c
46087+++ b/drivers/net/macvtap.c
46088@@ -1012,7 +1012,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
46089 }
46090
46091 ret = 0;
46092- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
46093+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
46094 put_user(q->flags, &ifr->ifr_flags))
46095 ret = -EFAULT;
46096 macvtap_put_vlan(vlan);
46097@@ -1182,7 +1182,7 @@ static int macvtap_device_event(struct notifier_block *unused,
46098 return NOTIFY_DONE;
46099 }
46100
46101-static struct notifier_block macvtap_notifier_block __read_mostly = {
46102+static struct notifier_block macvtap_notifier_block = {
46103 .notifier_call = macvtap_device_event,
46104 };
46105
46106diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
46107index daec9b0..6428fcb 100644
46108--- a/drivers/net/phy/mdio-bitbang.c
46109+++ b/drivers/net/phy/mdio-bitbang.c
46110@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
46111 struct mdiobb_ctrl *ctrl = bus->priv;
46112
46113 module_put(ctrl->ops->owner);
46114+ mdiobus_unregister(bus);
46115 mdiobus_free(bus);
46116 }
46117 EXPORT_SYMBOL(free_mdio_bitbang);
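
/*
 * The added mdiobus_unregister() makes teardown mirror setup in reverse:
 *
 *	mdiobus_unregister(bus);	// undo mdiobus_register()
 *	mdiobus_free(bus);		// then release the allocation
 */
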
46118diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
46119index 72ff14b..11d442d 100644
46120--- a/drivers/net/ppp/ppp_generic.c
46121+++ b/drivers/net/ppp/ppp_generic.c
46122@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46123 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
46124 struct ppp_stats stats;
46125 struct ppp_comp_stats cstats;
46126- char *vers;
46127
46128 switch (cmd) {
46129 case SIOCGPPPSTATS:
46130@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
46131 break;
46132
46133 case SIOCGPPPVER:
46134- vers = PPP_VERSION;
46135- if (copy_to_user(addr, vers, strlen(vers) + 1))
46136+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
46137 break;
46138 err = 0;
46139 break;
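
/*
 * Why the hunk above can drop strlen(): for a char-array literal such as
 * PPP_VERSION, sizeof already counts the terminating NUL and is evaluated
 * at compile time. Sketch with a hypothetical literal:
 */
static const char demo_vers[] = "x.y.z";
/* sizeof(demo_vers) == strlen(demo_vers) + 1 == 6, with no runtime call */
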
46140diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
46141index 1252d9c..80e660b 100644
46142--- a/drivers/net/slip/slhc.c
46143+++ b/drivers/net/slip/slhc.c
46144@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
46145 register struct tcphdr *thp;
46146 register struct iphdr *ip;
46147 register struct cstate *cs;
46148- int len, hdrlen;
46149+ long len, hdrlen;
46150 unsigned char *cp = icp;
46151
46152 /* We've got a compressed packet; read the change byte */
46153diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
46154index b75ae5b..953c157 100644
46155--- a/drivers/net/team/team.c
46156+++ b/drivers/net/team/team.c
46157@@ -2865,7 +2865,7 @@ static int team_device_event(struct notifier_block *unused,
46158 return NOTIFY_DONE;
46159 }
46160
46161-static struct notifier_block team_notifier_block __read_mostly = {
46162+static struct notifier_block team_notifier_block = {
46163 .notifier_call = team_device_event,
46164 };
46165
46166diff --git a/drivers/net/tun.c b/drivers/net/tun.c
46167index 55c9238..ebb6ee5 100644
46168--- a/drivers/net/tun.c
46169+++ b/drivers/net/tun.c
46170@@ -1841,7 +1841,7 @@ unlock:
46171 }
46172
46173 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
46174- unsigned long arg, int ifreq_len)
46175+ unsigned long arg, size_t ifreq_len)
46176 {
46177 struct tun_file *tfile = file->private_data;
46178 struct tun_struct *tun;
46179@@ -1854,6 +1854,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
46180 unsigned int ifindex;
46181 int ret;
46182
46183+ if (ifreq_len > sizeof ifr)
46184+ return -EFAULT;
46185+
46186 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
46187 if (copy_from_user(&ifr, argp, ifreq_len))
46188 return -EFAULT;
46189diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
46190index 1a48234..a555339 100644
46191--- a/drivers/net/usb/hso.c
46192+++ b/drivers/net/usb/hso.c
46193@@ -71,7 +71,7 @@
46194 #include <asm/byteorder.h>
46195 #include <linux/serial_core.h>
46196 #include <linux/serial.h>
46197-
46198+#include <asm/local.h>
46199
46200 #define MOD_AUTHOR "Option Wireless"
46201 #define MOD_DESCRIPTION "USB High Speed Option driver"
46202@@ -1179,7 +1179,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
46203 struct urb *urb;
46204
46205 urb = serial->rx_urb[0];
46206- if (serial->port.count > 0) {
46207+ if (atomic_read(&serial->port.count) > 0) {
46208 count = put_rxbuf_data(urb, serial);
46209 if (count == -1)
46210 return;
46211@@ -1215,7 +1215,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
46212 DUMP1(urb->transfer_buffer, urb->actual_length);
46213
46214 /* Anyone listening? */
46215- if (serial->port.count == 0)
46216+ if (atomic_read(&serial->port.count) == 0)
46217 return;
46218
46219 if (status == 0) {
46220@@ -1297,8 +1297,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
46221 tty_port_tty_set(&serial->port, tty);
46222
46223 /* check for port already opened, if not set the termios */
46224- serial->port.count++;
46225- if (serial->port.count == 1) {
46226+ if (atomic_inc_return(&serial->port.count) == 1) {
46227 serial->rx_state = RX_IDLE;
46228 /* Force default termio settings */
46229 _hso_serial_set_termios(tty, NULL);
46230@@ -1310,7 +1309,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
46231 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
46232 if (result) {
46233 hso_stop_serial_device(serial->parent);
46234- serial->port.count--;
46235+ atomic_dec(&serial->port.count);
46236 kref_put(&serial->parent->ref, hso_serial_ref_free);
46237 }
46238 } else {
46239@@ -1347,10 +1346,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
46240
46241 /* reset the rts and dtr */
46242 /* do the actual close */
46243- serial->port.count--;
46244+ atomic_dec(&serial->port.count);
46245
46246- if (serial->port.count <= 0) {
46247- serial->port.count = 0;
46248+ if (atomic_read(&serial->port.count) <= 0) {
46249+ atomic_set(&serial->port.count, 0);
46250 tty_port_tty_set(&serial->port, NULL);
46251 if (!usb_gone)
46252 hso_stop_serial_device(serial->parent);
46253@@ -1426,7 +1425,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
46254
46255 /* the actual setup */
46256 spin_lock_irqsave(&serial->serial_lock, flags);
46257- if (serial->port.count)
46258+ if (atomic_read(&serial->port.count))
46259 _hso_serial_set_termios(tty, old);
46260 else
46261 tty->termios = *old;
46262@@ -1895,7 +1894,7 @@ static void intr_callback(struct urb *urb)
46263 D1("Pending read interrupt on port %d\n", i);
46264 spin_lock(&serial->serial_lock);
46265 if (serial->rx_state == RX_IDLE &&
46266- serial->port.count > 0) {
46267+ atomic_read(&serial->port.count) > 0) {
46268 /* Setup and send a ctrl req read on
46269 * port i */
46270 if (!serial->rx_urb_filled[0]) {
46271@@ -3071,7 +3070,7 @@ static int hso_resume(struct usb_interface *iface)
46272 /* Start all serial ports */
46273 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
46274 if (serial_table[i] && (serial_table[i]->interface == iface)) {
46275- if (dev2ser(serial_table[i])->port.count) {
46276+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
46277 result =
46278 hso_start_serial_device(serial_table[i], GFP_NOIO);
46279 hso_kick_transmit(dev2ser(serial_table[i]));
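
/*
 * Sketch of the atomic open/close accounting above (elsewhere this patch
 * converts tty_port.count to atomic_t): "first open" and "last close" are
 * detected from the returned value, without stretching a lock around the
 * counter. A tightened variant with a hypothetical counter and hooks:
 */
static atomic_t demo_open_count = ATOMIC_INIT(0);

static void demo_open(void)
{
	if (atomic_inc_return(&demo_open_count) == 1)
		; /* first opener: initialize hardware */
}

static void demo_close(void)
{
	if (atomic_dec_return(&demo_open_count) == 0)
		; /* last closer: tear down hardware */
}
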
46280diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
46281index a79e9d3..78cd4fa 100644
46282--- a/drivers/net/usb/sierra_net.c
46283+++ b/drivers/net/usb/sierra_net.c
46284@@ -52,7 +52,7 @@ static const char driver_name[] = "sierra_net";
46285 /* atomic counter partially included in MAC address to make sure 2 devices
46286 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
46287 */
46288-static atomic_t iface_counter = ATOMIC_INIT(0);
46289+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
46290
46291 /*
46292 * SYNC Timer Delay definition used to set the expiry time
46293@@ -698,7 +698,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
46294 dev->net->netdev_ops = &sierra_net_device_ops;
46295
46296 /* change MAC addr to include, ifacenum, and to be unique */
46297- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
46298+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
46299 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
46300
46301 /* we will have to manufacture ethernet headers, prepare template */
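
/*
 * Note on atomic_unchecked_t: under PAX_REFCOUNT, plain atomic_t
 * operations trap on overflow to block refcount exploits; counters whose
 * wraparound is deliberate and harmless -- request IDs, the MAC nonce
 * above -- use the *_unchecked API instead. Hypothetical sketch:
 */
static atomic_unchecked_t demo_counter = ATOMIC_INIT(0);

static u8 demo_next_id(void)
{
	return (u8)atomic_inc_return_unchecked(&demo_counter);	/* wrap OK */
}
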
46302diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
46303index 0247973..088193a 100644
46304--- a/drivers/net/vxlan.c
46305+++ b/drivers/net/vxlan.c
46306@@ -2615,7 +2615,7 @@ nla_put_failure:
46307 return -EMSGSIZE;
46308 }
46309
46310-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
46311+static struct rtnl_link_ops vxlan_link_ops = {
46312 .kind = "vxlan",
46313 .maxtype = IFLA_VXLAN_MAX,
46314 .policy = vxlan_policy,
46315diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
46316index 5920c99..ff2e4a5 100644
46317--- a/drivers/net/wan/lmc/lmc_media.c
46318+++ b/drivers/net/wan/lmc/lmc_media.c
46319@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
46320 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
46321
46322 lmc_media_t lmc_ds3_media = {
46323- lmc_ds3_init, /* special media init stuff */
46324- lmc_ds3_default, /* reset to default state */
46325- lmc_ds3_set_status, /* reset status to state provided */
46326- lmc_dummy_set_1, /* set clock source */
46327- lmc_dummy_set2_1, /* set line speed */
46328- lmc_ds3_set_100ft, /* set cable length */
46329- lmc_ds3_set_scram, /* set scrambler */
46330- lmc_ds3_get_link_status, /* get link status */
46331- lmc_dummy_set_1, /* set link status */
46332- lmc_ds3_set_crc_length, /* set CRC length */
46333- lmc_dummy_set_1, /* set T1 or E1 circuit type */
46334- lmc_ds3_watchdog
46335+ .init = lmc_ds3_init, /* special media init stuff */
46336+ .defaults = lmc_ds3_default, /* reset to default state */
46337+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
46338+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
46339+ .set_speed = lmc_dummy_set2_1, /* set line speed */
46340+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
46341+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
46342+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
46343+ .set_link_status = lmc_dummy_set_1, /* set link status */
46344+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
46345+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
46346+ .watchdog = lmc_ds3_watchdog
46347 };
46348
46349 lmc_media_t lmc_hssi_media = {
46350- lmc_hssi_init, /* special media init stuff */
46351- lmc_hssi_default, /* reset to default state */
46352- lmc_hssi_set_status, /* reset status to state provided */
46353- lmc_hssi_set_clock, /* set clock source */
46354- lmc_dummy_set2_1, /* set line speed */
46355- lmc_dummy_set_1, /* set cable length */
46356- lmc_dummy_set_1, /* set scrambler */
46357- lmc_hssi_get_link_status, /* get link status */
46358- lmc_hssi_set_link_status, /* set link status */
46359- lmc_hssi_set_crc_length, /* set CRC length */
46360- lmc_dummy_set_1, /* set T1 or E1 circuit type */
46361- lmc_hssi_watchdog
46362+ .init = lmc_hssi_init, /* special media init stuff */
46363+ .defaults = lmc_hssi_default, /* reset to default state */
46364+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
46365+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
46366+ .set_speed = lmc_dummy_set2_1, /* set line speed */
46367+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
46368+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
46369+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
46370+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
46371+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
46372+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
46373+ .watchdog = lmc_hssi_watchdog
46374 };
46375
46376-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
46377- lmc_ssi_default, /* reset to default state */
46378- lmc_ssi_set_status, /* reset status to state provided */
46379- lmc_ssi_set_clock, /* set clock source */
46380- lmc_ssi_set_speed, /* set line speed */
46381- lmc_dummy_set_1, /* set cable length */
46382- lmc_dummy_set_1, /* set scrambler */
46383- lmc_ssi_get_link_status, /* get link status */
46384- lmc_ssi_set_link_status, /* set link status */
46385- lmc_ssi_set_crc_length, /* set CRC length */
46386- lmc_dummy_set_1, /* set T1 or E1 circuit type */
46387- lmc_ssi_watchdog
46388+lmc_media_t lmc_ssi_media = {
46389+ .init = lmc_ssi_init, /* special media init stuff */
46390+ .defaults = lmc_ssi_default, /* reset to default state */
46391+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
46392+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
46393+ .set_speed = lmc_ssi_set_speed, /* set line speed */
46394+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
46395+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
46396+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
46397+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
46398+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
46399+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
46400+ .watchdog = lmc_ssi_watchdog
46401 };
46402
46403 lmc_media_t lmc_t1_media = {
46404- lmc_t1_init, /* special media init stuff */
46405- lmc_t1_default, /* reset to default state */
46406- lmc_t1_set_status, /* reset status to state provided */
46407- lmc_t1_set_clock, /* set clock source */
46408- lmc_dummy_set2_1, /* set line speed */
46409- lmc_dummy_set_1, /* set cable length */
46410- lmc_dummy_set_1, /* set scrambler */
46411- lmc_t1_get_link_status, /* get link status */
46412- lmc_dummy_set_1, /* set link status */
46413- lmc_t1_set_crc_length, /* set CRC length */
46414- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
46415- lmc_t1_watchdog
46416+ .init = lmc_t1_init, /* special media init stuff */
46417+ .defaults = lmc_t1_default, /* reset to default state */
46418+ .set_status = lmc_t1_set_status, /* reset status to state provided */
46419+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
46420+ .set_speed = lmc_dummy_set2_1, /* set line speed */
46421+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
46422+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
46423+ .get_link_status = lmc_t1_get_link_status, /* get link status */
46424+ .set_link_status = lmc_dummy_set_1, /* set link status */
46425+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
46426+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
46427+ .watchdog = lmc_t1_watchdog
46428 };
46429
46430 static void
46431diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
46432index feacc3b..5bac0de 100644
46433--- a/drivers/net/wan/z85230.c
46434+++ b/drivers/net/wan/z85230.c
46435@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
46436
46437 struct z8530_irqhandler z8530_sync =
46438 {
46439- z8530_rx,
46440- z8530_tx,
46441- z8530_status
46442+ .rx = z8530_rx,
46443+ .tx = z8530_tx,
46444+ .status = z8530_status
46445 };
46446
46447 EXPORT_SYMBOL(z8530_sync);
46448@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
46449 }
46450
46451 static struct z8530_irqhandler z8530_dma_sync = {
46452- z8530_dma_rx,
46453- z8530_dma_tx,
46454- z8530_dma_status
46455+ .rx = z8530_dma_rx,
46456+ .tx = z8530_dma_tx,
46457+ .status = z8530_dma_status
46458 };
46459
46460 static struct z8530_irqhandler z8530_txdma_sync = {
46461- z8530_rx,
46462- z8530_dma_tx,
46463- z8530_dma_status
46464+ .rx = z8530_rx,
46465+ .tx = z8530_dma_tx,
46466+ .status = z8530_dma_status
46467 };
46468
46469 /**
46470@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
46471
46472 struct z8530_irqhandler z8530_nop=
46473 {
46474- z8530_rx_clear,
46475- z8530_tx_clear,
46476- z8530_status_clear
46477+ .rx = z8530_rx_clear,
46478+ .tx = z8530_tx_clear,
46479+ .status = z8530_status_clear
46480 };
46481
46482
46483diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
46484index 0b60295..b8bfa5b 100644
46485--- a/drivers/net/wimax/i2400m/rx.c
46486+++ b/drivers/net/wimax/i2400m/rx.c
46487@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
46488 if (i2400m->rx_roq == NULL)
46489 goto error_roq_alloc;
46490
46491- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
46492+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
46493 GFP_KERNEL);
46494 if (rd == NULL) {
46495 result = -ENOMEM;
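
/*
 * On the kcalloc() argument swap above: kcalloc(n, size, flags) overflow-
 * checks the product n * size, which is the same in either order, so the
 * change is behavior-neutral at runtime; the element-size-first order is
 * presumably what this patch's size_overflow plugin instruments. Sketch:
 *
 *	rd = kcalloc(sizeof(*rd), nr_entries, GFP_KERNEL);
 */
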
46496diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
46497index edf4b57..68b51c0 100644
46498--- a/drivers/net/wireless/airo.c
46499+++ b/drivers/net/wireless/airo.c
46500@@ -7843,7 +7843,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
46501 struct airo_info *ai = dev->ml_priv;
46502 int ridcode;
46503 int enabled;
46504- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
46505+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
46506 unsigned char *iobuf;
46507
46508 /* Only super-user can write RIDs */
46509diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
46510index 34c8a33..3261fdc 100644
46511--- a/drivers/net/wireless/at76c50x-usb.c
46512+++ b/drivers/net/wireless/at76c50x-usb.c
46513@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
46514 }
46515
46516 /* Convert timeout from the DFU status to jiffies */
46517-static inline unsigned long at76_get_timeout(struct dfu_status *s)
46518+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
46519 {
46520 return msecs_to_jiffies((s->poll_timeout[2] << 16)
46521 | (s->poll_timeout[1] << 8)
46522diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
46523index edae50b..b24278c 100644
46524--- a/drivers/net/wireless/ath/ath10k/htc.c
46525+++ b/drivers/net/wireless/ath/ath10k/htc.c
46526@@ -842,7 +842,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
46527 /* registered target arrival callback from the HIF layer */
46528 int ath10k_htc_init(struct ath10k *ar)
46529 {
46530- struct ath10k_hif_cb htc_callbacks;
46531+ static struct ath10k_hif_cb htc_callbacks = {
46532+ .rx_completion = ath10k_htc_rx_completion_handler,
46533+ .tx_completion = ath10k_htc_tx_completion_handler,
46534+ };
46535 struct ath10k_htc_ep *ep = NULL;
46536 struct ath10k_htc *htc = &ar->htc;
46537
46538@@ -852,8 +855,6 @@ int ath10k_htc_init(struct ath10k *ar)
46539 ath10k_htc_reset_endpoint_states(htc);
46540
46541 /* setup HIF layer callbacks */
46542- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
46543- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
46544 htc->ar = ar;
46545
46546 /* Get HIF default pipe for HTC message exchange */
46547diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
46548index 4716d33..a688310 100644
46549--- a/drivers/net/wireless/ath/ath10k/htc.h
46550+++ b/drivers/net/wireless/ath/ath10k/htc.h
46551@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
46552
46553 struct ath10k_htc_ops {
46554 void (*target_send_suspend_complete)(struct ath10k *ar);
46555-};
46556+} __no_const;
46557
46558 struct ath10k_htc_ep_ops {
46559 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
46560 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
46561 void (*ep_tx_credits)(struct ath10k *);
46562-};
46563+} __no_const;
46564
46565 /* service connection information */
46566 struct ath10k_htc_svc_conn_req {
46567diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
46568index a366d6b..b6f28f8 100644
46569--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
46570+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
46571@@ -218,8 +218,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46572 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
46573 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
46574
46575- ACCESS_ONCE(ads->ds_link) = i->link;
46576- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
46577+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
46578+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
46579
46580 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
46581 ctl6 = SM(i->keytype, AR_EncrType);
46582@@ -233,26 +233,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46583
46584 if ((i->is_first || i->is_last) &&
46585 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
46586- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
46587+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
46588 | set11nTries(i->rates, 1)
46589 | set11nTries(i->rates, 2)
46590 | set11nTries(i->rates, 3)
46591 | (i->dur_update ? AR_DurUpdateEna : 0)
46592 | SM(0, AR_BurstDur);
46593
46594- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
46595+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
46596 | set11nRate(i->rates, 1)
46597 | set11nRate(i->rates, 2)
46598 | set11nRate(i->rates, 3);
46599 } else {
46600- ACCESS_ONCE(ads->ds_ctl2) = 0;
46601- ACCESS_ONCE(ads->ds_ctl3) = 0;
46602+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
46603+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
46604 }
46605
46606 if (!i->is_first) {
46607- ACCESS_ONCE(ads->ds_ctl0) = 0;
46608- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
46609- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
46610+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
46611+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
46612+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
46613 return;
46614 }
46615
46616@@ -277,7 +277,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46617 break;
46618 }
46619
46620- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
46621+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
46622 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
46623 | SM(i->txpower, AR_XmitPower)
46624 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
46625@@ -287,19 +287,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46626 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
46627 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
46628
46629- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
46630- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
46631+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
46632+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
46633
46634 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
46635 return;
46636
46637- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
46638+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
46639 | set11nPktDurRTSCTS(i->rates, 1);
46640
46641- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
46642+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
46643 | set11nPktDurRTSCTS(i->rates, 3);
46644
46645- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
46646+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
46647 | set11nRateFlags(i->rates, 1)
46648 | set11nRateFlags(i->rates, 2)
46649 | set11nRateFlags(i->rates, 3)
46650diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46651index f6c5c1b..6058354 100644
46652--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46653+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
46654@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46655 (i->qcu << AR_TxQcuNum_S) | desc_len;
46656
46657 checksum += val;
46658- ACCESS_ONCE(ads->info) = val;
46659+ ACCESS_ONCE_RW(ads->info) = val;
46660
46661 checksum += i->link;
46662- ACCESS_ONCE(ads->link) = i->link;
46663+ ACCESS_ONCE_RW(ads->link) = i->link;
46664
46665 checksum += i->buf_addr[0];
46666- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
46667+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
46668 checksum += i->buf_addr[1];
46669- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
46670+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
46671 checksum += i->buf_addr[2];
46672- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
46673+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
46674 checksum += i->buf_addr[3];
46675- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
46676+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
46677
46678 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
46679- ACCESS_ONCE(ads->ctl3) = val;
46680+ ACCESS_ONCE_RW(ads->ctl3) = val;
46681 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
46682- ACCESS_ONCE(ads->ctl5) = val;
46683+ ACCESS_ONCE_RW(ads->ctl5) = val;
46684 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
46685- ACCESS_ONCE(ads->ctl7) = val;
46686+ ACCESS_ONCE_RW(ads->ctl7) = val;
46687 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
46688- ACCESS_ONCE(ads->ctl9) = val;
46689+ ACCESS_ONCE_RW(ads->ctl9) = val;
46690
46691 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
46692- ACCESS_ONCE(ads->ctl10) = checksum;
46693+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
46694
46695 if (i->is_first || i->is_last) {
46696- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
46697+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
46698 | set11nTries(i->rates, 1)
46699 | set11nTries(i->rates, 2)
46700 | set11nTries(i->rates, 3)
46701 | (i->dur_update ? AR_DurUpdateEna : 0)
46702 | SM(0, AR_BurstDur);
46703
46704- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
46705+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
46706 | set11nRate(i->rates, 1)
46707 | set11nRate(i->rates, 2)
46708 | set11nRate(i->rates, 3);
46709 } else {
46710- ACCESS_ONCE(ads->ctl13) = 0;
46711- ACCESS_ONCE(ads->ctl14) = 0;
46712+ ACCESS_ONCE_RW(ads->ctl13) = 0;
46713+ ACCESS_ONCE_RW(ads->ctl14) = 0;
46714 }
46715
46716 ads->ctl20 = 0;
46717@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46718
46719 ctl17 = SM(i->keytype, AR_EncrType);
46720 if (!i->is_first) {
46721- ACCESS_ONCE(ads->ctl11) = 0;
46722- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
46723- ACCESS_ONCE(ads->ctl15) = 0;
46724- ACCESS_ONCE(ads->ctl16) = 0;
46725- ACCESS_ONCE(ads->ctl17) = ctl17;
46726- ACCESS_ONCE(ads->ctl18) = 0;
46727- ACCESS_ONCE(ads->ctl19) = 0;
46728+ ACCESS_ONCE_RW(ads->ctl11) = 0;
46729+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
46730+ ACCESS_ONCE_RW(ads->ctl15) = 0;
46731+ ACCESS_ONCE_RW(ads->ctl16) = 0;
46732+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
46733+ ACCESS_ONCE_RW(ads->ctl18) = 0;
46734+ ACCESS_ONCE_RW(ads->ctl19) = 0;
46735 return;
46736 }
46737
46738- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
46739+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
46740 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
46741 | SM(i->txpower, AR_XmitPower)
46742 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
46743@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
46744 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
46745 ctl12 |= SM(val, AR_PAPRDChainMask);
46746
46747- ACCESS_ONCE(ads->ctl12) = ctl12;
46748- ACCESS_ONCE(ads->ctl17) = ctl17;
46749+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
46750+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
46751
46752- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
46753+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
46754 | set11nPktDurRTSCTS(i->rates, 1);
46755
46756- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
46757+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
46758 | set11nPktDurRTSCTS(i->rates, 3);
46759
46760- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
46761+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
46762 | set11nRateFlags(i->rates, 1)
46763 | set11nRateFlags(i->rates, 2)
46764 | set11nRateFlags(i->rates, 3)
46765 | SM(i->rtscts_rate, AR_RTSCTSRate);
46766
46767- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
46768+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
46769 }
46770
46771 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
46772diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
46773index a2c9a5d..b52273e 100644
46774--- a/drivers/net/wireless/ath/ath9k/hw.h
46775+++ b/drivers/net/wireless/ath/ath9k/hw.h
46776@@ -635,7 +635,7 @@ struct ath_hw_private_ops {
46777
46778 /* ANI */
46779 void (*ani_cache_ini_regs)(struct ath_hw *ah);
46780-};
46781+} __no_const;
46782
46783 /**
46784 * struct ath_spec_scan - parameters for Atheros spectral scan
46785@@ -711,7 +711,7 @@ struct ath_hw_ops {
46786 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
46787 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
46788 #endif
46789-};
46790+} __no_const;
46791
46792 struct ath_nf_limits {
46793 s16 max;
46794diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
46795index 92190da..f3a4c4c 100644
46796--- a/drivers/net/wireless/b43/phy_lp.c
46797+++ b/drivers/net/wireless/b43/phy_lp.c
46798@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
46799 {
46800 struct ssb_bus *bus = dev->dev->sdev->bus;
46801
46802- static const struct b206x_channel *chandata = NULL;
46803+ const struct b206x_channel *chandata = NULL;
46804 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
46805 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
46806 u16 old_comm15, scale;
46807diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
46808index dea3b50..543db99 100644
46809--- a/drivers/net/wireless/iwlegacy/3945-mac.c
46810+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
46811@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
46812 */
46813 if (il3945_mod_params.disable_hw_scan) {
46814 D_INFO("Disabling hw_scan\n");
46815- il3945_mac_ops.hw_scan = NULL;
46816+ pax_open_kernel();
46817+ *(void **)&il3945_mac_ops.hw_scan = NULL;
46818+ pax_close_kernel();
46819 }
46820
46821 D_INFO("*** LOAD DRIVER ***\n");
46822diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46823index d94f8ab..5b568c8 100644
46824--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46825+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
46826@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
46827 {
46828 struct iwl_priv *priv = file->private_data;
46829 char buf[64];
46830- int buf_size;
46831+ size_t buf_size;
46832 u32 offset, len;
46833
46834 memset(buf, 0, sizeof(buf));
46835@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
46836 struct iwl_priv *priv = file->private_data;
46837
46838 char buf[8];
46839- int buf_size;
46840+ size_t buf_size;
46841 u32 reset_flag;
46842
46843 memset(buf, 0, sizeof(buf));
46844@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
46845 {
46846 struct iwl_priv *priv = file->private_data;
46847 char buf[8];
46848- int buf_size;
46849+ size_t buf_size;
46850 int ht40;
46851
46852 memset(buf, 0, sizeof(buf));
46853@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
46854 {
46855 struct iwl_priv *priv = file->private_data;
46856 char buf[8];
46857- int buf_size;
46858+ size_t buf_size;
46859 int value;
46860
46861 memset(buf, 0, sizeof(buf));
46862@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
46863 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
46864 DEBUGFS_READ_FILE_OPS(current_sleep_command);
46865
46866-static const char *fmt_value = " %-30s %10u\n";
46867-static const char *fmt_hex = " %-30s 0x%02X\n";
46868-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
46869-static const char *fmt_header =
46870+static const char fmt_value[] = " %-30s %10u\n";
46871+static const char fmt_hex[] = " %-30s 0x%02X\n";
46872+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
46873+static const char fmt_header[] =
46874 "%-32s current cumulative delta max\n";
46875
46876 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
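
/*
 * Sketch of the format-string conversion above: 'static const char *fmt'
 * leaves a writable pointer to const bytes, while 'static const char
 * fmt[]' makes the object itself read-only with no pointer left to
 * redirect:
 */
static const char *demo_fmt_ptr = " %-30s %10u\n";	/* pointer is writable */
static const char demo_fmt_arr[] = " %-30s %10u\n";	/* nothing writable */
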
46877@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
46878 {
46879 struct iwl_priv *priv = file->private_data;
46880 char buf[8];
46881- int buf_size;
46882+ size_t buf_size;
46883 int clear;
46884
46885 memset(buf, 0, sizeof(buf));
46886@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
46887 {
46888 struct iwl_priv *priv = file->private_data;
46889 char buf[8];
46890- int buf_size;
46891+ size_t buf_size;
46892 int trace;
46893
46894 memset(buf, 0, sizeof(buf));
46895@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
46896 {
46897 struct iwl_priv *priv = file->private_data;
46898 char buf[8];
46899- int buf_size;
46900+ size_t buf_size;
46901 int missed;
46902
46903 memset(buf, 0, sizeof(buf));
46904@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
46905
46906 struct iwl_priv *priv = file->private_data;
46907 char buf[8];
46908- int buf_size;
46909+ size_t buf_size;
46910 int plcp;
46911
46912 memset(buf, 0, sizeof(buf));
46913@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
46914
46915 struct iwl_priv *priv = file->private_data;
46916 char buf[8];
46917- int buf_size;
46918+ size_t buf_size;
46919 int flush;
46920
46921 memset(buf, 0, sizeof(buf));
46922@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
46923
46924 struct iwl_priv *priv = file->private_data;
46925 char buf[8];
46926- int buf_size;
46927+ size_t buf_size;
46928 int rts;
46929
46930 if (!priv->cfg->ht_params)
46931@@ -2205,7 +2205,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
46932 {
46933 struct iwl_priv *priv = file->private_data;
46934 char buf[8];
46935- int buf_size;
46936+ size_t buf_size;
46937
46938 memset(buf, 0, sizeof(buf));
46939 buf_size = min(count, sizeof(buf) - 1);
46940@@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
46941 struct iwl_priv *priv = file->private_data;
46942 u32 event_log_flag;
46943 char buf[8];
46944- int buf_size;
46945+ size_t buf_size;
46946
46947 /* check that the interface is up */
46948 if (!iwl_is_ready(priv))
46949@@ -2293,7 +2293,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
46950 struct iwl_priv *priv = file->private_data;
46951 char buf[8];
46952 u32 calib_disabled;
46953- int buf_size;
46954+ size_t buf_size;
46955
46956 memset(buf, 0, sizeof(buf));
46957 buf_size = min(count, sizeof(buf) - 1);
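
/*
 * On the repeated 'int buf_size' -> 'size_t buf_size' fix above: 'count'
 * and sizeof() are both size_t, so a size_t keeps the kernel's
 * type-checked min() happy and avoids funnelling a user-supplied length
 * through a narrower signed int:
 *
 *	size_t buf_size = min(count, sizeof(buf) - 1);
 */
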
46958diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
46959index 7aad766..06addb4 100644
46960--- a/drivers/net/wireless/iwlwifi/dvm/main.c
46961+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
46962@@ -1123,7 +1123,7 @@ static void iwl_option_config(struct iwl_priv *priv)
46963 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
46964 {
46965 struct iwl_nvm_data *data = priv->nvm_data;
46966- char *debug_msg;
46967+ static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
46968
46969 if (data->sku_cap_11n_enable &&
46970 !priv->cfg->ht_params) {
46971@@ -1137,7 +1137,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
46972 return -EINVAL;
46973 }
46974
46975- debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
46976 IWL_DEBUG_INFO(priv, debug_msg,
46977 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
46978 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
46979diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
46980index f53ef83..5e34bcb 100644
46981--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
46982+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
46983@@ -1390,7 +1390,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
46984 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
46985
46986 char buf[8];
46987- int buf_size;
46988+ size_t buf_size;
46989 u32 reset_flag;
46990
46991 memset(buf, 0, sizeof(buf));
46992@@ -1411,7 +1411,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
46993 {
46994 struct iwl_trans *trans = file->private_data;
46995 char buf[8];
46996- int buf_size;
46997+ size_t buf_size;
46998 int csr;
46999
47000 memset(buf, 0, sizeof(buf));
47001diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
47002index a1b32ee..94b3c3d 100644
47003--- a/drivers/net/wireless/mac80211_hwsim.c
47004+++ b/drivers/net/wireless/mac80211_hwsim.c
47005@@ -2224,25 +2224,19 @@ static int __init init_mac80211_hwsim(void)
47006
47007 if (channels > 1) {
47008 hwsim_if_comb.num_different_channels = channels;
47009- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
47010- mac80211_hwsim_ops.cancel_hw_scan =
47011- mac80211_hwsim_cancel_hw_scan;
47012- mac80211_hwsim_ops.sw_scan_start = NULL;
47013- mac80211_hwsim_ops.sw_scan_complete = NULL;
47014- mac80211_hwsim_ops.remain_on_channel =
47015- mac80211_hwsim_roc;
47016- mac80211_hwsim_ops.cancel_remain_on_channel =
47017- mac80211_hwsim_croc;
47018- mac80211_hwsim_ops.add_chanctx =
47019- mac80211_hwsim_add_chanctx;
47020- mac80211_hwsim_ops.remove_chanctx =
47021- mac80211_hwsim_remove_chanctx;
47022- mac80211_hwsim_ops.change_chanctx =
47023- mac80211_hwsim_change_chanctx;
47024- mac80211_hwsim_ops.assign_vif_chanctx =
47025- mac80211_hwsim_assign_vif_chanctx;
47026- mac80211_hwsim_ops.unassign_vif_chanctx =
47027- mac80211_hwsim_unassign_vif_chanctx;
47028+ pax_open_kernel();
47029+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
47030+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
47031+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
47032+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
47033+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
47034+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
47035+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
47036+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
47037+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
47038+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
47039+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
47040+ pax_close_kernel();
47041 }
47042
47043 spin_lock_init(&hwsim_radio_lock);
47044diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
47045index 8169a85..7fa3b47 100644
47046--- a/drivers/net/wireless/rndis_wlan.c
47047+++ b/drivers/net/wireless/rndis_wlan.c
47048@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
47049
47050 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
47051
47052- if (rts_threshold < 0 || rts_threshold > 2347)
47053+ if (rts_threshold > 2347)
47054 rts_threshold = 2347;
47055
47056 tmp = cpu_to_le32(rts_threshold);
47057diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
47058index e4ba2ce..63d7417 100644
47059--- a/drivers/net/wireless/rt2x00/rt2x00.h
47060+++ b/drivers/net/wireless/rt2x00/rt2x00.h
47061@@ -377,7 +377,7 @@ struct rt2x00_intf {
47062 * for hardware which doesn't support hardware
47063 * sequence counting.
47064 */
47065- atomic_t seqno;
47066+ atomic_unchecked_t seqno;
47067 };
47068
47069 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
47070diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
47071index a5d38e8..d3c24ea 100644
47072--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
47073+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
47074@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
47075 * sequence counter given by mac80211.
47076 */
47077 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
47078- seqno = atomic_add_return(0x10, &intf->seqno);
47079+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
47080 else
47081- seqno = atomic_read(&intf->seqno);
47082+ seqno = atomic_read_unchecked(&intf->seqno);
47083
47084 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
47085 hdr->seq_ctrl |= cpu_to_le16(seqno);
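
Under PAX_REFCOUNT, plain atomic_t operations are instrumented to detect overflow, which would falsely trip on a counter that is meant to wrap, such as this 802.11 sequence number (12 bits, stepped by 0x10). The atomic_unchecked_t type and its *_unchecked helpers, introduced elsewhere in this patch, opt such counters out of the check. A hedged sketch of the pattern (example_next_seqno is hypothetical):

static atomic_unchecked_t example_seqno;	/* allowed to wrap */

static u16 example_next_seqno(void)
{
	/* IEEE 802.11 seq_ctrl: sequence number in the top 12 bits */
	return atomic_add_return_unchecked(0x10, &example_seqno) & 0xfff0;
}
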
47086diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
47087index e2b3d9c..67a5184 100644
47088--- a/drivers/net/wireless/ti/wl1251/sdio.c
47089+++ b/drivers/net/wireless/ti/wl1251/sdio.c
47090@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
47091
47092 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
47093
47094- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
47095- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
47096+ pax_open_kernel();
47097+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
47098+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
47099+ pax_close_kernel();
47100
47101 wl1251_info("using dedicated interrupt line");
47102 } else {
47103- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
47104- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
47105+ pax_open_kernel();
47106+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
47107+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
47108+ pax_close_kernel();
47109
47110 wl1251_info("using SDIO interrupt");
47111 }
47112diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
47113index be7129b..4161356 100644
47114--- a/drivers/net/wireless/ti/wl12xx/main.c
47115+++ b/drivers/net/wireless/ti/wl12xx/main.c
47116@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
47117 sizeof(wl->conf.mem));
47118
47119 /* read data preparation is only needed by wl127x */
47120- wl->ops->prepare_read = wl127x_prepare_read;
47121+ pax_open_kernel();
47122+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
47123+ pax_close_kernel();
47124
47125 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
47126 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
47127@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
47128 sizeof(wl->conf.mem));
47129
47130 /* read data preparation is only needed by wl127x */
47131- wl->ops->prepare_read = wl127x_prepare_read;
47132+ pax_open_kernel();
47133+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
47134+ pax_close_kernel();
47135
47136 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
47137 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
47138diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
47139index ec37b16..7e34d66 100644
47140--- a/drivers/net/wireless/ti/wl18xx/main.c
47141+++ b/drivers/net/wireless/ti/wl18xx/main.c
47142@@ -1823,8 +1823,10 @@ static int wl18xx_setup(struct wl1271 *wl)
47143 }
47144
47145 if (!checksum_param) {
47146- wl18xx_ops.set_rx_csum = NULL;
47147- wl18xx_ops.init_vif = NULL;
47148+ pax_open_kernel();
47149+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
47150+ *(void **)&wl18xx_ops.init_vif = NULL;
47151+ pax_close_kernel();
47152 }
47153
47154 /* Enable 11a Band only if we have 5G antennas */
47155diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
47156index 84d94f5..bd6c61c 100644
47157--- a/drivers/net/wireless/zd1211rw/zd_usb.c
47158+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
47159@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
47160 {
47161 struct zd_usb *usb = urb->context;
47162 struct zd_usb_interrupt *intr = &usb->intr;
47163- int len;
47164+ unsigned int len;
47165 u16 int_num;
47166
47167 ZD_ASSERT(in_interrupt());
47168diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
47169index 7130864..00e64de 100644
47170--- a/drivers/nfc/nfcwilink.c
47171+++ b/drivers/nfc/nfcwilink.c
47172@@ -498,7 +498,7 @@ static struct nci_ops nfcwilink_ops = {
47173
47174 static int nfcwilink_probe(struct platform_device *pdev)
47175 {
47176- static struct nfcwilink *drv;
47177+ struct nfcwilink *drv;
47178 int rc;
47179 __u32 protocols;
47180
47181diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
47182index d93b2b6..ae50401 100644
47183--- a/drivers/oprofile/buffer_sync.c
47184+++ b/drivers/oprofile/buffer_sync.c
47185@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
47186 if (cookie == NO_COOKIE)
47187 offset = pc;
47188 if (cookie == INVALID_COOKIE) {
47189- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
47190+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
47191 offset = pc;
47192 }
47193 if (cookie != last_cookie) {
47194@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
47195 /* add userspace sample */
47196
47197 if (!mm) {
47198- atomic_inc(&oprofile_stats.sample_lost_no_mm);
47199+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
47200 return 0;
47201 }
47202
47203 cookie = lookup_dcookie(mm, s->eip, &offset);
47204
47205 if (cookie == INVALID_COOKIE) {
47206- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
47207+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
47208 return 0;
47209 }
47210
47211@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
47212 /* ignore backtraces if failed to add a sample */
47213 if (state == sb_bt_start) {
47214 state = sb_bt_ignore;
47215- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
47216+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
47217 }
47218 }
47219 release_mm(mm);
47220diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
47221index c0cc4e7..44d4e54 100644
47222--- a/drivers/oprofile/event_buffer.c
47223+++ b/drivers/oprofile/event_buffer.c
47224@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
47225 }
47226
47227 if (buffer_pos == buffer_size) {
47228- atomic_inc(&oprofile_stats.event_lost_overflow);
47229+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
47230 return;
47231 }
47232
47233diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
47234index ed2c3ec..deda85a 100644
47235--- a/drivers/oprofile/oprof.c
47236+++ b/drivers/oprofile/oprof.c
47237@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
47238 if (oprofile_ops.switch_events())
47239 return;
47240
47241- atomic_inc(&oprofile_stats.multiplex_counter);
47242+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
47243 start_switch_worker();
47244 }
47245
47246diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
47247index ee2cfce..7f8f699 100644
47248--- a/drivers/oprofile/oprofile_files.c
47249+++ b/drivers/oprofile/oprofile_files.c
47250@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
47251
47252 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
47253
47254-static ssize_t timeout_read(struct file *file, char __user *buf,
47255+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
47256 size_t count, loff_t *offset)
47257 {
47258 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
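
__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin shipped with this patch: it marks timeout_read() so the plugin does not instrument its (jiffies-derived, legitimately wrap-prone) return value. When the plugin is not active the macro expands to nothing. A sketch of the fallback shape, assuming an illustrative guard name (the authoritative definition lives in this patch's compiler-header changes):

#ifdef SIZE_OVERFLOW_PLUGIN	/* illustrative guard name */
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif
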
47259diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
47260index 59659ce..6c860a0 100644
47261--- a/drivers/oprofile/oprofile_stats.c
47262+++ b/drivers/oprofile/oprofile_stats.c
47263@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
47264 cpu_buf->sample_invalid_eip = 0;
47265 }
47266
47267- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
47268- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
47269- atomic_set(&oprofile_stats.event_lost_overflow, 0);
47270- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
47271- atomic_set(&oprofile_stats.multiplex_counter, 0);
47272+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
47273+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
47274+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
47275+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
47276+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
47277 }
47278
47279
47280diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
47281index 1fc622b..8c48fc3 100644
47282--- a/drivers/oprofile/oprofile_stats.h
47283+++ b/drivers/oprofile/oprofile_stats.h
47284@@ -13,11 +13,11 @@
47285 #include <linux/atomic.h>
47286
47287 struct oprofile_stat_struct {
47288- atomic_t sample_lost_no_mm;
47289- atomic_t sample_lost_no_mapping;
47290- atomic_t bt_lost_no_mapping;
47291- atomic_t event_lost_overflow;
47292- atomic_t multiplex_counter;
47293+ atomic_unchecked_t sample_lost_no_mm;
47294+ atomic_unchecked_t sample_lost_no_mapping;
47295+ atomic_unchecked_t bt_lost_no_mapping;
47296+ atomic_unchecked_t event_lost_overflow;
47297+ atomic_unchecked_t multiplex_counter;
47298 };
47299
47300 extern struct oprofile_stat_struct oprofile_stats;
47301diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
47302index 3f49345..c750d0b 100644
47303--- a/drivers/oprofile/oprofilefs.c
47304+++ b/drivers/oprofile/oprofilefs.c
47305@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
47306
47307 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
47308 {
47309- atomic_t *val = file->private_data;
47310- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
47311+ atomic_unchecked_t *val = file->private_data;
47312+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
47313 }
47314
47315
47316@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
47317
47318
47319 int oprofilefs_create_ro_atomic(struct dentry *root,
47320- char const *name, atomic_t *val)
47321+ char const *name, atomic_unchecked_t *val)
47322 {
47323 return __oprofilefs_create_file(root, name,
47324 &atomic_ro_fops, 0444, val);
47325diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
47326index 61be1d9..dec05d7 100644
47327--- a/drivers/oprofile/timer_int.c
47328+++ b/drivers/oprofile/timer_int.c
47329@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
47330 return NOTIFY_OK;
47331 }
47332
47333-static struct notifier_block __refdata oprofile_cpu_notifier = {
47334+static struct notifier_block oprofile_cpu_notifier = {
47335 .notifier_call = oprofile_cpu_notify,
47336 };
47337
47338diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
47339index 92ed045..62d39bd7 100644
47340--- a/drivers/parport/procfs.c
47341+++ b/drivers/parport/procfs.c
47342@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
47343
47344 *ppos += len;
47345
47346- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
47347+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
47348 }
47349
47350 #ifdef CONFIG_PARPORT_1284
47351@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
47352
47353 *ppos += len;
47354
47355- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
47356+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
47357 }
47358 #endif /* IEEE1284.3 support. */
47359
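
Both parport hunks add the same defensive clause: a len larger than the on-stack buffer can no longer reach copy_to_user(), so a miscomputed length becomes -EFAULT instead of a kernel stack over-read. The shape as a standalone helper, with hypothetical names (example_copy_out is not from the patch):

#include <linux/uaccess.h>

static int example_copy_out(void __user *dst, const char *buf,
			    size_t buf_len, size_t len)
{
	if (len > buf_len)	/* never over-read the kernel buffer */
		return -EFAULT;
	return copy_to_user(dst, buf, len) ? -EFAULT : 0;
}
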
47360diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
47361index ecfac7e..41be7028 100644
47362--- a/drivers/pci/hotplug/acpiphp_ibm.c
47363+++ b/drivers/pci/hotplug/acpiphp_ibm.c
47364@@ -453,7 +453,9 @@ static int __init ibm_acpiphp_init(void)
47365 goto init_cleanup;
47366 }
47367
47368- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
47369+ pax_open_kernel();
47370+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
47371+ pax_close_kernel();
47372 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
47373
47374 return retval;
47375diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
47376index 7536eef..52dc8fa 100644
47377--- a/drivers/pci/hotplug/cpcihp_generic.c
47378+++ b/drivers/pci/hotplug/cpcihp_generic.c
47379@@ -73,7 +73,6 @@ static u16 port;
47380 static unsigned int enum_bit;
47381 static u8 enum_mask;
47382
47383-static struct cpci_hp_controller_ops generic_hpc_ops;
47384 static struct cpci_hp_controller generic_hpc;
47385
47386 static int __init validate_parameters(void)
47387@@ -139,6 +138,10 @@ static int query_enum(void)
47388 return ((value & enum_mask) == enum_mask);
47389 }
47390
47391+static struct cpci_hp_controller_ops generic_hpc_ops = {
47392+ .query_enum = query_enum,
47393+};
47394+
47395 static int __init cpcihp_generic_init(void)
47396 {
47397 int status;
47398@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
47399 pci_dev_put(dev);
47400
47401 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
47402- generic_hpc_ops.query_enum = query_enum;
47403 generic_hpc.ops = &generic_hpc_ops;
47404
47405 status = cpci_hp_register_controller(&generic_hpc);
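
Here the fix is stronger than bracketing a runtime write: query_enum is known at compile time, so generic_hpc_ops is simply defined with a designated initializer after the callback it points to and never needs to be written at runtime at all, which lets the constify plugin keep it read-only. The general shape, with hypothetical names:

struct example_hpc_ops {
	int (*query_enum)(void);
};

static int example_query_enum(void)
{
	return 0;
}

/* defined after the callback so it can be initialized statically */
static struct example_hpc_ops example_hpc_ops = {
	.query_enum = example_query_enum,
};
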
47406diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
47407index e8c4a7c..7046f5c 100644
47408--- a/drivers/pci/hotplug/cpcihp_zt5550.c
47409+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
47410@@ -59,7 +59,6 @@
47411 /* local variables */
47412 static bool debug;
47413 static bool poll;
47414-static struct cpci_hp_controller_ops zt5550_hpc_ops;
47415 static struct cpci_hp_controller zt5550_hpc;
47416
47417 /* Primary cPCI bus bridge device */
47418@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
47419 return 0;
47420 }
47421
47422+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
47423+ .query_enum = zt5550_hc_query_enum,
47424+};
47425+
47426 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
47427 {
47428 int status;
47429@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
47430 dbg("returned from zt5550_hc_config");
47431
47432 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
47433- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
47434 zt5550_hpc.ops = &zt5550_hpc_ops;
47435 if(!poll) {
47436 zt5550_hpc.irq = hc_dev->irq;
47437 zt5550_hpc.irq_flags = IRQF_SHARED;
47438 zt5550_hpc.dev_id = hc_dev;
47439
47440- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
47441- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
47442- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
47443+ pax_open_kernel();
47444+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
47445+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
47446+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
47447+	pax_close_kernel();
47448 } else {
47449 info("using ENUM# polling mode");
47450 }
47451diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
47452index 76ba8a1..20ca857 100644
47453--- a/drivers/pci/hotplug/cpqphp_nvram.c
47454+++ b/drivers/pci/hotplug/cpqphp_nvram.c
47455@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
47456
47457 void compaq_nvram_init (void __iomem *rom_start)
47458 {
47459+
47460+#ifndef CONFIG_PAX_KERNEXEC
47461 if (rom_start) {
47462 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
47463 }
47464+#endif
47465+
47466 dbg("int15 entry = %p\n", compaq_int15_entry_point);
47467
47468 /* initialize our int15 lock */
47469diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
47470index cfa92a9..29539c5 100644
47471--- a/drivers/pci/hotplug/pci_hotplug_core.c
47472+++ b/drivers/pci/hotplug/pci_hotplug_core.c
47473@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
47474 return -EINVAL;
47475 }
47476
47477- slot->ops->owner = owner;
47478- slot->ops->mod_name = mod_name;
47479+ pax_open_kernel();
47480+ *(struct module **)&slot->ops->owner = owner;
47481+ *(const char **)&slot->ops->mod_name = mod_name;
47482+ pax_close_kernel();
47483
47484 mutex_lock(&pci_hp_mutex);
47485 /*
47486diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
47487index bbd48bb..6907ef4 100644
47488--- a/drivers/pci/hotplug/pciehp_core.c
47489+++ b/drivers/pci/hotplug/pciehp_core.c
47490@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
47491 struct slot *slot = ctrl->slot;
47492 struct hotplug_slot *hotplug = NULL;
47493 struct hotplug_slot_info *info = NULL;
47494- struct hotplug_slot_ops *ops = NULL;
47495+ hotplug_slot_ops_no_const *ops = NULL;
47496 char name[SLOT_NAME_SIZE];
47497 int retval = -ENOMEM;
47498
47499diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
47500index c91e6c1..5c723ef 100644
47501--- a/drivers/pci/pci-sysfs.c
47502+++ b/drivers/pci/pci-sysfs.c
47503@@ -1117,7 +1117,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
47504 {
47505 /* allocate attribute structure, piggyback attribute name */
47506 int name_len = write_combine ? 13 : 10;
47507- struct bin_attribute *res_attr;
47508+ bin_attribute_no_const *res_attr;
47509 int retval;
47510
47511 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
47512@@ -1302,7 +1302,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
47513 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
47514 {
47515 int retval;
47516- struct bin_attribute *attr;
47517+ bin_attribute_no_const *attr;
47518
47519 /* If the device has VPD, try to expose it in sysfs. */
47520 if (dev->vpd) {
47521@@ -1349,7 +1349,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
47522 {
47523 int retval;
47524 int rom_size = 0;
47525- struct bin_attribute *attr;
47526+ bin_attribute_no_const *attr;
47527
47528 if (!sysfs_initialized)
47529 return -EACCES;
47530diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
47531index 9c91ecc..bda4796 100644
47532--- a/drivers/pci/pci.h
47533+++ b/drivers/pci/pci.h
47534@@ -95,7 +95,7 @@ struct pci_vpd_ops {
47535 struct pci_vpd {
47536 unsigned int len;
47537 const struct pci_vpd_ops *ops;
47538- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
47539+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
47540 };
47541
47542 int pci_vpd_pci22_init(struct pci_dev *dev);
47543diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
47544index f1272dc..e92a1ac 100644
47545--- a/drivers/pci/pcie/aspm.c
47546+++ b/drivers/pci/pcie/aspm.c
47547@@ -27,9 +27,9 @@
47548 #define MODULE_PARAM_PREFIX "pcie_aspm."
47549
47550 /* Note: those are not register definitions */
47551-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
47552-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
47553-#define ASPM_STATE_L1 (4) /* L1 state */
47554+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
47555+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
47556+#define ASPM_STATE_L1 (4U) /* L1 state */
47557 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
47558 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
47559
47560diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
47561index 38e403d..a2ce55a 100644
47562--- a/drivers/pci/probe.c
47563+++ b/drivers/pci/probe.c
47564@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
47565 struct pci_bus_region region, inverted_region;
47566 bool bar_too_big = false, bar_disabled = false;
47567
47568- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
47569+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
47570
47571 /* No printks while decoding is disabled! */
47572 if (!dev->mmio_always_on) {
47573diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
47574index 46d1378..30e452b 100644
47575--- a/drivers/pci/proc.c
47576+++ b/drivers/pci/proc.c
47577@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
47578 static int __init pci_proc_init(void)
47579 {
47580 struct pci_dev *dev = NULL;
47581+
47582+#ifdef CONFIG_GRKERNSEC_PROC_ADD
47583+#ifdef CONFIG_GRKERNSEC_PROC_USER
47584+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
47585+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47586+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
47587+#endif
47588+#else
47589 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
47590+#endif
47591 proc_create("devices", 0, proc_bus_pci_dir,
47592 &proc_bus_pci_dev_operations);
47593 proc_initialized = 1;
47594diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
47595index 3e5b4497..dcdfb70 100644
47596--- a/drivers/platform/chrome/chromeos_laptop.c
47597+++ b/drivers/platform/chrome/chromeos_laptop.c
47598@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
47599 return 0;
47600 }
47601
47602-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
47603+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
47604 {
47605 .ident = "Samsung Series 5 550 - Touchpad",
47606 .matches = {
47607diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
47608index 19c313b..ed28b38 100644
47609--- a/drivers/platform/x86/asus-wmi.c
47610+++ b/drivers/platform/x86/asus-wmi.c
47611@@ -1618,6 +1618,10 @@ static int show_dsts(struct seq_file *m, void *data)
47612 int err;
47613 u32 retval = -1;
47614
47615+#ifdef CONFIG_GRKERNSEC_KMEM
47616+ return -EPERM;
47617+#endif
47618+
47619 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
47620
47621 if (err < 0)
47622@@ -1634,6 +1638,10 @@ static int show_devs(struct seq_file *m, void *data)
47623 int err;
47624 u32 retval = -1;
47625
47626+#ifdef CONFIG_GRKERNSEC_KMEM
47627+ return -EPERM;
47628+#endif
47629+
47630 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
47631 &retval);
47632
47633@@ -1658,6 +1666,10 @@ static int show_call(struct seq_file *m, void *data)
47634 union acpi_object *obj;
47635 acpi_status status;
47636
47637+#ifdef CONFIG_GRKERNSEC_KMEM
47638+ return -EPERM;
47639+#endif
47640+
47641 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
47642 1, asus->debug.method_id,
47643 &input, &output);
47644diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
47645index 62f8030..c7f2a45 100644
47646--- a/drivers/platform/x86/msi-laptop.c
47647+++ b/drivers/platform/x86/msi-laptop.c
47648@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
47649
47650 if (!quirks->ec_read_only) {
47651 /* allow userland write sysfs file */
47652- dev_attr_bluetooth.store = store_bluetooth;
47653- dev_attr_wlan.store = store_wlan;
47654- dev_attr_threeg.store = store_threeg;
47655- dev_attr_bluetooth.attr.mode |= S_IWUSR;
47656- dev_attr_wlan.attr.mode |= S_IWUSR;
47657- dev_attr_threeg.attr.mode |= S_IWUSR;
47658+ pax_open_kernel();
47659+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
47660+ *(void **)&dev_attr_wlan.store = store_wlan;
47661+ *(void **)&dev_attr_threeg.store = store_threeg;
47662+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
47663+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
47664+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
47665+ pax_close_kernel();
47666 }
47667
47668 /* disable hardware control by fn key */
47669diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
47670index 70222f2..8c8ce66 100644
47671--- a/drivers/platform/x86/msi-wmi.c
47672+++ b/drivers/platform/x86/msi-wmi.c
47673@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
47674 static void msi_wmi_notify(u32 value, void *context)
47675 {
47676 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
47677- static struct key_entry *key;
47678+ struct key_entry *key;
47679 union acpi_object *obj;
47680 acpi_status status;
47681
47682diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
47683index fb233ae..23a325c 100644
47684--- a/drivers/platform/x86/sony-laptop.c
47685+++ b/drivers/platform/x86/sony-laptop.c
47686@@ -2453,7 +2453,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
47687 }
47688
47689 /* High speed charging function */
47690-static struct device_attribute *hsc_handle;
47691+static device_attribute_no_const *hsc_handle;
47692
47693 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
47694 struct device_attribute *attr,
47695diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
47696index 58b0274..6704626 100644
47697--- a/drivers/platform/x86/thinkpad_acpi.c
47698+++ b/drivers/platform/x86/thinkpad_acpi.c
47699@@ -2100,7 +2100,7 @@ static int hotkey_mask_get(void)
47700 return 0;
47701 }
47702
47703-void static hotkey_mask_warn_incomplete_mask(void)
47704+static void hotkey_mask_warn_incomplete_mask(void)
47705 {
47706 /* log only what the user can fix... */
47707 const u32 wantedmask = hotkey_driver_mask &
47708@@ -2327,11 +2327,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
47709 }
47710 }
47711
47712-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47713- struct tp_nvram_state *newn,
47714- const u32 event_mask)
47715-{
47716-
47717 #define TPACPI_COMPARE_KEY(__scancode, __member) \
47718 do { \
47719 if ((event_mask & (1 << __scancode)) && \
47720@@ -2345,36 +2340,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47721 tpacpi_hotkey_send_key(__scancode); \
47722 } while (0)
47723
47724- void issue_volchange(const unsigned int oldvol,
47725- const unsigned int newvol)
47726- {
47727- unsigned int i = oldvol;
47728+static void issue_volchange(const unsigned int oldvol,
47729+ const unsigned int newvol,
47730+ const u32 event_mask)
47731+{
47732+ unsigned int i = oldvol;
47733
47734- while (i > newvol) {
47735- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
47736- i--;
47737- }
47738- while (i < newvol) {
47739- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47740- i++;
47741- }
47742+ while (i > newvol) {
47743+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
47744+ i--;
47745 }
47746+ while (i < newvol) {
47747+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47748+ i++;
47749+ }
47750+}
47751
47752- void issue_brightnesschange(const unsigned int oldbrt,
47753- const unsigned int newbrt)
47754- {
47755- unsigned int i = oldbrt;
47756+static void issue_brightnesschange(const unsigned int oldbrt,
47757+ const unsigned int newbrt,
47758+ const u32 event_mask)
47759+{
47760+ unsigned int i = oldbrt;
47761
47762- while (i > newbrt) {
47763- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
47764- i--;
47765- }
47766- while (i < newbrt) {
47767- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47768- i++;
47769- }
47770+ while (i > newbrt) {
47771+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
47772+ i--;
47773+ }
47774+ while (i < newbrt) {
47775+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47776+ i++;
47777 }
47778+}
47779
47780+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47781+ struct tp_nvram_state *newn,
47782+ const u32 event_mask)
47783+{
47784 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
47785 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
47786 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
47787@@ -2408,7 +2409,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47788 oldn->volume_level != newn->volume_level) {
47789 /* recently muted, or repeated mute keypress, or
47790 * multiple presses ending in mute */
47791- issue_volchange(oldn->volume_level, newn->volume_level);
47792+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
47793 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
47794 }
47795 } else {
47796@@ -2418,7 +2419,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47797 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
47798 }
47799 if (oldn->volume_level != newn->volume_level) {
47800- issue_volchange(oldn->volume_level, newn->volume_level);
47801+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
47802 } else if (oldn->volume_toggle != newn->volume_toggle) {
47803 /* repeated vol up/down keypress at end of scale ? */
47804 if (newn->volume_level == 0)
47805@@ -2431,7 +2432,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47806 /* handle brightness */
47807 if (oldn->brightness_level != newn->brightness_level) {
47808 issue_brightnesschange(oldn->brightness_level,
47809- newn->brightness_level);
47810+ newn->brightness_level,
47811+ event_mask);
47812 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
47813 /* repeated key presses that didn't change state */
47814 if (newn->brightness_level == 0)
47815@@ -2440,10 +2442,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
47816 && !tp_features.bright_unkfw)
47817 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
47818 }
47819+}
47820
47821 #undef TPACPI_COMPARE_KEY
47822 #undef TPACPI_MAY_SEND_KEY
47823-}
47824
47825 /*
47826 * Polling driver
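
The thinkpad_acpi rework above is larger than most hunks but mechanical: issue_volchange() and issue_brightnesschange() were GCC nested functions defined inside hotkey_compare_and_issue_event(), capturing event_mask from the enclosing scope. Nested functions are a GCC extension that can require stack trampolines (when their address is taken), which conflicts with non-executable stacks, so grsecurity avoids the construct entirely: the patch hoists them to file-scope statics and passes the captured value as an explicit parameter. The transformation in miniature, with hypothetical names:

/* after hoisting: the former capture becomes a parameter */
static void example_step(unsigned int from, unsigned int to, u32 mask)
{
	while (from < to) {
		/* ... act on mask ... */
		from++;
	}
}

static void example_compare(unsigned int oldv, unsigned int newv,
			    u32 mask)
{
	if (oldv != newv)
		example_step(oldv, newv, mask);
}
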
47827diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
47828index 769d265..a3a05ca 100644
47829--- a/drivers/pnp/pnpbios/bioscalls.c
47830+++ b/drivers/pnp/pnpbios/bioscalls.c
47831@@ -58,7 +58,7 @@ do { \
47832 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
47833 } while(0)
47834
47835-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
47836+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
47837 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
47838
47839 /*
47840@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
47841
47842 cpu = get_cpu();
47843 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
47844+
47845+ pax_open_kernel();
47846 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
47847+ pax_close_kernel();
47848
47849 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
47850 spin_lock_irqsave(&pnp_bios_lock, flags);
47851@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
47852 :"memory");
47853 spin_unlock_irqrestore(&pnp_bios_lock, flags);
47854
47855+ pax_open_kernel();
47856 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
47857+ pax_close_kernel();
47858+
47859 put_cpu();
47860
47861 /* If we get here and this is set then the PnP BIOS faulted on us. */
47862@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
47863 return status;
47864 }
47865
47866-void pnpbios_calls_init(union pnp_bios_install_struct *header)
47867+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
47868 {
47869 int i;
47870
47871@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
47872 pnp_bios_callpoint.offset = header->fields.pm16offset;
47873 pnp_bios_callpoint.segment = PNP_CS16;
47874
47875+ pax_open_kernel();
47876+
47877 for_each_possible_cpu(i) {
47878 struct desc_struct *gdt = get_cpu_gdt_table(i);
47879 if (!gdt)
47880@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
47881 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
47882 (unsigned long)__va(header->fields.pm16dseg));
47883 }
47884+
47885+ pax_close_kernel();
47886 }
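
Two details in the bioscalls.c hunks are easy to miss. First, bad_bios_desc becomes const, so each transient installation into the per-CPU GDT is wrapped in pax_open_kernel()/pax_close_kernel(). Second, the descriptor type changes from 0x4092 to 0x4093, which pre-sets the segment's accessed bit so the CPU never has to write it back into a descriptor that may now sit in read-only memory. In terms of the x86 access byte (an illustrative sketch, not a definition from this patch):

/* low byte of the 0x4092/0x4093 value: the segment access byte */
#define SEG_PRESENT	0x80	/* P: segment present */
#define SEG_CODE_DATA	0x10	/* S: code/data (not system) */
#define SEG_WRITABLE	0x02	/* W: writable data segment */
#define SEG_ACCESSED	0x01	/* A: the 0x92 -> 0x93 delta */
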
47887diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
47888index d95e101..67f0c3f 100644
47889--- a/drivers/pnp/resource.c
47890+++ b/drivers/pnp/resource.c
47891@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
47892 return 1;
47893
47894 /* check if the resource is valid */
47895- if (*irq < 0 || *irq > 15)
47896+ if (*irq > 15)
47897 return 0;
47898
47899 /* check if the resource is reserved */
47900@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
47901 return 1;
47902
47903 /* check if the resource is valid */
47904- if (*dma < 0 || *dma == 4 || *dma > 7)
47905+ if (*dma == 4 || *dma > 7)
47906 return 0;
47907
47908 /* check if the resource is reserved */
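
Both pnp checks operate on unsigned resource values, so the `*irq < 0` and `*dma < 0` arms could never be true; compilers flag such tautological comparisons (e.g. with -Wtype-limits), and the patch drops the dead half while keeping the real upper-bound test. In miniature:

/* with an unsigned value, `x < 0` is always false */
static int example_irq_valid(unsigned long irq)
{
	return irq <= 15;	/* `irq < 0 ||` would be dead code */
}
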
47909diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
47910index 0c52e2a..3421ab7 100644
47911--- a/drivers/power/pda_power.c
47912+++ b/drivers/power/pda_power.c
47913@@ -37,7 +37,11 @@ static int polling;
47914
47915 #if IS_ENABLED(CONFIG_USB_PHY)
47916 static struct usb_phy *transceiver;
47917-static struct notifier_block otg_nb;
47918+static int otg_handle_notification(struct notifier_block *nb,
47919+ unsigned long event, void *unused);
47920+static struct notifier_block otg_nb = {
47921+ .notifier_call = otg_handle_notification
47922+};
47923 #endif
47924
47925 static struct regulator *ac_draw;
47926@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
47927
47928 #if IS_ENABLED(CONFIG_USB_PHY)
47929 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
47930- otg_nb.notifier_call = otg_handle_notification;
47931 ret = usb_register_notifier(transceiver, &otg_nb);
47932 if (ret) {
47933 dev_err(dev, "failure to register otg notifier\n");
47934diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
47935index cc439fd..8fa30df 100644
47936--- a/drivers/power/power_supply.h
47937+++ b/drivers/power/power_supply.h
47938@@ -16,12 +16,12 @@ struct power_supply;
47939
47940 #ifdef CONFIG_SYSFS
47941
47942-extern void power_supply_init_attrs(struct device_type *dev_type);
47943+extern void power_supply_init_attrs(void);
47944 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
47945
47946 #else
47947
47948-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
47949+static inline void power_supply_init_attrs(void) {}
47950 #define power_supply_uevent NULL
47951
47952 #endif /* CONFIG_SYSFS */
47953diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
47954index 557af94..84dc1fe 100644
47955--- a/drivers/power/power_supply_core.c
47956+++ b/drivers/power/power_supply_core.c
47957@@ -24,7 +24,10 @@
47958 struct class *power_supply_class;
47959 EXPORT_SYMBOL_GPL(power_supply_class);
47960
47961-static struct device_type power_supply_dev_type;
47962+extern const struct attribute_group *power_supply_attr_groups[];
47963+static struct device_type power_supply_dev_type = {
47964+ .groups = power_supply_attr_groups,
47965+};
47966
47967 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
47968 struct power_supply *supply)
47969@@ -584,7 +587,7 @@ static int __init power_supply_class_init(void)
47970 return PTR_ERR(power_supply_class);
47971
47972 power_supply_class->dev_uevent = power_supply_uevent;
47973- power_supply_init_attrs(&power_supply_dev_type);
47974+ power_supply_init_attrs();
47975
47976 return 0;
47977 }
47978diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
47979index 44420d1..967126e 100644
47980--- a/drivers/power/power_supply_sysfs.c
47981+++ b/drivers/power/power_supply_sysfs.c
47982@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
47983 .is_visible = power_supply_attr_is_visible,
47984 };
47985
47986-static const struct attribute_group *power_supply_attr_groups[] = {
47987+const struct attribute_group *power_supply_attr_groups[] = {
47988 &power_supply_attr_group,
47989 NULL,
47990 };
47991
47992-void power_supply_init_attrs(struct device_type *dev_type)
47993+void power_supply_init_attrs(void)
47994 {
47995 int i;
47996
47997- dev_type->groups = power_supply_attr_groups;
47998-
47999 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
48000 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
48001 }
48002diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
48003index 84419af..268ede8 100644
48004--- a/drivers/powercap/powercap_sys.c
48005+++ b/drivers/powercap/powercap_sys.c
48006@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
48007 struct device_attribute name_attr;
48008 };
48009
48010+static ssize_t show_constraint_name(struct device *dev,
48011+ struct device_attribute *dev_attr,
48012+ char *buf);
48013+
48014 static struct powercap_constraint_attr
48015- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
48016+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
48017+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
48018+ .power_limit_attr = {
48019+ .attr = {
48020+ .name = NULL,
48021+ .mode = S_IWUSR | S_IRUGO
48022+ },
48023+ .show = show_constraint_power_limit_uw,
48024+ .store = store_constraint_power_limit_uw
48025+ },
48026+
48027+ .time_window_attr = {
48028+ .attr = {
48029+ .name = NULL,
48030+ .mode = S_IWUSR | S_IRUGO
48031+ },
48032+ .show = show_constraint_time_window_us,
48033+ .store = store_constraint_time_window_us
48034+ },
48035+
48036+ .max_power_attr = {
48037+ .attr = {
48038+ .name = NULL,
48039+ .mode = S_IRUGO
48040+ },
48041+ .show = show_constraint_max_power_uw,
48042+ .store = NULL
48043+ },
48044+
48045+ .min_power_attr = {
48046+ .attr = {
48047+ .name = NULL,
48048+ .mode = S_IRUGO
48049+ },
48050+ .show = show_constraint_min_power_uw,
48051+ .store = NULL
48052+ },
48053+
48054+ .max_time_window_attr = {
48055+ .attr = {
48056+ .name = NULL,
48057+ .mode = S_IRUGO
48058+ },
48059+ .show = show_constraint_max_time_window_us,
48060+ .store = NULL
48061+ },
48062+
48063+ .min_time_window_attr = {
48064+ .attr = {
48065+ .name = NULL,
48066+ .mode = S_IRUGO
48067+ },
48068+ .show = show_constraint_min_time_window_us,
48069+ .store = NULL
48070+ },
48071+
48072+ .name_attr = {
48073+ .attr = {
48074+ .name = NULL,
48075+ .mode = S_IRUGO
48076+ },
48077+ .show = show_constraint_name,
48078+ .store = NULL
48079+ }
48080+ }
48081+};
48082
48083 /* A list of powercap control_types */
48084 static LIST_HEAD(powercap_cntrl_list);
48085@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
48086 }
48087
48088 static int create_constraint_attribute(int id, const char *name,
48089- int mode,
48090- struct device_attribute *dev_attr,
48091- ssize_t (*show)(struct device *,
48092- struct device_attribute *, char *),
48093- ssize_t (*store)(struct device *,
48094- struct device_attribute *,
48095- const char *, size_t)
48096- )
48097+ struct device_attribute *dev_attr)
48098 {
48099+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
48100
48101- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
48102- id, name);
48103- if (!dev_attr->attr.name)
48104+ if (!name)
48105 return -ENOMEM;
48106- dev_attr->attr.mode = mode;
48107- dev_attr->show = show;
48108- dev_attr->store = store;
48109+
48110+ pax_open_kernel();
48111+ *(const char **)&dev_attr->attr.name = name;
48112+ pax_close_kernel();
48113
48114 return 0;
48115 }
48116@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
48117
48118 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
48119 ret = create_constraint_attribute(i, "power_limit_uw",
48120- S_IWUSR | S_IRUGO,
48121- &constraint_attrs[i].power_limit_attr,
48122- show_constraint_power_limit_uw,
48123- store_constraint_power_limit_uw);
48124+ &constraint_attrs[i].power_limit_attr);
48125 if (ret)
48126 goto err_alloc;
48127 ret = create_constraint_attribute(i, "time_window_us",
48128- S_IWUSR | S_IRUGO,
48129- &constraint_attrs[i].time_window_attr,
48130- show_constraint_time_window_us,
48131- store_constraint_time_window_us);
48132+ &constraint_attrs[i].time_window_attr);
48133 if (ret)
48134 goto err_alloc;
48135- ret = create_constraint_attribute(i, "name", S_IRUGO,
48136- &constraint_attrs[i].name_attr,
48137- show_constraint_name,
48138- NULL);
48139+ ret = create_constraint_attribute(i, "name",
48140+ &constraint_attrs[i].name_attr);
48141 if (ret)
48142 goto err_alloc;
48143- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
48144- &constraint_attrs[i].max_power_attr,
48145- show_constraint_max_power_uw,
48146- NULL);
48147+ ret = create_constraint_attribute(i, "max_power_uw",
48148+ &constraint_attrs[i].max_power_attr);
48149 if (ret)
48150 goto err_alloc;
48151- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
48152- &constraint_attrs[i].min_power_attr,
48153- show_constraint_min_power_uw,
48154- NULL);
48155+ ret = create_constraint_attribute(i, "min_power_uw",
48156+ &constraint_attrs[i].min_power_attr);
48157 if (ret)
48158 goto err_alloc;
48159 ret = create_constraint_attribute(i, "max_time_window_us",
48160- S_IRUGO,
48161- &constraint_attrs[i].max_time_window_attr,
48162- show_constraint_max_time_window_us,
48163- NULL);
48164+ &constraint_attrs[i].max_time_window_attr);
48165 if (ret)
48166 goto err_alloc;
48167 ret = create_constraint_attribute(i, "min_time_window_us",
48168- S_IRUGO,
48169- &constraint_attrs[i].min_time_window_attr,
48170- show_constraint_min_time_window_us,
48171- NULL);
48172+ &constraint_attrs[i].min_time_window_attr);
48173 if (ret)
48174 goto err_alloc;
48175
48176@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
48177 power_zone->zone_dev_attrs[count++] =
48178 &dev_attr_max_energy_range_uj.attr;
48179 if (power_zone->ops->get_energy_uj) {
48180+ pax_open_kernel();
48181 if (power_zone->ops->reset_energy_uj)
48182- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
48183+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
48184 else
48185- dev_attr_energy_uj.attr.mode = S_IRUGO;
48186+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
48187+ pax_close_kernel();
48188 power_zone->zone_dev_attrs[count++] =
48189 &dev_attr_energy_uj.attr;
48190 }
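
The powercap change inverts the usual flow: everything seed_constraint_attributes() used to fill in at runtime now lives in the definition of constraint_attrs[] itself, stamped across all MAX_CONSTRAINTS_PER_ZONE slots with GCC's `[first ... last]` range designated initializer; only the kasprintf()'d name still has to be patched in afterwards, under pax_open_kernel(). The range-initializer idiom in isolation, with hypothetical names:

#define EXAMPLE_SLOTS 8

struct example_attr {
	const char *name;
	unsigned int mode;
};

static struct example_attr example_attrs[EXAMPLE_SLOTS] = {
	[0 ... EXAMPLE_SLOTS - 1] = {	/* GCC range initializer */
		.name = NULL,		/* filled in later via kasprintf() */
		.mode = 0444,
	},
};
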
48191diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
48192index 75dffb79..df850cd 100644
48193--- a/drivers/regulator/core.c
48194+++ b/drivers/regulator/core.c
48195@@ -3370,7 +3370,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
48196 {
48197 const struct regulation_constraints *constraints = NULL;
48198 const struct regulator_init_data *init_data;
48199- static atomic_t regulator_no = ATOMIC_INIT(0);
48200+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
48201 struct regulator_dev *rdev;
48202 struct device *dev;
48203 int ret, i;
48204@@ -3440,7 +3440,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
48205 rdev->dev.of_node = config->of_node;
48206 rdev->dev.parent = dev;
48207 dev_set_name(&rdev->dev, "regulator.%d",
48208- atomic_inc_return(&regulator_no) - 1);
48209+ atomic_inc_return_unchecked(&regulator_no) - 1);
48210 ret = device_register(&rdev->dev);
48211 if (ret != 0) {
48212 put_device(&rdev->dev);
48213diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
48214index 8d94d3d..653b623 100644
48215--- a/drivers/regulator/max8660.c
48216+++ b/drivers/regulator/max8660.c
48217@@ -420,8 +420,10 @@ static int max8660_probe(struct i2c_client *client,
48218 max8660->shadow_regs[MAX8660_OVER1] = 5;
48219 } else {
48220 /* Otherwise devices can be toggled via software */
48221- max8660_dcdc_ops.enable = max8660_dcdc_enable;
48222- max8660_dcdc_ops.disable = max8660_dcdc_disable;
48223+ pax_open_kernel();
48224+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
48225+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
48226+ pax_close_kernel();
48227 }
48228
48229 /*
48230diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
48231index 892aa1e..ebd1b9c 100644
48232--- a/drivers/regulator/max8973-regulator.c
48233+++ b/drivers/regulator/max8973-regulator.c
48234@@ -406,9 +406,11 @@ static int max8973_probe(struct i2c_client *client,
48235 if (!pdata || !pdata->enable_ext_control) {
48236 max->desc.enable_reg = MAX8973_VOUT;
48237 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
48238- max->ops.enable = regulator_enable_regmap;
48239- max->ops.disable = regulator_disable_regmap;
48240- max->ops.is_enabled = regulator_is_enabled_regmap;
48241+ pax_open_kernel();
48242+ *(void **)&max->ops.enable = regulator_enable_regmap;
48243+ *(void **)&max->ops.disable = regulator_disable_regmap;
48244+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
48245+ pax_close_kernel();
48246 }
48247
48248 if (pdata) {
48249diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
48250index 96c9f80..90974ca 100644
48251--- a/drivers/regulator/mc13892-regulator.c
48252+++ b/drivers/regulator/mc13892-regulator.c
48253@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
48254 }
48255 mc13xxx_unlock(mc13892);
48256
48257- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
48258+ pax_open_kernel();
48259+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
48260 = mc13892_vcam_set_mode;
48261- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
48262+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
48263 = mc13892_vcam_get_mode;
48264+ pax_close_kernel();
48265
48266 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
48267 ARRAY_SIZE(mc13892_regulators));
48268diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
48269index a2325bc..04c549f 100644
48270--- a/drivers/rtc/rtc-cmos.c
48271+++ b/drivers/rtc/rtc-cmos.c
48272@@ -779,7 +779,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
48273 hpet_rtc_timer_init();
48274
48275 /* export at least the first block of NVRAM */
48276- nvram.size = address_space - NVRAM_OFFSET;
48277+ pax_open_kernel();
48278+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
48279+ pax_close_kernel();
48280 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
48281 if (retval < 0) {
48282 dev_dbg(dev, "can't create nvram file? %d\n", retval);
48283diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
48284index d049393..bb20be0 100644
48285--- a/drivers/rtc/rtc-dev.c
48286+++ b/drivers/rtc/rtc-dev.c
48287@@ -16,6 +16,7 @@
48288 #include <linux/module.h>
48289 #include <linux/rtc.h>
48290 #include <linux/sched.h>
48291+#include <linux/grsecurity.h>
48292 #include "rtc-core.h"
48293
48294 static dev_t rtc_devt;
48295@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
48296 if (copy_from_user(&tm, uarg, sizeof(tm)))
48297 return -EFAULT;
48298
48299+ gr_log_timechange();
48300+
48301 return rtc_set_time(rtc, &tm);
48302
48303 case RTC_PIE_ON:
48304diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
48305index 4e75345..09f8663 100644
48306--- a/drivers/rtc/rtc-ds1307.c
48307+++ b/drivers/rtc/rtc-ds1307.c
48308@@ -107,7 +107,7 @@ struct ds1307 {
48309 u8 offset; /* register's offset */
48310 u8 regs[11];
48311 u16 nvram_offset;
48312- struct bin_attribute *nvram;
48313+ bin_attribute_no_const *nvram;
48314 enum ds_type type;
48315 unsigned long flags;
48316 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
48317diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
48318index 11880c1..b823aa4 100644
48319--- a/drivers/rtc/rtc-m48t59.c
48320+++ b/drivers/rtc/rtc-m48t59.c
48321@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
48322 if (IS_ERR(m48t59->rtc))
48323 return PTR_ERR(m48t59->rtc);
48324
48325- m48t59_nvram_attr.size = pdata->offset;
48326+ pax_open_kernel();
48327+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
48328+ pax_close_kernel();
48329
48330 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
48331 if (ret)
48332diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
48333index 14b5f8d..cc9bd26 100644
48334--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
48335+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
48336@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
48337 for (bit = 0; bit < 8; bit++) {
48338
48339 if ((pci_status[i] & (0x1 << bit)) != 0) {
48340- static const char *s;
48341+ const char *s;
48342
48343 s = pci_status_strings[bit];
48344 if (i == 7/*TARG*/ && bit == 3)
48345@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
48346
48347 for (bit = 0; bit < 8; bit++) {
48348
48349- if ((split_status[i] & (0x1 << bit)) != 0) {
48350- static const char *s;
48351-
48352- s = split_status_strings[bit];
48353- printk(s, ahd_name(ahd),
48354+ if ((split_status[i] & (0x1 << bit)) != 0)
48355+ printk(split_status_strings[bit], ahd_name(ahd),
48356 split_status_source[i]);
48357- }
48358
48359 if (i > 1)
48360 continue;
48361
48362- if ((sg_split_status[i] & (0x1 << bit)) != 0) {
48363- static const char *s;
48364-
48365- s = split_status_strings[bit];
48366- printk(s, ahd_name(ahd), "SG");
48367- }
48368+ if ((sg_split_status[i] & (0x1 << bit)) != 0)
48369+ printk(split_status_strings[bit], ahd_name(ahd), "SG");
48370 }
48371 }
48372 /*
48373diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
48374index e693af6..2e525b6 100644
48375--- a/drivers/scsi/bfa/bfa_fcpim.h
48376+++ b/drivers/scsi/bfa/bfa_fcpim.h
48377@@ -36,7 +36,7 @@ struct bfa_iotag_s {
48378
48379 struct bfa_itn_s {
48380 bfa_isr_func_t isr;
48381-};
48382+} __no_const;
48383
48384 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
48385 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
48386diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
48387index a3ab5cc..8143622 100644
48388--- a/drivers/scsi/bfa/bfa_fcs.c
48389+++ b/drivers/scsi/bfa/bfa_fcs.c
48390@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
48391 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
48392
48393 static struct bfa_fcs_mod_s fcs_modules[] = {
48394- { bfa_fcs_port_attach, NULL, NULL },
48395- { bfa_fcs_uf_attach, NULL, NULL },
48396- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
48397- bfa_fcs_fabric_modexit },
48398+ {
48399+ .attach = bfa_fcs_port_attach,
48400+ .modinit = NULL,
48401+ .modexit = NULL
48402+ },
48403+ {
48404+ .attach = bfa_fcs_uf_attach,
48405+ .modinit = NULL,
48406+ .modexit = NULL
48407+ },
48408+ {
48409+ .attach = bfa_fcs_fabric_attach,
48410+ .modinit = bfa_fcs_fabric_modinit,
48411+ .modexit = bfa_fcs_fabric_modexit
48412+ },
48413 };
48414
48415 /*
48416diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
48417index f5e4e61..a0acaf6 100644
48418--- a/drivers/scsi/bfa/bfa_fcs_lport.c
48419+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
48420@@ -89,15 +89,26 @@ static struct {
48421 void (*offline) (struct bfa_fcs_lport_s *port);
48422 } __port_action[] = {
48423 {
48424- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
48425- bfa_fcs_lport_unknown_offline}, {
48426- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
48427- bfa_fcs_lport_fab_offline}, {
48428- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
48429- bfa_fcs_lport_n2n_offline}, {
48430- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
48431- bfa_fcs_lport_loop_offline},
48432- };
48433+ .init = bfa_fcs_lport_unknown_init,
48434+ .online = bfa_fcs_lport_unknown_online,
48435+ .offline = bfa_fcs_lport_unknown_offline
48436+ },
48437+ {
48438+ .init = bfa_fcs_lport_fab_init,
48439+ .online = bfa_fcs_lport_fab_online,
48440+ .offline = bfa_fcs_lport_fab_offline
48441+ },
48442+ {
48443+ .init = bfa_fcs_lport_n2n_init,
48444+ .online = bfa_fcs_lport_n2n_online,
48445+ .offline = bfa_fcs_lport_n2n_offline
48446+ },
48447+ {
48448+ .init = bfa_fcs_lport_loop_init,
48449+ .online = bfa_fcs_lport_loop_online,
48450+ .offline = bfa_fcs_lport_loop_offline
48451+ },
48452+};
48453
48454 /*
48455 * fcs_port_sm FCS logical port state machine
48456diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
48457index 90814fe..4384138 100644
48458--- a/drivers/scsi/bfa/bfa_ioc.h
48459+++ b/drivers/scsi/bfa/bfa_ioc.h
48460@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
48461 bfa_ioc_disable_cbfn_t disable_cbfn;
48462 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
48463 bfa_ioc_reset_cbfn_t reset_cbfn;
48464-};
48465+} __no_const;
48466
48467 /*
48468 * IOC event notification mechanism.
48469@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
48470 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
48471 enum bfi_ioc_state fwstate);
48472 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
48473-};
48474+} __no_const;
48475
48476 /*
48477 * Queue element to wait for room in request queue. FIFO order is
48478diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
48479index a14c784..6de6790 100644
48480--- a/drivers/scsi/bfa/bfa_modules.h
48481+++ b/drivers/scsi/bfa/bfa_modules.h
48482@@ -78,12 +78,12 @@ enum {
48483 \
48484 extern struct bfa_module_s hal_mod_ ## __mod; \
48485 struct bfa_module_s hal_mod_ ## __mod = { \
48486- bfa_ ## __mod ## _meminfo, \
48487- bfa_ ## __mod ## _attach, \
48488- bfa_ ## __mod ## _detach, \
48489- bfa_ ## __mod ## _start, \
48490- bfa_ ## __mod ## _stop, \
48491- bfa_ ## __mod ## _iocdisable, \
48492+ .meminfo = bfa_ ## __mod ## _meminfo, \
48493+ .attach = bfa_ ## __mod ## _attach, \
48494+ .detach = bfa_ ## __mod ## _detach, \
48495+ .start = bfa_ ## __mod ## _start, \
48496+ .stop = bfa_ ## __mod ## _stop, \
48497+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
48498 }
48499
48500 #define BFA_CACHELINE_SZ (256)
48501diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
48502index 045c4e1..13de803 100644
48503--- a/drivers/scsi/fcoe/fcoe_sysfs.c
48504+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
48505@@ -33,8 +33,8 @@
48506 */
48507 #include "libfcoe.h"
48508
48509-static atomic_t ctlr_num;
48510-static atomic_t fcf_num;
48511+static atomic_unchecked_t ctlr_num;
48512+static atomic_unchecked_t fcf_num;
48513
48514 /*
48515 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
48516@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
48517 if (!ctlr)
48518 goto out;
48519
48520- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
48521+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
48522 ctlr->f = f;
48523 ctlr->mode = FIP_CONN_TYPE_FABRIC;
48524 INIT_LIST_HEAD(&ctlr->fcfs);
48525@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
48526 fcf->dev.parent = &ctlr->dev;
48527 fcf->dev.bus = &fcoe_bus_type;
48528 fcf->dev.type = &fcoe_fcf_device_type;
48529- fcf->id = atomic_inc_return(&fcf_num) - 1;
48530+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
48531 fcf->state = FCOE_FCF_STATE_UNKNOWN;
48532
48533 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
48534@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
48535 {
48536 int error;
48537
48538- atomic_set(&ctlr_num, 0);
48539- atomic_set(&fcf_num, 0);
48540+ atomic_set_unchecked(&ctlr_num, 0);
48541+ atomic_set_unchecked(&fcf_num, 0);
48542
48543 error = bus_register(&fcoe_bus_type);
48544 if (error)
48545diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
48546index f2c5005..db36c02 100644
48547--- a/drivers/scsi/hosts.c
48548+++ b/drivers/scsi/hosts.c
48549@@ -42,7 +42,7 @@
48550 #include "scsi_logging.h"
48551
48552
48553-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
48554+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
48555
48556
48557 static void scsi_host_cls_release(struct device *dev)
48558@@ -367,7 +367,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
48559 * subtract one because we increment first then return, but we need to
48560 * know what the next host number was before increment
48561 */
48562- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
48563+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
48564 shost->dma_channel = 0xff;
48565
48566 /* These three are default values which can be overridden */
48567diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
48568index 20a5e6e..8b23cea 100644
48569--- a/drivers/scsi/hpsa.c
48570+++ b/drivers/scsi/hpsa.c
48571@@ -578,7 +578,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
48572 unsigned long flags;
48573
48574 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
48575- return h->access.command_completed(h, q);
48576+ return h->access->command_completed(h, q);
48577
48578 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
48579 a = rq->head[rq->current_entry];
48580@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
48581 while (!list_empty(&h->reqQ)) {
48582 c = list_entry(h->reqQ.next, struct CommandList, list);
48583 /* can't do anything if fifo is full */
48584- if ((h->access.fifo_full(h))) {
48585+ if ((h->access->fifo_full(h))) {
48586 dev_warn(&h->pdev->dev, "fifo full\n");
48587 break;
48588 }
48589@@ -3466,7 +3466,7 @@ static void start_io(struct ctlr_info *h)
48590
48591 /* Tell the controller execute command */
48592 spin_unlock_irqrestore(&h->lock, flags);
48593- h->access.submit_command(h, c);
48594+ h->access->submit_command(h, c);
48595 spin_lock_irqsave(&h->lock, flags);
48596 }
48597 spin_unlock_irqrestore(&h->lock, flags);
48598@@ -3474,17 +3474,17 @@ static void start_io(struct ctlr_info *h)
48599
48600 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
48601 {
48602- return h->access.command_completed(h, q);
48603+ return h->access->command_completed(h, q);
48604 }
48605
48606 static inline bool interrupt_pending(struct ctlr_info *h)
48607 {
48608- return h->access.intr_pending(h);
48609+ return h->access->intr_pending(h);
48610 }
48611
48612 static inline long interrupt_not_for_us(struct ctlr_info *h)
48613 {
48614- return (h->access.intr_pending(h) == 0) ||
48615+ return (h->access->intr_pending(h) == 0) ||
48616 (h->interrupts_enabled == 0);
48617 }
48618
48619@@ -4386,7 +4386,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
48620 if (prod_index < 0)
48621 return -ENODEV;
48622 h->product_name = products[prod_index].product_name;
48623- h->access = *(products[prod_index].access);
48624+ h->access = products[prod_index].access;
48625
48626 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
48627 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
48628@@ -4668,7 +4668,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
48629
48630 assert_spin_locked(&lockup_detector_lock);
48631 remove_ctlr_from_lockup_detector_list(h);
48632- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48633+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48634 spin_lock_irqsave(&h->lock, flags);
48635 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
48636 spin_unlock_irqrestore(&h->lock, flags);
48637@@ -4845,7 +4845,7 @@ reinit_after_soft_reset:
48638 }
48639
48640 /* make sure the board interrupts are off */
48641- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48642+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48643
48644 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
48645 goto clean2;
48646@@ -4879,7 +4879,7 @@ reinit_after_soft_reset:
48647 * fake ones to scoop up any residual completions.
48648 */
48649 spin_lock_irqsave(&h->lock, flags);
48650- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48651+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48652 spin_unlock_irqrestore(&h->lock, flags);
48653 free_irqs(h);
48654 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
48655@@ -4898,9 +4898,9 @@ reinit_after_soft_reset:
48656 dev_info(&h->pdev->dev, "Board READY.\n");
48657 dev_info(&h->pdev->dev,
48658 "Waiting for stale completions to drain.\n");
48659- h->access.set_intr_mask(h, HPSA_INTR_ON);
48660+ h->access->set_intr_mask(h, HPSA_INTR_ON);
48661 msleep(10000);
48662- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48663+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48664
48665 rc = controller_reset_failed(h->cfgtable);
48666 if (rc)
48667@@ -4921,7 +4921,7 @@ reinit_after_soft_reset:
48668 }
48669
48670 /* Turn the interrupts on so we can service requests */
48671- h->access.set_intr_mask(h, HPSA_INTR_ON);
48672+ h->access->set_intr_mask(h, HPSA_INTR_ON);
48673
48674 hpsa_hba_inquiry(h);
48675 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
48676@@ -4976,7 +4976,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
48677 * To write all data in the battery backed cache to disks
48678 */
48679 hpsa_flush_cache(h);
48680- h->access.set_intr_mask(h, HPSA_INTR_OFF);
48681+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
48682 hpsa_free_irqs_and_disable_msix(h);
48683 }
48684
48685@@ -5143,7 +5143,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
48686 return;
48687 }
48688 /* Change the access methods to the performant access methods */
48689- h->access = SA5_performant_access;
48690+ h->access = &SA5_performant_access;
48691 h->transMethod = CFGTBL_Trans_Performant;
48692 }
48693
48694diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
48695index bc85e72..d463049 100644
48696--- a/drivers/scsi/hpsa.h
48697+++ b/drivers/scsi/hpsa.h
48698@@ -79,7 +79,7 @@ struct ctlr_info {
48699 unsigned int msix_vector;
48700 unsigned int msi_vector;
48701 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
48702- struct access_method access;
48703+ struct access_method *access;
48704
48705 /* queue and queue Info */
48706 struct list_head reqQ;
48707@@ -381,19 +381,19 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)
48708 }
48709
48710 static struct access_method SA5_access = {
48711- SA5_submit_command,
48712- SA5_intr_mask,
48713- SA5_fifo_full,
48714- SA5_intr_pending,
48715- SA5_completed,
48716+ .submit_command = SA5_submit_command,
48717+ .set_intr_mask = SA5_intr_mask,
48718+ .fifo_full = SA5_fifo_full,
48719+ .intr_pending = SA5_intr_pending,
48720+ .command_completed = SA5_completed,
48721 };
48722
48723 static struct access_method SA5_performant_access = {
48724- SA5_submit_command,
48725- SA5_performant_intr_mask,
48726- SA5_fifo_full,
48727- SA5_performant_intr_pending,
48728- SA5_performant_completed,
48729+ .submit_command = SA5_submit_command,
48730+ .set_intr_mask = SA5_performant_intr_mask,
48731+ .fifo_full = SA5_fifo_full,
48732+ .intr_pending = SA5_performant_intr_pending,
48733+ .command_completed = SA5_performant_completed,
48734 };
48735
48736 struct board_type {
48737diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
48738index 1b3a094..068e683 100644
48739--- a/drivers/scsi/libfc/fc_exch.c
48740+++ b/drivers/scsi/libfc/fc_exch.c
48741@@ -101,12 +101,12 @@ struct fc_exch_mgr {
48742 u16 pool_max_index;
48743
48744 struct {
48745- atomic_t no_free_exch;
48746- atomic_t no_free_exch_xid;
48747- atomic_t xid_not_found;
48748- atomic_t xid_busy;
48749- atomic_t seq_not_found;
48750- atomic_t non_bls_resp;
48751+ atomic_unchecked_t no_free_exch;
48752+ atomic_unchecked_t no_free_exch_xid;
48753+ atomic_unchecked_t xid_not_found;
48754+ atomic_unchecked_t xid_busy;
48755+ atomic_unchecked_t seq_not_found;
48756+ atomic_unchecked_t non_bls_resp;
48757 } stats;
48758 };
48759
48760@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
48761 /* allocate memory for exchange */
48762 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
48763 if (!ep) {
48764- atomic_inc(&mp->stats.no_free_exch);
48765+ atomic_inc_unchecked(&mp->stats.no_free_exch);
48766 goto out;
48767 }
48768 memset(ep, 0, sizeof(*ep));
48769@@ -874,7 +874,7 @@ out:
48770 return ep;
48771 err:
48772 spin_unlock_bh(&pool->lock);
48773- atomic_inc(&mp->stats.no_free_exch_xid);
48774+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
48775 mempool_free(ep, mp->ep_pool);
48776 return NULL;
48777 }
48778@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48779 xid = ntohs(fh->fh_ox_id); /* we originated exch */
48780 ep = fc_exch_find(mp, xid);
48781 if (!ep) {
48782- atomic_inc(&mp->stats.xid_not_found);
48783+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48784 reject = FC_RJT_OX_ID;
48785 goto out;
48786 }
48787@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48788 ep = fc_exch_find(mp, xid);
48789 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
48790 if (ep) {
48791- atomic_inc(&mp->stats.xid_busy);
48792+ atomic_inc_unchecked(&mp->stats.xid_busy);
48793 reject = FC_RJT_RX_ID;
48794 goto rel;
48795 }
48796@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48797 }
48798 xid = ep->xid; /* get our XID */
48799 } else if (!ep) {
48800- atomic_inc(&mp->stats.xid_not_found);
48801+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48802 reject = FC_RJT_RX_ID; /* XID not found */
48803 goto out;
48804 }
48805@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
48806 } else {
48807 sp = &ep->seq;
48808 if (sp->id != fh->fh_seq_id) {
48809- atomic_inc(&mp->stats.seq_not_found);
48810+ atomic_inc_unchecked(&mp->stats.seq_not_found);
48811 if (f_ctl & FC_FC_END_SEQ) {
48812 /*
48813 * Update sequence_id based on incoming last
48814@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48815
48816 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
48817 if (!ep) {
48818- atomic_inc(&mp->stats.xid_not_found);
48819+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48820 goto out;
48821 }
48822 if (ep->esb_stat & ESB_ST_COMPLETE) {
48823- atomic_inc(&mp->stats.xid_not_found);
48824+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48825 goto rel;
48826 }
48827 if (ep->rxid == FC_XID_UNKNOWN)
48828 ep->rxid = ntohs(fh->fh_rx_id);
48829 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
48830- atomic_inc(&mp->stats.xid_not_found);
48831+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48832 goto rel;
48833 }
48834 if (ep->did != ntoh24(fh->fh_s_id) &&
48835 ep->did != FC_FID_FLOGI) {
48836- atomic_inc(&mp->stats.xid_not_found);
48837+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48838 goto rel;
48839 }
48840 sof = fr_sof(fp);
48841@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48842 sp->ssb_stat |= SSB_ST_RESP;
48843 sp->id = fh->fh_seq_id;
48844 } else if (sp->id != fh->fh_seq_id) {
48845- atomic_inc(&mp->stats.seq_not_found);
48846+ atomic_inc_unchecked(&mp->stats.seq_not_found);
48847 goto rel;
48848 }
48849
48850@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
48851 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
48852
48853 if (!sp)
48854- atomic_inc(&mp->stats.xid_not_found);
48855+ atomic_inc_unchecked(&mp->stats.xid_not_found);
48856 else
48857- atomic_inc(&mp->stats.non_bls_resp);
48858+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
48859
48860 fc_frame_free(fp);
48861 }
48862@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
48863
48864 list_for_each_entry(ema, &lport->ema_list, ema_list) {
48865 mp = ema->mp;
48866- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
48867+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
48868 st->fc_no_free_exch_xid +=
48869- atomic_read(&mp->stats.no_free_exch_xid);
48870- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
48871- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
48872- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
48873- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
48874+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
48875+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
48876+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
48877+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
48878+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
48879 }
48880 }
48881 EXPORT_SYMBOL(fc_exch_update_stats);
48882diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
48883index d289583..b745eec 100644
48884--- a/drivers/scsi/libsas/sas_ata.c
48885+++ b/drivers/scsi/libsas/sas_ata.c
48886@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
48887 .postreset = ata_std_postreset,
48888 .error_handler = ata_std_error_handler,
48889 .post_internal_cmd = sas_ata_post_internal,
48890- .qc_defer = ata_std_qc_defer,
48891+ .qc_defer = ata_std_qc_defer,
48892 .qc_prep = ata_noop_qc_prep,
48893 .qc_issue = sas_ata_qc_issue,
48894 .qc_fill_rtf = sas_ata_qc_fill_rtf,
48895diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
48896index 4e1b75c..0bbdfa9 100644
48897--- a/drivers/scsi/lpfc/lpfc.h
48898+++ b/drivers/scsi/lpfc/lpfc.h
48899@@ -432,7 +432,7 @@ struct lpfc_vport {
48900 struct dentry *debug_nodelist;
48901 struct dentry *vport_debugfs_root;
48902 struct lpfc_debugfs_trc *disc_trc;
48903- atomic_t disc_trc_cnt;
48904+ atomic_unchecked_t disc_trc_cnt;
48905 #endif
48906 uint8_t stat_data_enabled;
48907 uint8_t stat_data_blocked;
48908@@ -865,8 +865,8 @@ struct lpfc_hba {
48909 struct timer_list fabric_block_timer;
48910 unsigned long bit_flags;
48911 #define FABRIC_COMANDS_BLOCKED 0
48912- atomic_t num_rsrc_err;
48913- atomic_t num_cmd_success;
48914+ atomic_unchecked_t num_rsrc_err;
48915+ atomic_unchecked_t num_cmd_success;
48916 unsigned long last_rsrc_error_time;
48917 unsigned long last_ramp_down_time;
48918 unsigned long last_ramp_up_time;
48919@@ -902,7 +902,7 @@ struct lpfc_hba {
48920
48921 struct dentry *debug_slow_ring_trc;
48922 struct lpfc_debugfs_trc *slow_ring_trc;
48923- atomic_t slow_ring_trc_cnt;
48924+ atomic_unchecked_t slow_ring_trc_cnt;
48925 /* iDiag debugfs sub-directory */
48926 struct dentry *idiag_root;
48927 struct dentry *idiag_pci_cfg;
48928diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
48929index 60084e6..0e2e700 100644
48930--- a/drivers/scsi/lpfc/lpfc_debugfs.c
48931+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
48932@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
48933
48934 #include <linux/debugfs.h>
48935
48936-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
48937+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
48938 static unsigned long lpfc_debugfs_start_time = 0L;
48939
48940 /* iDiag */
48941@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
48942 lpfc_debugfs_enable = 0;
48943
48944 len = 0;
48945- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
48946+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
48947 (lpfc_debugfs_max_disc_trc - 1);
48948 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
48949 dtp = vport->disc_trc + i;
48950@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
48951 lpfc_debugfs_enable = 0;
48952
48953 len = 0;
48954- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
48955+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
48956 (lpfc_debugfs_max_slow_ring_trc - 1);
48957 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
48958 dtp = phba->slow_ring_trc + i;
48959@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
48960 !vport || !vport->disc_trc)
48961 return;
48962
48963- index = atomic_inc_return(&vport->disc_trc_cnt) &
48964+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
48965 (lpfc_debugfs_max_disc_trc - 1);
48966 dtp = vport->disc_trc + index;
48967 dtp->fmt = fmt;
48968 dtp->data1 = data1;
48969 dtp->data2 = data2;
48970 dtp->data3 = data3;
48971- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
48972+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
48973 dtp->jif = jiffies;
48974 #endif
48975 return;
48976@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
48977 !phba || !phba->slow_ring_trc)
48978 return;
48979
48980- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
48981+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
48982 (lpfc_debugfs_max_slow_ring_trc - 1);
48983 dtp = phba->slow_ring_trc + index;
48984 dtp->fmt = fmt;
48985 dtp->data1 = data1;
48986 dtp->data2 = data2;
48987 dtp->data3 = data3;
48988- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
48989+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
48990 dtp->jif = jiffies;
48991 #endif
48992 return;
48993@@ -4168,7 +4168,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
48994 "slow_ring buffer\n");
48995 goto debug_failed;
48996 }
48997- atomic_set(&phba->slow_ring_trc_cnt, 0);
48998+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
48999 memset(phba->slow_ring_trc, 0,
49000 (sizeof(struct lpfc_debugfs_trc) *
49001 lpfc_debugfs_max_slow_ring_trc));
49002@@ -4214,7 +4214,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
49003 "buffer\n");
49004 goto debug_failed;
49005 }
49006- atomic_set(&vport->disc_trc_cnt, 0);
49007+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
49008
49009 snprintf(name, sizeof(name), "discovery_trace");
49010 vport->debug_disc_trc =
49011diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
49012index 68c94cc..8c27be5 100644
49013--- a/drivers/scsi/lpfc/lpfc_init.c
49014+++ b/drivers/scsi/lpfc/lpfc_init.c
49015@@ -10949,8 +10949,10 @@ lpfc_init(void)
49016 "misc_register returned with status %d", error);
49017
49018 if (lpfc_enable_npiv) {
49019- lpfc_transport_functions.vport_create = lpfc_vport_create;
49020- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
49021+ pax_open_kernel();
49022+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
49023+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
49024+ pax_close_kernel();
49025 }
49026 lpfc_transport_template =
49027 fc_attach_transport(&lpfc_transport_functions);
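
pax_open_kernel()/pax_close_kernel() bracket the two stores above because this patch constifies ops structures such as lpfc_transport_functions into read-only memory; the rare legitimate runtime update must briefly lift write protection, which PaX does by toggling CR0.WP. Here is a userspace analogue of the same discipline, built on mmap/mprotect rather than the real PaX primitives:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct transport_ops {
        int (*vport_create)(void);
    };

    static int real_vport_create(void) { return 42; }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        struct transport_ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;

        mprotect(ops, pg, PROT_READ);               /* "constified" state */

        mprotect(ops, pg, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
        ops->vport_create = real_vport_create;      /* the one legal write */
        mprotect(ops, pg, PROT_READ);               /* pax_close_kernel() */

        printf("vport_create() = %d\n", ops->vport_create());
        return 0;
    }

The *(void **)& casts in the hunk exist only to defeat the const qualifier the constify plugin adds; the window between open and close is kept as small as possible.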
49028diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
49029index b2ede05..aaf482ca 100644
49030--- a/drivers/scsi/lpfc/lpfc_scsi.c
49031+++ b/drivers/scsi/lpfc/lpfc_scsi.c
49032@@ -353,7 +353,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
49033 uint32_t evt_posted;
49034
49035 spin_lock_irqsave(&phba->hbalock, flags);
49036- atomic_inc(&phba->num_rsrc_err);
49037+ atomic_inc_unchecked(&phba->num_rsrc_err);
49038 phba->last_rsrc_error_time = jiffies;
49039
49040 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
49041@@ -394,7 +394,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
49042 unsigned long flags;
49043 struct lpfc_hba *phba = vport->phba;
49044 uint32_t evt_posted;
49045- atomic_inc(&phba->num_cmd_success);
49046+ atomic_inc_unchecked(&phba->num_cmd_success);
49047
49048 if (vport->cfg_lun_queue_depth <= queue_depth)
49049 return;
49050@@ -438,8 +438,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
49051 unsigned long num_rsrc_err, num_cmd_success;
49052 int i;
49053
49054- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
49055- num_cmd_success = atomic_read(&phba->num_cmd_success);
49056+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
49057+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
49058
49059 /*
49060 * The error and success command counters are global per
49061@@ -467,8 +467,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
49062 }
49063 }
49064 lpfc_destroy_vport_work_array(phba, vports);
49065- atomic_set(&phba->num_rsrc_err, 0);
49066- atomic_set(&phba->num_cmd_success, 0);
49067+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
49068+ atomic_set_unchecked(&phba->num_cmd_success, 0);
49069 }
49070
49071 /**
49072@@ -502,8 +502,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
49073 }
49074 }
49075 lpfc_destroy_vport_work_array(phba, vports);
49076- atomic_set(&phba->num_rsrc_err, 0);
49077- atomic_set(&phba->num_cmd_success, 0);
49078+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
49079+ atomic_set_unchecked(&phba->num_cmd_success, 0);
49080 }
49081
49082 /**
49083diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
49084index 7f0af4f..193ac3e 100644
49085--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
49086+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
49087@@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
49088 {
49089 struct scsi_device *sdev = to_scsi_device(dev);
49090 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
49091- static struct _raid_device *raid_device;
49092+ struct _raid_device *raid_device;
49093 unsigned long flags;
49094 Mpi2RaidVolPage0_t vol_pg0;
49095 Mpi2ConfigReply_t mpi_reply;
49096@@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
49097 {
49098 struct scsi_device *sdev = to_scsi_device(dev);
49099 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
49100- static struct _raid_device *raid_device;
49101+ struct _raid_device *raid_device;
49102 unsigned long flags;
49103 Mpi2RaidVolPage0_t vol_pg0;
49104 Mpi2ConfigReply_t mpi_reply;
49105@@ -6637,7 +6637,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
49106 struct fw_event_work *fw_event)
49107 {
49108 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
49109- static struct _raid_device *raid_device;
49110+ struct _raid_device *raid_device;
49111 unsigned long flags;
49112 u16 handle;
49113
49114@@ -7108,7 +7108,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
49115 u64 sas_address;
49116 struct _sas_device *sas_device;
49117 struct _sas_node *expander_device;
49118- static struct _raid_device *raid_device;
49119+ struct _raid_device *raid_device;
49120 u8 retry_count;
49121 unsigned long flags;
49122
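
All four mpt2sas hunks drop a stray "static" from a function-local pointer. A static local is a single shared object across every caller, so two concurrent SCSI event paths would clobber each other's raid_device pointer; without static, each invocation gets its own slot on the stack. A userspace sketch of the failure mode, with pthreads standing in for concurrent driver paths and illustrative names throughout:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static int devices[2] = { 100, 200 };

    static int lookup_buggy(int idx)
    {
        static int *dev;      /* shared by ALL callers: the bug */
        dev = &devices[idx];
        usleep(1000);         /* window for another caller to clobber it */
        return *dev;
    }

    static void *worker(void *arg)
    {
        int idx = (int)(long)arg;
        printf("caller %d saw %d (expected %d)\n",
               idx, lookup_buggy(idx), devices[idx]);
        return NULL;
    }

    int main(void)    /* build with -pthread */
    {
        pthread_t t0, t1;
        pthread_create(&t0, NULL, worker, (void *)0L);
        pthread_create(&t1, NULL, worker, (void *)1L);
        pthread_join(t0, NULL);
        pthread_join(t1, NULL);
        return 0;
    }

Dropping "static" turns dev into an ordinary automatic variable and the race disappears.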
49123diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
49124index be8ce54..94ed33a 100644
49125--- a/drivers/scsi/pmcraid.c
49126+++ b/drivers/scsi/pmcraid.c
49127@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
49128 res->scsi_dev = scsi_dev;
49129 scsi_dev->hostdata = res;
49130 res->change_detected = 0;
49131- atomic_set(&res->read_failures, 0);
49132- atomic_set(&res->write_failures, 0);
49133+ atomic_set_unchecked(&res->read_failures, 0);
49134+ atomic_set_unchecked(&res->write_failures, 0);
49135 rc = 0;
49136 }
49137 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
49138@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
49139
49140 /* If this was a SCSI read/write command keep count of errors */
49141 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
49142- atomic_inc(&res->read_failures);
49143+ atomic_inc_unchecked(&res->read_failures);
49144 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
49145- atomic_inc(&res->write_failures);
49146+ atomic_inc_unchecked(&res->write_failures);
49147
49148 if (!RES_IS_GSCSI(res->cfg_entry) &&
49149 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
49150@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
49151 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
49152 * hrrq_id assigned here in queuecommand
49153 */
49154- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
49155+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
49156 pinstance->num_hrrq;
49157 cmd->cmd_done = pmcraid_io_done;
49158
49159@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
49160 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
49161 * hrrq_id assigned here in queuecommand
49162 */
49163- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
49164+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
49165 pinstance->num_hrrq;
49166
49167 if (request_size) {
49168@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
49169
49170 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
49171 /* add resources only after host is added into system */
49172- if (!atomic_read(&pinstance->expose_resources))
49173+ if (!atomic_read_unchecked(&pinstance->expose_resources))
49174 return;
49175
49176 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
49177@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
49178 init_waitqueue_head(&pinstance->reset_wait_q);
49179
49180 atomic_set(&pinstance->outstanding_cmds, 0);
49181- atomic_set(&pinstance->last_message_id, 0);
49182- atomic_set(&pinstance->expose_resources, 0);
49183+ atomic_set_unchecked(&pinstance->last_message_id, 0);
49184+ atomic_set_unchecked(&pinstance->expose_resources, 0);
49185
49186 INIT_LIST_HEAD(&pinstance->free_res_q);
49187 INIT_LIST_HEAD(&pinstance->used_res_q);
49188@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
49189 /* Schedule worker thread to handle CCN and take care of adding and
49190 * removing devices to OS
49191 */
49192- atomic_set(&pinstance->expose_resources, 1);
49193+ atomic_set_unchecked(&pinstance->expose_resources, 1);
49194 schedule_work(&pinstance->worker_q);
49195 return rc;
49196
49197diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
49198index e1d150f..6c6df44 100644
49199--- a/drivers/scsi/pmcraid.h
49200+++ b/drivers/scsi/pmcraid.h
49201@@ -748,7 +748,7 @@ struct pmcraid_instance {
49202 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
49203
49204 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
49205- atomic_t last_message_id;
49206+ atomic_unchecked_t last_message_id;
49207
49208 /* configuration table */
49209 struct pmcraid_config_table *cfg_table;
49210@@ -777,7 +777,7 @@ struct pmcraid_instance {
49211 atomic_t outstanding_cmds;
49212
49213 /* should add/delete resources to mid-layer now ?*/
49214- atomic_t expose_resources;
49215+ atomic_unchecked_t expose_resources;
49216
49217
49218
49219@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
49220 struct pmcraid_config_table_entry_ext cfg_entry_ext;
49221 };
49222 struct scsi_device *scsi_dev; /* Link scsi_device structure */
49223- atomic_t read_failures; /* count of failed READ commands */
49224- atomic_t write_failures; /* count of failed WRITE commands */
49225+ atomic_unchecked_t read_failures; /* count of failed READ commands */
49226+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
49227
49228 /* To indicate add/delete/modify during CCN */
49229 u8 change_detected;
49230diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
49231index 5f174b8..98d32b0 100644
49232--- a/drivers/scsi/qla2xxx/qla_attr.c
49233+++ b/drivers/scsi/qla2xxx/qla_attr.c
49234@@ -2040,7 +2040,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
49235 return 0;
49236 }
49237
49238-struct fc_function_template qla2xxx_transport_functions = {
49239+fc_function_template_no_const qla2xxx_transport_functions = {
49240
49241 .show_host_node_name = 1,
49242 .show_host_port_name = 1,
49243@@ -2088,7 +2088,7 @@ struct fc_function_template qla2xxx_transport_functions = {
49244 .bsg_timeout = qla24xx_bsg_timeout,
49245 };
49246
49247-struct fc_function_template qla2xxx_transport_vport_functions = {
49248+fc_function_template_no_const qla2xxx_transport_vport_functions = {
49249
49250 .show_host_node_name = 1,
49251 .show_host_port_name = 1,
49252diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
49253index 4446bf5..9a3574d 100644
49254--- a/drivers/scsi/qla2xxx/qla_gbl.h
49255+++ b/drivers/scsi/qla2xxx/qla_gbl.h
49256@@ -538,8 +538,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
49257 struct device_attribute;
49258 extern struct device_attribute *qla2x00_host_attrs[];
49259 struct fc_function_template;
49260-extern struct fc_function_template qla2xxx_transport_functions;
49261-extern struct fc_function_template qla2xxx_transport_vport_functions;
49262+extern fc_function_template_no_const qla2xxx_transport_functions;
49263+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
49264 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
49265 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
49266 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
49267diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
49268index 52be35e..b933907 100644
49269--- a/drivers/scsi/qla2xxx/qla_os.c
49270+++ b/drivers/scsi/qla2xxx/qla_os.c
49271@@ -1568,8 +1568,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
49272 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
49273 /* Ok, a 64bit DMA mask is applicable. */
49274 ha->flags.enable_64bit_addressing = 1;
49275- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
49276- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
49277+ pax_open_kernel();
49278+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
49279+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
49280+ pax_close_kernel();
49281 return;
49282 }
49283 }
49284diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
49285index 084d1fd..9f939eb 100644
49286--- a/drivers/scsi/qla4xxx/ql4_def.h
49287+++ b/drivers/scsi/qla4xxx/ql4_def.h
49288@@ -296,7 +296,7 @@ struct ddb_entry {
49289 * (4000 only) */
49290 atomic_t relogin_timer; /* Max Time to wait for
49291 * relogin to complete */
49292- atomic_t relogin_retry_count; /* Num of times relogin has been
49293+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
49294 * retried */
49295 uint32_t default_time2wait; /* Default Min time between
49296 * relogins (+aens) */
49297diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
49298index cf174a4..128a420 100644
49299--- a/drivers/scsi/qla4xxx/ql4_os.c
49300+++ b/drivers/scsi/qla4xxx/ql4_os.c
49301@@ -3311,12 +3311,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
49302 */
49303 if (!iscsi_is_session_online(cls_sess)) {
49304 /* Reset retry relogin timer */
49305- atomic_inc(&ddb_entry->relogin_retry_count);
49306+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
49307 DEBUG2(ql4_printk(KERN_INFO, ha,
49308 "%s: index[%d] relogin timed out-retrying"
49309 " relogin (%d), retry (%d)\n", __func__,
49310 ddb_entry->fw_ddb_index,
49311- atomic_read(&ddb_entry->relogin_retry_count),
49312+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
49313 ddb_entry->default_time2wait + 4));
49314 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
49315 atomic_set(&ddb_entry->retry_relogin_timer,
49316@@ -5458,7 +5458,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
49317
49318 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
49319 atomic_set(&ddb_entry->relogin_timer, 0);
49320- atomic_set(&ddb_entry->relogin_retry_count, 0);
49321+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
49322 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
49323 ddb_entry->default_relogin_timeout =
49324 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
49325diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
49326index fe0bcb1..c9255be 100644
49327--- a/drivers/scsi/scsi.c
49328+++ b/drivers/scsi/scsi.c
49329@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
49330 struct Scsi_Host *host = cmd->device->host;
49331 int rtn = 0;
49332
49333- atomic_inc(&cmd->device->iorequest_cnt);
49334+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
49335
49336 /* check if the device is still usable */
49337 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
49338diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
49339index 7bd7f0d..93159d8 100644
49340--- a/drivers/scsi/scsi_lib.c
49341+++ b/drivers/scsi/scsi_lib.c
49342@@ -1474,7 +1474,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
49343 shost = sdev->host;
49344 scsi_init_cmd_errh(cmd);
49345 cmd->result = DID_NO_CONNECT << 16;
49346- atomic_inc(&cmd->device->iorequest_cnt);
49347+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
49348
49349 /*
49350 * SCSI request completion path will do scsi_device_unbusy(),
49351@@ -1500,9 +1500,9 @@ static void scsi_softirq_done(struct request *rq)
49352
49353 INIT_LIST_HEAD(&cmd->eh_entry);
49354
49355- atomic_inc(&cmd->device->iodone_cnt);
49356+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
49357 if (cmd->result)
49358- atomic_inc(&cmd->device->ioerr_cnt);
49359+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
49360
49361 disposition = scsi_decide_disposition(cmd);
49362 if (disposition != SUCCESS &&
49363@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
49364
49365 host_dev = scsi_get_device(shost);
49366 if (host_dev && host_dev->dma_mask)
49367- bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
49368+ bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
49369
49370 return bounce_limit;
49371 }
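
The scsi_calculate_bounce_limit fix above is a classic widening bug: dma_max_pfn() yields an unsigned long, so on 32-bit kernels the shift by PAGE_SHIFT overflows in 32-bit arithmetic before the result is assigned to the u64 bounce_limit. Casting to u64 first makes the shift happen in 64 bits. A sketch with a concrete PFN that needs more than 32 bits once shifted, assuming a 12-bit page shift:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        uint32_t max_pfn = 0x00500000;     /* a PFN for ~20 GiB of RAM */

        uint64_t truncated = max_pfn << PAGE_SHIFT;           /* 32-bit shift */
        uint64_t correct   = (uint64_t)max_pfn << PAGE_SHIFT; /* widen first  */

        printf("without cast: 0x%llx\n", (unsigned long long)truncated);
        printf("with cast:    0x%llx\n", (unsigned long long)correct);
        return 0;
    }

The unwidened shift wraps to zero here, so a large-memory 32-bit box would compute a bounce limit of 0 instead of 0x500000000.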
49372diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
49373index 8ff62c2..693b6f7 100644
49374--- a/drivers/scsi/scsi_sysfs.c
49375+++ b/drivers/scsi/scsi_sysfs.c
49376@@ -725,7 +725,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
49377 char *buf) \
49378 { \
49379 struct scsi_device *sdev = to_scsi_device(dev); \
49380- unsigned long long count = atomic_read(&sdev->field); \
49381+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
49382 return snprintf(buf, 20, "0x%llx\n", count); \
49383 } \
49384 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
49385diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
49386index 84a1fdf..693b0d6 100644
49387--- a/drivers/scsi/scsi_tgt_lib.c
49388+++ b/drivers/scsi/scsi_tgt_lib.c
49389@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
49390 int err;
49391
49392 dprintk("%lx %u\n", uaddr, len);
49393- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
49394+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
49395 if (err) {
49396 /*
49397 * TODO: need to fixup sg_tablesize, max_segment_size,
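
The scsi_tgt_lib change above only adds a (void __user *) cast: uaddr arrives as an unsigned long, and the cast restores the sparse address-space annotation so the static checker can verify that user pointers are never dereferenced directly and only flow into the user-copy helpers. A standalone sketch of how the annotation works, assuming a sparse-style build (in the kernel the macro comes from the compiler headers, and copy_from_user_stub is a stand-in):

    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for the real copy helper; only the annotation matters. */
    static long copy_from_user_stub(void *dst, const void __user *src, size_t n)
    {
        (void)dst; (void)src; (void)n;
        return 0;
    }

    static long read_user_buffer(void *kbuf, unsigned long uaddr, size_t n)
    {
        /* The cast records that uaddr names userspace memory; under
         * sparse, dereferencing it directly is now reported as an error. */
        return copy_from_user_stub(kbuf, (const void __user *)uaddr, n);
    }

    int main(void)
    {
        char kbuf[8];
        printf("rc = %ld\n", read_user_buffer(kbuf, 0x1000, sizeof(kbuf)));
        return 0;
    }
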
49398diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
49399index 4628fd5..a94a1c2 100644
49400--- a/drivers/scsi/scsi_transport_fc.c
49401+++ b/drivers/scsi/scsi_transport_fc.c
49402@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
49403 * Netlink Infrastructure
49404 */
49405
49406-static atomic_t fc_event_seq;
49407+static atomic_unchecked_t fc_event_seq;
49408
49409 /**
49410 * fc_get_event_number - Obtain the next sequential FC event number
49411@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
49412 u32
49413 fc_get_event_number(void)
49414 {
49415- return atomic_add_return(1, &fc_event_seq);
49416+ return atomic_add_return_unchecked(1, &fc_event_seq);
49417 }
49418 EXPORT_SYMBOL(fc_get_event_number);
49419
49420@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
49421 {
49422 int error;
49423
49424- atomic_set(&fc_event_seq, 0);
49425+ atomic_set_unchecked(&fc_event_seq, 0);
49426
49427 error = transport_class_register(&fc_host_class);
49428 if (error)
49429@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
49430 char *cp;
49431
49432 *val = simple_strtoul(buf, &cp, 0);
49433- if ((*cp && (*cp != '\n')) || (*val < 0))
49434+ if (*cp && (*cp != '\n'))
49435 return -EINVAL;
49436 /*
49437 * Check for overflow; dev_loss_tmo is u32
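
In fc_str_to_dev_loss, *val is an unsigned long filled by simple_strtoul, so the removed *val < 0 test could never be true; it was dead code that compilers flag with -Wtype-limits. The guard that actually matters is the overflow check against u32 referenced in the surrounding comment. A userspace sketch of the same parse, showing why a negative input has to be caught by a range check rather than a sign check:

    /* '*val < 0' on an unsigned long can never be true. */
    #include <stdio.h>
    #include <stdlib.h>

    static int parse_tmo(const char *buf, unsigned long *val)
    {
        char *cp;

        *val = strtoul(buf, &cp, 0);
        if (*cp && *cp != '\n')   /* the one check that can actually fail */
            return -1;
        /* was also: if (*val < 0), which is always false here; "-5"
         * parses to a huge positive value instead, and must be caught
         * by a range check against the real maximum (u32 in the kernel). */
        return 0;
    }

    int main(void)
    {
        unsigned long v;
        printf("parse \"-5\": rc=%d val=%lu\n", parse_tmo("-5", &v), v);
        return 0;
    }
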
49438diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
49439index 63a6ca4..5d5cadd 100644
49440--- a/drivers/scsi/scsi_transport_iscsi.c
49441+++ b/drivers/scsi/scsi_transport_iscsi.c
49442@@ -79,7 +79,7 @@ struct iscsi_internal {
49443 struct transport_container session_cont;
49444 };
49445
49446-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
49447+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
49448 static struct workqueue_struct *iscsi_eh_timer_workq;
49449
49450 static DEFINE_IDA(iscsi_sess_ida);
49451@@ -1737,7 +1737,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
49452 int err;
49453
49454 ihost = shost->shost_data;
49455- session->sid = atomic_add_return(1, &iscsi_session_nr);
49456+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
49457
49458 if (target_id == ISCSI_MAX_TARGET) {
49459 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
49460@@ -4103,7 +4103,7 @@ static __init int iscsi_transport_init(void)
49461 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
49462 ISCSI_TRANSPORT_VERSION);
49463
49464- atomic_set(&iscsi_session_nr, 0);
49465+ atomic_set_unchecked(&iscsi_session_nr, 0);
49466
49467 err = class_register(&iscsi_transport_class);
49468 if (err)
49469diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
49470index 2700a5a..752ec38 100644
49471--- a/drivers/scsi/scsi_transport_srp.c
49472+++ b/drivers/scsi/scsi_transport_srp.c
49473@@ -36,7 +36,7 @@
49474 #include "scsi_transport_srp_internal.h"
49475
49476 struct srp_host_attrs {
49477- atomic_t next_port_id;
49478+ atomic_unchecked_t next_port_id;
49479 };
49480 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
49481
49482@@ -94,7 +94,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
49483 struct Scsi_Host *shost = dev_to_shost(dev);
49484 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
49485
49486- atomic_set(&srp_host->next_port_id, 0);
49487+ atomic_set_unchecked(&srp_host->next_port_id, 0);
49488 return 0;
49489 }
49490
49491@@ -730,7 +730,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
49492 rport_fast_io_fail_timedout);
49493 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
49494
49495- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
49496+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
49497 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
49498
49499 transport_setup_device(&rport->dev);
49500diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
49501index 69725f7..03aaee1 100644
49502--- a/drivers/scsi/sd.c
49503+++ b/drivers/scsi/sd.c
49504@@ -2964,7 +2964,7 @@ static int sd_probe(struct device *dev)
49505 sdkp->disk = gd;
49506 sdkp->index = index;
49507 atomic_set(&sdkp->openers, 0);
49508- atomic_set(&sdkp->device->ioerr_cnt, 0);
49509+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
49510
49511 if (!sdp->request_queue->rq_timeout) {
49512 if (sdp->type != TYPE_MOD)
49513diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
49514index df5e961..df6b97f 100644
49515--- a/drivers/scsi/sg.c
49516+++ b/drivers/scsi/sg.c
49517@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
49518 sdp->disk->disk_name,
49519 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
49520 NULL,
49521- (char *)arg);
49522+ (char __user *)arg);
49523 case BLKTRACESTART:
49524 return blk_trace_startstop(sdp->device->request_queue, 1);
49525 case BLKTRACESTOP:
49526diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
49527index 349ebba..ff2a249 100644
49528--- a/drivers/spi/spi.c
49529+++ b/drivers/spi/spi.c
49530@@ -1945,7 +1945,7 @@ int spi_bus_unlock(struct spi_master *master)
49531 EXPORT_SYMBOL_GPL(spi_bus_unlock);
49532
49533 /* portable code must never pass more than 32 bytes */
49534-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
49535+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
49536
49537 static u8 *buf;
49538
49539diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
49540index 2c61783..4d49e4e 100644
49541--- a/drivers/staging/android/timed_output.c
49542+++ b/drivers/staging/android/timed_output.c
49543@@ -25,7 +25,7 @@
49544 #include "timed_output.h"
49545
49546 static struct class *timed_output_class;
49547-static atomic_t device_count;
49548+static atomic_unchecked_t device_count;
49549
49550 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
49551 char *buf)
49552@@ -63,7 +63,7 @@ static int create_timed_output_class(void)
49553 timed_output_class = class_create(THIS_MODULE, "timed_output");
49554 if (IS_ERR(timed_output_class))
49555 return PTR_ERR(timed_output_class);
49556- atomic_set(&device_count, 0);
49557+ atomic_set_unchecked(&device_count, 0);
49558 timed_output_class->dev_groups = timed_output_groups;
49559 }
49560
49561@@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
49562 if (ret < 0)
49563 return ret;
49564
49565- tdev->index = atomic_inc_return(&device_count);
49566+ tdev->index = atomic_inc_return_unchecked(&device_count);
49567 tdev->dev = device_create(timed_output_class, NULL,
49568 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
49569 if (IS_ERR(tdev->dev))
49570diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
49571index c0f7cd7..5424212 100644
49572--- a/drivers/staging/gdm724x/gdm_tty.c
49573+++ b/drivers/staging/gdm724x/gdm_tty.c
49574@@ -45,7 +45,7 @@
49575 #define gdm_tty_send_control(n, r, v, d, l) (\
49576 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
49577
49578-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
49579+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
49580
49581 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
49582 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
49583diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
49584index 96e4eee..6d7c37e 100644
49585--- a/drivers/staging/imx-drm/imx-drm-core.c
49586+++ b/drivers/staging/imx-drm/imx-drm-core.c
49587@@ -510,7 +510,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
49588 goto err_busy;
49589 }
49590
49591- if (imxdrm->drm->open_count) {
49592+ if (local_read(&imxdrm->drm->open_count)) {
49593 ret = -EBUSY;
49594 goto err_busy;
49595 }
49596@@ -590,7 +590,7 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
49597
49598 mutex_lock(&imxdrm->mutex);
49599
49600- if (imxdrm->drm->open_count) {
49601+ if (local_read(&imxdrm->drm->open_count)) {
49602 ret = -EBUSY;
49603 goto err_busy;
49604 }
49605@@ -729,7 +729,7 @@ int imx_drm_add_connector(struct drm_connector *connector,
49606
49607 mutex_lock(&imxdrm->mutex);
49608
49609- if (imxdrm->drm->open_count) {
49610+ if (local_read(&imxdrm->drm->open_count)) {
49611 ret = -EBUSY;
49612 goto err_busy;
49613 }
49614diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
49615index b7613c8..c302392 100644
49616--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
49617+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
49618@@ -487,13 +487,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
49619 return 0;
49620 }
49621
49622-sfw_test_client_ops_t brw_test_client;
49623-void brw_init_test_client(void)
49624-{
49625- brw_test_client.tso_init = brw_client_init;
49626- brw_test_client.tso_fini = brw_client_fini;
49627- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
49628- brw_test_client.tso_done_rpc = brw_client_done_rpc;
49629+sfw_test_client_ops_t brw_test_client = {
49630+ .tso_init = brw_client_init,
49631+ .tso_fini = brw_client_fini,
49632+ .tso_prep_rpc = brw_client_prep_rpc,
49633+ .tso_done_rpc = brw_client_done_rpc,
49634 };
49635
49636 srpc_service_t brw_test_service;
49637diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
49638index 483c785..e1a2a7b 100644
49639--- a/drivers/staging/lustre/lnet/selftest/framework.c
49640+++ b/drivers/staging/lustre/lnet/selftest/framework.c
49641@@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
49642
49643 extern sfw_test_client_ops_t ping_test_client;
49644 extern srpc_service_t ping_test_service;
49645-extern void ping_init_test_client(void);
49646 extern void ping_init_test_service(void);
49647
49648 extern sfw_test_client_ops_t brw_test_client;
49649 extern srpc_service_t brw_test_service;
49650-extern void brw_init_test_client(void);
49651 extern void brw_init_test_service(void);
49652
49653
49654@@ -1684,12 +1682,10 @@ sfw_startup (void)
49655 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
49656 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
49657
49658- brw_init_test_client();
49659 brw_init_test_service();
49660 rc = sfw_register_test(&brw_test_service, &brw_test_client);
49661 LASSERT (rc == 0);
49662
49663- ping_init_test_client();
49664 ping_init_test_service();
49665 rc = sfw_register_test(&ping_test_service, &ping_test_client);
49666 LASSERT (rc == 0);
49667diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
49668index f0f9194..b589047 100644
49669--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
49670+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
49671@@ -210,14 +210,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
49672 return 0;
49673 }
49674
49675-sfw_test_client_ops_t ping_test_client;
49676-void ping_init_test_client(void)
49677-{
49678- ping_test_client.tso_init = ping_client_init;
49679- ping_test_client.tso_fini = ping_client_fini;
49680- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
49681- ping_test_client.tso_done_rpc = ping_client_done_rpc;
49682-}
49683+sfw_test_client_ops_t ping_test_client = {
49684+ .tso_init = ping_client_init,
49685+ .tso_fini = ping_client_fini,
49686+ .tso_prep_rpc = ping_client_prep_rpc,
49687+ .tso_done_rpc = ping_client_done_rpc,
49688+};
49689
49690 srpc_service_t ping_test_service;
49691 void ping_init_test_service(void)
49692diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
49693index bc2b82f..67fd598 100644
49694--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
49695+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
49696@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
49697 ldlm_completion_callback lcs_completion;
49698 ldlm_blocking_callback lcs_blocking;
49699 ldlm_glimpse_callback lcs_glimpse;
49700-};
49701+} __no_const;
49702
49703 /* ldlm_lockd.c */
49704 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
49705diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
49706index d0aea15..7af68e1 100644
49707--- a/drivers/staging/lustre/lustre/include/obd.h
49708+++ b/drivers/staging/lustre/lustre/include/obd.h
49709@@ -1417,7 +1417,7 @@ struct md_ops {
49710 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
49711 * wrapper function in include/linux/obd_class.h.
49712 */
49713-};
49714+} __no_const;
49715
49716 struct lsm_operations {
49717 void (*lsm_free)(struct lov_stripe_md *);
49718diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49719index 39fcdac..222780f 100644
49720--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49721+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
49722@@ -249,7 +249,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
49723 int added = (mode == LCK_NL);
49724 int overlaps = 0;
49725 int splitted = 0;
49726- const struct ldlm_callback_suite null_cbs = { NULL };
49727+ const struct ldlm_callback_suite null_cbs = { };
49728 int rc;
49729
49730 CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
49731diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49732index fc6c977..df1f956 100644
49733--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49734+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
49735@@ -219,7 +219,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
49736 int LL_PROC_PROTO(proc_console_max_delay_cs)
49737 {
49738 int rc, max_delay_cs;
49739- ctl_table_t dummy = *table;
49740+ ctl_table_no_const dummy = *table;
49741 cfs_duration_t d;
49742
49743 dummy.data = &max_delay_cs;
49744@@ -250,7 +250,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
49745 int LL_PROC_PROTO(proc_console_min_delay_cs)
49746 {
49747 int rc, min_delay_cs;
49748- ctl_table_t dummy = *table;
49749+ ctl_table_no_const dummy = *table;
49750 cfs_duration_t d;
49751
49752 dummy.data = &min_delay_cs;
49753@@ -281,7 +281,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
49754 int LL_PROC_PROTO(proc_console_backoff)
49755 {
49756 int rc, backoff;
49757- ctl_table_t dummy = *table;
49758+ ctl_table_no_const dummy = *table;
49759
49760 dummy.data = &backoff;
49761 dummy.proc_handler = &proc_dointvec;
49762diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
49763index f3108c7..cd4f9da 100644
49764--- a/drivers/staging/lustre/lustre/libcfs/module.c
49765+++ b/drivers/staging/lustre/lustre/libcfs/module.c
49766@@ -348,11 +348,11 @@ out:
49767
49768
49769 struct cfs_psdev_ops libcfs_psdev_ops = {
49770- libcfs_psdev_open,
49771- libcfs_psdev_release,
49772- NULL,
49773- NULL,
49774- libcfs_ioctl
49775+ .p_open = libcfs_psdev_open,
49776+ .p_close = libcfs_psdev_release,
49777+ .p_read = NULL,
49778+ .p_write = NULL,
49779+ .p_ioctl = libcfs_ioctl
49780 };
49781
49782 extern int insert_proc(void);
49783diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
49784index a4e0472..05d854c 100644
49785--- a/drivers/staging/lustre/lustre/llite/dir.c
49786+++ b/drivers/staging/lustre/lustre/llite/dir.c
49787@@ -660,7 +660,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
49788 int mode;
49789 int err;
49790
49791- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
49792+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
49793 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
49794 strlen(filename), mode, LUSTRE_OPC_MKDIR,
49795 lump);
49796diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
49797index 3675020..e80d92c 100644
49798--- a/drivers/staging/media/solo6x10/solo6x10-core.c
49799+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
49800@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
49801
49802 static int solo_sysfs_init(struct solo_dev *solo_dev)
49803 {
49804- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
49805+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
49806 struct device *dev = &solo_dev->dev;
49807 const char *driver;
49808 int i;
49809diff --git a/drivers/staging/media/solo6x10/solo6x10-g723.c b/drivers/staging/media/solo6x10/solo6x10-g723.c
49810index 1db18c7..35e6afc 100644
49811--- a/drivers/staging/media/solo6x10/solo6x10-g723.c
49812+++ b/drivers/staging/media/solo6x10/solo6x10-g723.c
49813@@ -355,7 +355,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
49814
49815 int solo_g723_init(struct solo_dev *solo_dev)
49816 {
49817- static struct snd_device_ops ops = { NULL };
49818+ static struct snd_device_ops ops = { };
49819 struct snd_card *card;
49820 struct snd_kcontrol_new kctl;
49821 char name[32];
49822diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
49823index 7f2f247..d999137 100644
49824--- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
49825+++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
49826@@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
49827
49828 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
49829 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
49830- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
49831+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
49832 if (p2m_id < 0)
49833 p2m_id = -p2m_id;
49834 }
49835diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
49836index f1bbb8c..a73eaba 100644
49837--- a/drivers/staging/media/solo6x10/solo6x10.h
49838+++ b/drivers/staging/media/solo6x10/solo6x10.h
49839@@ -237,7 +237,7 @@ struct solo_dev {
49840
49841 /* P2M DMA Engine */
49842 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
49843- atomic_t p2m_count;
49844+ atomic_unchecked_t p2m_count;
49845 int p2m_jiffies;
49846 unsigned int p2m_timeouts;
49847
49848diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
49849index 0315f60..ce93f406 100644
49850--- a/drivers/staging/octeon/ethernet-rx.c
49851+++ b/drivers/staging/octeon/ethernet-rx.c
49852@@ -418,11 +418,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
49853 /* Increment RX stats for virtual ports */
49854 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
49855 #ifdef CONFIG_64BIT
49856- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
49857- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
49858+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
49859+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
49860 #else
49861- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
49862- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
49863+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
49864+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
49865 #endif
49866 }
49867 netif_receive_skb(skb);
49868@@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
49869 dev->name);
49870 */
49871 #ifdef CONFIG_64BIT
49872- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
49873+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
49874 #else
49875- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
49876+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
49877 #endif
49878 dev_kfree_skb_irq(skb);
49879 }
49880diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
49881index bd6ca71..8f0961e 100644
49882--- a/drivers/staging/octeon/ethernet.c
49883+++ b/drivers/staging/octeon/ethernet.c
49884@@ -254,11 +254,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
49885 * since the RX tasklet also increments it.
49886 */
49887 #ifdef CONFIG_64BIT
49888- atomic64_add(rx_status.dropped_packets,
49889- (atomic64_t *)&priv->stats.rx_dropped);
49890+ atomic64_add_unchecked(rx_status.dropped_packets,
49891+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
49892 #else
49893- atomic_add(rx_status.dropped_packets,
49894- (atomic_t *)&priv->stats.rx_dropped);
49895+ atomic_add_unchecked(rx_status.dropped_packets,
49896+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
49897 #endif
49898 }
49899
49900diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
49901index 439c3c9..2d74293 100644
49902--- a/drivers/staging/rtl8188eu/include/hal_intf.h
49903+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
49904@@ -271,7 +271,7 @@ struct hal_ops {
49905 s32 (*c2h_handler)(struct adapter *padapter,
49906 struct c2h_evt_hdr *c2h_evt);
49907 c2h_id_filter c2h_id_filter_ccx;
49908-};
49909+} __no_const;
49910
49911 enum rt_eeprom_type {
49912 EEPROM_93C46,
49913diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
49914index eb6f0e5..e6a0958 100644
49915--- a/drivers/staging/rtl8188eu/include/rtw_io.h
49916+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
49917@@ -126,7 +126,7 @@ struct _io_ops {
49918 u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
49919 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
49920 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
49921-};
49922+} __no_const;
49923
49924 struct io_req {
49925 struct list_head list;
49926diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
49927index dc23395..cf7e9b1 100644
49928--- a/drivers/staging/rtl8712/rtl871x_io.h
49929+++ b/drivers/staging/rtl8712/rtl871x_io.h
49930@@ -108,7 +108,7 @@ struct _io_ops {
49931 u8 *pmem);
49932 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
49933 u8 *pmem);
49934-};
49935+} __no_const;
49936
49937 struct io_req {
49938 struct list_head list;
49939diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
49940index 1f5088b..0e59820 100644
49941--- a/drivers/staging/sbe-2t3e3/netdev.c
49942+++ b/drivers/staging/sbe-2t3e3/netdev.c
49943@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49944 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
49945
49946 if (rlen)
49947- if (copy_to_user(data, &resp, rlen))
49948+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
49949 return -EFAULT;
49950
49951 return 0;
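The one-line t3e3_ioctl fix above clamps a device-reported response length before copying it out, closing a kernel stack infoleak: without the check, an rlen larger than the on-stack response would copy adjacent stack memory to userspace. A userspace model of the same check, with memcpy standing in for copy_to_user and illustrative names:

    #include <stdio.h>
    #include <string.h>

    struct resp { char data[64]; };

    static int copy_response(char *user_buf, const struct resp *resp, size_t rlen)
    {
        /* The added bound: never copy more than the buffer holds. */
        if (rlen > sizeof(*resp))
            return -1;                  /* -EFAULT in the kernel */
        memcpy(user_buf, resp, rlen);   /* stands in for copy_to_user() */
        return 0;
    }

    int main(void)
    {
        struct resp r = { "ok" };
        char out[64];

        printf("rlen=16   -> %d\n", copy_response(out, &r, 16));   /* accepted */
        printf("rlen=4096 -> %d\n", copy_response(out, &r, 4096)); /* rejected */
        return 0;
    }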
49952diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
49953index a863a98..d272795 100644
49954--- a/drivers/staging/usbip/vhci.h
49955+++ b/drivers/staging/usbip/vhci.h
49956@@ -83,7 +83,7 @@ struct vhci_hcd {
49957 unsigned resuming:1;
49958 unsigned long re_timeout;
49959
49960- atomic_t seqnum;
49961+ atomic_unchecked_t seqnum;
49962
49963 /*
49964 * NOTE:
49965diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
49966index e810ad5..931336f 100644
49967--- a/drivers/staging/usbip/vhci_hcd.c
49968+++ b/drivers/staging/usbip/vhci_hcd.c
49969@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
49970
49971 spin_lock(&vdev->priv_lock);
49972
49973- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
49974+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
49975 if (priv->seqnum == 0xffff)
49976 dev_info(&urb->dev->dev, "seqnum max\n");
49977
49978@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
49979 return -ENOMEM;
49980 }
49981
49982- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
49983+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
49984 if (unlink->seqnum == 0xffff)
49985 pr_info("seqnum max\n");
49986
49987@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
49988 vdev->rhport = rhport;
49989 }
49990
49991- atomic_set(&vhci->seqnum, 0);
49992+ atomic_set_unchecked(&vhci->seqnum, 0);
49993 spin_lock_init(&vhci->lock);
49994
49995 hcd->power_budget = 0; /* no limit */
49996diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
49997index d07fcb5..358e1e1 100644
49998--- a/drivers/staging/usbip/vhci_rx.c
49999+++ b/drivers/staging/usbip/vhci_rx.c
50000@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
50001 if (!urb) {
50002 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
50003 pr_info("max seqnum %d\n",
50004- atomic_read(&the_controller->seqnum));
50005+ atomic_read_unchecked(&the_controller->seqnum));
50006 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
50007 return;
50008 }
50009diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
50010index ab8b2ba..99184aa 100644
50011--- a/drivers/staging/vt6655/hostap.c
50012+++ b/drivers/staging/vt6655/hostap.c
50013@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
50014 *
50015 */
50016
50017+static net_device_ops_no_const apdev_netdev_ops;
50018+
50019 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
50020 {
50021 PSDevice apdev_priv;
50022 struct net_device *dev = pDevice->dev;
50023 int ret;
50024- const struct net_device_ops apdev_netdev_ops = {
50025- .ndo_start_xmit = pDevice->tx_80211,
50026- };
50027
50028 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
50029
50030@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
50031 *apdev_priv = *pDevice;
50032 eth_hw_addr_inherit(pDevice->apdev, dev);
50033
50034+ /* only half broken now */
50035+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
50036 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
50037
50038 pDevice->apdev->type = ARPHRD_IEEE80211;
50039diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
50040index 67ba48b..24e602f 100644
50041--- a/drivers/staging/vt6656/hostap.c
50042+++ b/drivers/staging/vt6656/hostap.c
50043@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
50044 *
50045 */
50046
50047+static net_device_ops_no_const apdev_netdev_ops;
50048+
50049 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
50050 {
50051 struct vnt_private *apdev_priv;
50052 struct net_device *dev = pDevice->dev;
50053 int ret;
50054- const struct net_device_ops apdev_netdev_ops = {
50055- .ndo_start_xmit = pDevice->tx_80211,
50056- };
50057
50058 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
50059
50060@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
50061 *apdev_priv = *pDevice;
50062 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
50063
50064+ /* only half broken now */
50065+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
50066 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
50067
50068 pDevice->apdev->type = ARPHRD_IEEE80211;
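The two hostap hunks above replace an on-stack const ops struct with a single writable static (net_device_ops_no_const) filled in just before use, since ndo_start_xmit is only known per device at runtime; the patch's own "/* only half broken now */" concedes the remaining flaw that every AP device shares the last-assigned handler. A sketch of the late-binding shape, with simplified types:

    #include <stdio.h>

    typedef int (*xmit_fn)(const char *pkt);

    struct net_device_ops { xmit_fn ndo_start_xmit; };
    typedef struct net_device_ops net_device_ops_no_const; /* writable variant */

    static net_device_ops_no_const apdev_netdev_ops;        /* was a const local */

    static int tx_80211(const char *pkt) { printf("tx: %s\n", pkt); return 0; }

    static void hostap_enable(xmit_fn dev_xmit)
    {
        /* Late binding replaces the const designated initializer. */
        apdev_netdev_ops.ndo_start_xmit = dev_xmit;
    }

    int main(void)
    {
        hostap_enable(tx_80211);
        return apdev_netdev_ops.ndo_start_xmit("beacon");
    }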
50069diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
50070index 24884ca..26c8220 100644
50071--- a/drivers/target/sbp/sbp_target.c
50072+++ b/drivers/target/sbp/sbp_target.c
50073@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
50074
50075 #define SESSION_MAINTENANCE_INTERVAL HZ
50076
50077-static atomic_t login_id = ATOMIC_INIT(0);
50078+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
50079
50080 static void session_maintenance_work(struct work_struct *);
50081 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
50082@@ -444,7 +444,7 @@ static void sbp_management_request_login(
50083 login->lun = se_lun;
50084 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
50085 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
50086- login->login_id = atomic_inc_return(&login_id);
50087+ login->login_id = atomic_inc_return_unchecked(&login_id);
50088
50089 login->tgt_agt = sbp_target_agent_register(login);
50090 if (IS_ERR(login->tgt_agt)) {
50091diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
50092index d06de84..fd38c9b 100644
50093--- a/drivers/target/target_core_device.c
50094+++ b/drivers/target/target_core_device.c
50095@@ -1435,7 +1435,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
50096 spin_lock_init(&dev->se_tmr_lock);
50097 spin_lock_init(&dev->qf_cmd_lock);
50098 sema_init(&dev->caw_sem, 1);
50099- atomic_set(&dev->dev_ordered_id, 0);
50100+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
50101 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
50102 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
50103 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
50104diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
50105index dee2be1..f5fd8ca 100644
50106--- a/drivers/target/target_core_transport.c
50107+++ b/drivers/target/target_core_transport.c
50108@@ -1113,7 +1113,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
50109 * Used to determine when ORDERED commands should go from
50110 * Dormant to Active status.
50111 */
50112- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
50113+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
50114 smp_mb__after_atomic_inc();
50115 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
50116 cmd->se_ordered_id, cmd->sam_task_attr,
50117diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
50118index 33f83fe..d80f8e1 100644
50119--- a/drivers/tty/cyclades.c
50120+++ b/drivers/tty/cyclades.c
50121@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
50122 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
50123 info->port.count);
50124 #endif
50125- info->port.count++;
50126+ atomic_inc(&info->port.count);
50127 #ifdef CY_DEBUG_COUNT
50128 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
50129- current->pid, info->port.count);
50130+ current->pid, atomic_read(&info->port.count));
50131 #endif
50132
50133 /*
50134@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
50135 for (j = 0; j < cy_card[i].nports; j++) {
50136 info = &cy_card[i].ports[j];
50137
50138- if (info->port.count) {
50139+ if (atomic_read(&info->port.count)) {
50140 /* XXX is the ldisc num worth this? */
50141 struct tty_struct *tty;
50142 struct tty_ldisc *ld;
50143diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
50144index 9eba119..5070303 100644
50145--- a/drivers/tty/hvc/hvc_console.c
50146+++ b/drivers/tty/hvc/hvc_console.c
50147@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
50148
50149 spin_lock_irqsave(&hp->port.lock, flags);
50150 /* Check and then increment for fast path open. */
50151- if (hp->port.count++ > 0) {
50152+ if (atomic_inc_return(&hp->port.count) > 1) {
50153 spin_unlock_irqrestore(&hp->port.lock, flags);
50154 hvc_kick();
50155 return 0;
50156@@ -393,7 +393,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
50157
50158 spin_lock_irqsave(&hp->port.lock, flags);
50159
50160- if (--hp->port.count == 0) {
50161+ if (atomic_dec_return(&hp->port.count) == 0) {
50162 spin_unlock_irqrestore(&hp->port.lock, flags);
50163 /* We are done with the tty pointer now. */
50164 tty_port_tty_set(&hp->port, NULL);
50165@@ -415,9 +415,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
50166 */
50167 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
50168 } else {
50169- if (hp->port.count < 0)
50170+ if (atomic_read(&hp->port.count) < 0)
50171 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
50172- hp->vtermno, hp->port.count);
50173+ hp->vtermno, atomic_read(&hp->port.count));
50174 spin_unlock_irqrestore(&hp->port.lock, flags);
50175 }
50176 }
50177@@ -447,12 +447,12 @@ static void hvc_hangup(struct tty_struct *tty)
50178 * open->hangup case this can be called after the final close so prevent
50179 * that from happening for now.
50180 */
50181- if (hp->port.count <= 0) {
50182+ if (atomic_read(&hp->port.count) <= 0) {
50183 spin_unlock_irqrestore(&hp->port.lock, flags);
50184 return;
50185 }
50186
50187- hp->port.count = 0;
50188+ atomic_set(&hp->port.count, 0);
50189 spin_unlock_irqrestore(&hp->port.lock, flags);
50190 tty_port_tty_set(&hp->port, NULL);
50191
50192@@ -500,7 +500,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
50193 return -EPIPE;
50194
50195 /* FIXME what's this (unprotected) check for? */
50196- if (hp->port.count <= 0)
50197+ if (atomic_read(&hp->port.count) <= 0)
50198 return -EIO;
50199
50200 spin_lock_irqsave(&hp->lock, flags);
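The hvc_console hunk above is the first of many tty_port.count conversions in this patch: the open/close reference count becomes an atomic so "first open" and "last close" are decided by a single atomic read-modify-write instead of a bare ++/--. A userspace model using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int port_count;

    static void port_open(void)
    {
        /* Was: if (port.count++ > 0) ...; now one atomic increment. */
        if (atomic_fetch_add(&port_count, 1) + 1 > 1) {
            puts("fast path: port already open");
            return;
        }
        puts("first open: init hardware");
    }

    static void port_close(void)
    {
        /* Was: if (--port.count == 0) ... */
        if (atomic_fetch_sub(&port_count, 1) - 1 == 0)
            puts("last close: tear down");
    }

    int main(void)
    {
        port_open();   /* first open */
        port_open();   /* fast path  */
        port_close();
        port_close();  /* tear down  */
        return 0;
    }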
50201diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
50202index 81e939e..95ead10 100644
50203--- a/drivers/tty/hvc/hvcs.c
50204+++ b/drivers/tty/hvc/hvcs.c
50205@@ -83,6 +83,7 @@
50206 #include <asm/hvcserver.h>
50207 #include <asm/uaccess.h>
50208 #include <asm/vio.h>
50209+#include <asm/local.h>
50210
50211 /*
50212 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
50213@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
50214
50215 spin_lock_irqsave(&hvcsd->lock, flags);
50216
50217- if (hvcsd->port.count > 0) {
50218+ if (atomic_read(&hvcsd->port.count) > 0) {
50219 spin_unlock_irqrestore(&hvcsd->lock, flags);
50220 printk(KERN_INFO "HVCS: vterm state unchanged. "
50221 "The hvcs device node is still in use.\n");
50222@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
50223 }
50224 }
50225
50226- hvcsd->port.count = 0;
50227+ atomic_set(&hvcsd->port.count, 0);
50228 hvcsd->port.tty = tty;
50229 tty->driver_data = hvcsd;
50230
50231@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
50232 unsigned long flags;
50233
50234 spin_lock_irqsave(&hvcsd->lock, flags);
50235- hvcsd->port.count++;
50236+ atomic_inc(&hvcsd->port.count);
50237 hvcsd->todo_mask |= HVCS_SCHED_READ;
50238 spin_unlock_irqrestore(&hvcsd->lock, flags);
50239
50240@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
50241 hvcsd = tty->driver_data;
50242
50243 spin_lock_irqsave(&hvcsd->lock, flags);
50244- if (--hvcsd->port.count == 0) {
50245+ if (atomic_dec_and_test(&hvcsd->port.count)) {
50246
50247 vio_disable_interrupts(hvcsd->vdev);
50248
50249@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
50250
50251 free_irq(irq, hvcsd);
50252 return;
50253- } else if (hvcsd->port.count < 0) {
50254+ } else if (atomic_read(&hvcsd->port.count) < 0) {
50255 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
50256 " is missmanaged.\n",
50257- hvcsd->vdev->unit_address, hvcsd->port.count);
50258+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
50259 }
50260
50261 spin_unlock_irqrestore(&hvcsd->lock, flags);
50262@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
50263
50264 spin_lock_irqsave(&hvcsd->lock, flags);
50265 /* Preserve this so that we know how many kref refs to put */
50266- temp_open_count = hvcsd->port.count;
50267+ temp_open_count = atomic_read(&hvcsd->port.count);
50268
50269 /*
50270 * Don't kref put inside the spinlock because the destruction
50271@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
50272 tty->driver_data = NULL;
50273 hvcsd->port.tty = NULL;
50274
50275- hvcsd->port.count = 0;
50276+ atomic_set(&hvcsd->port.count, 0);
50277
50278 /* This will drop any buffered data on the floor which is OK in a hangup
50279 * scenario. */
50280@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
50281 * the middle of a write operation? This is a crummy place to do this
50282 * but we want to keep it all in the spinlock.
50283 */
50284- if (hvcsd->port.count <= 0) {
50285+ if (atomic_read(&hvcsd->port.count) <= 0) {
50286 spin_unlock_irqrestore(&hvcsd->lock, flags);
50287 return -ENODEV;
50288 }
50289@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
50290 {
50291 struct hvcs_struct *hvcsd = tty->driver_data;
50292
50293- if (!hvcsd || hvcsd->port.count <= 0)
50294+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
50295 return 0;
50296
50297 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
50298diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
50299index 4190199..06d5bfa 100644
50300--- a/drivers/tty/hvc/hvsi.c
50301+++ b/drivers/tty/hvc/hvsi.c
50302@@ -85,7 +85,7 @@ struct hvsi_struct {
50303 int n_outbuf;
50304 uint32_t vtermno;
50305 uint32_t virq;
50306- atomic_t seqno; /* HVSI packet sequence number */
50307+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
50308 uint16_t mctrl;
50309 uint8_t state; /* HVSI protocol state */
50310 uint8_t flags;
50311@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
50312
50313 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
50314 packet.hdr.len = sizeof(struct hvsi_query_response);
50315- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50316+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50317 packet.verb = VSV_SEND_VERSION_NUMBER;
50318 packet.u.version = HVSI_VERSION;
50319 packet.query_seqno = query_seqno+1;
50320@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
50321
50322 packet.hdr.type = VS_QUERY_PACKET_HEADER;
50323 packet.hdr.len = sizeof(struct hvsi_query);
50324- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50325+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50326 packet.verb = verb;
50327
50328 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
50329@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
50330 int wrote;
50331
50332 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
50333- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50334+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50335 packet.hdr.len = sizeof(struct hvsi_control);
50336 packet.verb = VSV_SET_MODEM_CTL;
50337 packet.mask = HVSI_TSDTR;
50338@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
50339 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
50340
50341 packet.hdr.type = VS_DATA_PACKET_HEADER;
50342- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50343+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50344 packet.hdr.len = count + sizeof(struct hvsi_header);
50345 memcpy(&packet.data, buf, count);
50346
50347@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
50348 struct hvsi_control packet __ALIGNED__;
50349
50350 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
50351- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
50352+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
50353 packet.hdr.len = 6;
50354 packet.verb = VSV_CLOSE_PROTOCOL;
50355
50356@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
50357
50358 tty_port_tty_set(&hp->port, tty);
50359 spin_lock_irqsave(&hp->lock, flags);
50360- hp->port.count++;
50361+ atomic_inc(&hp->port.count);
50362-	atomic_set(&hp->seqno, 0);
50362+	atomic_set_unchecked(&hp->seqno, 0);
50363 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
50364 spin_unlock_irqrestore(&hp->lock, flags);
50365@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
50366
50367 spin_lock_irqsave(&hp->lock, flags);
50368
50369- if (--hp->port.count == 0) {
50370+ if (atomic_dec_return(&hp->port.count) == 0) {
50371 tty_port_tty_set(&hp->port, NULL);
50372 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
50373
50374@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
50375
50376 spin_lock_irqsave(&hp->lock, flags);
50377 }
50378- } else if (hp->port.count < 0)
50379+ } else if (atomic_read(&hp->port.count) < 0)
50380 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
50381- hp - hvsi_ports, hp->port.count);
50382+ hp - hvsi_ports, atomic_read(&hp->port.count));
50383
50384 spin_unlock_irqrestore(&hp->lock, flags);
50385 }
50386@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
50387 tty_port_tty_set(&hp->port, NULL);
50388
50389 spin_lock_irqsave(&hp->lock, flags);
50390- hp->port.count = 0;
50391+ atomic_set(&hp->port.count, 0);
50392 hp->n_outbuf = 0;
50393 spin_unlock_irqrestore(&hp->lock, flags);
50394 }
50395diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
50396index 347050e..14f8fbf 100644
50397--- a/drivers/tty/hvc/hvsi_lib.c
50398+++ b/drivers/tty/hvc/hvsi_lib.c
50399@@ -9,7 +9,7 @@
50400
50401 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
50402 {
50403- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
50404+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
50405
50406 /* Assumes that always succeeds, works in practice */
50407 return pv->put_chars(pv->termno, (char *)packet, packet->len);
50408@@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
50409
50410 /* Reset state */
50411 pv->established = 0;
50412- atomic_set(&pv->seqno, 0);
50413+ atomic_set_unchecked(&pv->seqno, 0);
50414
50415 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
50416
50417diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
50418index 8fd72ff..34a0bed 100644
50419--- a/drivers/tty/ipwireless/tty.c
50420+++ b/drivers/tty/ipwireless/tty.c
50421@@ -29,6 +29,7 @@
50422 #include <linux/tty_driver.h>
50423 #include <linux/tty_flip.h>
50424 #include <linux/uaccess.h>
50425+#include <asm/local.h>
50426
50427 #include "tty.h"
50428 #include "network.h"
50429@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
50430 mutex_unlock(&tty->ipw_tty_mutex);
50431 return -ENODEV;
50432 }
50433- if (tty->port.count == 0)
50434+ if (atomic_read(&tty->port.count) == 0)
50435 tty->tx_bytes_queued = 0;
50436
50437- tty->port.count++;
50438+ atomic_inc(&tty->port.count);
50439
50440 tty->port.tty = linux_tty;
50441 linux_tty->driver_data = tty;
50442@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
50443
50444 static void do_ipw_close(struct ipw_tty *tty)
50445 {
50446- tty->port.count--;
50447-
50448- if (tty->port.count == 0) {
50449+ if (atomic_dec_return(&tty->port.count) == 0) {
50450 struct tty_struct *linux_tty = tty->port.tty;
50451
50452 if (linux_tty != NULL) {
50453@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
50454 return;
50455
50456 mutex_lock(&tty->ipw_tty_mutex);
50457- if (tty->port.count == 0) {
50458+ if (atomic_read(&tty->port.count) == 0) {
50459 mutex_unlock(&tty->ipw_tty_mutex);
50460 return;
50461 }
50462@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
50463
50464 mutex_lock(&tty->ipw_tty_mutex);
50465
50466- if (!tty->port.count) {
50467+ if (!atomic_read(&tty->port.count)) {
50468 mutex_unlock(&tty->ipw_tty_mutex);
50469 return;
50470 }
50471@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
50472 return -ENODEV;
50473
50474 mutex_lock(&tty->ipw_tty_mutex);
50475- if (!tty->port.count) {
50476+ if (!atomic_read(&tty->port.count)) {
50477 mutex_unlock(&tty->ipw_tty_mutex);
50478 return -EINVAL;
50479 }
50480@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
50481 if (!tty)
50482 return -ENODEV;
50483
50484- if (!tty->port.count)
50485+ if (!atomic_read(&tty->port.count))
50486 return -EINVAL;
50487
50488 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
50489@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
50490 if (!tty)
50491 return 0;
50492
50493- if (!tty->port.count)
50494+ if (!atomic_read(&tty->port.count))
50495 return 0;
50496
50497 return tty->tx_bytes_queued;
50498@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
50499 if (!tty)
50500 return -ENODEV;
50501
50502- if (!tty->port.count)
50503+ if (!atomic_read(&tty->port.count))
50504 return -EINVAL;
50505
50506 return get_control_lines(tty);
50507@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
50508 if (!tty)
50509 return -ENODEV;
50510
50511- if (!tty->port.count)
50512+ if (!atomic_read(&tty->port.count))
50513 return -EINVAL;
50514
50515 return set_control_lines(tty, set, clear);
50516@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
50517 if (!tty)
50518 return -ENODEV;
50519
50520- if (!tty->port.count)
50521+ if (!atomic_read(&tty->port.count))
50522 return -EINVAL;
50523
50524 /* FIXME: Exactly how is the tty object locked here .. */
50525@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
50526 * are gone */
50527 mutex_lock(&ttyj->ipw_tty_mutex);
50528 }
50529- while (ttyj->port.count)
50530+ while (atomic_read(&ttyj->port.count))
50531 do_ipw_close(ttyj);
50532 ipwireless_disassociate_network_ttys(network,
50533 ttyj->channel_idx);
50534diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
50535index 1deaca4..c8582d4 100644
50536--- a/drivers/tty/moxa.c
50537+++ b/drivers/tty/moxa.c
50538@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
50539 }
50540
50541 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
50542- ch->port.count++;
50543+ atomic_inc(&ch->port.count);
50544 tty->driver_data = ch;
50545 tty_port_tty_set(&ch->port, tty);
50546 mutex_lock(&ch->port.mutex);
50547diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
50548index 5056090..c80ca04 100644
50549--- a/drivers/tty/n_gsm.c
50550+++ b/drivers/tty/n_gsm.c
50551@@ -1643,7 +1643,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
50552 spin_lock_init(&dlci->lock);
50553 mutex_init(&dlci->mutex);
50554 dlci->fifo = &dlci->_fifo;
50555- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
50556+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
50557 kfree(dlci);
50558 return NULL;
50559 }
50560@@ -2946,7 +2946,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
50561 struct gsm_dlci *dlci = tty->driver_data;
50562 struct tty_port *port = &dlci->port;
50563
50564- port->count++;
50565+ atomic_inc(&port->count);
50566 dlci_get(dlci);
50567 dlci_get(dlci->gsm->dlci[0]);
50568 mux_get(dlci->gsm);
50569diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
50570index 4c10837..a40ec45 100644
50571--- a/drivers/tty/n_tty.c
50572+++ b/drivers/tty/n_tty.c
50573@@ -114,7 +114,7 @@ struct n_tty_data {
50574 int minimum_to_wake;
50575
50576 /* consumer-published */
50577- size_t read_tail;
50578+ size_t read_tail __intentional_overflow(-1);
50579 size_t line_start;
50580
50581 /* protected by output lock */
50582@@ -2504,6 +2504,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
50583 {
50584 *ops = tty_ldisc_N_TTY;
50585 ops->owner = NULL;
50586- ops->refcount = ops->flags = 0;
50587+ atomic_set(&ops->refcount, 0);
50588+ ops->flags = 0;
50589 }
50590 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
50591diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
50592index 25c9bc7..24077b7 100644
50593--- a/drivers/tty/pty.c
50594+++ b/drivers/tty/pty.c
50595@@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
50596 panic("Couldn't register Unix98 pts driver");
50597
50598 /* Now create the /dev/ptmx special device */
50599+ pax_open_kernel();
50600 tty_default_fops(&ptmx_fops);
50601- ptmx_fops.open = ptmx_open;
50602+ *(void **)&ptmx_fops.open = ptmx_open;
50603+ pax_close_kernel();
50604
50605 cdev_init(&ptmx_cdev, &ptmx_fops);
50606 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
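In the pty.c hunk above, ptmx_fops has become read-only under constification, so the one sanctioned boot-time write is bracketed by pax_open_kernel()/pax_close_kernel(), PaX primitives (not mainline APIs) that briefly lift kernel write protection. A loose userspace analogy using mprotect(); the real mechanism toggles CR0.WP or page-table bits, not VMA permissions:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct file_operations { int (*open)(void); };

    static int ptmx_open(void) { return 42; }

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        struct file_operations *fops =
            mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (fops == MAP_FAILED)
            return 1;

        mprotect(fops, pagesz, PROT_READ);              /* "constified"      */

        mprotect(fops, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        *(void **)&fops->open = (void *)ptmx_open;      /* the patched write */
        mprotect(fops, pagesz, PROT_READ);              /* pax_close_kernel()*/

        printf("ptmx open() -> %d\n", fops->open());
        return 0;
    }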
50607diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
50608index 354564e..fe50d9a 100644
50609--- a/drivers/tty/rocket.c
50610+++ b/drivers/tty/rocket.c
50611@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
50612 tty->driver_data = info;
50613 tty_port_tty_set(port, tty);
50614
50615- if (port->count++ == 0) {
50616+ if (atomic_inc_return(&port->count) == 1) {
50617 atomic_inc(&rp_num_ports_open);
50618
50619 #ifdef ROCKET_DEBUG_OPEN
50620@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
50621 #endif
50622 }
50623 #ifdef ROCKET_DEBUG_OPEN
50624- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
49625+	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));

50626 #endif
50627
50628 /*
50629@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
50630 spin_unlock_irqrestore(&info->port.lock, flags);
50631 return;
50632 }
50633- if (info->port.count)
50634+ if (atomic_read(&info->port.count))
50635 atomic_dec(&rp_num_ports_open);
50636 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
50637 spin_unlock_irqrestore(&info->port.lock, flags);
50638diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
50639index 1274499..f541382 100644
50640--- a/drivers/tty/serial/ioc4_serial.c
50641+++ b/drivers/tty/serial/ioc4_serial.c
50642@@ -437,7 +437,7 @@ struct ioc4_soft {
50643 } is_intr_info[MAX_IOC4_INTR_ENTS];
50644
50645 /* Number of entries active in the above array */
50646- atomic_t is_num_intrs;
50647+ atomic_unchecked_t is_num_intrs;
50648 } is_intr_type[IOC4_NUM_INTR_TYPES];
50649
50650 /* is_ir_lock must be held while
50651@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
50652 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
50653 || (type == IOC4_OTHER_INTR_TYPE)));
50654
50655- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
50656+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
50657 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
50658
50659 /* Save off the lower level interrupt handler */
50660@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
50661
50662 soft = arg;
50663 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
50664- num_intrs = (int)atomic_read(
50665+ num_intrs = (int)atomic_read_unchecked(
50666 &soft->is_intr_type[intr_type].is_num_intrs);
50667
50668 this_mir = this_ir = pending_intrs(soft, intr_type);
50669diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
50670index a260cde..6b2b5ce 100644
50671--- a/drivers/tty/serial/kgdboc.c
50672+++ b/drivers/tty/serial/kgdboc.c
50673@@ -24,8 +24,9 @@
50674 #define MAX_CONFIG_LEN 40
50675
50676 static struct kgdb_io kgdboc_io_ops;
50677+static struct kgdb_io kgdboc_io_ops_console;
50678
50679-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
50680+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
50681 static int configured = -1;
50682
50683 static char config[MAX_CONFIG_LEN];
50684@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
50685 kgdboc_unregister_kbd();
50686 if (configured == 1)
50687 kgdb_unregister_io_module(&kgdboc_io_ops);
50688+ else if (configured == 2)
50689+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
50690 }
50691
50692 static int configure_kgdboc(void)
50693@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
50694 int err;
50695 char *cptr = config;
50696 struct console *cons;
50697+ int is_console = 0;
50698
50699 err = kgdboc_option_setup(config);
50700 if (err || !strlen(config) || isspace(config[0]))
50701 goto noconfig;
50702
50703 err = -ENODEV;
50704- kgdboc_io_ops.is_console = 0;
50705 kgdb_tty_driver = NULL;
50706
50707 kgdboc_use_kms = 0;
50708@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
50709 int idx;
50710 if (cons->device && cons->device(cons, &idx) == p &&
50711 idx == tty_line) {
50712- kgdboc_io_ops.is_console = 1;
50713+ is_console = 1;
50714 break;
50715 }
50716 cons = cons->next;
50717@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
50718 kgdb_tty_line = tty_line;
50719
50720 do_register:
50721- err = kgdb_register_io_module(&kgdboc_io_ops);
50722+ if (is_console) {
50723+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
50724+ configured = 2;
50725+ } else {
50726+ err = kgdb_register_io_module(&kgdboc_io_ops);
50727+ configured = 1;
50728+ }
50729 if (err)
50730 goto noconfig;
50731
50732@@ -205,8 +214,6 @@ do_register:
50733 if (err)
50734 goto nmi_con_failed;
50735
50736- configured = 1;
50737-
50738 return 0;
50739
50740 nmi_con_failed:
50741@@ -223,7 +230,7 @@ noconfig:
50742 static int __init init_kgdboc(void)
50743 {
50744 /* Already configured? */
50745- if (configured == 1)
50746+ if (configured >= 1)
50747 return 0;
50748
50749 return configure_kgdboc();
50750@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
50751 if (config[len - 1] == '\n')
50752 config[len - 1] = '\0';
50753
50754- if (configured == 1)
50755+ if (configured >= 1)
50756 cleanup_kgdboc();
50757
50758 /* Go and configure with the new params. */
50759@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
50760 .post_exception = kgdboc_post_exp_handler,
50761 };
50762
50763+static struct kgdb_io kgdboc_io_ops_console = {
50764+ .name = "kgdboc",
50765+ .read_char = kgdboc_get_char,
50766+ .write_char = kgdboc_put_char,
50767+ .pre_exception = kgdboc_pre_exp_handler,
50768+ .post_exception = kgdboc_post_exp_handler,
50769+ .is_console = 1
50770+};
50771+
50772 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
50773 /* This is only available if kgdboc is a built in for early debugging */
50774 static int __init kgdboc_early_init(char *opt)
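The kgdboc rework above exists because a constified struct kgdb_io can no longer have .is_console flipped at runtime; instead the driver keeps two fully initialized static instances and widens configured to 1/2 so cleanup unregisters the right one. A compact sketch of that selection logic:

    #include <stdio.h>

    struct kgdb_io {
        const char *name;
        int is_console;
    };

    static const struct kgdb_io kgdboc_io_ops         = { "kgdboc", 0 };
    static const struct kgdb_io kgdboc_io_ops_console = { "kgdboc", 1 };

    static int configured;  /* 0 = unconfigured, 1 = plain, 2 = console */

    static void configure(int is_console)
    {
        const struct kgdb_io *ops =
            is_console ? &kgdboc_io_ops_console : &kgdboc_io_ops;

        configured = is_console ? 2 : 1;
        printf("registered %s (is_console=%d), configured=%d\n",
               ops->name, ops->is_console, configured);
    }

    int main(void)
    {
        configure(1);
        return 0;
    }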
50775diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
50776index b5d779c..3622cfe 100644
50777--- a/drivers/tty/serial/msm_serial.c
50778+++ b/drivers/tty/serial/msm_serial.c
50779@@ -897,7 +897,7 @@ static struct uart_driver msm_uart_driver = {
50780 .cons = MSM_CONSOLE,
50781 };
50782
50783-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
50784+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
50785
50786 static const struct of_device_id msm_uartdm_table[] = {
50787 { .compatible = "qcom,msm-uartdm" },
50788@@ -912,7 +912,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
50789 int irq;
50790
50791 if (pdev->id == -1)
50792- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
50793+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
50794
50795 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
50796 return -ENXIO;
50797diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
50798index c1af04d..0815c8a 100644
50799--- a/drivers/tty/serial/samsung.c
50800+++ b/drivers/tty/serial/samsung.c
50801@@ -463,11 +463,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
50802 }
50803 }
50804
50805+static int s3c64xx_serial_startup(struct uart_port *port);
50806 static int s3c24xx_serial_startup(struct uart_port *port)
50807 {
50808 struct s3c24xx_uart_port *ourport = to_ourport(port);
50809 int ret;
50810
50811+ /* Startup sequence is different for s3c64xx and higher SoC's */
50812+ if (s3c24xx_serial_has_interrupt_mask(port))
50813+ return s3c64xx_serial_startup(port);
50814+
50815 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
50816 port->mapbase, port->membase);
50817
50818@@ -1141,10 +1146,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
50819 /* setup info for port */
50820 port->dev = &platdev->dev;
50821
50822- /* Startup sequence is different for s3c64xx and higher SoC's */
50823- if (s3c24xx_serial_has_interrupt_mask(port))
50824- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
50825-
50826 port->uartclk = 1;
50827
50828 if (cfg->uart_flags & UPF_CONS_FLOW) {
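The samsung.c hunk above removes the last runtime write to s3c24xx_serial_ops by letting the common startup routine dispatch to the s3c64xx path itself, keyed off the port's interrupt-mask capability, instead of patching the ops table at init time. The shape of the change, standalone:

    #include <stdio.h>

    struct uart_port { int has_interrupt_mask; };

    static int s3c64xx_serial_startup(struct uart_port *port)
    {
        (void)port;
        puts("s3c64xx startup path");
        return 0;
    }

    static int s3c24xx_serial_startup(struct uart_port *port)
    {
        /* Runtime dispatch replaces the ops-table rewrite. */
        if (port->has_interrupt_mask)
            return s3c64xx_serial_startup(port);
        puts("s3c24xx startup path");
        return 0;
    }

    int main(void)
    {
        struct uart_port old_soc = { 0 }, new_soc = { 1 };
        s3c24xx_serial_startup(&old_soc);
        s3c24xx_serial_startup(&new_soc);
        return 0;
    }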
50829diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
50830index 0f02351..07c59c5 100644
50831--- a/drivers/tty/serial/serial_core.c
50832+++ b/drivers/tty/serial/serial_core.c
50833@@ -1448,7 +1448,7 @@ static void uart_hangup(struct tty_struct *tty)
50834 uart_flush_buffer(tty);
50835 uart_shutdown(tty, state);
50836 spin_lock_irqsave(&port->lock, flags);
50837- port->count = 0;
50838+ atomic_set(&port->count, 0);
50839 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
50840 spin_unlock_irqrestore(&port->lock, flags);
50841 tty_port_tty_set(port, NULL);
50842@@ -1544,7 +1544,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50843 goto end;
50844 }
50845
50846- port->count++;
50847+ atomic_inc(&port->count);
50848 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
50849 retval = -ENXIO;
50850 goto err_dec_count;
50851@@ -1572,7 +1572,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50852 /*
50853 * Make sure the device is in D0 state.
50854 */
50855- if (port->count == 1)
50856+ if (atomic_read(&port->count) == 1)
50857 uart_change_pm(state, UART_PM_STATE_ON);
50858
50859 /*
50860@@ -1590,7 +1590,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
50861 end:
50862 return retval;
50863 err_dec_count:
50864- port->count--;
49865+	atomic_dec(&port->count);
50866 mutex_unlock(&port->mutex);
50867 goto end;
50868 }
50869diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
50870index e1ce141..6d4ed80 100644
50871--- a/drivers/tty/synclink.c
50872+++ b/drivers/tty/synclink.c
50873@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
50874
50875 if (debug_level >= DEBUG_LEVEL_INFO)
50876 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
50877- __FILE__,__LINE__, info->device_name, info->port.count);
50878+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
50879
50880 if (tty_port_close_start(&info->port, tty, filp) == 0)
50881 goto cleanup;
50882@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
50883 cleanup:
50884 if (debug_level >= DEBUG_LEVEL_INFO)
50885 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
50886- tty->driver->name, info->port.count);
50887+ tty->driver->name, atomic_read(&info->port.count));
50888
50889 } /* end of mgsl_close() */
50890
50891@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
50892
50893 mgsl_flush_buffer(tty);
50894 shutdown(info);
50895-
50896- info->port.count = 0;
50897+
50898+ atomic_set(&info->port.count, 0);
50899 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
50900 info->port.tty = NULL;
50901
50902@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50903
50904 if (debug_level >= DEBUG_LEVEL_INFO)
50905 printk("%s(%d):block_til_ready before block on %s count=%d\n",
50906- __FILE__,__LINE__, tty->driver->name, port->count );
50907+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50908
50909 spin_lock_irqsave(&info->irq_spinlock, flags);
50910 if (!tty_hung_up_p(filp)) {
50911 extra_count = true;
50912- port->count--;
50913+ atomic_dec(&port->count);
50914 }
50915 spin_unlock_irqrestore(&info->irq_spinlock, flags);
50916 port->blocked_open++;
50917@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50918
50919 if (debug_level >= DEBUG_LEVEL_INFO)
50920 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
50921- __FILE__,__LINE__, tty->driver->name, port->count );
50922+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50923
50924 tty_unlock(tty);
50925 schedule();
50926@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
50927
50928 /* FIXME: Racy on hangup during close wait */
50929 if (extra_count)
50930- port->count++;
50931+ atomic_inc(&port->count);
50932 port->blocked_open--;
50933
50934 if (debug_level >= DEBUG_LEVEL_INFO)
50935 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
50936- __FILE__,__LINE__, tty->driver->name, port->count );
50937+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
50938
50939 if (!retval)
50940 port->flags |= ASYNC_NORMAL_ACTIVE;
50941@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
50942
50943 if (debug_level >= DEBUG_LEVEL_INFO)
50944 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
50945- __FILE__,__LINE__,tty->driver->name, info->port.count);
50946+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
50947
50948 /* If port is closing, signal caller to try again */
50949 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
50950@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
50951 spin_unlock_irqrestore(&info->netlock, flags);
50952 goto cleanup;
50953 }
50954- info->port.count++;
50955+ atomic_inc(&info->port.count);
50956 spin_unlock_irqrestore(&info->netlock, flags);
50957
50958- if (info->port.count == 1) {
50959+ if (atomic_read(&info->port.count) == 1) {
50960 /* 1st open on this device, init hardware */
50961 retval = startup(info);
50962 if (retval < 0)
50963@@ -3446,8 +3446,8 @@ cleanup:
50964 if (retval) {
50965 if (tty->count == 1)
50966 info->port.tty = NULL; /* tty layer will release tty struct */
50967- if(info->port.count)
50968- info->port.count--;
50969+ if (atomic_read(&info->port.count))
50970+ atomic_dec(&info->port.count);
50971 }
50972
50973 return retval;
50974@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
50975 unsigned short new_crctype;
50976
50977 /* return error if TTY interface open */
50978- if (info->port.count)
50979+ if (atomic_read(&info->port.count))
50980 return -EBUSY;
50981
50982 switch (encoding)
50983@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
50984
50985 /* arbitrate between network and tty opens */
50986 spin_lock_irqsave(&info->netlock, flags);
50987- if (info->port.count != 0 || info->netcount != 0) {
50988+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
50989 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
50990 spin_unlock_irqrestore(&info->netlock, flags);
50991 return -EBUSY;
50992@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
50993 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
50994
50995 /* return error if TTY interface open */
50996- if (info->port.count)
50997+ if (atomic_read(&info->port.count))
50998 return -EBUSY;
50999
51000 if (cmd != SIOCWANDEV)
51001diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
51002index 1abf946..1ee34fc 100644
51003--- a/drivers/tty/synclink_gt.c
51004+++ b/drivers/tty/synclink_gt.c
51005@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
51006 tty->driver_data = info;
51007 info->port.tty = tty;
51008
51009- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
51010+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
51011
51012 /* If port is closing, signal caller to try again */
51013 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
51014@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
51015 mutex_unlock(&info->port.mutex);
51016 goto cleanup;
51017 }
51018- info->port.count++;
51019+ atomic_inc(&info->port.count);
51020 spin_unlock_irqrestore(&info->netlock, flags);
51021
51022- if (info->port.count == 1) {
51023+ if (atomic_read(&info->port.count) == 1) {
51024 /* 1st open on this device, init hardware */
51025 retval = startup(info);
51026 if (retval < 0) {
51027@@ -715,8 +715,8 @@ cleanup:
51028 if (retval) {
51029 if (tty->count == 1)
51030 info->port.tty = NULL; /* tty layer will release tty struct */
51031- if(info->port.count)
51032- info->port.count--;
51033+ if(atomic_read(&info->port.count))
51034+ atomic_dec(&info->port.count);
51035 }
51036
51037 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
51038@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51039
51040 if (sanity_check(info, tty->name, "close"))
51041 return;
51042- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
51043+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
51044
51045 if (tty_port_close_start(&info->port, tty, filp) == 0)
51046 goto cleanup;
51047@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51048 tty_port_close_end(&info->port, tty);
51049 info->port.tty = NULL;
51050 cleanup:
51051- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
51052+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
51053 }
51054
51055 static void hangup(struct tty_struct *tty)
51056@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
51057 shutdown(info);
51058
51059 spin_lock_irqsave(&info->port.lock, flags);
51060- info->port.count = 0;
51061+ atomic_set(&info->port.count, 0);
51062 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
51063 info->port.tty = NULL;
51064 spin_unlock_irqrestore(&info->port.lock, flags);
51065@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
51066 unsigned short new_crctype;
51067
51068 /* return error if TTY interface open */
51069- if (info->port.count)
51070+ if (atomic_read(&info->port.count))
51071 return -EBUSY;
51072
51073 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
51074@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
51075
51076 /* arbitrate between network and tty opens */
51077 spin_lock_irqsave(&info->netlock, flags);
51078- if (info->port.count != 0 || info->netcount != 0) {
51079+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
51080 DBGINFO(("%s hdlc_open busy\n", dev->name));
51081 spin_unlock_irqrestore(&info->netlock, flags);
51082 return -EBUSY;
51083@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
51084 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
51085
51086 /* return error if TTY interface open */
51087- if (info->port.count)
51088+ if (atomic_read(&info->port.count))
51089 return -EBUSY;
51090
51091 if (cmd != SIOCWANDEV)
51092@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
51093 if (port == NULL)
51094 continue;
51095 spin_lock(&port->lock);
51096- if ((port->port.count || port->netcount) &&
51097+ if ((atomic_read(&port->port.count) || port->netcount) &&
51098 port->pending_bh && !port->bh_running &&
51099 !port->bh_requested) {
51100 DBGISR(("%s bh queued\n", port->device_name));
51101@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51102 spin_lock_irqsave(&info->lock, flags);
51103 if (!tty_hung_up_p(filp)) {
51104 extra_count = true;
51105- port->count--;
51106+ atomic_dec(&port->count);
51107 }
51108 spin_unlock_irqrestore(&info->lock, flags);
51109 port->blocked_open++;
51110@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51111 remove_wait_queue(&port->open_wait, &wait);
51112
51113 if (extra_count)
51114- port->count++;
51115+ atomic_inc(&port->count);
51116 port->blocked_open--;
51117
51118 if (!retval)
51119diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
51120index dc6e969..5dc8786 100644
51121--- a/drivers/tty/synclinkmp.c
51122+++ b/drivers/tty/synclinkmp.c
51123@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
51124
51125 if (debug_level >= DEBUG_LEVEL_INFO)
51126 printk("%s(%d):%s open(), old ref count = %d\n",
51127- __FILE__,__LINE__,tty->driver->name, info->port.count);
51128+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
51129
51130 /* If port is closing, signal caller to try again */
51131 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
51132@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
51133 spin_unlock_irqrestore(&info->netlock, flags);
51134 goto cleanup;
51135 }
51136- info->port.count++;
51137+ atomic_inc(&info->port.count);
51138 spin_unlock_irqrestore(&info->netlock, flags);
51139
51140- if (info->port.count == 1) {
51141+ if (atomic_read(&info->port.count) == 1) {
51142 /* 1st open on this device, init hardware */
51143 retval = startup(info);
51144 if (retval < 0)
51145@@ -796,8 +796,8 @@ cleanup:
51146 if (retval) {
51147 if (tty->count == 1)
51148 info->port.tty = NULL; /* tty layer will release tty struct */
51149- if(info->port.count)
51150- info->port.count--;
51151+ if(atomic_read(&info->port.count))
51152+ atomic_dec(&info->port.count);
51153 }
51154
51155 return retval;
51156@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51157
51158 if (debug_level >= DEBUG_LEVEL_INFO)
51159 printk("%s(%d):%s close() entry, count=%d\n",
51160- __FILE__,__LINE__, info->device_name, info->port.count);
51161+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
51162
51163 if (tty_port_close_start(&info->port, tty, filp) == 0)
51164 goto cleanup;
51165@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
51166 cleanup:
51167 if (debug_level >= DEBUG_LEVEL_INFO)
51168 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
51169- tty->driver->name, info->port.count);
51170+ tty->driver->name, atomic_read(&info->port.count));
51171 }
51172
51173 /* Called by tty_hangup() when a hangup is signaled.
51174@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
51175 shutdown(info);
51176
51177 spin_lock_irqsave(&info->port.lock, flags);
51178- info->port.count = 0;
51179+ atomic_set(&info->port.count, 0);
51180 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
51181 info->port.tty = NULL;
51182 spin_unlock_irqrestore(&info->port.lock, flags);
51183@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
51184 unsigned short new_crctype;
51185
51186 /* return error if TTY interface open */
51187- if (info->port.count)
51188+ if (atomic_read(&info->port.count))
51189 return -EBUSY;
51190
51191 switch (encoding)
51192@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
51193
51194 /* arbitrate between network and tty opens */
51195 spin_lock_irqsave(&info->netlock, flags);
51196- if (info->port.count != 0 || info->netcount != 0) {
51197+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
51198 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
51199 spin_unlock_irqrestore(&info->netlock, flags);
51200 return -EBUSY;
51201@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
51202 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
51203
51204 /* return error if TTY interface open */
51205- if (info->port.count)
51206+ if (atomic_read(&info->port.count))
51207 return -EBUSY;
51208
51209 if (cmd != SIOCWANDEV)
51210@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
51211 * do not request bottom half processing if the
51212 * device is not open in a normal mode.
51213 */
51214- if ( port && (port->port.count || port->netcount) &&
51215+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
51216 port->pending_bh && !port->bh_running &&
51217 !port->bh_requested ) {
51218 if ( debug_level >= DEBUG_LEVEL_ISR )
51219@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51220
51221 if (debug_level >= DEBUG_LEVEL_INFO)
51222 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
51223- __FILE__,__LINE__, tty->driver->name, port->count );
51224+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51225
51226 spin_lock_irqsave(&info->lock, flags);
51227 if (!tty_hung_up_p(filp)) {
51228 extra_count = true;
51229- port->count--;
51230+ atomic_dec(&port->count);
51231 }
51232 spin_unlock_irqrestore(&info->lock, flags);
51233 port->blocked_open++;
51234@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51235
51236 if (debug_level >= DEBUG_LEVEL_INFO)
51237 printk("%s(%d):%s block_til_ready() count=%d\n",
51238- __FILE__,__LINE__, tty->driver->name, port->count );
51239+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51240
51241 tty_unlock(tty);
51242 schedule();
51243@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
51244 remove_wait_queue(&port->open_wait, &wait);
51245
51246 if (extra_count)
51247- port->count++;
51248+ atomic_inc(&port->count);
51249 port->blocked_open--;
51250
51251 if (debug_level >= DEBUG_LEVEL_INFO)
51252 printk("%s(%d):%s block_til_ready() after, count=%d\n",
51253- __FILE__,__LINE__, tty->driver->name, port->count );
51254+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
51255
51256 if (!retval)
51257 port->flags |= ASYNC_NORMAL_ACTIVE;
51258diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
51259index ce396ec..04a37be 100644
51260--- a/drivers/tty/sysrq.c
51261+++ b/drivers/tty/sysrq.c
51262@@ -1075,7 +1075,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
51263 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
51264 size_t count, loff_t *ppos)
51265 {
51266- if (count) {
51267+ if (count && capable(CAP_SYS_ADMIN)) {
51268 char c;
51269
51270 if (get_user(c, buf))
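The sysrq hunk above gates the /proc/sysrq-trigger write path behind capable(CAP_SYS_ADMIN); note that an unprivileged write is silently ignored (count is still returned) rather than rejected with an error. A userspace model, with geteuid()==0 standing in for the capability check:

    #include <stdio.h>
    #include <unistd.h>

    static ssize_t write_sysrq_trigger(const char *buf, size_t count)
    {
        if (count && geteuid() == 0) {  /* was: if (count) */
            printf("sysrq '%c' handled\n", buf[0]);
        }
        /* Unprivileged writes fall through: ignored, but still "succeed". */
        return (ssize_t)count;
    }

    int main(void)
    {
        return write_sysrq_trigger("b", 1) == 1 ? 0 : 1;
    }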
51271diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
51272index c74a00a..02cf211a 100644
51273--- a/drivers/tty/tty_io.c
51274+++ b/drivers/tty/tty_io.c
51275@@ -3474,7 +3474,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
51276
51277 void tty_default_fops(struct file_operations *fops)
51278 {
51279- *fops = tty_fops;
51280+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
51281 }
51282
51283 /*
51284diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
51285index 6458e11..6cfc218 100644
51286--- a/drivers/tty/tty_ldisc.c
51287+++ b/drivers/tty/tty_ldisc.c
51288@@ -72,7 +72,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
51289 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
51290 tty_ldiscs[disc] = new_ldisc;
51291 new_ldisc->num = disc;
51292- new_ldisc->refcount = 0;
51293+ atomic_set(&new_ldisc->refcount, 0);
51294 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
51295
51296 return ret;
51297@@ -100,7 +100,7 @@ int tty_unregister_ldisc(int disc)
51298 return -EINVAL;
51299
51300 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
51301- if (tty_ldiscs[disc]->refcount)
51302+ if (atomic_read(&tty_ldiscs[disc]->refcount))
51303 ret = -EBUSY;
51304 else
51305 tty_ldiscs[disc] = NULL;
51306@@ -121,7 +121,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
51307 if (ldops) {
51308 ret = ERR_PTR(-EAGAIN);
51309 if (try_module_get(ldops->owner)) {
51310- ldops->refcount++;
51311+ atomic_inc(&ldops->refcount);
51312 ret = ldops;
51313 }
51314 }
51315@@ -134,7 +134,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
51316 unsigned long flags;
51317
51318 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
51319- ldops->refcount--;
51320+ atomic_dec(&ldops->refcount);
51321 module_put(ldops->owner);
51322 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
51323 }
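The tty_ldisc.c hunk above converts the line-discipline ops refcount from a plain int to an atomic, matching the n_tty_inherit_ops change earlier in the patch. A userspace model of the get/put/unregister protocol:

    #include <stdatomic.h>
    #include <stdio.h>

    struct tty_ldisc_ops {
        const char *name;
        atomic_int refcount;    /* was: int refcount */
    };

    static struct tty_ldisc_ops n_tty = { .name = "n_tty" };

    static struct tty_ldisc_ops *get_ldops(void)
    {
        atomic_fetch_add(&n_tty.refcount, 1);
        return &n_tty;
    }

    static void put_ldops(struct tty_ldisc_ops *ops)
    {
        atomic_fetch_sub(&ops->refcount, 1);
    }

    static int unregister_ldisc(void)
    {
        /* Busy while any reference is outstanding. */
        return atomic_load(&n_tty.refcount) ? -1 /* -EBUSY */ : 0;
    }

    int main(void)
    {
        struct tty_ldisc_ops *ops = get_ldops();
        printf("unregister while held: %d\n", unregister_ldisc());
        put_ldops(ops);
        printf("unregister after put: %d\n", unregister_ldisc());
        return 0;
    }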
51324diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
51325index c94d234..8210f2d 100644
51326--- a/drivers/tty/tty_port.c
51327+++ b/drivers/tty/tty_port.c
51328@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
51329 unsigned long flags;
51330
51331 spin_lock_irqsave(&port->lock, flags);
51332- port->count = 0;
51333+ atomic_set(&port->count, 0);
51334 port->flags &= ~ASYNC_NORMAL_ACTIVE;
51335 tty = port->tty;
51336 if (tty)
51337@@ -394,7 +394,7 @@ int tty_port_block_til_ready(struct tty_port *port,
51338 /* The port lock protects the port counts */
51339 spin_lock_irqsave(&port->lock, flags);
51340 if (!tty_hung_up_p(filp))
51341- port->count--;
51342+ atomic_dec(&port->count);
51343 port->blocked_open++;
51344 spin_unlock_irqrestore(&port->lock, flags);
51345
51346@@ -436,7 +436,7 @@ int tty_port_block_til_ready(struct tty_port *port,
51347 we must not mess that up further */
51348 spin_lock_irqsave(&port->lock, flags);
51349 if (!tty_hung_up_p(filp))
51350- port->count++;
51351+ atomic_inc(&port->count);
51352 port->blocked_open--;
51353 if (retval == 0)
51354 port->flags |= ASYNC_NORMAL_ACTIVE;
51355@@ -470,19 +470,19 @@ int tty_port_close_start(struct tty_port *port,
51356 return 0;
51357 }
51358
51359- if (tty->count == 1 && port->count != 1) {
51360+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
51361 printk(KERN_WARNING
51362 "tty_port_close_start: tty->count = 1 port count = %d.\n",
51363- port->count);
51364- port->count = 1;
51365+ atomic_read(&port->count));
51366+ atomic_set(&port->count, 1);
51367 }
51368- if (--port->count < 0) {
51369+ if (atomic_dec_return(&port->count) < 0) {
51370 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
51371- port->count);
51372- port->count = 0;
51373+ atomic_read(&port->count));
51374+ atomic_set(&port->count, 0);
51375 }
51376
51377- if (port->count) {
51378+ if (atomic_read(&port->count)) {
51379 spin_unlock_irqrestore(&port->lock, flags);
51380 return 0;
51381 }
51382@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
51383 {
51384 spin_lock_irq(&port->lock);
51385 if (!tty_hung_up_p(filp))
51386- ++port->count;
51387+ atomic_inc(&port->count);
51388 spin_unlock_irq(&port->lock);
51389 tty_port_tty_set(port, tty);
51390
51391diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
51392index d0e3a44..5f8b754 100644
51393--- a/drivers/tty/vt/keyboard.c
51394+++ b/drivers/tty/vt/keyboard.c
51395@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
51396 kbd->kbdmode == VC_OFF) &&
51397 value != KVAL(K_SAK))
51398 return; /* SAK is allowed even in raw mode */
51399+
51400+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
51401+ {
51402+ void *func = fn_handler[value];
51403+ if (func == fn_show_state || func == fn_show_ptregs ||
51404+ func == fn_show_mem)
51405+ return;
51406+ }
51407+#endif
51408+
51409 fn_handler[value](vc);
51410 }
51411
51412@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
51413 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
51414 return -EFAULT;
51415
51416- if (!capable(CAP_SYS_TTY_CONFIG))
51417- perm = 0;
51418-
51419 switch (cmd) {
51420 case KDGKBENT:
51421 /* Ensure another thread doesn't free it under us */
51422@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
51423 spin_unlock_irqrestore(&kbd_event_lock, flags);
51424 return put_user(val, &user_kbe->kb_value);
51425 case KDSKBENT:
51426+ if (!capable(CAP_SYS_TTY_CONFIG))
51427+ perm = 0;
51428+
51429 if (!perm)
51430 return -EPERM;
51431 if (!i && v == K_NOSUCHMAP) {
51432@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
51433 int i, j, k;
51434 int ret;
51435
51436- if (!capable(CAP_SYS_TTY_CONFIG))
51437- perm = 0;
51438-
51439 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
51440 if (!kbs) {
51441 ret = -ENOMEM;
51442@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
51443 kfree(kbs);
51444 return ((p && *p) ? -EOVERFLOW : 0);
51445 case KDSKBSENT:
51446+ if (!capable(CAP_SYS_TTY_CONFIG))
51447+ perm = 0;
51448+
51449 if (!perm) {
51450 ret = -EPERM;
51451 goto reterr;
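
Two separate changes in the keyboard.c diff above. The k_spec hunk blocks the fn_show_state/fn_show_ptregs/fn_show_mem console handlers when grsecurity's /proc restrictions are active, closing a side channel to the information those handlers dump. The ioctl hunks move the CAP_SYS_TTY_CONFIG test from the top of vt_do_kdsk_ioctl/vt_do_kdgkb_ioctl into the KDSKBENT/KDSKBSENT (write) cases; the read cases never consulted perm, so their behaviour is unchanged, and a plain KDGKBENT no longer exercises capable() at all, which matters once capability checks carry logging or audit side effects. Reduced to a runnable sketch (the names below are illustrative stand-ins, not the kernel API):

/* perm_gate.c - probe privilege only on the mutating branch.
 * Build: cc -std=c11 perm_gate.c -o perm_gate */
#include <errno.h>
#include <stdio.h>

enum { KD_GET, KD_SET };

/* Stand-in for capable(CAP_SYS_TTY_CONFIG). */
static int capable_tty_config(void) { return 0; }

static int kbd_ioctl(int cmd, int perm)
{
    switch (cmd) {
    case KD_GET:                       /* read: no privilege probe  */
        return 0;
    case KD_SET:                       /* write: gate it right here */
        if (!capable_tty_config())
            perm = 0;
        if (!perm)
            return -EPERM;
        return 0;
    }
    return -EINVAL;
}

int main(void)
{
    printf("get -> %d, set -> %d\n",
           kbd_ioctl(KD_GET, 1), kbd_ioctl(KD_SET, 1));
    return 0;
}
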
51452diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
51453index a673e5b..36e5d32 100644
51454--- a/drivers/uio/uio.c
51455+++ b/drivers/uio/uio.c
51456@@ -25,6 +25,7 @@
51457 #include <linux/kobject.h>
51458 #include <linux/cdev.h>
51459 #include <linux/uio_driver.h>
51460+#include <asm/local.h>
51461
51462 #define UIO_MAX_DEVICES (1U << MINORBITS)
51463
51464@@ -32,7 +33,7 @@ struct uio_device {
51465 struct module *owner;
51466 struct device *dev;
51467 int minor;
51468- atomic_t event;
51469+ atomic_unchecked_t event;
51470 struct fasync_struct *async_queue;
51471 wait_queue_head_t wait;
51472 struct uio_info *info;
51473@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
51474 struct device_attribute *attr, char *buf)
51475 {
51476 struct uio_device *idev = dev_get_drvdata(dev);
51477- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
51478+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
51479 }
51480 static DEVICE_ATTR_RO(event);
51481
51482@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
51483 {
51484 struct uio_device *idev = info->uio_dev;
51485
51486- atomic_inc(&idev->event);
51487+ atomic_inc_unchecked(&idev->event);
51488 wake_up_interruptible(&idev->wait);
51489 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
51490 }
51491@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
51492 }
51493
51494 listener->dev = idev;
51495- listener->event_count = atomic_read(&idev->event);
51496+ listener->event_count = atomic_read_unchecked(&idev->event);
51497 filep->private_data = listener;
51498
51499 if (idev->info->open) {
51500@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
51501 return -EIO;
51502
51503 poll_wait(filep, &idev->wait, wait);
51504- if (listener->event_count != atomic_read(&idev->event))
51505+ if (listener->event_count != atomic_read_unchecked(&idev->event))
51506 return POLLIN | POLLRDNORM;
51507 return 0;
51508 }
51509@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
51510 do {
51511 set_current_state(TASK_INTERRUPTIBLE);
51512
51513- event_count = atomic_read(&idev->event);
51514+ event_count = atomic_read_unchecked(&idev->event);
51515 if (event_count != listener->event_count) {
51516 if (copy_to_user(buf, &event_count, count))
51517 retval = -EFAULT;
51518@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
51519 static int uio_find_mem_index(struct vm_area_struct *vma)
51520 {
51521 struct uio_device *idev = vma->vm_private_data;
51522+ unsigned long size;
51523
51524 if (vma->vm_pgoff < MAX_UIO_MAPS) {
51525- if (idev->info->mem[vma->vm_pgoff].size == 0)
51526+ size = idev->info->mem[vma->vm_pgoff].size;
51527+ if (size == 0)
51528+ return -1;
51529+ if (vma->vm_end - vma->vm_start > size)
51530 return -1;
51531 return (int)vma->vm_pgoff;
51532 }
51533@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
51534 idev->owner = owner;
51535 idev->info = info;
51536 init_waitqueue_head(&idev->wait);
51537- atomic_set(&idev->event, 0);
51538+ atomic_set_unchecked(&idev->event, 0);
51539
51540 ret = uio_get_minor(idev);
51541 if (ret)
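
Two distinct hardenings in the uio hunks. First, idev->event becomes atomic_unchecked_t: under PaX REFCOUNT, plain atomic_t operations trap on overflow, so counters whose wraparound is harmless (pure event statistics) are switched to the unchecked variant to avoid false positives. Second, uio_find_mem_index now rejects a mapping whose requested span exceeds the backing region, not just a zero-sized region. A userspace sketch of that bounds check (field names mirror the driver; everything else is illustrative):

/* mmap_bounds.c - reject mappings larger than the backing region.
 * Build: cc -std=c11 mmap_bounds.c -o mmap_bounds */
#include <stdio.h>

struct mem_region { unsigned long size; };

/* Returns the region index, or -1 if the request is invalid. */
static int find_mem_index(const struct mem_region *mem, unsigned long nregions,
                          unsigned long pgoff, unsigned long span)
{
    if (pgoff >= nregions)
        return -1;
    if (mem[pgoff].size == 0)          /* unused slot             */
        return -1;
    if (span > mem[pgoff].size)        /* request overruns region */
        return -1;
    return (int)pgoff;
}

int main(void)
{
    struct mem_region mem[] = { { 4096 }, { 0 } };
    printf("%d %d %d\n",
           find_mem_index(mem, 2, 0, 4096),   /* fits     -> 0  */
           find_mem_index(mem, 2, 0, 8192),   /* too big  -> -1 */
           find_mem_index(mem, 2, 1, 4096));  /* empty    -> -1 */
    return 0;
}
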
51542diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
51543index 8a7eb77..c00402f 100644
51544--- a/drivers/usb/atm/cxacru.c
51545+++ b/drivers/usb/atm/cxacru.c
51546@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
51547 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
51548 if (ret < 2)
51549 return -EINVAL;
51550- if (index < 0 || index > 0x7f)
51551+ if (index > 0x7f)
51552 return -EINVAL;
51553 pos += tmp;
51554
51555diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
51556index 25a7bfc..57f3cf5 100644
51557--- a/drivers/usb/atm/usbatm.c
51558+++ b/drivers/usb/atm/usbatm.c
51559@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51560 if (printk_ratelimit())
51561 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
51562 __func__, vpi, vci);
51563- atomic_inc(&vcc->stats->rx_err);
51564+ atomic_inc_unchecked(&vcc->stats->rx_err);
51565 return;
51566 }
51567
51568@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51569 if (length > ATM_MAX_AAL5_PDU) {
51570 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
51571 __func__, length, vcc);
51572- atomic_inc(&vcc->stats->rx_err);
51573+ atomic_inc_unchecked(&vcc->stats->rx_err);
51574 goto out;
51575 }
51576
51577@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51578 if (sarb->len < pdu_length) {
51579 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
51580 __func__, pdu_length, sarb->len, vcc);
51581- atomic_inc(&vcc->stats->rx_err);
51582+ atomic_inc_unchecked(&vcc->stats->rx_err);
51583 goto out;
51584 }
51585
51586 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
51587 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
51588 __func__, vcc);
51589- atomic_inc(&vcc->stats->rx_err);
51590+ atomic_inc_unchecked(&vcc->stats->rx_err);
51591 goto out;
51592 }
51593
51594@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51595 if (printk_ratelimit())
51596 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
51597 __func__, length);
51598- atomic_inc(&vcc->stats->rx_drop);
51599+ atomic_inc_unchecked(&vcc->stats->rx_drop);
51600 goto out;
51601 }
51602
51603@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
51604
51605 vcc->push(vcc, skb);
51606
51607- atomic_inc(&vcc->stats->rx);
51608+ atomic_inc_unchecked(&vcc->stats->rx);
51609 out:
51610 skb_trim(sarb, 0);
51611 }
51612@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
51613 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
51614
51615 usbatm_pop(vcc, skb);
51616- atomic_inc(&vcc->stats->tx);
51617+ atomic_inc_unchecked(&vcc->stats->tx);
51618
51619 skb = skb_dequeue(&instance->sndqueue);
51620 }
51621@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
51622 if (!left--)
51623 return sprintf(page,
51624 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
51625- atomic_read(&atm_dev->stats.aal5.tx),
51626- atomic_read(&atm_dev->stats.aal5.tx_err),
51627- atomic_read(&atm_dev->stats.aal5.rx),
51628- atomic_read(&atm_dev->stats.aal5.rx_err),
51629- atomic_read(&atm_dev->stats.aal5.rx_drop));
51630+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
51631+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
51632+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
51633+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
51634+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
51635
51636 if (!left--) {
51637 if (instance->disconnected)
51638diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
51639index 2a3bbdf..91d72cf 100644
51640--- a/drivers/usb/core/devices.c
51641+++ b/drivers/usb/core/devices.c
51642@@ -126,7 +126,7 @@ static const char format_endpt[] =
51643 * time it gets called.
51644 */
51645 static struct device_connect_event {
51646- atomic_t count;
51647+ atomic_unchecked_t count;
51648 wait_queue_head_t wait;
51649 } device_event = {
51650 .count = ATOMIC_INIT(1),
51651@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
51652
51653 void usbfs_conn_disc_event(void)
51654 {
51655- atomic_add(2, &device_event.count);
51656+ atomic_add_unchecked(2, &device_event.count);
51657 wake_up(&device_event.wait);
51658 }
51659
51660@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
51661
51662 poll_wait(file, &device_event.wait, wait);
51663
51664- event_count = atomic_read(&device_event.count);
51665+ event_count = atomic_read_unchecked(&device_event.count);
51666 if (file->f_version != event_count) {
51667 file->f_version = event_count;
51668 return POLLIN | POLLRDNORM;
51669diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
51670index 967152a..16fa2e5 100644
51671--- a/drivers/usb/core/devio.c
51672+++ b/drivers/usb/core/devio.c
51673@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
51674 struct dev_state *ps = file->private_data;
51675 struct usb_device *dev = ps->dev;
51676 ssize_t ret = 0;
51677- unsigned len;
51678+ size_t len;
51679 loff_t pos;
51680 int i;
51681
51682@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
51683 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
51684 struct usb_config_descriptor *config =
51685 (struct usb_config_descriptor *)dev->rawdescriptors[i];
51686- unsigned int length = le16_to_cpu(config->wTotalLength);
51687+ size_t length = le16_to_cpu(config->wTotalLength);
51688
51689 if (*ppos < pos + length) {
51690
51691 /* The descriptor may claim to be longer than it
51692 * really is. Here is the actual allocated length. */
51693- unsigned alloclen =
51694+ size_t alloclen =
51695 le16_to_cpu(dev->config[i].desc.wTotalLength);
51696
51697- len = length - (*ppos - pos);
51698+ len = length + pos - *ppos;
51699 if (len > nbytes)
51700 len = nbytes;
51701
51702 /* Simply don't write (skip over) unallocated parts */
51703 if (alloclen > (*ppos - pos)) {
51704- alloclen -= (*ppos - pos);
51705+ alloclen = alloclen + pos - *ppos;
51706 if (copy_to_user(buf,
51707 dev->rawdescriptors[i] + (*ppos - pos),
51708 min(len, alloclen))) {
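
The devio.c hunk widens len/alloclen from unsigned int to size_t and reassociates the pointer arithmetic, apparently for the benefit of grsecurity's size_overflow gcc plugin, which instruments intermediate expressions rather than just final results: a - (b - c) and a + c - b yield the same unsigned value, but only the second form avoids a wrapped transient when b < c cannot be proven. A short demonstration of that equivalence-with-different-intermediates:

/* reassoc.c - same result, different intermediates under unsigned math.
 * Build: cc -std=c11 reassoc.c -o reassoc */
#include <stdio.h>

int main(void)
{
    size_t length = 100, pos = 8192, ppos = 4096;   /* ppos < pos */

    size_t inner = ppos - pos;          /* wraps: huge transient     */
    size_t v1 = length - inner;         /* final value still "right" */
    size_t v2 = length + pos - ppos;    /* same value, no wrap       */

    printf("inner=%zu\nv1=%zu v2=%zu\n", inner, v1, v2);
    return 0;
}
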
51709diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
51710index d39106c..bfe13a4 100644
51711--- a/drivers/usb/core/hcd.c
51712+++ b/drivers/usb/core/hcd.c
51713@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
51714 */
51715 usb_get_urb(urb);
51716 atomic_inc(&urb->use_count);
51717- atomic_inc(&urb->dev->urbnum);
51718+ atomic_inc_unchecked(&urb->dev->urbnum);
51719 usbmon_urb_submit(&hcd->self, urb);
51720
51721 /* NOTE requirements on root-hub callers (usbfs and the hub
51722@@ -1576,7 +1576,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
51723 urb->hcpriv = NULL;
51724 INIT_LIST_HEAD(&urb->urb_list);
51725 atomic_dec(&urb->use_count);
51726- atomic_dec(&urb->dev->urbnum);
51727+ atomic_dec_unchecked(&urb->dev->urbnum);
51728 if (atomic_read(&urb->reject))
51729 wake_up(&usb_kill_urb_queue);
51730 usb_put_urb(urb);
51731diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
51732index ebcd3bf..be93a64 100644
51733--- a/drivers/usb/core/hub.c
51734+++ b/drivers/usb/core/hub.c
51735@@ -27,6 +27,7 @@
51736 #include <linux/freezer.h>
51737 #include <linux/random.h>
51738 #include <linux/pm_qos.h>
51739+#include <linux/grsecurity.h>
51740
51741 #include <asm/uaccess.h>
51742 #include <asm/byteorder.h>
51743@@ -4437,6 +4438,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
51744 goto done;
51745 return;
51746 }
51747+
51748+ if (gr_handle_new_usb())
51749+ goto done;
51750+
51751 if (hub_is_superspeed(hub->hdev))
51752 unit_load = 150;
51753 else
51754diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
51755index bb31597..6c5ef8b 100644
51756--- a/drivers/usb/core/message.c
51757+++ b/drivers/usb/core/message.c
51758@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
51759 * Return: If successful, the number of bytes transferred. Otherwise, a negative
51760 * error number.
51761 */
51762-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
51763+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
51764 __u8 requesttype, __u16 value, __u16 index, void *data,
51765 __u16 size, int timeout)
51766 {
51767@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
51768 * If successful, 0. Otherwise a negative error number. The number of actual
51769 * bytes transferred will be stored in the @actual_length paramater.
51770 */
51771-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
51772+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
51773 void *data, int len, int *actual_length, int timeout)
51774 {
51775 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
51776@@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
51777 * bytes transferred will be stored in the @actual_length paramater.
51778 *
51779 */
51780-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
51781+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
51782 void *data, int len, int *actual_length, int timeout)
51783 {
51784 struct urb *urb;
51785diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
51786index 52a97ad..e73330f 100644
51787--- a/drivers/usb/core/sysfs.c
51788+++ b/drivers/usb/core/sysfs.c
51789@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
51790 struct usb_device *udev;
51791
51792 udev = to_usb_device(dev);
51793- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
51794+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
51795 }
51796 static DEVICE_ATTR_RO(urbnum);
51797
51798diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
51799index 4d11449..f4ccabf 100644
51800--- a/drivers/usb/core/usb.c
51801+++ b/drivers/usb/core/usb.c
51802@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
51803 set_dev_node(&dev->dev, dev_to_node(bus->controller));
51804 dev->state = USB_STATE_ATTACHED;
51805 dev->lpm_disable_count = 1;
51806- atomic_set(&dev->urbnum, 0);
51807+ atomic_set_unchecked(&dev->urbnum, 0);
51808
51809 INIT_LIST_HEAD(&dev->ep0.urb_list);
51810 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
51811diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
51812index 02e44fc..3c4fe64 100644
51813--- a/drivers/usb/dwc3/gadget.c
51814+++ b/drivers/usb/dwc3/gadget.c
51815@@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
51816 if (!usb_endpoint_xfer_isoc(desc))
51817 return 0;
51818
51819- memset(&trb_link, 0, sizeof(trb_link));
51820-
51821 /* Link TRB for ISOC. The HWO bit is never reset */
51822 trb_st_hw = &dep->trb_pool[0];
51823
51824diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
51825index 8cfc319..4868255 100644
51826--- a/drivers/usb/early/ehci-dbgp.c
51827+++ b/drivers/usb/early/ehci-dbgp.c
51828@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
51829
51830 #ifdef CONFIG_KGDB
51831 static struct kgdb_io kgdbdbgp_io_ops;
51832-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
51833+static struct kgdb_io kgdbdbgp_io_ops_console;
51834+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
51835 #else
51836 #define dbgp_kgdb_mode (0)
51837 #endif
51838@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
51839 .write_char = kgdbdbgp_write_char,
51840 };
51841
51842+static struct kgdb_io kgdbdbgp_io_ops_console = {
51843+ .name = "kgdbdbgp",
51844+ .read_char = kgdbdbgp_read_char,
51845+ .write_char = kgdbdbgp_write_char,
51846+ .is_console = 1
51847+};
51848+
51849 static int kgdbdbgp_wait_time;
51850
51851 static int __init kgdbdbgp_parse_config(char *str)
51852@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
51853 ptr++;
51854 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
51855 }
51856- kgdb_register_io_module(&kgdbdbgp_io_ops);
51857- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
51858+ if (early_dbgp_console.index != -1)
51859+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
51860+ else
51861+ kgdb_register_io_module(&kgdbdbgp_io_ops);
51862
51863 return 0;
51864 }
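
Rather than registering kgdbdbgp_io_ops and then patching its is_console field afterwards, the ehci-dbgp hunk builds two static variants and registers whichever applies. Picking a pre-built ops table at init time instead of mutating a registered one is what allows such structures to be made read-only. The shape of the change, as a runnable sketch (register_io/io_ops are illustrative stand-ins):

/* const_ops.c - pick a pre-built ops table instead of patching one.
 * Build: cc -std=c11 const_ops.c -o const_ops */
#include <stdio.h>

struct io_ops {
    const char *name;
    int is_console;
};

/* Two immutable variants replace one runtime-mutated struct. */
static const struct io_ops dbgp_ops         = { "kgdbdbgp", 0 };
static const struct io_ops dbgp_ops_console = { "kgdbdbgp", 1 };

static void register_io(const struct io_ops *ops)
{
    printf("registered %s (console=%d)\n", ops->name, ops->is_console);
}

int main(void)
{
    int have_console = 1;  /* stand-in for early_dbgp_console.index != -1 */
    register_io(have_console ? &dbgp_ops_console : &dbgp_ops);
    return 0;
}
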
51865diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
51866index b369292..9f3ba40 100644
51867--- a/drivers/usb/gadget/u_serial.c
51868+++ b/drivers/usb/gadget/u_serial.c
51869@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
51870 spin_lock_irq(&port->port_lock);
51871
51872 /* already open? Great. */
51873- if (port->port.count) {
51874+ if (atomic_read(&port->port.count)) {
51875 status = 0;
51876- port->port.count++;
51877+ atomic_inc(&port->port.count);
51878
51879 /* currently opening/closing? wait ... */
51880 } else if (port->openclose) {
51881@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
51882 tty->driver_data = port;
51883 port->port.tty = tty;
51884
51885- port->port.count = 1;
51886+ atomic_set(&port->port.count, 1);
51887 port->openclose = false;
51888
51889 /* if connected, start the I/O stream */
51890@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
51891
51892 spin_lock_irq(&port->port_lock);
51893
51894- if (port->port.count != 1) {
51895- if (port->port.count == 0)
51896+ if (atomic_read(&port->port.count) != 1) {
51897+ if (atomic_read(&port->port.count) == 0)
51898 WARN_ON(1);
51899 else
51900- --port->port.count;
51901+ atomic_dec(&port->port.count);
51902 goto exit;
51903 }
51904
51905@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
51906 * and sleep if necessary
51907 */
51908 port->openclose = true;
51909- port->port.count = 0;
51910+ atomic_set(&port->port.count, 0);
51911
51912 gser = port->port_usb;
51913 if (gser && gser->disconnect)
51914@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
51915 int cond;
51916
51917 spin_lock_irq(&port->port_lock);
51918- cond = (port->port.count == 0) && !port->openclose;
51919+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
51920 spin_unlock_irq(&port->port_lock);
51921 return cond;
51922 }
51923@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
51924 /* if it's already open, start I/O ... and notify the serial
51925 * protocol about open/close status (connect/disconnect).
51926 */
51927- if (port->port.count) {
51928+ if (atomic_read(&port->port.count)) {
51929 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
51930 gs_start_io(port);
51931 if (gser->connect)
51932@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
51933
51934 port->port_usb = NULL;
51935 gser->ioport = NULL;
51936- if (port->port.count > 0 || port->openclose) {
51937+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
51938 wake_up_interruptible(&port->drain_wait);
51939 if (port->port.tty)
51940 tty_hangup(port->port.tty);
51941@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
51942
51943 /* finally, free any unused/unusable I/O buffers */
51944 spin_lock_irqsave(&port->port_lock, flags);
51945- if (port->port.count == 0 && !port->openclose)
51946+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
51947 gs_buf_free(&port->port_write_buf);
51948 gs_free_requests(gser->out, &port->read_pool, NULL);
51949 gs_free_requests(gser->out, &port->read_queue, NULL);
51950diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
51951index 1bb85be..29e28d9 100644
51952--- a/drivers/usb/host/ehci-hub.c
51953+++ b/drivers/usb/host/ehci-hub.c
51954@@ -780,7 +780,7 @@ static struct urb *request_single_step_set_feature_urb(
51955 urb->transfer_flags = URB_DIR_IN;
51956 usb_get_urb(urb);
51957 atomic_inc(&urb->use_count);
51958- atomic_inc(&urb->dev->urbnum);
51959+ atomic_inc_unchecked(&urb->dev->urbnum);
51960 urb->setup_dma = dma_map_single(
51961 hcd->self.controller,
51962 urb->setup_packet,
51963@@ -847,7 +847,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
51964 urb->status = -EINPROGRESS;
51965 usb_get_urb(urb);
51966 atomic_inc(&urb->use_count);
51967- atomic_inc(&urb->dev->urbnum);
51968+ atomic_inc_unchecked(&urb->dev->urbnum);
51969 retval = submit_single_step_set_feature(hcd, urb, 0);
51970 if (!retval && !wait_for_completion_timeout(&done,
51971 msecs_to_jiffies(2000))) {
51972diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
51973index ba6a5d6..f88f7f3 100644
51974--- a/drivers/usb/misc/appledisplay.c
51975+++ b/drivers/usb/misc/appledisplay.c
51976@@ -83,7 +83,7 @@ struct appledisplay {
51977 spinlock_t lock;
51978 };
51979
51980-static atomic_t count_displays = ATOMIC_INIT(0);
51981+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
51982 static struct workqueue_struct *wq;
51983
51984 static void appledisplay_complete(struct urb *urb)
51985@@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
51986
51987 /* Register backlight device */
51988 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
51989- atomic_inc_return(&count_displays) - 1);
51990+ atomic_inc_return_unchecked(&count_displays) - 1);
51991 memset(&props, 0, sizeof(struct backlight_properties));
51992 props.type = BACKLIGHT_RAW;
51993 props.max_brightness = 0xff;
51994diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
51995index c69bb50..215ef37 100644
51996--- a/drivers/usb/serial/console.c
51997+++ b/drivers/usb/serial/console.c
51998@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
51999
52000 info->port = port;
52001
52002- ++port->port.count;
52003+ atomic_inc(&port->port.count);
52004 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
52005 if (serial->type->set_termios) {
52006 /*
52007@@ -170,7 +170,7 @@ static int usb_console_setup(struct console *co, char *options)
52008 }
52009 /* Now that any required fake tty operations are completed restore
52010 * the tty port count */
52011- --port->port.count;
52012+ atomic_dec(&port->port.count);
52013 /* The console is special in terms of closing the device so
52014 * indicate this port is now acting as a system console. */
52015 port->port.console = 1;
52016@@ -183,7 +183,7 @@ static int usb_console_setup(struct console *co, char *options)
52017 free_tty:
52018 kfree(tty);
52019 reset_open_count:
52020- port->port.count = 0;
52021+ atomic_set(&port->port.count, 0);
52022 usb_autopm_put_interface(serial->interface);
52023 error_get_interface:
52024 usb_serial_put(serial);
52025@@ -194,7 +194,7 @@ static int usb_console_setup(struct console *co, char *options)
52026 static void usb_console_write(struct console *co,
52027 const char *buf, unsigned count)
52028 {
52029- static struct usbcons_info *info = &usbcons_info;
52030+ struct usbcons_info *info = &usbcons_info;
52031 struct usb_serial_port *port = info->port;
52032 struct usb_serial *serial;
52033 int retval = -ENODEV;
52034diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
52035index 75f70f0..d467e1a 100644
52036--- a/drivers/usb/storage/usb.h
52037+++ b/drivers/usb/storage/usb.h
52038@@ -63,7 +63,7 @@ struct us_unusual_dev {
52039 __u8 useProtocol;
52040 __u8 useTransport;
52041 int (*initFunction)(struct us_data *);
52042-};
52043+} __do_const;
52044
52045
52046 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
52047diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
52048index e614f02..3fd60e2 100644
52049--- a/drivers/usb/wusbcore/wa-hc.h
52050+++ b/drivers/usb/wusbcore/wa-hc.h
52051@@ -225,7 +225,7 @@ struct wahc {
52052 spinlock_t xfer_list_lock;
52053 struct work_struct xfer_enqueue_work;
52054 struct work_struct xfer_error_work;
52055- atomic_t xfer_id_count;
52056+ atomic_unchecked_t xfer_id_count;
52057
52058 kernel_ulong_t quirks;
52059 };
52060@@ -287,7 +287,7 @@ static inline void wa_init(struct wahc *wa)
52061 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
52062 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
52063 wa->dto_in_use = 0;
52064- atomic_set(&wa->xfer_id_count, 1);
52065+ atomic_set_unchecked(&wa->xfer_id_count, 1);
52066 }
52067
52068 /**
52069diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
52070index ed5abe8..7036400 100644
52071--- a/drivers/usb/wusbcore/wa-xfer.c
52072+++ b/drivers/usb/wusbcore/wa-xfer.c
52073@@ -312,7 +312,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
52074 */
52075 static void wa_xfer_id_init(struct wa_xfer *xfer)
52076 {
52077- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
52078+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
52079 }
52080
52081 /* Return the xfer's ID. */
52082diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
52083index 1eab4ac..e21efc9 100644
52084--- a/drivers/vfio/vfio.c
52085+++ b/drivers/vfio/vfio.c
52086@@ -488,7 +488,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
52087 return 0;
52088
52089 /* TODO Prevent device auto probing */
52090- WARN("Device %s added to live group %d!\n", dev_name(dev),
52091+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
52092 iommu_group_id(group->iommu_group));
52093
52094 return 0;
52095diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
52096index 5174eba..451e6bc 100644
52097--- a/drivers/vhost/vringh.c
52098+++ b/drivers/vhost/vringh.c
52099@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
52100 /* Userspace access helpers: in this case, addresses are really userspace. */
52101 static inline int getu16_user(u16 *val, const u16 *p)
52102 {
52103- return get_user(*val, (__force u16 __user *)p);
52104+ return get_user(*val, (u16 __force_user *)p);
52105 }
52106
52107 static inline int putu16_user(u16 *p, u16 val)
52108 {
52109- return put_user(val, (__force u16 __user *)p);
52110+ return put_user(val, (u16 __force_user *)p);
52111 }
52112
52113 static inline int copydesc_user(void *dst, const void *src, size_t len)
52114 {
52115- return copy_from_user(dst, (__force void __user *)src, len) ?
52116+ return copy_from_user(dst, (void __force_user *)src, len) ?
52117 -EFAULT : 0;
52118 }
52119
52120@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
52121 const struct vring_used_elem *src,
52122 unsigned int num)
52123 {
52124- return copy_to_user((__force void __user *)dst, src,
52125+ return copy_to_user((void __force_user *)dst, src,
52126 sizeof(*dst) * num) ? -EFAULT : 0;
52127 }
52128
52129 static inline int xfer_from_user(void *src, void *dst, size_t len)
52130 {
52131- return copy_from_user(dst, (__force void __user *)src, len) ?
52132+ return copy_from_user(dst, (void __force_user *)src, len) ?
52133 -EFAULT : 0;
52134 }
52135
52136 static inline int xfer_to_user(void *dst, void *src, size_t len)
52137 {
52138- return copy_to_user((__force void __user *)dst, src, len) ?
52139+ return copy_to_user((void __force_user *)dst, src, len) ?
52140 -EFAULT : 0;
52141 }
52142
52143@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
52144 vrh->last_used_idx = 0;
52145 vrh->vring.num = num;
52146 /* vring expects kernel addresses, but only used via accessors. */
52147- vrh->vring.desc = (__force struct vring_desc *)desc;
52148- vrh->vring.avail = (__force struct vring_avail *)avail;
52149- vrh->vring.used = (__force struct vring_used *)used;
52150+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
52151+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
52152+ vrh->vring.used = (__force_kernel struct vring_used *)used;
52153 return 0;
52154 }
52155 EXPORT_SYMBOL(vringh_init_user);
52156@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
52157
52158 static inline int putu16_kern(u16 *p, u16 val)
52159 {
52160- ACCESS_ONCE(*p) = val;
52161+ ACCESS_ONCE_RW(*p) = val;
52162 return 0;
52163 }
52164
52165diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
52166index 1b0b233..6f34c2c 100644
52167--- a/drivers/video/arcfb.c
52168+++ b/drivers/video/arcfb.c
52169@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
52170 return -ENOSPC;
52171
52172 err = 0;
52173- if ((count + p) > fbmemlength) {
52174+ if (count > (fbmemlength - p)) {
52175 count = fbmemlength - p;
52176 err = -ENOSPC;
52177 }
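
The arcfb hunk is the standard overflow-safe bounds check: with unsigned arithmetic, count + p can wrap around and slip under fbmemlength, while count > fbmemlength - p cannot wrap, because p was already bounded by fbmemlength a few lines earlier. A small demonstration:

/* addcheck.c - additive bounds checks can wrap; subtractive ones cannot.
 * Build: cc -std=c11 addcheck.c -o addcheck */
#include <limits.h>
#include <stdio.h>

int main(void)
{
    unsigned long fbmemlength = 0x1000;
    unsigned long p = 0x800;                 /* already validated <= fbmemlength */
    unsigned long count = ULONG_MAX - 0x100; /* oversized write request          */

    /* Old form: count + p wraps to a small value, the check passes. */
    printf("additive check rejects:    %d\n", (count + p) > fbmemlength);
    /* New form: cannot wrap, the oversized request is caught.       */
    printf("subtractive check rejects: %d\n", count > (fbmemlength - p));
    return 0;
}
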
52178diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
52179index 12ca031..84a8a74 100644
52180--- a/drivers/video/aty/aty128fb.c
52181+++ b/drivers/video/aty/aty128fb.c
52182@@ -149,7 +149,7 @@ enum {
52183 };
52184
52185 /* Must match above enum */
52186-static char * const r128_family[] = {
52187+static const char * const r128_family[] = {
52188 "AGP",
52189 "PCI",
52190 "PRO AGP",
52191diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
52192index 28fafbf..ae91651 100644
52193--- a/drivers/video/aty/atyfb_base.c
52194+++ b/drivers/video/aty/atyfb_base.c
52195@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
52196 par->accel_flags = var->accel_flags; /* hack */
52197
52198 if (var->accel_flags) {
52199- info->fbops->fb_sync = atyfb_sync;
52200+ pax_open_kernel();
52201+ *(void **)&info->fbops->fb_sync = atyfb_sync;
52202+ pax_close_kernel();
52203 info->flags &= ~FBINFO_HWACCEL_DISABLED;
52204 } else {
52205- info->fbops->fb_sync = NULL;
52206+ pax_open_kernel();
52207+ *(void **)&info->fbops->fb_sync = NULL;
52208+ pax_close_kernel();
52209 info->flags |= FBINFO_HWACCEL_DISABLED;
52210 }
52211
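
The fb_sync hunk above, like the fb_cursor and fb_mmap hunks that follow, uses the PaX constify pattern: ops structures full of function pointers are made read-only at runtime, so a legitimate writer must bracket the store with pax_open_kernel()/pax_close_kernel() (grsecurity-specific helpers that temporarily lift write protection) and write through a void ** cast to sidestep the const qualifier. A userspace analogy using mprotect, assuming a POSIX system (the kernel mechanism differs, but the discipline is the same):

/* ro_ops.c - keep a function-pointer table read-only, lifting
 * protection only around a deliberate update.
 * Build: cc -std=c11 ro_ops.c -o ro_ops */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct fb_ops { int (*fb_sync)(void); };

static int real_sync(void) { return 42; }

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    /* Page-aligned, initially writable table. */
    struct fb_ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ops == MAP_FAILED)
        return 1;
    ops->fb_sync = NULL;
    mprotect(ops, pg, PROT_READ);              /* "constify" the table */

    mprotect(ops, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel()    */
    ops->fb_sync = real_sync;                  /* the guarded store    */
    mprotect(ops, pg, PROT_READ);              /* pax_close_kernel()   */

    printf("fb_sync() = %d\n", ops->fb_sync());
    munmap(ops, pg);
    return 0;
}
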
52212diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
52213index 95ec042..e6affdd 100644
52214--- a/drivers/video/aty/mach64_cursor.c
52215+++ b/drivers/video/aty/mach64_cursor.c
52216@@ -7,6 +7,7 @@
52217 #include <linux/string.h>
52218
52219 #include <asm/io.h>
52220+#include <asm/pgtable.h>
52221
52222 #ifdef __sparc__
52223 #include <asm/fbio.h>
52224@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
52225 info->sprite.buf_align = 16; /* and 64 lines tall. */
52226 info->sprite.flags = FB_PIXMAP_IO;
52227
52228- info->fbops->fb_cursor = atyfb_cursor;
52229+ pax_open_kernel();
52230+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
52231+ pax_close_kernel();
52232
52233 return 0;
52234 }
52235diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
52236index 7592cc2..92feb56 100644
52237--- a/drivers/video/backlight/kb3886_bl.c
52238+++ b/drivers/video/backlight/kb3886_bl.c
52239@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
52240 static unsigned long kb3886bl_flags;
52241 #define KB3886BL_SUSPENDED 0x01
52242
52243-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
52244+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
52245 {
52246 .ident = "Sahara Touch-iT",
52247 .matches = {
52248diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
52249index 900aa4e..6d49418 100644
52250--- a/drivers/video/fb_defio.c
52251+++ b/drivers/video/fb_defio.c
52252@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
52253
52254 BUG_ON(!fbdefio);
52255 mutex_init(&fbdefio->lock);
52256- info->fbops->fb_mmap = fb_deferred_io_mmap;
52257+ pax_open_kernel();
52258+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
52259+ pax_close_kernel();
52260 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
52261 INIT_LIST_HEAD(&fbdefio->pagelist);
52262 if (fbdefio->delay == 0) /* set a default of 1 s */
52263@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
52264 page->mapping = NULL;
52265 }
52266
52267- info->fbops->fb_mmap = NULL;
52268+ *(void **)&info->fbops->fb_mmap = NULL;
52269 mutex_destroy(&fbdefio->lock);
52270 }
52271 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
52272diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
52273index 010d191..7b8235a 100644
52274--- a/drivers/video/fbmem.c
52275+++ b/drivers/video/fbmem.c
52276@@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
52277 image->dx += image->width + 8;
52278 }
52279 } else if (rotate == FB_ROTATE_UD) {
52280- for (x = 0; x < num && image->dx >= 0; x++) {
52281+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
52282 info->fbops->fb_imageblit(info, image);
52283 image->dx -= image->width + 8;
52284 }
52285@@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
52286 image->dy += image->height + 8;
52287 }
52288 } else if (rotate == FB_ROTATE_CCW) {
52289- for (x = 0; x < num && image->dy >= 0; x++) {
52290+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
52291 info->fbops->fb_imageblit(info, image);
52292 image->dy -= image->height + 8;
52293 }
52294@@ -1179,7 +1179,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
52295 return -EFAULT;
52296 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
52297 return -EINVAL;
52298- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
52299+ if (con2fb.framebuffer >= FB_MAX)
52300 return -EINVAL;
52301 if (!registered_fb[con2fb.framebuffer])
52302 request_module("fb%d", con2fb.framebuffer);
52303@@ -1300,7 +1300,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
52304 __u32 data;
52305 int err;
52306
52307- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
52308+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
52309
52310 data = (__u32) (unsigned long) fix->smem_start;
52311 err |= put_user(data, &fix32->smem_start);
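
In fb_do_show_logo, image->dx and image->dy are unsigned (__u32), so the original loop conditions image->dx >= 0 are tautologies and the rotated-logo loops could only stop via the count limit; the __s32 cast restores the intended "stepped past the edge" test once the subtraction wraps. The same file's other hunks drop the always-false con2fb.framebuffer < 0 half of a range check and pass the fix32->id array itself to copy_to_user instead of a pointer to it (same address, cleaner typing). The signedness pitfall in a few lines:

/* u_ge_zero.c - an unsigned ">= 0" test is always true.
 * Build: cc -std=c11 -Wall u_ge_zero.c -o u_ge_zero
 * (gcc/clang warn: comparison of unsigned expression >= 0) */
#include <stdio.h>

int main(void)
{
    unsigned int dx = 4;

    dx -= 8;                 /* wraps to a huge positive value          */
    printf("dx >= 0      -> %d\n", dx >= 0);       /* always 1          */
    printf("(int)dx >= 0 -> %d\n", (int)dx >= 0);  /* 0 on two's-compl. */
    return 0;
}
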
52312diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
52313index 130708f..cdac1a9 100644
52314--- a/drivers/video/hyperv_fb.c
52315+++ b/drivers/video/hyperv_fb.c
52316@@ -233,7 +233,7 @@ static uint screen_fb_size;
52317 static inline int synthvid_send(struct hv_device *hdev,
52318 struct synthvid_msg *msg)
52319 {
52320- static atomic64_t request_id = ATOMIC64_INIT(0);
52321+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
52322 int ret;
52323
52324 msg->pipe_hdr.type = PIPE_MSG_DATA;
52325@@ -241,7 +241,7 @@ static inline int synthvid_send(struct hv_device *hdev,
52326
52327 ret = vmbus_sendpacket(hdev->channel, msg,
52328 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
52329- atomic64_inc_return(&request_id),
52330+ atomic64_inc_return_unchecked(&request_id),
52331 VM_PKT_DATA_INBAND, 0);
52332
52333 if (ret)
52334diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
52335index 7672d2e..b56437f 100644
52336--- a/drivers/video/i810/i810_accel.c
52337+++ b/drivers/video/i810/i810_accel.c
52338@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
52339 }
52340 }
52341 printk("ringbuffer lockup!!!\n");
52342+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
52343 i810_report_error(mmio);
52344 par->dev_flags |= LOCKUP;
52345 info->pixmap.scan_align = 1;
52346diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
52347index 3c14e43..2630570 100644
52348--- a/drivers/video/logo/logo_linux_clut224.ppm
52349+++ b/drivers/video/logo/logo_linux_clut224.ppm
52350@@ -2,1603 +2,1123 @@ P3
52351 # Standard 224-color Linux logo
52352 80 80
52353 255
 [80x80 224-color Linux boot-logo pixel data omitted: per the hunk header, this change removes all 1603 pixel rows of the stock PPM logo and adds 1123 rows of a replacement image; the raw RGB triplets carry no reviewable content.]
53067- 2 2 6 2 2 6 2 2 6 2 2 6
53068- 2 2 6 2 2 6 2 2 6 38 38 38
53069- 86 86 86 50 50 50 22 22 22 6 6 6
53070- 0 0 0 0 0 0 0 0 0 0 0 0
53071- 0 0 0 0 0 0 0 0 0 0 0 0
53072- 0 0 0 0 0 0 0 0 0 0 0 0
53073- 0 0 0 0 0 0 0 0 0 0 0 0
53074- 0 0 0 0 0 0 0 0 0 0 0 0
53075- 0 0 0 0 0 0 0 0 0 0 0 0
53076- 0 0 0 0 0 0 0 0 0 0 0 0
53077- 0 0 0 0 0 0 0 0 0 0 0 0
53078- 0 0 0 0 0 0 0 0 0 0 0 0
53079- 6 6 6 14 14 14 38 38 38 82 82 82
53080- 34 34 34 2 2 6 2 2 6 2 2 6
53081- 42 42 42 195 195 195 246 246 246 253 253 253
53082-253 253 253 253 253 253 253 253 253 250 250 250
53083-242 242 242 242 242 242 250 250 250 253 253 253
53084-253 253 253 253 253 253 253 253 253 253 253 253
53085-253 253 253 250 250 250 246 246 246 238 238 238
53086-226 226 226 231 231 231 101 101 101 6 6 6
53087- 2 2 6 2 2 6 2 2 6 2 2 6
53088- 2 2 6 2 2 6 2 2 6 2 2 6
53089- 38 38 38 82 82 82 42 42 42 14 14 14
53090- 6 6 6 0 0 0 0 0 0 0 0 0
53091- 0 0 0 0 0 0 0 0 0 0 0 0
53092- 0 0 0 0 0 0 0 0 0 0 0 0
53093- 0 0 0 0 0 0 0 0 0 0 0 0
53094- 0 0 0 0 0 0 0 0 0 0 0 0
53095- 0 0 0 0 0 0 0 0 0 0 0 0
53096- 0 0 0 0 0 0 0 0 0 0 0 0
53097- 0 0 0 0 0 0 0 0 0 0 0 0
53098- 0 0 0 0 0 0 0 0 0 0 0 0
53099- 10 10 10 26 26 26 62 62 62 66 66 66
53100- 2 2 6 2 2 6 2 2 6 6 6 6
53101- 70 70 70 170 170 170 206 206 206 234 234 234
53102-246 246 246 250 250 250 250 250 250 238 238 238
53103-226 226 226 231 231 231 238 238 238 250 250 250
53104-250 250 250 250 250 250 246 246 246 231 231 231
53105-214 214 214 206 206 206 202 202 202 202 202 202
53106-198 198 198 202 202 202 182 182 182 18 18 18
53107- 2 2 6 2 2 6 2 2 6 2 2 6
53108- 2 2 6 2 2 6 2 2 6 2 2 6
53109- 2 2 6 62 62 62 66 66 66 30 30 30
53110- 10 10 10 0 0 0 0 0 0 0 0 0
53111- 0 0 0 0 0 0 0 0 0 0 0 0
53112- 0 0 0 0 0 0 0 0 0 0 0 0
53113- 0 0 0 0 0 0 0 0 0 0 0 0
53114- 0 0 0 0 0 0 0 0 0 0 0 0
53115- 0 0 0 0 0 0 0 0 0 0 0 0
53116- 0 0 0 0 0 0 0 0 0 0 0 0
53117- 0 0 0 0 0 0 0 0 0 0 0 0
53118- 0 0 0 0 0 0 0 0 0 0 0 0
53119- 14 14 14 42 42 42 82 82 82 18 18 18
53120- 2 2 6 2 2 6 2 2 6 10 10 10
53121- 94 94 94 182 182 182 218 218 218 242 242 242
53122-250 250 250 253 253 253 253 253 253 250 250 250
53123-234 234 234 253 253 253 253 253 253 253 253 253
53124-253 253 253 253 253 253 253 253 253 246 246 246
53125-238 238 238 226 226 226 210 210 210 202 202 202
53126-195 195 195 195 195 195 210 210 210 158 158 158
53127- 6 6 6 14 14 14 50 50 50 14 14 14
53128- 2 2 6 2 2 6 2 2 6 2 2 6
53129- 2 2 6 6 6 6 86 86 86 46 46 46
53130- 18 18 18 6 6 6 0 0 0 0 0 0
53131- 0 0 0 0 0 0 0 0 0 0 0 0
53132- 0 0 0 0 0 0 0 0 0 0 0 0
53133- 0 0 0 0 0 0 0 0 0 0 0 0
53134- 0 0 0 0 0 0 0 0 0 0 0 0
53135- 0 0 0 0 0 0 0 0 0 0 0 0
53136- 0 0 0 0 0 0 0 0 0 0 0 0
53137- 0 0 0 0 0 0 0 0 0 0 0 0
53138- 0 0 0 0 0 0 0 0 0 6 6 6
53139- 22 22 22 54 54 54 70 70 70 2 2 6
53140- 2 2 6 10 10 10 2 2 6 22 22 22
53141-166 166 166 231 231 231 250 250 250 253 253 253
53142-253 253 253 253 253 253 253 253 253 250 250 250
53143-242 242 242 253 253 253 253 253 253 253 253 253
53144-253 253 253 253 253 253 253 253 253 253 253 253
53145-253 253 253 253 253 253 253 253 253 246 246 246
53146-231 231 231 206 206 206 198 198 198 226 226 226
53147- 94 94 94 2 2 6 6 6 6 38 38 38
53148- 30 30 30 2 2 6 2 2 6 2 2 6
53149- 2 2 6 2 2 6 62 62 62 66 66 66
53150- 26 26 26 10 10 10 0 0 0 0 0 0
53151- 0 0 0 0 0 0 0 0 0 0 0 0
53152- 0 0 0 0 0 0 0 0 0 0 0 0
53153- 0 0 0 0 0 0 0 0 0 0 0 0
53154- 0 0 0 0 0 0 0 0 0 0 0 0
53155- 0 0 0 0 0 0 0 0 0 0 0 0
53156- 0 0 0 0 0 0 0 0 0 0 0 0
53157- 0 0 0 0 0 0 0 0 0 0 0 0
53158- 0 0 0 0 0 0 0 0 0 10 10 10
53159- 30 30 30 74 74 74 50 50 50 2 2 6
53160- 26 26 26 26 26 26 2 2 6 106 106 106
53161-238 238 238 253 253 253 253 253 253 253 253 253
53162-253 253 253 253 253 253 253 253 253 253 253 253
53163-253 253 253 253 253 253 253 253 253 253 253 253
53164-253 253 253 253 253 253 253 253 253 253 253 253
53165-253 253 253 253 253 253 253 253 253 253 253 253
53166-253 253 253 246 246 246 218 218 218 202 202 202
53167-210 210 210 14 14 14 2 2 6 2 2 6
53168- 30 30 30 22 22 22 2 2 6 2 2 6
53169- 2 2 6 2 2 6 18 18 18 86 86 86
53170- 42 42 42 14 14 14 0 0 0 0 0 0
53171- 0 0 0 0 0 0 0 0 0 0 0 0
53172- 0 0 0 0 0 0 0 0 0 0 0 0
53173- 0 0 0 0 0 0 0 0 0 0 0 0
53174- 0 0 0 0 0 0 0 0 0 0 0 0
53175- 0 0 0 0 0 0 0 0 0 0 0 0
53176- 0 0 0 0 0 0 0 0 0 0 0 0
53177- 0 0 0 0 0 0 0 0 0 0 0 0
53178- 0 0 0 0 0 0 0 0 0 14 14 14
53179- 42 42 42 90 90 90 22 22 22 2 2 6
53180- 42 42 42 2 2 6 18 18 18 218 218 218
53181-253 253 253 253 253 253 253 253 253 253 253 253
53182-253 253 253 253 253 253 253 253 253 253 253 253
53183-253 253 253 253 253 253 253 253 253 253 253 253
53184-253 253 253 253 253 253 253 253 253 253 253 253
53185-253 253 253 253 253 253 253 253 253 253 253 253
53186-253 253 253 253 253 253 250 250 250 221 221 221
53187-218 218 218 101 101 101 2 2 6 14 14 14
53188- 18 18 18 38 38 38 10 10 10 2 2 6
53189- 2 2 6 2 2 6 2 2 6 78 78 78
53190- 58 58 58 22 22 22 6 6 6 0 0 0
53191- 0 0 0 0 0 0 0 0 0 0 0 0
53192- 0 0 0 0 0 0 0 0 0 0 0 0
53193- 0 0 0 0 0 0 0 0 0 0 0 0
53194- 0 0 0 0 0 0 0 0 0 0 0 0
53195- 0 0 0 0 0 0 0 0 0 0 0 0
53196- 0 0 0 0 0 0 0 0 0 0 0 0
53197- 0 0 0 0 0 0 0 0 0 0 0 0
53198- 0 0 0 0 0 0 6 6 6 18 18 18
53199- 54 54 54 82 82 82 2 2 6 26 26 26
53200- 22 22 22 2 2 6 123 123 123 253 253 253
53201-253 253 253 253 253 253 253 253 253 253 253 253
53202-253 253 253 253 253 253 253 253 253 253 253 253
53203-253 253 253 253 253 253 253 253 253 253 253 253
53204-253 253 253 253 253 253 253 253 253 253 253 253
53205-253 253 253 253 253 253 253 253 253 253 253 253
53206-253 253 253 253 253 253 253 253 253 250 250 250
53207-238 238 238 198 198 198 6 6 6 38 38 38
53208- 58 58 58 26 26 26 38 38 38 2 2 6
53209- 2 2 6 2 2 6 2 2 6 46 46 46
53210- 78 78 78 30 30 30 10 10 10 0 0 0
53211- 0 0 0 0 0 0 0 0 0 0 0 0
53212- 0 0 0 0 0 0 0 0 0 0 0 0
53213- 0 0 0 0 0 0 0 0 0 0 0 0
53214- 0 0 0 0 0 0 0 0 0 0 0 0
53215- 0 0 0 0 0 0 0 0 0 0 0 0
53216- 0 0 0 0 0 0 0 0 0 0 0 0
53217- 0 0 0 0 0 0 0 0 0 0 0 0
53218- 0 0 0 0 0 0 10 10 10 30 30 30
53219- 74 74 74 58 58 58 2 2 6 42 42 42
53220- 2 2 6 22 22 22 231 231 231 253 253 253
53221-253 253 253 253 253 253 253 253 253 253 253 253
53222-253 253 253 253 253 253 253 253 253 250 250 250
53223-253 253 253 253 253 253 253 253 253 253 253 253
53224-253 253 253 253 253 253 253 253 253 253 253 253
53225-253 253 253 253 253 253 253 253 253 253 253 253
53226-253 253 253 253 253 253 253 253 253 253 253 253
53227-253 253 253 246 246 246 46 46 46 38 38 38
53228- 42 42 42 14 14 14 38 38 38 14 14 14
53229- 2 2 6 2 2 6 2 2 6 6 6 6
53230- 86 86 86 46 46 46 14 14 14 0 0 0
53231- 0 0 0 0 0 0 0 0 0 0 0 0
53232- 0 0 0 0 0 0 0 0 0 0 0 0
53233- 0 0 0 0 0 0 0 0 0 0 0 0
53234- 0 0 0 0 0 0 0 0 0 0 0 0
53235- 0 0 0 0 0 0 0 0 0 0 0 0
53236- 0 0 0 0 0 0 0 0 0 0 0 0
53237- 0 0 0 0 0 0 0 0 0 0 0 0
53238- 0 0 0 6 6 6 14 14 14 42 42 42
53239- 90 90 90 18 18 18 18 18 18 26 26 26
53240- 2 2 6 116 116 116 253 253 253 253 253 253
53241-253 253 253 253 253 253 253 253 253 253 253 253
53242-253 253 253 253 253 253 250 250 250 238 238 238
53243-253 253 253 253 253 253 253 253 253 253 253 253
53244-253 253 253 253 253 253 253 253 253 253 253 253
53245-253 253 253 253 253 253 253 253 253 253 253 253
53246-253 253 253 253 253 253 253 253 253 253 253 253
53247-253 253 253 253 253 253 94 94 94 6 6 6
53248- 2 2 6 2 2 6 10 10 10 34 34 34
53249- 2 2 6 2 2 6 2 2 6 2 2 6
53250- 74 74 74 58 58 58 22 22 22 6 6 6
53251- 0 0 0 0 0 0 0 0 0 0 0 0
53252- 0 0 0 0 0 0 0 0 0 0 0 0
53253- 0 0 0 0 0 0 0 0 0 0 0 0
53254- 0 0 0 0 0 0 0 0 0 0 0 0
53255- 0 0 0 0 0 0 0 0 0 0 0 0
53256- 0 0 0 0 0 0 0 0 0 0 0 0
53257- 0 0 0 0 0 0 0 0 0 0 0 0
53258- 0 0 0 10 10 10 26 26 26 66 66 66
53259- 82 82 82 2 2 6 38 38 38 6 6 6
53260- 14 14 14 210 210 210 253 253 253 253 253 253
53261-253 253 253 253 253 253 253 253 253 253 253 253
53262-253 253 253 253 253 253 246 246 246 242 242 242
53263-253 253 253 253 253 253 253 253 253 253 253 253
53264-253 253 253 253 253 253 253 253 253 253 253 253
53265-253 253 253 253 253 253 253 253 253 253 253 253
53266-253 253 253 253 253 253 253 253 253 253 253 253
53267-253 253 253 253 253 253 144 144 144 2 2 6
53268- 2 2 6 2 2 6 2 2 6 46 46 46
53269- 2 2 6 2 2 6 2 2 6 2 2 6
53270- 42 42 42 74 74 74 30 30 30 10 10 10
53271- 0 0 0 0 0 0 0 0 0 0 0 0
53272- 0 0 0 0 0 0 0 0 0 0 0 0
53273- 0 0 0 0 0 0 0 0 0 0 0 0
53274- 0 0 0 0 0 0 0 0 0 0 0 0
53275- 0 0 0 0 0 0 0 0 0 0 0 0
53276- 0 0 0 0 0 0 0 0 0 0 0 0
53277- 0 0 0 0 0 0 0 0 0 0 0 0
53278- 6 6 6 14 14 14 42 42 42 90 90 90
53279- 26 26 26 6 6 6 42 42 42 2 2 6
53280- 74 74 74 250 250 250 253 253 253 253 253 253
53281-253 253 253 253 253 253 253 253 253 253 253 253
53282-253 253 253 253 253 253 242 242 242 242 242 242
53283-253 253 253 253 253 253 253 253 253 253 253 253
53284-253 253 253 253 253 253 253 253 253 253 253 253
53285-253 253 253 253 253 253 253 253 253 253 253 253
53286-253 253 253 253 253 253 253 253 253 253 253 253
53287-253 253 253 253 253 253 182 182 182 2 2 6
53288- 2 2 6 2 2 6 2 2 6 46 46 46
53289- 2 2 6 2 2 6 2 2 6 2 2 6
53290- 10 10 10 86 86 86 38 38 38 10 10 10
53291- 0 0 0 0 0 0 0 0 0 0 0 0
53292- 0 0 0 0 0 0 0 0 0 0 0 0
53293- 0 0 0 0 0 0 0 0 0 0 0 0
53294- 0 0 0 0 0 0 0 0 0 0 0 0
53295- 0 0 0 0 0 0 0 0 0 0 0 0
53296- 0 0 0 0 0 0 0 0 0 0 0 0
53297- 0 0 0 0 0 0 0 0 0 0 0 0
53298- 10 10 10 26 26 26 66 66 66 82 82 82
53299- 2 2 6 22 22 22 18 18 18 2 2 6
53300-149 149 149 253 253 253 253 253 253 253 253 253
53301-253 253 253 253 253 253 253 253 253 253 253 253
53302-253 253 253 253 253 253 234 234 234 242 242 242
53303-253 253 253 253 253 253 253 253 253 253 253 253
53304-253 253 253 253 253 253 253 253 253 253 253 253
53305-253 253 253 253 253 253 253 253 253 253 253 253
53306-253 253 253 253 253 253 253 253 253 253 253 253
53307-253 253 253 253 253 253 206 206 206 2 2 6
53308- 2 2 6 2 2 6 2 2 6 38 38 38
53309- 2 2 6 2 2 6 2 2 6 2 2 6
53310- 6 6 6 86 86 86 46 46 46 14 14 14
53311- 0 0 0 0 0 0 0 0 0 0 0 0
53312- 0 0 0 0 0 0 0 0 0 0 0 0
53313- 0 0 0 0 0 0 0 0 0 0 0 0
53314- 0 0 0 0 0 0 0 0 0 0 0 0
53315- 0 0 0 0 0 0 0 0 0 0 0 0
53316- 0 0 0 0 0 0 0 0 0 0 0 0
53317- 0 0 0 0 0 0 0 0 0 6 6 6
53318- 18 18 18 46 46 46 86 86 86 18 18 18
53319- 2 2 6 34 34 34 10 10 10 6 6 6
53320-210 210 210 253 253 253 253 253 253 253 253 253
53321-253 253 253 253 253 253 253 253 253 253 253 253
53322-253 253 253 253 253 253 234 234 234 242 242 242
53323-253 253 253 253 253 253 253 253 253 253 253 253
53324-253 253 253 253 253 253 253 253 253 253 253 253
53325-253 253 253 253 253 253 253 253 253 253 253 253
53326-253 253 253 253 253 253 253 253 253 253 253 253
53327-253 253 253 253 253 253 221 221 221 6 6 6
53328- 2 2 6 2 2 6 6 6 6 30 30 30
53329- 2 2 6 2 2 6 2 2 6 2 2 6
53330- 2 2 6 82 82 82 54 54 54 18 18 18
53331- 6 6 6 0 0 0 0 0 0 0 0 0
53332- 0 0 0 0 0 0 0 0 0 0 0 0
53333- 0 0 0 0 0 0 0 0 0 0 0 0
53334- 0 0 0 0 0 0 0 0 0 0 0 0
53335- 0 0 0 0 0 0 0 0 0 0 0 0
53336- 0 0 0 0 0 0 0 0 0 0 0 0
53337- 0 0 0 0 0 0 0 0 0 10 10 10
53338- 26 26 26 66 66 66 62 62 62 2 2 6
53339- 2 2 6 38 38 38 10 10 10 26 26 26
53340-238 238 238 253 253 253 253 253 253 253 253 253
53341-253 253 253 253 253 253 253 253 253 253 253 253
53342-253 253 253 253 253 253 231 231 231 238 238 238
53343-253 253 253 253 253 253 253 253 253 253 253 253
53344-253 253 253 253 253 253 253 253 253 253 253 253
53345-253 253 253 253 253 253 253 253 253 253 253 253
53346-253 253 253 253 253 253 253 253 253 253 253 253
53347-253 253 253 253 253 253 231 231 231 6 6 6
53348- 2 2 6 2 2 6 10 10 10 30 30 30
53349- 2 2 6 2 2 6 2 2 6 2 2 6
53350- 2 2 6 66 66 66 58 58 58 22 22 22
53351- 6 6 6 0 0 0 0 0 0 0 0 0
53352- 0 0 0 0 0 0 0 0 0 0 0 0
53353- 0 0 0 0 0 0 0 0 0 0 0 0
53354- 0 0 0 0 0 0 0 0 0 0 0 0
53355- 0 0 0 0 0 0 0 0 0 0 0 0
53356- 0 0 0 0 0 0 0 0 0 0 0 0
53357- 0 0 0 0 0 0 0 0 0 10 10 10
53358- 38 38 38 78 78 78 6 6 6 2 2 6
53359- 2 2 6 46 46 46 14 14 14 42 42 42
53360-246 246 246 253 253 253 253 253 253 253 253 253
53361-253 253 253 253 253 253 253 253 253 253 253 253
53362-253 253 253 253 253 253 231 231 231 242 242 242
53363-253 253 253 253 253 253 253 253 253 253 253 253
53364-253 253 253 253 253 253 253 253 253 253 253 253
53365-253 253 253 253 253 253 253 253 253 253 253 253
53366-253 253 253 253 253 253 253 253 253 253 253 253
53367-253 253 253 253 253 253 234 234 234 10 10 10
53368- 2 2 6 2 2 6 22 22 22 14 14 14
53369- 2 2 6 2 2 6 2 2 6 2 2 6
53370- 2 2 6 66 66 66 62 62 62 22 22 22
53371- 6 6 6 0 0 0 0 0 0 0 0 0
53372- 0 0 0 0 0 0 0 0 0 0 0 0
53373- 0 0 0 0 0 0 0 0 0 0 0 0
53374- 0 0 0 0 0 0 0 0 0 0 0 0
53375- 0 0 0 0 0 0 0 0 0 0 0 0
53376- 0 0 0 0 0 0 0 0 0 0 0 0
53377- 0 0 0 0 0 0 6 6 6 18 18 18
53378- 50 50 50 74 74 74 2 2 6 2 2 6
53379- 14 14 14 70 70 70 34 34 34 62 62 62
53380-250 250 250 253 253 253 253 253 253 253 253 253
53381-253 253 253 253 253 253 253 253 253 253 253 253
53382-253 253 253 253 253 253 231 231 231 246 246 246
53383-253 253 253 253 253 253 253 253 253 253 253 253
53384-253 253 253 253 253 253 253 253 253 253 253 253
53385-253 253 253 253 253 253 253 253 253 253 253 253
53386-253 253 253 253 253 253 253 253 253 253 253 253
53387-253 253 253 253 253 253 234 234 234 14 14 14
53388- 2 2 6 2 2 6 30 30 30 2 2 6
53389- 2 2 6 2 2 6 2 2 6 2 2 6
53390- 2 2 6 66 66 66 62 62 62 22 22 22
53391- 6 6 6 0 0 0 0 0 0 0 0 0
53392- 0 0 0 0 0 0 0 0 0 0 0 0
53393- 0 0 0 0 0 0 0 0 0 0 0 0
53394- 0 0 0 0 0 0 0 0 0 0 0 0
53395- 0 0 0 0 0 0 0 0 0 0 0 0
53396- 0 0 0 0 0 0 0 0 0 0 0 0
53397- 0 0 0 0 0 0 6 6 6 18 18 18
53398- 54 54 54 62 62 62 2 2 6 2 2 6
53399- 2 2 6 30 30 30 46 46 46 70 70 70
53400-250 250 250 253 253 253 253 253 253 253 253 253
53401-253 253 253 253 253 253 253 253 253 253 253 253
53402-253 253 253 253 253 253 231 231 231 246 246 246
53403-253 253 253 253 253 253 253 253 253 253 253 253
53404-253 253 253 253 253 253 253 253 253 253 253 253
53405-253 253 253 253 253 253 253 253 253 253 253 253
53406-253 253 253 253 253 253 253 253 253 253 253 253
53407-253 253 253 253 253 253 226 226 226 10 10 10
53408- 2 2 6 6 6 6 30 30 30 2 2 6
53409- 2 2 6 2 2 6 2 2 6 2 2 6
53410- 2 2 6 66 66 66 58 58 58 22 22 22
53411- 6 6 6 0 0 0 0 0 0 0 0 0
53412- 0 0 0 0 0 0 0 0 0 0 0 0
53413- 0 0 0 0 0 0 0 0 0 0 0 0
53414- 0 0 0 0 0 0 0 0 0 0 0 0
53415- 0 0 0 0 0 0 0 0 0 0 0 0
53416- 0 0 0 0 0 0 0 0 0 0 0 0
53417- 0 0 0 0 0 0 6 6 6 22 22 22
53418- 58 58 58 62 62 62 2 2 6 2 2 6
53419- 2 2 6 2 2 6 30 30 30 78 78 78
53420-250 250 250 253 253 253 253 253 253 253 253 253
53421-253 253 253 253 253 253 253 253 253 253 253 253
53422-253 253 253 253 253 253 231 231 231 246 246 246
53423-253 253 253 253 253 253 253 253 253 253 253 253
53424-253 253 253 253 253 253 253 253 253 253 253 253
53425-253 253 253 253 253 253 253 253 253 253 253 253
53426-253 253 253 253 253 253 253 253 253 253 253 253
53427-253 253 253 253 253 253 206 206 206 2 2 6
53428- 22 22 22 34 34 34 18 14 6 22 22 22
53429- 26 26 26 18 18 18 6 6 6 2 2 6
53430- 2 2 6 82 82 82 54 54 54 18 18 18
53431- 6 6 6 0 0 0 0 0 0 0 0 0
53432- 0 0 0 0 0 0 0 0 0 0 0 0
53433- 0 0 0 0 0 0 0 0 0 0 0 0
53434- 0 0 0 0 0 0 0 0 0 0 0 0
53435- 0 0 0 0 0 0 0 0 0 0 0 0
53436- 0 0 0 0 0 0 0 0 0 0 0 0
53437- 0 0 0 0 0 0 6 6 6 26 26 26
53438- 62 62 62 106 106 106 74 54 14 185 133 11
53439-210 162 10 121 92 8 6 6 6 62 62 62
53440-238 238 238 253 253 253 253 253 253 253 253 253
53441-253 253 253 253 253 253 253 253 253 253 253 253
53442-253 253 253 253 253 253 231 231 231 246 246 246
53443-253 253 253 253 253 253 253 253 253 253 253 253
53444-253 253 253 253 253 253 253 253 253 253 253 253
53445-253 253 253 253 253 253 253 253 253 253 253 253
53446-253 253 253 253 253 253 253 253 253 253 253 253
53447-253 253 253 253 253 253 158 158 158 18 18 18
53448- 14 14 14 2 2 6 2 2 6 2 2 6
53449- 6 6 6 18 18 18 66 66 66 38 38 38
53450- 6 6 6 94 94 94 50 50 50 18 18 18
53451- 6 6 6 0 0 0 0 0 0 0 0 0
53452- 0 0 0 0 0 0 0 0 0 0 0 0
53453- 0 0 0 0 0 0 0 0 0 0 0 0
53454- 0 0 0 0 0 0 0 0 0 0 0 0
53455- 0 0 0 0 0 0 0 0 0 0 0 0
53456- 0 0 0 0 0 0 0 0 0 6 6 6
53457- 10 10 10 10 10 10 18 18 18 38 38 38
53458- 78 78 78 142 134 106 216 158 10 242 186 14
53459-246 190 14 246 190 14 156 118 10 10 10 10
53460- 90 90 90 238 238 238 253 253 253 253 253 253
53461-253 253 253 253 253 253 253 253 253 253 253 253
53462-253 253 253 253 253 253 231 231 231 250 250 250
53463-253 253 253 253 253 253 253 253 253 253 253 253
53464-253 253 253 253 253 253 253 253 253 253 253 253
53465-253 253 253 253 253 253 253 253 253 253 253 253
53466-253 253 253 253 253 253 253 253 253 246 230 190
53467-238 204 91 238 204 91 181 142 44 37 26 9
53468- 2 2 6 2 2 6 2 2 6 2 2 6
53469- 2 2 6 2 2 6 38 38 38 46 46 46
53470- 26 26 26 106 106 106 54 54 54 18 18 18
53471- 6 6 6 0 0 0 0 0 0 0 0 0
53472- 0 0 0 0 0 0 0 0 0 0 0 0
53473- 0 0 0 0 0 0 0 0 0 0 0 0
53474- 0 0 0 0 0 0 0 0 0 0 0 0
53475- 0 0 0 0 0 0 0 0 0 0 0 0
53476- 0 0 0 6 6 6 14 14 14 22 22 22
53477- 30 30 30 38 38 38 50 50 50 70 70 70
53478-106 106 106 190 142 34 226 170 11 242 186 14
53479-246 190 14 246 190 14 246 190 14 154 114 10
53480- 6 6 6 74 74 74 226 226 226 253 253 253
53481-253 253 253 253 253 253 253 253 253 253 253 253
53482-253 253 253 253 253 253 231 231 231 250 250 250
53483-253 253 253 253 253 253 253 253 253 253 253 253
53484-253 253 253 253 253 253 253 253 253 253 253 253
53485-253 253 253 253 253 253 253 253 253 253 253 253
53486-253 253 253 253 253 253 253 253 253 228 184 62
53487-241 196 14 241 208 19 232 195 16 38 30 10
53488- 2 2 6 2 2 6 2 2 6 2 2 6
53489- 2 2 6 6 6 6 30 30 30 26 26 26
53490-203 166 17 154 142 90 66 66 66 26 26 26
53491- 6 6 6 0 0 0 0 0 0 0 0 0
53492- 0 0 0 0 0 0 0 0 0 0 0 0
53493- 0 0 0 0 0 0 0 0 0 0 0 0
53494- 0 0 0 0 0 0 0 0 0 0 0 0
53495- 0 0 0 0 0 0 0 0 0 0 0 0
53496- 6 6 6 18 18 18 38 38 38 58 58 58
53497- 78 78 78 86 86 86 101 101 101 123 123 123
53498-175 146 61 210 150 10 234 174 13 246 186 14
53499-246 190 14 246 190 14 246 190 14 238 190 10
53500-102 78 10 2 2 6 46 46 46 198 198 198
53501-253 253 253 253 253 253 253 253 253 253 253 253
53502-253 253 253 253 253 253 234 234 234 242 242 242
53503-253 253 253 253 253 253 253 253 253 253 253 253
53504-253 253 253 253 253 253 253 253 253 253 253 253
53505-253 253 253 253 253 253 253 253 253 253 253 253
53506-253 253 253 253 253 253 253 253 253 224 178 62
53507-242 186 14 241 196 14 210 166 10 22 18 6
53508- 2 2 6 2 2 6 2 2 6 2 2 6
53509- 2 2 6 2 2 6 6 6 6 121 92 8
53510-238 202 15 232 195 16 82 82 82 34 34 34
53511- 10 10 10 0 0 0 0 0 0 0 0 0
53512- 0 0 0 0 0 0 0 0 0 0 0 0
53513- 0 0 0 0 0 0 0 0 0 0 0 0
53514- 0 0 0 0 0 0 0 0 0 0 0 0
53515- 0 0 0 0 0 0 0 0 0 0 0 0
53516- 14 14 14 38 38 38 70 70 70 154 122 46
53517-190 142 34 200 144 11 197 138 11 197 138 11
53518-213 154 11 226 170 11 242 186 14 246 190 14
53519-246 190 14 246 190 14 246 190 14 246 190 14
53520-225 175 15 46 32 6 2 2 6 22 22 22
53521-158 158 158 250 250 250 253 253 253 253 253 253
53522-253 253 253 253 253 253 253 253 253 253 253 253
53523-253 253 253 253 253 253 253 253 253 253 253 253
53524-253 253 253 253 253 253 253 253 253 253 253 253
53525-253 253 253 253 253 253 253 253 253 253 253 253
53526-253 253 253 250 250 250 242 242 242 224 178 62
53527-239 182 13 236 186 11 213 154 11 46 32 6
53528- 2 2 6 2 2 6 2 2 6 2 2 6
53529- 2 2 6 2 2 6 61 42 6 225 175 15
53530-238 190 10 236 186 11 112 100 78 42 42 42
53531- 14 14 14 0 0 0 0 0 0 0 0 0
53532- 0 0 0 0 0 0 0 0 0 0 0 0
53533- 0 0 0 0 0 0 0 0 0 0 0 0
53534- 0 0 0 0 0 0 0 0 0 0 0 0
53535- 0 0 0 0 0 0 0 0 0 6 6 6
53536- 22 22 22 54 54 54 154 122 46 213 154 11
53537-226 170 11 230 174 11 226 170 11 226 170 11
53538-236 178 12 242 186 14 246 190 14 246 190 14
53539-246 190 14 246 190 14 246 190 14 246 190 14
53540-241 196 14 184 144 12 10 10 10 2 2 6
53541- 6 6 6 116 116 116 242 242 242 253 253 253
53542-253 253 253 253 253 253 253 253 253 253 253 253
53543-253 253 253 253 253 253 253 253 253 253 253 253
53544-253 253 253 253 253 253 253 253 253 253 253 253
53545-253 253 253 253 253 253 253 253 253 253 253 253
53546-253 253 253 231 231 231 198 198 198 214 170 54
53547-236 178 12 236 178 12 210 150 10 137 92 6
53548- 18 14 6 2 2 6 2 2 6 2 2 6
53549- 6 6 6 70 47 6 200 144 11 236 178 12
53550-239 182 13 239 182 13 124 112 88 58 58 58
53551- 22 22 22 6 6 6 0 0 0 0 0 0
53552- 0 0 0 0 0 0 0 0 0 0 0 0
53553- 0 0 0 0 0 0 0 0 0 0 0 0
53554- 0 0 0 0 0 0 0 0 0 0 0 0
53555- 0 0 0 0 0 0 0 0 0 10 10 10
53556- 30 30 30 70 70 70 180 133 36 226 170 11
53557-239 182 13 242 186 14 242 186 14 246 186 14
53558-246 190 14 246 190 14 246 190 14 246 190 14
53559-246 190 14 246 190 14 246 190 14 246 190 14
53560-246 190 14 232 195 16 98 70 6 2 2 6
53561- 2 2 6 2 2 6 66 66 66 221 221 221
53562-253 253 253 253 253 253 253 253 253 253 253 253
53563-253 253 253 253 253 253 253 253 253 253 253 253
53564-253 253 253 253 253 253 253 253 253 253 253 253
53565-253 253 253 253 253 253 253 253 253 253 253 253
53566-253 253 253 206 206 206 198 198 198 214 166 58
53567-230 174 11 230 174 11 216 158 10 192 133 9
53568-163 110 8 116 81 8 102 78 10 116 81 8
53569-167 114 7 197 138 11 226 170 11 239 182 13
53570-242 186 14 242 186 14 162 146 94 78 78 78
53571- 34 34 34 14 14 14 6 6 6 0 0 0
53572- 0 0 0 0 0 0 0 0 0 0 0 0
53573- 0 0 0 0 0 0 0 0 0 0 0 0
53574- 0 0 0 0 0 0 0 0 0 0 0 0
53575- 0 0 0 0 0 0 0 0 0 6 6 6
53576- 30 30 30 78 78 78 190 142 34 226 170 11
53577-239 182 13 246 190 14 246 190 14 246 190 14
53578-246 190 14 246 190 14 246 190 14 246 190 14
53579-246 190 14 246 190 14 246 190 14 246 190 14
53580-246 190 14 241 196 14 203 166 17 22 18 6
53581- 2 2 6 2 2 6 2 2 6 38 38 38
53582-218 218 218 253 253 253 253 253 253 253 253 253
53583-253 253 253 253 253 253 253 253 253 253 253 253
53584-253 253 253 253 253 253 253 253 253 253 253 253
53585-253 253 253 253 253 253 253 253 253 253 253 253
53586-250 250 250 206 206 206 198 198 198 202 162 69
53587-226 170 11 236 178 12 224 166 10 210 150 10
53588-200 144 11 197 138 11 192 133 9 197 138 11
53589-210 150 10 226 170 11 242 186 14 246 190 14
53590-246 190 14 246 186 14 225 175 15 124 112 88
53591- 62 62 62 30 30 30 14 14 14 6 6 6
53592- 0 0 0 0 0 0 0 0 0 0 0 0
53593- 0 0 0 0 0 0 0 0 0 0 0 0
53594- 0 0 0 0 0 0 0 0 0 0 0 0
53595- 0 0 0 0 0 0 0 0 0 10 10 10
53596- 30 30 30 78 78 78 174 135 50 224 166 10
53597-239 182 13 246 190 14 246 190 14 246 190 14
53598-246 190 14 246 190 14 246 190 14 246 190 14
53599-246 190 14 246 190 14 246 190 14 246 190 14
53600-246 190 14 246 190 14 241 196 14 139 102 15
53601- 2 2 6 2 2 6 2 2 6 2 2 6
53602- 78 78 78 250 250 250 253 253 253 253 253 253
53603-253 253 253 253 253 253 253 253 253 253 253 253
53604-253 253 253 253 253 253 253 253 253 253 253 253
53605-253 253 253 253 253 253 253 253 253 253 253 253
53606-250 250 250 214 214 214 198 198 198 190 150 46
53607-219 162 10 236 178 12 234 174 13 224 166 10
53608-216 158 10 213 154 11 213 154 11 216 158 10
53609-226 170 11 239 182 13 246 190 14 246 190 14
53610-246 190 14 246 190 14 242 186 14 206 162 42
53611-101 101 101 58 58 58 30 30 30 14 14 14
53612- 6 6 6 0 0 0 0 0 0 0 0 0
53613- 0 0 0 0 0 0 0 0 0 0 0 0
53614- 0 0 0 0 0 0 0 0 0 0 0 0
53615- 0 0 0 0 0 0 0 0 0 10 10 10
53616- 30 30 30 74 74 74 174 135 50 216 158 10
53617-236 178 12 246 190 14 246 190 14 246 190 14
53618-246 190 14 246 190 14 246 190 14 246 190 14
53619-246 190 14 246 190 14 246 190 14 246 190 14
53620-246 190 14 246 190 14 241 196 14 226 184 13
53621- 61 42 6 2 2 6 2 2 6 2 2 6
53622- 22 22 22 238 238 238 253 253 253 253 253 253
53623-253 253 253 253 253 253 253 253 253 253 253 253
53624-253 253 253 253 253 253 253 253 253 253 253 253
53625-253 253 253 253 253 253 253 253 253 253 253 253
53626-253 253 253 226 226 226 187 187 187 180 133 36
53627-216 158 10 236 178 12 239 182 13 236 178 12
53628-230 174 11 226 170 11 226 170 11 230 174 11
53629-236 178 12 242 186 14 246 190 14 246 190 14
53630-246 190 14 246 190 14 246 186 14 239 182 13
53631-206 162 42 106 106 106 66 66 66 34 34 34
53632- 14 14 14 6 6 6 0 0 0 0 0 0
53633- 0 0 0 0 0 0 0 0 0 0 0 0
53634- 0 0 0 0 0 0 0 0 0 0 0 0
53635- 0 0 0 0 0 0 0 0 0 6 6 6
53636- 26 26 26 70 70 70 163 133 67 213 154 11
53637-236 178 12 246 190 14 246 190 14 246 190 14
53638-246 190 14 246 190 14 246 190 14 246 190 14
53639-246 190 14 246 190 14 246 190 14 246 190 14
53640-246 190 14 246 190 14 246 190 14 241 196 14
53641-190 146 13 18 14 6 2 2 6 2 2 6
53642- 46 46 46 246 246 246 253 253 253 253 253 253
53643-253 253 253 253 253 253 253 253 253 253 253 253
53644-253 253 253 253 253 253 253 253 253 253 253 253
53645-253 253 253 253 253 253 253 253 253 253 253 253
53646-253 253 253 221 221 221 86 86 86 156 107 11
53647-216 158 10 236 178 12 242 186 14 246 186 14
53648-242 186 14 239 182 13 239 182 13 242 186 14
53649-242 186 14 246 186 14 246 190 14 246 190 14
53650-246 190 14 246 190 14 246 190 14 246 190 14
53651-242 186 14 225 175 15 142 122 72 66 66 66
53652- 30 30 30 10 10 10 0 0 0 0 0 0
53653- 0 0 0 0 0 0 0 0 0 0 0 0
53654- 0 0 0 0 0 0 0 0 0 0 0 0
53655- 0 0 0 0 0 0 0 0 0 6 6 6
53656- 26 26 26 70 70 70 163 133 67 210 150 10
53657-236 178 12 246 190 14 246 190 14 246 190 14
53658-246 190 14 246 190 14 246 190 14 246 190 14
53659-246 190 14 246 190 14 246 190 14 246 190 14
53660-246 190 14 246 190 14 246 190 14 246 190 14
53661-232 195 16 121 92 8 34 34 34 106 106 106
53662-221 221 221 253 253 253 253 253 253 253 253 253
53663-253 253 253 253 253 253 253 253 253 253 253 253
53664-253 253 253 253 253 253 253 253 253 253 253 253
53665-253 253 253 253 253 253 253 253 253 253 253 253
53666-242 242 242 82 82 82 18 14 6 163 110 8
53667-216 158 10 236 178 12 242 186 14 246 190 14
53668-246 190 14 246 190 14 246 190 14 246 190 14
53669-246 190 14 246 190 14 246 190 14 246 190 14
53670-246 190 14 246 190 14 246 190 14 246 190 14
53671-246 190 14 246 190 14 242 186 14 163 133 67
53672- 46 46 46 18 18 18 6 6 6 0 0 0
53673- 0 0 0 0 0 0 0 0 0 0 0 0
53674- 0 0 0 0 0 0 0 0 0 0 0 0
53675- 0 0 0 0 0 0 0 0 0 10 10 10
53676- 30 30 30 78 78 78 163 133 67 210 150 10
53677-236 178 12 246 186 14 246 190 14 246 190 14
53678-246 190 14 246 190 14 246 190 14 246 190 14
53679-246 190 14 246 190 14 246 190 14 246 190 14
53680-246 190 14 246 190 14 246 190 14 246 190 14
53681-241 196 14 215 174 15 190 178 144 253 253 253
53682-253 253 253 253 253 253 253 253 253 253 253 253
53683-253 253 253 253 253 253 253 253 253 253 253 253
53684-253 253 253 253 253 253 253 253 253 253 253 253
53685-253 253 253 253 253 253 253 253 253 218 218 218
53686- 58 58 58 2 2 6 22 18 6 167 114 7
53687-216 158 10 236 178 12 246 186 14 246 190 14
53688-246 190 14 246 190 14 246 190 14 246 190 14
53689-246 190 14 246 190 14 246 190 14 246 190 14
53690-246 190 14 246 190 14 246 190 14 246 190 14
53691-246 190 14 246 186 14 242 186 14 190 150 46
53692- 54 54 54 22 22 22 6 6 6 0 0 0
53693- 0 0 0 0 0 0 0 0 0 0 0 0
53694- 0 0 0 0 0 0 0 0 0 0 0 0
53695- 0 0 0 0 0 0 0 0 0 14 14 14
53696- 38 38 38 86 86 86 180 133 36 213 154 11
53697-236 178 12 246 186 14 246 190 14 246 190 14
53698-246 190 14 246 190 14 246 190 14 246 190 14
53699-246 190 14 246 190 14 246 190 14 246 190 14
53700-246 190 14 246 190 14 246 190 14 246 190 14
53701-246 190 14 232 195 16 190 146 13 214 214 214
53702-253 253 253 253 253 253 253 253 253 253 253 253
53703-253 253 253 253 253 253 253 253 253 253 253 253
53704-253 253 253 253 253 253 253 253 253 253 253 253
53705-253 253 253 250 250 250 170 170 170 26 26 26
53706- 2 2 6 2 2 6 37 26 9 163 110 8
53707-219 162 10 239 182 13 246 186 14 246 190 14
53708-246 190 14 246 190 14 246 190 14 246 190 14
53709-246 190 14 246 190 14 246 190 14 246 190 14
53710-246 190 14 246 190 14 246 190 14 246 190 14
53711-246 186 14 236 178 12 224 166 10 142 122 72
53712- 46 46 46 18 18 18 6 6 6 0 0 0
53713- 0 0 0 0 0 0 0 0 0 0 0 0
53714- 0 0 0 0 0 0 0 0 0 0 0 0
53715- 0 0 0 0 0 0 6 6 6 18 18 18
53716- 50 50 50 109 106 95 192 133 9 224 166 10
53717-242 186 14 246 190 14 246 190 14 246 190 14
53718-246 190 14 246 190 14 246 190 14 246 190 14
53719-246 190 14 246 190 14 246 190 14 246 190 14
53720-246 190 14 246 190 14 246 190 14 246 190 14
53721-242 186 14 226 184 13 210 162 10 142 110 46
53722-226 226 226 253 253 253 253 253 253 253 253 253
53723-253 253 253 253 253 253 253 253 253 253 253 253
53724-253 253 253 253 253 253 253 253 253 253 253 253
53725-198 198 198 66 66 66 2 2 6 2 2 6
53726- 2 2 6 2 2 6 50 34 6 156 107 11
53727-219 162 10 239 182 13 246 186 14 246 190 14
53728-246 190 14 246 190 14 246 190 14 246 190 14
53729-246 190 14 246 190 14 246 190 14 246 190 14
53730-246 190 14 246 190 14 246 190 14 242 186 14
53731-234 174 13 213 154 11 154 122 46 66 66 66
53732- 30 30 30 10 10 10 0 0 0 0 0 0
53733- 0 0 0 0 0 0 0 0 0 0 0 0
53734- 0 0 0 0 0 0 0 0 0 0 0 0
53735- 0 0 0 0 0 0 6 6 6 22 22 22
53736- 58 58 58 154 121 60 206 145 10 234 174 13
53737-242 186 14 246 186 14 246 190 14 246 190 14
53738-246 190 14 246 190 14 246 190 14 246 190 14
53739-246 190 14 246 190 14 246 190 14 246 190 14
53740-246 190 14 246 190 14 246 190 14 246 190 14
53741-246 186 14 236 178 12 210 162 10 163 110 8
53742- 61 42 6 138 138 138 218 218 218 250 250 250
53743-253 253 253 253 253 253 253 253 253 250 250 250
53744-242 242 242 210 210 210 144 144 144 66 66 66
53745- 6 6 6 2 2 6 2 2 6 2 2 6
53746- 2 2 6 2 2 6 61 42 6 163 110 8
53747-216 158 10 236 178 12 246 190 14 246 190 14
53748-246 190 14 246 190 14 246 190 14 246 190 14
53749-246 190 14 246 190 14 246 190 14 246 190 14
53750-246 190 14 239 182 13 230 174 11 216 158 10
53751-190 142 34 124 112 88 70 70 70 38 38 38
53752- 18 18 18 6 6 6 0 0 0 0 0 0
53753- 0 0 0 0 0 0 0 0 0 0 0 0
53754- 0 0 0 0 0 0 0 0 0 0 0 0
53755- 0 0 0 0 0 0 6 6 6 22 22 22
53756- 62 62 62 168 124 44 206 145 10 224 166 10
53757-236 178 12 239 182 13 242 186 14 242 186 14
53758-246 186 14 246 190 14 246 190 14 246 190 14
53759-246 190 14 246 190 14 246 190 14 246 190 14
53760-246 190 14 246 190 14 246 190 14 246 190 14
53761-246 190 14 236 178 12 216 158 10 175 118 6
53762- 80 54 7 2 2 6 6 6 6 30 30 30
53763- 54 54 54 62 62 62 50 50 50 38 38 38
53764- 14 14 14 2 2 6 2 2 6 2 2 6
53765- 2 2 6 2 2 6 2 2 6 2 2 6
53766- 2 2 6 6 6 6 80 54 7 167 114 7
53767-213 154 11 236 178 12 246 190 14 246 190 14
53768-246 190 14 246 190 14 246 190 14 246 190 14
53769-246 190 14 242 186 14 239 182 13 239 182 13
53770-230 174 11 210 150 10 174 135 50 124 112 88
53771- 82 82 82 54 54 54 34 34 34 18 18 18
53772- 6 6 6 0 0 0 0 0 0 0 0 0
53773- 0 0 0 0 0 0 0 0 0 0 0 0
53774- 0 0 0 0 0 0 0 0 0 0 0 0
53775- 0 0 0 0 0 0 6 6 6 18 18 18
53776- 50 50 50 158 118 36 192 133 9 200 144 11
53777-216 158 10 219 162 10 224 166 10 226 170 11
53778-230 174 11 236 178 12 239 182 13 239 182 13
53779-242 186 14 246 186 14 246 190 14 246 190 14
53780-246 190 14 246 190 14 246 190 14 246 190 14
53781-246 186 14 230 174 11 210 150 10 163 110 8
53782-104 69 6 10 10 10 2 2 6 2 2 6
53783- 2 2 6 2 2 6 2 2 6 2 2 6
53784- 2 2 6 2 2 6 2 2 6 2 2 6
53785- 2 2 6 2 2 6 2 2 6 2 2 6
53786- 2 2 6 6 6 6 91 60 6 167 114 7
53787-206 145 10 230 174 11 242 186 14 246 190 14
53788-246 190 14 246 190 14 246 186 14 242 186 14
53789-239 182 13 230 174 11 224 166 10 213 154 11
53790-180 133 36 124 112 88 86 86 86 58 58 58
53791- 38 38 38 22 22 22 10 10 10 6 6 6
53792- 0 0 0 0 0 0 0 0 0 0 0 0
53793- 0 0 0 0 0 0 0 0 0 0 0 0
53794- 0 0 0 0 0 0 0 0 0 0 0 0
53795- 0 0 0 0 0 0 0 0 0 14 14 14
53796- 34 34 34 70 70 70 138 110 50 158 118 36
53797-167 114 7 180 123 7 192 133 9 197 138 11
53798-200 144 11 206 145 10 213 154 11 219 162 10
53799-224 166 10 230 174 11 239 182 13 242 186 14
53800-246 186 14 246 186 14 246 186 14 246 186 14
53801-239 182 13 216 158 10 185 133 11 152 99 6
53802-104 69 6 18 14 6 2 2 6 2 2 6
53803- 2 2 6 2 2 6 2 2 6 2 2 6
53804- 2 2 6 2 2 6 2 2 6 2 2 6
53805- 2 2 6 2 2 6 2 2 6 2 2 6
53806- 2 2 6 6 6 6 80 54 7 152 99 6
53807-192 133 9 219 162 10 236 178 12 239 182 13
53808-246 186 14 242 186 14 239 182 13 236 178 12
53809-224 166 10 206 145 10 192 133 9 154 121 60
53810- 94 94 94 62 62 62 42 42 42 22 22 22
53811- 14 14 14 6 6 6 0 0 0 0 0 0
53812- 0 0 0 0 0 0 0 0 0 0 0 0
53813- 0 0 0 0 0 0 0 0 0 0 0 0
53814- 0 0 0 0 0 0 0 0 0 0 0 0
53815- 0 0 0 0 0 0 0 0 0 6 6 6
53816- 18 18 18 34 34 34 58 58 58 78 78 78
53817-101 98 89 124 112 88 142 110 46 156 107 11
53818-163 110 8 167 114 7 175 118 6 180 123 7
53819-185 133 11 197 138 11 210 150 10 219 162 10
53820-226 170 11 236 178 12 236 178 12 234 174 13
53821-219 162 10 197 138 11 163 110 8 130 83 6
53822- 91 60 6 10 10 10 2 2 6 2 2 6
53823- 18 18 18 38 38 38 38 38 38 38 38 38
53824- 38 38 38 38 38 38 38 38 38 38 38 38
53825- 38 38 38 38 38 38 26 26 26 2 2 6
53826- 2 2 6 6 6 6 70 47 6 137 92 6
53827-175 118 6 200 144 11 219 162 10 230 174 11
53828-234 174 13 230 174 11 219 162 10 210 150 10
53829-192 133 9 163 110 8 124 112 88 82 82 82
53830- 50 50 50 30 30 30 14 14 14 6 6 6
53831- 0 0 0 0 0 0 0 0 0 0 0 0
53832- 0 0 0 0 0 0 0 0 0 0 0 0
53833- 0 0 0 0 0 0 0 0 0 0 0 0
53834- 0 0 0 0 0 0 0 0 0 0 0 0
53835- 0 0 0 0 0 0 0 0 0 0 0 0
53836- 6 6 6 14 14 14 22 22 22 34 34 34
53837- 42 42 42 58 58 58 74 74 74 86 86 86
53838-101 98 89 122 102 70 130 98 46 121 87 25
53839-137 92 6 152 99 6 163 110 8 180 123 7
53840-185 133 11 197 138 11 206 145 10 200 144 11
53841-180 123 7 156 107 11 130 83 6 104 69 6
53842- 50 34 6 54 54 54 110 110 110 101 98 89
53843- 86 86 86 82 82 82 78 78 78 78 78 78
53844- 78 78 78 78 78 78 78 78 78 78 78 78
53845- 78 78 78 82 82 82 86 86 86 94 94 94
53846-106 106 106 101 101 101 86 66 34 124 80 6
53847-156 107 11 180 123 7 192 133 9 200 144 11
53848-206 145 10 200 144 11 192 133 9 175 118 6
53849-139 102 15 109 106 95 70 70 70 42 42 42
53850- 22 22 22 10 10 10 0 0 0 0 0 0
53851- 0 0 0 0 0 0 0 0 0 0 0 0
53852- 0 0 0 0 0 0 0 0 0 0 0 0
53853- 0 0 0 0 0 0 0 0 0 0 0 0
53854- 0 0 0 0 0 0 0 0 0 0 0 0
53855- 0 0 0 0 0 0 0 0 0 0 0 0
53856- 0 0 0 0 0 0 6 6 6 10 10 10
53857- 14 14 14 22 22 22 30 30 30 38 38 38
53858- 50 50 50 62 62 62 74 74 74 90 90 90
53859-101 98 89 112 100 78 121 87 25 124 80 6
53860-137 92 6 152 99 6 152 99 6 152 99 6
53861-138 86 6 124 80 6 98 70 6 86 66 30
53862-101 98 89 82 82 82 58 58 58 46 46 46
53863- 38 38 38 34 34 34 34 34 34 34 34 34
53864- 34 34 34 34 34 34 34 34 34 34 34 34
53865- 34 34 34 34 34 34 38 38 38 42 42 42
53866- 54 54 54 82 82 82 94 86 76 91 60 6
53867-134 86 6 156 107 11 167 114 7 175 118 6
53868-175 118 6 167 114 7 152 99 6 121 87 25
53869-101 98 89 62 62 62 34 34 34 18 18 18
53870- 6 6 6 0 0 0 0 0 0 0 0 0
53871- 0 0 0 0 0 0 0 0 0 0 0 0
53872- 0 0 0 0 0 0 0 0 0 0 0 0
53873- 0 0 0 0 0 0 0 0 0 0 0 0
53874- 0 0 0 0 0 0 0 0 0 0 0 0
53875- 0 0 0 0 0 0 0 0 0 0 0 0
53876- 0 0 0 0 0 0 0 0 0 0 0 0
53877- 0 0 0 6 6 6 6 6 6 10 10 10
53878- 18 18 18 22 22 22 30 30 30 42 42 42
53879- 50 50 50 66 66 66 86 86 86 101 98 89
53880-106 86 58 98 70 6 104 69 6 104 69 6
53881-104 69 6 91 60 6 82 62 34 90 90 90
53882- 62 62 62 38 38 38 22 22 22 14 14 14
53883- 10 10 10 10 10 10 10 10 10 10 10 10
53884- 10 10 10 10 10 10 6 6 6 10 10 10
53885- 10 10 10 10 10 10 10 10 10 14 14 14
53886- 22 22 22 42 42 42 70 70 70 89 81 66
53887- 80 54 7 104 69 6 124 80 6 137 92 6
53888-134 86 6 116 81 8 100 82 52 86 86 86
53889- 58 58 58 30 30 30 14 14 14 6 6 6
53890- 0 0 0 0 0 0 0 0 0 0 0 0
53891- 0 0 0 0 0 0 0 0 0 0 0 0
53892- 0 0 0 0 0 0 0 0 0 0 0 0
53893- 0 0 0 0 0 0 0 0 0 0 0 0
53894- 0 0 0 0 0 0 0 0 0 0 0 0
53895- 0 0 0 0 0 0 0 0 0 0 0 0
53896- 0 0 0 0 0 0 0 0 0 0 0 0
53897- 0 0 0 0 0 0 0 0 0 0 0 0
53898- 0 0 0 6 6 6 10 10 10 14 14 14
53899- 18 18 18 26 26 26 38 38 38 54 54 54
53900- 70 70 70 86 86 86 94 86 76 89 81 66
53901- 89 81 66 86 86 86 74 74 74 50 50 50
53902- 30 30 30 14 14 14 6 6 6 0 0 0
53903- 0 0 0 0 0 0 0 0 0 0 0 0
53904- 0 0 0 0 0 0 0 0 0 0 0 0
53905- 0 0 0 0 0 0 0 0 0 0 0 0
53906- 6 6 6 18 18 18 34 34 34 58 58 58
53907- 82 82 82 89 81 66 89 81 66 89 81 66
53908- 94 86 66 94 86 76 74 74 74 50 50 50
53909- 26 26 26 14 14 14 6 6 6 0 0 0
53910- 0 0 0 0 0 0 0 0 0 0 0 0
53911- 0 0 0 0 0 0 0 0 0 0 0 0
53912- 0 0 0 0 0 0 0 0 0 0 0 0
53913- 0 0 0 0 0 0 0 0 0 0 0 0
53914- 0 0 0 0 0 0 0 0 0 0 0 0
53915- 0 0 0 0 0 0 0 0 0 0 0 0
53916- 0 0 0 0 0 0 0 0 0 0 0 0
53917- 0 0 0 0 0 0 0 0 0 0 0 0
53918- 0 0 0 0 0 0 0 0 0 0 0 0
53919- 6 6 6 6 6 6 14 14 14 18 18 18
53920- 30 30 30 38 38 38 46 46 46 54 54 54
53921- 50 50 50 42 42 42 30 30 30 18 18 18
53922- 10 10 10 0 0 0 0 0 0 0 0 0
53923- 0 0 0 0 0 0 0 0 0 0 0 0
53924- 0 0 0 0 0 0 0 0 0 0 0 0
53925- 0 0 0 0 0 0 0 0 0 0 0 0
53926- 0 0 0 6 6 6 14 14 14 26 26 26
53927- 38 38 38 50 50 50 58 58 58 58 58 58
53928- 54 54 54 42 42 42 30 30 30 18 18 18
53929- 10 10 10 0 0 0 0 0 0 0 0 0
53930- 0 0 0 0 0 0 0 0 0 0 0 0
53931- 0 0 0 0 0 0 0 0 0 0 0 0
53932- 0 0 0 0 0 0 0 0 0 0 0 0
53933- 0 0 0 0 0 0 0 0 0 0 0 0
53934- 0 0 0 0 0 0 0 0 0 0 0 0
53935- 0 0 0 0 0 0 0 0 0 0 0 0
53936- 0 0 0 0 0 0 0 0 0 0 0 0
53937- 0 0 0 0 0 0 0 0 0 0 0 0
53938- 0 0 0 0 0 0 0 0 0 0 0 0
53939- 0 0 0 0 0 0 0 0 0 6 6 6
53940- 6 6 6 10 10 10 14 14 14 18 18 18
53941- 18 18 18 14 14 14 10 10 10 6 6 6
53942- 0 0 0 0 0 0 0 0 0 0 0 0
53943- 0 0 0 0 0 0 0 0 0 0 0 0
53944- 0 0 0 0 0 0 0 0 0 0 0 0
53945- 0 0 0 0 0 0 0 0 0 0 0 0
53946- 0 0 0 0 0 0 0 0 0 6 6 6
53947- 14 14 14 18 18 18 22 22 22 22 22 22
53948- 18 18 18 14 14 14 10 10 10 6 6 6
53949- 0 0 0 0 0 0 0 0 0 0 0 0
53950- 0 0 0 0 0 0 0 0 0 0 0 0
53951- 0 0 0 0 0 0 0 0 0 0 0 0
53952- 0 0 0 0 0 0 0 0 0 0 0 0
53953- 0 0 0 0 0 0 0 0 0 0 0 0
53954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53967+4 4 4 4 4 4
53968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53981+4 4 4 4 4 4
53982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53995+4 4 4 4 4 4
53996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
53999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54009+4 4 4 4 4 4
54010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54023+4 4 4 4 4 4
54024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54037+4 4 4 4 4 4
54038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54042+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
54043+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
54044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54047+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
54048+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
54049+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
54050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54051+4 4 4 4 4 4
54052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54056+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
54057+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
54058+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54061+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
54062+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
54063+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
54064+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54065+4 4 4 4 4 4
54066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54070+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
54071+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
54072+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
54073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54075+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
54076+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
54077+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
54078+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
54079+4 4 4 4 4 4
54080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54083+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
54084+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
54085+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
54086+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
54087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54088+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
54089+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
54090+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
54091+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
54092+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
54093+4 4 4 4 4 4
54094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54097+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
54098+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
54099+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
54100+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
54101+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
54102+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
54103+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
54104+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
54105+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
54106+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
54107+4 4 4 4 4 4
54108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
54111+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
54112+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
54113+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
54114+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
54115+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
54116+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
54117+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
54118+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
54119+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
54120+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
54121+4 4 4 4 4 4
54122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54124+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
54125+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
54126+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
54127+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
54128+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
54129+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
54130+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
54131+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
54132+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
54133+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
54134+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
54135+4 4 4 4 4 4
54136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54138+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
54139+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
54140+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
54141+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
54142+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
54143+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
54144+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
54145+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
54146+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
54147+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
54148+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
54149+4 4 4 4 4 4
54150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54152+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
54153+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
54154+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
54155+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
54156+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
54157+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
54158+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
54159+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
54160+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
54161+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
54162+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
54163+4 4 4 4 4 4
54164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54166+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
54167+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
54168+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
54169+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
54170+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
54171+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
54172+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
54173+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
54174+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
54175+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
54176+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
54177+4 4 4 4 4 4
54178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54179+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
54180+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
54181+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
54182+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
54183+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
54184+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
54185+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
54186+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
54187+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
54188+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
54189+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
54190+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
54191+4 4 4 4 4 4
54192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
54193+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
54194+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
54195+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
54196+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
54197+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
54198+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
54199+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
54200+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
54201+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
54202+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
54203+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
54204+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
54205+0 0 0 4 4 4
54206+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
54207+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
54208+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
54209+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
54210+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
54211+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
54212+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
54213+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
54214+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
54215+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
[image data omitted: the remainder of this hunk is several hundred further lines of ASCII "R G B" pixel triplets — the PPM body of the patch's replacement boot-logo image (apparently under drivers/video/logo). It contains no code or prose and is elided here.]
55074diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/matrox/matroxfb_DAC1064.c
55075index a01147f..5d896f8 100644
55076--- a/drivers/video/matrox/matroxfb_DAC1064.c
55077+++ b/drivers/video/matrox/matroxfb_DAC1064.c
55078@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55079
55080 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55081 struct matrox_switch matrox_mystique = {
55082- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55083+ .preinit = MGA1064_preinit,
55084+ .reset = MGA1064_reset,
55085+ .init = MGA1064_init,
55086+ .restore = MGA1064_restore,
55087 };
55088 EXPORT_SYMBOL(matrox_mystique);
55089 #endif
55090
55091 #ifdef CONFIG_FB_MATROX_G
55092 struct matrox_switch matrox_G100 = {
55093- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55094+ .preinit = MGAG100_preinit,
55095+ .reset = MGAG100_reset,
55096+ .init = MGAG100_init,
55097+ .restore = MGAG100_restore,
55098 };
55099 EXPORT_SYMBOL(matrox_G100);
55100 #endif
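The two hunks above convert struct matrox_switch from positional to C99 designated initializers, and the Ti3026 hunk that follows does the same. Positional initialization depends on member order, which the patch's structure-layout randomization (randomize_layout) may shuffle; naming each member keeps the initializer correct regardless of layout. A minimal sketch of the difference, using a simplified stand-in for matrox_switch rather than the real structure:

	struct ops {
		int  (*preinit)(void *);
		void (*reset)(void *);
	};

	static int  my_preinit(void *p) { (void)p; return 0; }
	static void my_reset(void *p)   { (void)p; }

	/* positional: silently wrong if members are ever reordered */
	static struct ops a = { my_preinit, my_reset };

	/* designated: order-independent, as in the hunks above */
	static struct ops b = { .preinit = my_preinit, .reset = my_reset };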
55101diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/matrox/matroxfb_Ti3026.c
55102index 195ad7c..09743fc 100644
55103--- a/drivers/video/matrox/matroxfb_Ti3026.c
55104+++ b/drivers/video/matrox/matroxfb_Ti3026.c
55105@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55106 }
55107
55108 struct matrox_switch matrox_millennium = {
55109- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55110+ .preinit = Ti3026_preinit,
55111+ .reset = Ti3026_reset,
55112+ .init = Ti3026_init,
55113+ .restore = Ti3026_restore
55114 };
55115 EXPORT_SYMBOL(matrox_millennium);
55116 #endif
55117diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
55118index fe92eed..106e085 100644
55119--- a/drivers/video/mb862xx/mb862xxfb_accel.c
55120+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
55121@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55122 struct mb862xxfb_par *par = info->par;
55123
55124 if (info->var.bits_per_pixel == 32) {
55125- info->fbops->fb_fillrect = cfb_fillrect;
55126- info->fbops->fb_copyarea = cfb_copyarea;
55127- info->fbops->fb_imageblit = cfb_imageblit;
55128+ pax_open_kernel();
55129+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55130+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55131+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55132+ pax_close_kernel();
55133 } else {
55134 outreg(disp, GC_L0EM, 3);
55135- info->fbops->fb_fillrect = mb86290fb_fillrect;
55136- info->fbops->fb_copyarea = mb86290fb_copyarea;
55137- info->fbops->fb_imageblit = mb86290fb_imageblit;
55138+ pax_open_kernel();
55139+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55140+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55141+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55142+ pax_close_kernel();
55143 }
55144 outreg(draw, GDC_REG_DRAW_BASE, 0);
55145 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
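This hunk, and the fb_ops hunks that follow for nvidia, omap2, s1d13xxx, smscufx, udlfb, and uvesafb, all apply the same idiom: elsewhere in the patch the fb_ops function-pointer tables are constified and kept in read-only memory, so a plain assignment would either no longer compile (const member) or fault (read-only page). Writes are therefore bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection, and the *(void **)& cast discards the const qualifier on the member. A hypothetical helper (not part of the patch) that captures the idiom, assuming only the two PaX primitives:

	#define pax_set_fn(slot, fn)			\
	do {						\
		pax_open_kernel();			\
		*(void **)&(slot) = (void *)(fn);	\
		pax_close_kernel();			\
	} while (0)

	/* usage, equivalent to the hunk above:
	 *	pax_set_fn(info->fbops->fb_fillrect, cfb_fillrect);
	 */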
55146diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
55147index ff22871..b129bed 100644
55148--- a/drivers/video/nvidia/nvidia.c
55149+++ b/drivers/video/nvidia/nvidia.c
55150@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55151 info->fix.line_length = (info->var.xres_virtual *
55152 info->var.bits_per_pixel) >> 3;
55153 if (info->var.accel_flags) {
55154- info->fbops->fb_imageblit = nvidiafb_imageblit;
55155- info->fbops->fb_fillrect = nvidiafb_fillrect;
55156- info->fbops->fb_copyarea = nvidiafb_copyarea;
55157- info->fbops->fb_sync = nvidiafb_sync;
55158+ pax_open_kernel();
55159+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55160+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55161+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55162+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55163+ pax_close_kernel();
55164 info->pixmap.scan_align = 4;
55165 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55166 info->flags |= FBINFO_READS_FAST;
55167 NVResetGraphics(info);
55168 } else {
55169- info->fbops->fb_imageblit = cfb_imageblit;
55170- info->fbops->fb_fillrect = cfb_fillrect;
55171- info->fbops->fb_copyarea = cfb_copyarea;
55172- info->fbops->fb_sync = NULL;
55173+ pax_open_kernel();
55174+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55175+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55176+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55177+ *(void **)&info->fbops->fb_sync = NULL;
55178+ pax_close_kernel();
55179 info->pixmap.scan_align = 1;
55180 info->flags |= FBINFO_HWACCEL_DISABLED;
55181 info->flags &= ~FBINFO_READS_FAST;
55182@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55183 info->pixmap.size = 8 * 1024;
55184 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55185
55186- if (!hwcur)
55187- info->fbops->fb_cursor = NULL;
55188+ if (!hwcur) {
55189+ pax_open_kernel();
55190+ *(void **)&info->fbops->fb_cursor = NULL;
55191+ pax_close_kernel();
55192+ }
55193
55194 info->var.accel_flags = (!noaccel);
55195
55196diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
55197index 669a81f..e216d76 100644
55198--- a/drivers/video/omap2/dss/display.c
55199+++ b/drivers/video/omap2/dss/display.c
55200@@ -137,12 +137,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
55201 snprintf(dssdev->alias, sizeof(dssdev->alias),
55202 "display%d", disp_num_counter++);
55203
55204+ pax_open_kernel();
55205 if (drv && drv->get_resolution == NULL)
55206- drv->get_resolution = omapdss_default_get_resolution;
55207+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
55208 if (drv && drv->get_recommended_bpp == NULL)
55209- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55210+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55211 if (drv && drv->get_timings == NULL)
55212- drv->get_timings = omapdss_default_get_timings;
55213+ *(void **)&drv->get_timings = omapdss_default_get_timings;
55214+ pax_close_kernel();
55215
55216 mutex_lock(&panel_list_mutex);
55217 list_add_tail(&dssdev->panel_list, &panel_list);
55218diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
55219index 83433cb..71e9b98 100644
55220--- a/drivers/video/s1d13xxxfb.c
55221+++ b/drivers/video/s1d13xxxfb.c
55222@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55223
55224 switch(prod_id) {
55225 case S1D13506_PROD_ID: /* activate acceleration */
55226- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55227- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55228+ pax_open_kernel();
55229+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55230+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55231+ pax_close_kernel();
55232 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
55233 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
55234 break;
55235diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
55236index d513ed6..90b0de9 100644
55237--- a/drivers/video/smscufx.c
55238+++ b/drivers/video/smscufx.c
55239@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55240 fb_deferred_io_cleanup(info);
55241 kfree(info->fbdefio);
55242 info->fbdefio = NULL;
55243- info->fbops->fb_mmap = ufx_ops_mmap;
55244+ pax_open_kernel();
55245+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
55246+ pax_close_kernel();
55247 }
55248
55249 pr_debug("released /dev/fb%d user=%d count=%d",
55250diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
55251index 025f14e..20eb4db 100644
55252--- a/drivers/video/udlfb.c
55253+++ b/drivers/video/udlfb.c
55254@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
55255 dlfb_urb_completion(urb);
55256
55257 error:
55258- atomic_add(bytes_sent, &dev->bytes_sent);
55259- atomic_add(bytes_identical, &dev->bytes_identical);
55260- atomic_add(width*height*2, &dev->bytes_rendered);
55261+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55262+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55263+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
55264 end_cycles = get_cycles();
55265- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55266+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55267 >> 10)), /* Kcycles */
55268 &dev->cpu_kcycles_used);
55269
55270@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
55271 dlfb_urb_completion(urb);
55272
55273 error:
55274- atomic_add(bytes_sent, &dev->bytes_sent);
55275- atomic_add(bytes_identical, &dev->bytes_identical);
55276- atomic_add(bytes_rendered, &dev->bytes_rendered);
55277+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55278+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55279+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
55280 end_cycles = get_cycles();
55281- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55282+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55283 >> 10)), /* Kcycles */
55284 &dev->cpu_kcycles_used);
55285 }
55286@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
55287 fb_deferred_io_cleanup(info);
55288 kfree(info->fbdefio);
55289 info->fbdefio = NULL;
55290- info->fbops->fb_mmap = dlfb_ops_mmap;
55291+ pax_open_kernel();
55292+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
55293+ pax_close_kernel();
55294 }
55295
55296 pr_warn("released /dev/fb%d user=%d count=%d\n",
55297@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
55298 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55299 struct dlfb_data *dev = fb_info->par;
55300 return snprintf(buf, PAGE_SIZE, "%u\n",
55301- atomic_read(&dev->bytes_rendered));
55302+ atomic_read_unchecked(&dev->bytes_rendered));
55303 }
55304
55305 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55306@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55307 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55308 struct dlfb_data *dev = fb_info->par;
55309 return snprintf(buf, PAGE_SIZE, "%u\n",
55310- atomic_read(&dev->bytes_identical));
55311+ atomic_read_unchecked(&dev->bytes_identical));
55312 }
55313
55314 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55315@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55316 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55317 struct dlfb_data *dev = fb_info->par;
55318 return snprintf(buf, PAGE_SIZE, "%u\n",
55319- atomic_read(&dev->bytes_sent));
55320+ atomic_read_unchecked(&dev->bytes_sent));
55321 }
55322
55323 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55324@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55325 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55326 struct dlfb_data *dev = fb_info->par;
55327 return snprintf(buf, PAGE_SIZE, "%u\n",
55328- atomic_read(&dev->cpu_kcycles_used));
55329+ atomic_read_unchecked(&dev->cpu_kcycles_used));
55330 }
55331
55332 static ssize_t edid_show(
55333@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55334 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55335 struct dlfb_data *dev = fb_info->par;
55336
55337- atomic_set(&dev->bytes_rendered, 0);
55338- atomic_set(&dev->bytes_identical, 0);
55339- atomic_set(&dev->bytes_sent, 0);
55340- atomic_set(&dev->cpu_kcycles_used, 0);
55341+ atomic_set_unchecked(&dev->bytes_rendered, 0);
55342+ atomic_set_unchecked(&dev->bytes_identical, 0);
55343+ atomic_set_unchecked(&dev->bytes_sent, 0);
55344+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55345
55346 return count;
55347 }
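The udlfb changes swap the driver's throughput statistics from atomic_t to the atomic_unchecked_t API that this patch introduces. Under PAX_REFCOUNT an overflowing atomic_t is treated as a reference-count bug and trapped; these counters are pure statistics that may legitimately wrap, so the _unchecked variants opt them out of overflow detection. (The matching field-type change in struct dlfb_data lives in a header hunk elsewhere in the patch.) A short sketch, assuming the API as the patch defines it:

	static atomic_unchecked_t bytes_sent = ATOMIC_INIT(0);

	static void note_sent(int n)
	{
		/* wrapping is harmless for a statistic; _unchecked skips
		 * the PAX_REFCOUNT overflow trap that guards atomic_t */
		atomic_add_unchecked(n, &bytes_sent);
	}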
55348diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
55349index 256fba7..6e75516 100644
55350--- a/drivers/video/uvesafb.c
55351+++ b/drivers/video/uvesafb.c
55352@@ -19,6 +19,7 @@
55353 #include <linux/io.h>
55354 #include <linux/mutex.h>
55355 #include <linux/slab.h>
55356+#include <linux/moduleloader.h>
55357 #include <video/edid.h>
55358 #include <video/uvesafb.h>
55359 #ifdef CONFIG_X86
55360@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55361 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55362 par->pmi_setpal = par->ypan = 0;
55363 } else {
55364+
55365+#ifdef CONFIG_PAX_KERNEXEC
55366+#ifdef CONFIG_MODULES
55367+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55368+#endif
55369+ if (!par->pmi_code) {
55370+ par->pmi_setpal = par->ypan = 0;
55371+ return 0;
55372+ }
55373+#endif
55374+
55375 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55376 + task->t.regs.edi);
55377+
55378+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55379+ pax_open_kernel();
55380+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55381+ pax_close_kernel();
55382+
55383+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55384+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55385+#else
55386 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55387 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55388+#endif
55389+
55390 printk(KERN_INFO "uvesafb: protected mode interface info at "
55391 "%04x:%04x\n",
55392 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55393@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55394 par->ypan = ypan;
55395
55396 if (par->pmi_setpal || par->ypan) {
55397+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55398 if (__supported_pte_mask & _PAGE_NX) {
55399 par->pmi_setpal = par->ypan = 0;
55400 printk(KERN_WARNING "uvesafb: NX protection is active, "
55401 "better not use the PMI.\n");
55402- } else {
55403+ } else
55404+#endif
55405 uvesafb_vbe_getpmi(task, par);
55406- }
55407 }
55408 #else
55409 /* The protected mode interface is not available on non-x86. */
55410@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55411 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55412
55413 /* Disable blanking if the user requested so. */
55414- if (!blank)
55415- info->fbops->fb_blank = NULL;
55416+ if (!blank) {
55417+ pax_open_kernel();
55418+ *(void **)&info->fbops->fb_blank = NULL;
55419+ pax_close_kernel();
55420+ }
55421
55422 /*
55423 * Find out how much IO memory is required for the mode with
55424@@ -1530,8 +1557,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55425 info->flags = FBINFO_FLAG_DEFAULT |
55426 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55427
55428- if (!par->ypan)
55429- info->fbops->fb_pan_display = NULL;
55430+ if (!par->ypan) {
55431+ pax_open_kernel();
55432+ *(void **)&info->fbops->fb_pan_display = NULL;
55433+ pax_close_kernel();
55434+ }
55435 }
55436
55437 static void uvesafb_init_mtrr(struct fb_info *info)
55438@@ -1792,6 +1822,11 @@ out_mode:
55439 out:
55440 kfree(par->vbe_modes);
55441
55442+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55443+ if (par->pmi_code)
55444+ module_free_exec(NULL, par->pmi_code);
55445+#endif
55446+
55447 framebuffer_release(info);
55448 return err;
55449 }
55450@@ -1816,6 +1851,12 @@ static int uvesafb_remove(struct platform_device *dev)
55451 kfree(par->vbe_modes);
55452 kfree(par->vbe_state_orig);
55453 kfree(par->vbe_state_saved);
55454+
55455+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55456+ if (par->pmi_code)
55457+ module_free_exec(NULL, par->pmi_code);
55458+#endif
55459+
55460 }
55461
55462 framebuffer_release(info);
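The uvesafb hunks above all serve one pattern: under PAX_KERNEXEC the kernel's code mappings are write-protected and W^X, so the BIOS protected-mode interface (PMI) code cannot be patched or executed in place. Instead it is copied into an executable allocation from module_alloc_exec(), the copy happens inside a pax_open_kernel()/pax_close_kernel() write window, and the entry points are translated with ktva_ktla(). Those three primitives are kernel-only; as a hedged userspace analogue of the same staging discipline, mmap() and mprotect() can stand in:

    /* Userspace analogue of the W^X discipline the patch enforces:
     * stage code into a writable mapping, then flip it to
     * read+execute before use, so no page is ever writable and
     * executable at once. Illustrative sketch only; module_alloc_exec(),
     * pax_open_kernel() and ktva_ktla() have no userspace equivalent. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        const unsigned char payload[] = { 0xc3 }; /* stand-in for the PMI code */
        size_t len = 4096;

        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;

        memcpy(buf, payload, sizeof(payload)); /* the "pax_open_kernel()" window */

        if (mprotect(buf, len, PROT_READ | PROT_EXEC)) /* seal: no longer writable */
            return 1;

        printf("code staged at %p, now RX only\n", buf);
        munmap(buf, len);
        return 0;
    }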
55463diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
55464index 1c7da3b..56ea0bd 100644
55465--- a/drivers/video/vesafb.c
55466+++ b/drivers/video/vesafb.c
55467@@ -9,6 +9,7 @@
55468 */
55469
55470 #include <linux/module.h>
55471+#include <linux/moduleloader.h>
55472 #include <linux/kernel.h>
55473 #include <linux/errno.h>
55474 #include <linux/string.h>
55475@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55476 static int vram_total; /* Set total amount of memory */
55477 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55478 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55479-static void (*pmi_start)(void) __read_mostly;
55480-static void (*pmi_pal) (void) __read_mostly;
55481+static void (*pmi_start)(void) __read_only;
55482+static void (*pmi_pal) (void) __read_only;
55483 static int depth __read_mostly;
55484 static int vga_compat __read_mostly;
55485 /* --------------------------------------------------------------------- */
55486@@ -234,6 +235,7 @@ static int vesafb_probe(struct platform_device *dev)
55487 unsigned int size_remap;
55488 unsigned int size_total;
55489 char *option = NULL;
55490+ void *pmi_code = NULL;
55491
55492 /* ignore error return of fb_get_options */
55493 fb_get_options("vesafb", &option);
55494@@ -280,10 +282,6 @@ static int vesafb_probe(struct platform_device *dev)
55495 size_remap = size_total;
55496 vesafb_fix.smem_len = size_remap;
55497
55498-#ifndef __i386__
55499- screen_info.vesapm_seg = 0;
55500-#endif
55501-
55502 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55503 printk(KERN_WARNING
55504 "vesafb: cannot reserve video memory at 0x%lx\n",
55505@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55506 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55507 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55508
55509+#ifdef __i386__
55510+
55511+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55512+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55513+ if (!pmi_code)
55514+#elif !defined(CONFIG_PAX_KERNEXEC)
55515+ if (0)
55516+#endif
55517+
55518+#endif
55519+ screen_info.vesapm_seg = 0;
55520+
55521 if (screen_info.vesapm_seg) {
55522- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55523- screen_info.vesapm_seg,screen_info.vesapm_off);
55524+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55525+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55526 }
55527
55528 if (screen_info.vesapm_seg < 0xc000)
55529@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55530
55531 if (ypan || pmi_setpal) {
55532 unsigned short *pmi_base;
55533+
55534 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55535- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55536- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55537+
55538+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55539+ pax_open_kernel();
55540+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55541+#else
55542+ pmi_code = pmi_base;
55543+#endif
55544+
55545+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55546+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55547+
55548+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55549+ pmi_start = ktva_ktla(pmi_start);
55550+ pmi_pal = ktva_ktla(pmi_pal);
55551+ pax_close_kernel();
55552+#endif
55553+
55554 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55555 if (pmi_base[3]) {
55556 printk(KERN_INFO "vesafb: pmi: ports = ");
55557@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55558 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55559 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55560
55561- if (!ypan)
55562- info->fbops->fb_pan_display = NULL;
55563+ if (!ypan) {
55564+ pax_open_kernel();
55565+ *(void **)&info->fbops->fb_pan_display = NULL;
55566+ pax_close_kernel();
55567+ }
55568
55569 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55570 err = -ENOMEM;
55571@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55572 fb_info(info, "%s frame buffer device\n", info->fix.id);
55573 return 0;
55574 err:
55575+
55576+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55577+ module_free_exec(NULL, pmi_code);
55578+#endif
55579+
55580 if (info->screen_base)
55581 iounmap(info->screen_base);
55582 framebuffer_release(info);
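The fbops edits in vesafb (and in uvesafb above) show the companion pattern: PaX constifies driver operation tables, so info->fbops points into read-only memory, and clearing a hook needs both an explicit cast and the open-kernel write window. A minimal sketch of the cast, with an invented struct and ordinary writable storage standing in for pax_open_kernel():

    /* Sketch of why the patch writes through (void **): once the ops
     * table is const, a plain `fbops->fb_pan_display = NULL;` no longer
     * compiles, so the qualifier is stripped for the brief write window.
     * The struct and names below are invented for illustration. */
    #include <stdio.h>

    struct fb_ops_demo {
        int (*fb_pan_display)(int);
    };

    static int pan(int x) { return x; }

    int main(void)
    {
        struct fb_ops_demo ops = { .fb_pan_display = pan };
        const struct fb_ops_demo *fbops = &ops; /* the read-only view constify gives */

        /* Same trick as the patch; relies on function and object
         * pointers sharing a representation, as they do on Linux. */
        *(void **)&fbops->fb_pan_display = NULL;

        printf("fb_pan_display is now %p\n", (void *)fbops->fb_pan_display);
        return 0;
    }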
55583diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
55584index 88714ae..16c2e11 100644
55585--- a/drivers/video/via/via_clock.h
55586+++ b/drivers/video/via/via_clock.h
55587@@ -56,7 +56,7 @@ struct via_clock {
55588
55589 void (*set_engine_pll_state)(u8 state);
55590 void (*set_engine_pll)(struct via_pll_config config);
55591-};
55592+} __no_const;
55593
55594
55595 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55596diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
55597index fef20db..d28b1ab 100644
55598--- a/drivers/xen/xenfs/xenstored.c
55599+++ b/drivers/xen/xenfs/xenstored.c
55600@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
55601 static int xsd_kva_open(struct inode *inode, struct file *file)
55602 {
55603 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
55604+#ifdef CONFIG_GRKERNSEC_HIDESYM
55605+ NULL);
55606+#else
55607 xen_store_interface);
55608+#endif
55609+
55610 if (!file->private_data)
55611 return -ENOMEM;
55612 return 0;
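GRKERNSEC_HIDESYM, used in the xenstored hunk above, is about one thing: never formatting real kernel addresses into buffers userspace can read, since leaked pointers defeat address-space randomization. A small userspace-flavored sketch of the idea; the flag and helper below are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    static bool hide_pointers = true; /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

    /* Format an address for an untrusted reader: either the real value
     * or a censored one, mirroring kasprintf(..., "0x%p", NULL) above.
     * glibc renders a NULL %p argument as "(nil)". */
    static int format_addr(char *buf, size_t len, const void *addr)
    {
        return snprintf(buf, len, "0x%p", hide_pointers ? NULL : addr);
    }

    int main(void)
    {
        char buf[32];
        int x;

        format_addr(buf, sizeof(buf), &x);
        printf("exported address: %s\n", buf);
        return 0;
    }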
55613diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
55614index 9ff073f..05cef23 100644
55615--- a/fs/9p/vfs_addr.c
55616+++ b/fs/9p/vfs_addr.c
55617@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
55618
55619 retval = v9fs_file_write_internal(inode,
55620 v9inode->writeback_fid,
55621- (__force const char __user *)buffer,
55622+ (const char __force_user *)buffer,
55623 len, &offset, 0);
55624 if (retval > 0)
55625 retval = 0;
55626diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
55627index 4e65aa9..043dc9a 100644
55628--- a/fs/9p/vfs_inode.c
55629+++ b/fs/9p/vfs_inode.c
55630@@ -1306,7 +1306,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55631 void
55632 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
55633 {
55634- char *s = nd_get_link(nd);
55635+ const char *s = nd_get_link(nd);
55636
55637 p9_debug(P9_DEBUG_VFS, " %s %s\n",
55638 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
55639diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
55640index 370b24c..ff0be7b 100644
55641--- a/fs/Kconfig.binfmt
55642+++ b/fs/Kconfig.binfmt
55643@@ -103,7 +103,7 @@ config HAVE_AOUT
55644
55645 config BINFMT_AOUT
55646 tristate "Kernel support for a.out and ECOFF binaries"
55647- depends on HAVE_AOUT
55648+ depends on HAVE_AOUT && BROKEN
55649 ---help---
55650 A.out (Assembler.OUTput) is a set of formats for libraries and
55651 executables used in the earliest versions of UNIX. Linux used
55652diff --git a/fs/afs/inode.c b/fs/afs/inode.c
55653index ce25d75..dc09eeb 100644
55654--- a/fs/afs/inode.c
55655+++ b/fs/afs/inode.c
55656@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
55657 struct afs_vnode *vnode;
55658 struct super_block *sb;
55659 struct inode *inode;
55660- static atomic_t afs_autocell_ino;
55661+ static atomic_unchecked_t afs_autocell_ino;
55662
55663 _enter("{%x:%u},%*.*s,",
55664 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
55665@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
55666 data.fid.unique = 0;
55667 data.fid.vnode = 0;
55668
55669- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
55670+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
55671 afs_iget5_autocell_test, afs_iget5_set,
55672 &data);
55673 if (!inode) {
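The atomic_unchecked_t conversion above is part of PaX REFCOUNT: ordinary atomic_t increments get overflow detection, so counters that are allowed to wrap, like this inode uniquifier, must be marked unchecked to opt out. A hedged userspace sketch of the distinction using C11 atomics; the explicit saturation check is a simplified stand-in for the real arch-specific trap:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refs = INT_MAX;  /* a refcount: wrapping here is a bug */
    static atomic_uint uniquifier;     /* an ID source: wrapping is harmless */

    /* Checked increment, roughly what PaX REFCOUNT gives atomic_inc().
     * The load/add pair is racy; the kernel does this atomically. */
    static int checked_inc(atomic_int *v)
    {
        if (atomic_load(v) == INT_MAX) {
            fprintf(stderr, "refcount overflow caught\n");
            return -1; /* the kernel would log and trap instead */
        }
        return atomic_fetch_add(v, 1) + 1;
    }

    /* Unchecked increment: atomic_inc_return_unchecked() analogue. */
    static unsigned unchecked_inc(atomic_uint *v)
    {
        return atomic_fetch_add(v, 1) + 1; /* free to wrap */
    }

    int main(void)
    {
        checked_inc(&refs); /* trips the overflow check */
        printf("next autocell ino: %u\n", unchecked_inc(&uniquifier));
        return 0;
    }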
55674diff --git a/fs/aio.c b/fs/aio.c
55675index 062a5f6..e5618e0 100644
55676--- a/fs/aio.c
55677+++ b/fs/aio.c
55678@@ -374,7 +374,7 @@ static int aio_setup_ring(struct kioctx *ctx)
55679 size += sizeof(struct io_event) * nr_events;
55680
55681 nr_pages = PFN_UP(size);
55682- if (nr_pages < 0)
55683+ if (nr_pages <= 0)
55684 return -EINVAL;
55685
55686 file = aio_private_file(ctx, nr_pages);
55687diff --git a/fs/attr.c b/fs/attr.c
55688index 5d4e59d..fd02418 100644
55689--- a/fs/attr.c
55690+++ b/fs/attr.c
55691@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
55692 unsigned long limit;
55693
55694 limit = rlimit(RLIMIT_FSIZE);
55695+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
55696 if (limit != RLIM_INFINITY && offset > limit)
55697 goto out_sig;
55698 if (offset > inode->i_sb->s_maxbytes)
55699diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
55700index 689e40d..515cac5 100644
55701--- a/fs/autofs4/waitq.c
55702+++ b/fs/autofs4/waitq.c
55703@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
55704 {
55705 unsigned long sigpipe, flags;
55706 mm_segment_t fs;
55707- const char *data = (const char *)addr;
55708+ const char __user *data = (const char __force_user *)addr;
55709 ssize_t wr = 0;
55710
55711 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
55712@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
55713 return 1;
55714 }
55715
55716+#ifdef CONFIG_GRKERNSEC_HIDESYM
55717+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
55718+#endif
55719+
55720 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
55721 enum autofs_notify notify)
55722 {
55723@@ -373,7 +377,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
55724
55725 /* If this is a direct mount request create a dummy name */
55726 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
55727+#ifdef CONFIG_GRKERNSEC_HIDESYM
55728+ /* this name does get written to userland via autofs4_write() */
55729+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
55730+#else
55731 qstr.len = sprintf(name, "%p", dentry);
55732+#endif
55733 else {
55734 qstr.len = autofs4_getpath(sbi, dentry, &name);
55735 if (!qstr.len) {
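The autofs change is the same infoleak concern from another angle: the dummy wait-queue name used to embed the dentry's kernel address via "%p", and since that string reaches userland through autofs4_write(), HIDESYM swaps it for a monotonically increasing ID that stays unique without revealing anything. A sketch of the substitution, with invented names:

    #include <stdatomic.h>
    #include <stdio.h>

    #define HIDESYM 1 /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

    static atomic_uint dummy_name_id; /* autofs_dummy_name_id stand-in */

    /* Build a name that must be unique per request but is visible to
     * userspace: a counter instead of the raw object address. */
    static int make_dummy_name(char *buf, size_t len, const void *obj)
    {
    #ifdef HIDESYM
        (void)obj;
        return snprintf(buf, len, "%08x",
                        atomic_fetch_add(&dummy_name_id, 1) + 1);
    #else
        return snprintf(buf, len, "%p", obj); /* leaks the address */
    #endif
    }

    int main(void)
    {
        char name[32];
        int obj;

        make_dummy_name(name, sizeof(name), &obj);
        printf("dummy name: %s\n", name);
        return 0;
    }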
55736diff --git a/fs/befs/endian.h b/fs/befs/endian.h
55737index 2722387..56059b5 100644
55738--- a/fs/befs/endian.h
55739+++ b/fs/befs/endian.h
55740@@ -11,7 +11,7 @@
55741
55742 #include <asm/byteorder.h>
55743
55744-static inline u64
55745+static inline u64 __intentional_overflow(-1)
55746 fs64_to_cpu(const struct super_block *sb, fs64 n)
55747 {
55748 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55749@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
55750 return (__force fs64)cpu_to_be64(n);
55751 }
55752
55753-static inline u32
55754+static inline u32 __intentional_overflow(-1)
55755 fs32_to_cpu(const struct super_block *sb, fs32 n)
55756 {
55757 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
55758@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
55759 return (__force fs32)cpu_to_be32(n);
55760 }
55761
55762-static inline u16
55763+static inline u16 __intentional_overflow(-1)
55764 fs16_to_cpu(const struct super_block *sb, fs16 n)
55765 {
55766 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
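__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin: it tells the plugin not to instrument these endian converters, whose arithmetic legitimately "changes" values wholesale, so that real truncations elsewhere still trap. When the plugin is absent the attribute must compile away; a hedged sketch of how such a no-op fallback is typically defined and used (the fallback define and demo helper are assumptions, not the kernel's exact definitions):

    #include <endian.h>  /* be64toh(); glibc-specific */
    #include <stdint.h>
    #include <stdio.h>

    /* Without the size_overflow plugin, the marker expands to nothing
     * so annotated code builds unchanged. */
    #ifndef __intentional_overflow
    #define __intentional_overflow(...)
    #endif

    /* Byte-order conversion looks like a wild value change to an
     * overflow checker; mark it as deliberate. */
    static inline uint64_t __intentional_overflow(-1)
    fs64_to_cpu_demo(uint64_t n)
    {
        return be64toh(n); /* treat on-disk data as big-endian */
    }

    int main(void)
    {
        printf("%llx\n",
               (unsigned long long)fs64_to_cpu_demo(0x0102030405060708ULL));
        return 0;
    }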
55767diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
55768index ca0ba15..0fa3257 100644
55769--- a/fs/binfmt_aout.c
55770+++ b/fs/binfmt_aout.c
55771@@ -16,6 +16,7 @@
55772 #include <linux/string.h>
55773 #include <linux/fs.h>
55774 #include <linux/file.h>
55775+#include <linux/security.h>
55776 #include <linux/stat.h>
55777 #include <linux/fcntl.h>
55778 #include <linux/ptrace.h>
55779@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
55780 #endif
55781 # define START_STACK(u) ((void __user *)u.start_stack)
55782
55783+ memset(&dump, 0, sizeof(dump));
55784+
55785 fs = get_fs();
55786 set_fs(KERNEL_DS);
55787 has_dumped = 1;
55788@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
55789
55790 /* If the size of the dump file exceeds the rlimit, then see what would happen
55791 if we wrote the stack, but not the data area. */
55792+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
55793 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
55794 dump.u_dsize = 0;
55795
55796 /* Make sure we have enough room to write the stack and data areas. */
55797+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
55798 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
55799 dump.u_ssize = 0;
55800
55801@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
55802 rlim = rlimit(RLIMIT_DATA);
55803 if (rlim >= RLIM_INFINITY)
55804 rlim = ~0;
55805+
55806+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
55807 if (ex.a_data + ex.a_bss > rlim)
55808 return -ENOMEM;
55809
55810@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
55811
55812 install_exec_creds(bprm);
55813
55814+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55815+ current->mm->pax_flags = 0UL;
55816+#endif
55817+
55818+#ifdef CONFIG_PAX_PAGEEXEC
55819+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
55820+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
55821+
55822+#ifdef CONFIG_PAX_EMUTRAMP
55823+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
55824+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
55825+#endif
55826+
55827+#ifdef CONFIG_PAX_MPROTECT
55828+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
55829+ current->mm->pax_flags |= MF_PAX_MPROTECT;
55830+#endif
55831+
55832+ }
55833+#endif
55834+
55835 if (N_MAGIC(ex) == OMAGIC) {
55836 unsigned long text_addr, map_size;
55837 loff_t pos;
55838@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
55839 }
55840
55841 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
55842- PROT_READ | PROT_WRITE | PROT_EXEC,
55843+ PROT_READ | PROT_WRITE,
55844 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
55845 fd_offset + ex.a_text);
55846 if (error != N_DATADDR(ex)) {
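Among the a.out hunks, the added memset(&dump, 0, sizeof(dump)) closes a classic infoleak: a struct that is copied wholesale into a core dump can carry padding bytes and never-written fields straight off the kernel stack. A minimal userspace sketch of the bug class and the fix; `user_dump` and its fields are invented stand-ins:

    #include <stdio.h>
    #include <string.h>

    struct user_dump {
        char magic;    /* 7 bytes of padding follow on LP64 */
        long u_dsize;
        long u_ssize;  /* any field left unset would leak stack garbage */
    };

    static void emit(const struct user_dump *d)
    {
        /* stand-in for the dump-file write / copy_to_user() */
        fwrite(d, sizeof(*d), 1, stdout);
    }

    int main(void)
    {
        struct user_dump dump;

        memset(&dump, 0, sizeof(dump)); /* the fix: no uninitialized bytes escape */
        dump.magic   = 'Z';
        dump.u_dsize = 1;
        dump.u_ssize = 2;
        emit(&dump);
        return 0;
    }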
55847diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
55848index 571a423..eed5754 100644
55849--- a/fs/binfmt_elf.c
55850+++ b/fs/binfmt_elf.c
55851@@ -34,6 +34,7 @@
55852 #include <linux/utsname.h>
55853 #include <linux/coredump.h>
55854 #include <linux/sched.h>
55855+#include <linux/xattr.h>
55856 #include <asm/uaccess.h>
55857 #include <asm/param.h>
55858 #include <asm/page.h>
55859@@ -48,7 +49,7 @@
55860 static int load_elf_binary(struct linux_binprm *bprm);
55861 static int load_elf_library(struct file *);
55862 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
55863- int, int, unsigned long);
55864+ int, int, unsigned long) __intentional_overflow(-1);
55865
55866 /*
55867 * If we don't support core dumping, then supply a NULL so we
55868@@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
55869 #define elf_core_dump NULL
55870 #endif
55871
55872+#ifdef CONFIG_PAX_MPROTECT
55873+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
55874+#endif
55875+
55876+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55877+static void elf_handle_mmap(struct file *file);
55878+#endif
55879+
55880 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
55881 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
55882 #else
55883@@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
55884 .load_binary = load_elf_binary,
55885 .load_shlib = load_elf_library,
55886 .core_dump = elf_core_dump,
55887+
55888+#ifdef CONFIG_PAX_MPROTECT
55889+ .handle_mprotect= elf_handle_mprotect,
55890+#endif
55891+
55892+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55893+ .handle_mmap = elf_handle_mmap,
55894+#endif
55895+
55896 .min_coredump = ELF_EXEC_PAGESIZE,
55897 };
55898
55899@@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
55900
55901 static int set_brk(unsigned long start, unsigned long end)
55902 {
55903+ unsigned long e = end;
55904+
55905 start = ELF_PAGEALIGN(start);
55906 end = ELF_PAGEALIGN(end);
55907 if (end > start) {
55908@@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
55909 if (BAD_ADDR(addr))
55910 return addr;
55911 }
55912- current->mm->start_brk = current->mm->brk = end;
55913+ current->mm->start_brk = current->mm->brk = e;
55914 return 0;
55915 }
55916
55917@@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
55918 elf_addr_t __user *u_rand_bytes;
55919 const char *k_platform = ELF_PLATFORM;
55920 const char *k_base_platform = ELF_BASE_PLATFORM;
55921- unsigned char k_rand_bytes[16];
55922+ u32 k_rand_bytes[4];
55923 int items;
55924 elf_addr_t *elf_info;
55925 int ei_index = 0;
55926 const struct cred *cred = current_cred();
55927 struct vm_area_struct *vma;
55928+ unsigned long saved_auxv[AT_VECTOR_SIZE];
55929
55930 /*
55931 * In some cases (e.g. Hyper-Threading), we want to avoid L1
55932@@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
55933 * Generate 16 random bytes for userspace PRNG seeding.
55934 */
55935 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
55936- u_rand_bytes = (elf_addr_t __user *)
55937- STACK_ALLOC(p, sizeof(k_rand_bytes));
55938+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
55939+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
55940+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
55941+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
55942+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
55943+ u_rand_bytes = (elf_addr_t __user *) p;
55944 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
55945 return -EFAULT;
55946
55947@@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
55948 return -EFAULT;
55949 current->mm->env_end = p;
55950
55951+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
55952+
55953 /* Put the elf_info on the stack in the right place. */
55954 sp = (elf_addr_t __user *)envp + 1;
55955- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
55956+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
55957 return -EFAULT;
55958 return 0;
55959 }
55960@@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
55961 an ELF header */
55962
55963 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55964- struct file *interpreter, unsigned long *interp_map_addr,
55965- unsigned long no_base)
55966+ struct file *interpreter, unsigned long no_base)
55967 {
55968 struct elf_phdr *elf_phdata;
55969 struct elf_phdr *eppnt;
55970- unsigned long load_addr = 0;
55971+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
55972 int load_addr_set = 0;
55973 unsigned long last_bss = 0, elf_bss = 0;
55974- unsigned long error = ~0UL;
55975+ unsigned long error = -EINVAL;
55976 unsigned long total_size;
55977 int retval, i, size;
55978
55979@@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55980 goto out_close;
55981 }
55982
55983+#ifdef CONFIG_PAX_SEGMEXEC
55984+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
55985+ pax_task_size = SEGMEXEC_TASK_SIZE;
55986+#endif
55987+
55988 eppnt = elf_phdata;
55989 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
55990 if (eppnt->p_type == PT_LOAD) {
55991@@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
55992 map_addr = elf_map(interpreter, load_addr + vaddr,
55993 eppnt, elf_prot, elf_type, total_size);
55994 total_size = 0;
55995- if (!*interp_map_addr)
55996- *interp_map_addr = map_addr;
55997 error = map_addr;
55998 if (BAD_ADDR(map_addr))
55999 goto out_close;
56000@@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56001 k = load_addr + eppnt->p_vaddr;
56002 if (BAD_ADDR(k) ||
56003 eppnt->p_filesz > eppnt->p_memsz ||
56004- eppnt->p_memsz > TASK_SIZE ||
56005- TASK_SIZE - eppnt->p_memsz < k) {
56006+ eppnt->p_memsz > pax_task_size ||
56007+ pax_task_size - eppnt->p_memsz < k) {
56008 error = -ENOMEM;
56009 goto out_close;
56010 }
56011@@ -525,9 +554,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
56012 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
56013
56014 /* Map the last of the bss segment */
56015- error = vm_brk(elf_bss, last_bss - elf_bss);
56016- if (BAD_ADDR(error))
56017- goto out_close;
56018+ if (last_bss > elf_bss) {
56019+ error = vm_brk(elf_bss, last_bss - elf_bss);
56020+ if (BAD_ADDR(error))
56021+ goto out_close;
56022+ }
56023 }
56024
56025 error = load_addr;
56026@@ -538,6 +569,336 @@ out:
56027 return error;
56028 }
56029
56030+#ifdef CONFIG_PAX_PT_PAX_FLAGS
56031+#ifdef CONFIG_PAX_SOFTMODE
56032+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
56033+{
56034+ unsigned long pax_flags = 0UL;
56035+
56036+#ifdef CONFIG_PAX_PAGEEXEC
56037+ if (elf_phdata->p_flags & PF_PAGEEXEC)
56038+ pax_flags |= MF_PAX_PAGEEXEC;
56039+#endif
56040+
56041+#ifdef CONFIG_PAX_SEGMEXEC
56042+ if (elf_phdata->p_flags & PF_SEGMEXEC)
56043+ pax_flags |= MF_PAX_SEGMEXEC;
56044+#endif
56045+
56046+#ifdef CONFIG_PAX_EMUTRAMP
56047+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
56048+ pax_flags |= MF_PAX_EMUTRAMP;
56049+#endif
56050+
56051+#ifdef CONFIG_PAX_MPROTECT
56052+ if (elf_phdata->p_flags & PF_MPROTECT)
56053+ pax_flags |= MF_PAX_MPROTECT;
56054+#endif
56055+
56056+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56057+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
56058+ pax_flags |= MF_PAX_RANDMMAP;
56059+#endif
56060+
56061+ return pax_flags;
56062+}
56063+#endif
56064+
56065+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
56066+{
56067+ unsigned long pax_flags = 0UL;
56068+
56069+#ifdef CONFIG_PAX_PAGEEXEC
56070+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
56071+ pax_flags |= MF_PAX_PAGEEXEC;
56072+#endif
56073+
56074+#ifdef CONFIG_PAX_SEGMEXEC
56075+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
56076+ pax_flags |= MF_PAX_SEGMEXEC;
56077+#endif
56078+
56079+#ifdef CONFIG_PAX_EMUTRAMP
56080+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
56081+ pax_flags |= MF_PAX_EMUTRAMP;
56082+#endif
56083+
56084+#ifdef CONFIG_PAX_MPROTECT
56085+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
56086+ pax_flags |= MF_PAX_MPROTECT;
56087+#endif
56088+
56089+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56090+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
56091+ pax_flags |= MF_PAX_RANDMMAP;
56092+#endif
56093+
56094+ return pax_flags;
56095+}
56096+#endif
56097+
56098+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
56099+#ifdef CONFIG_PAX_SOFTMODE
56100+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
56101+{
56102+ unsigned long pax_flags = 0UL;
56103+
56104+#ifdef CONFIG_PAX_PAGEEXEC
56105+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
56106+ pax_flags |= MF_PAX_PAGEEXEC;
56107+#endif
56108+
56109+#ifdef CONFIG_PAX_SEGMEXEC
56110+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
56111+ pax_flags |= MF_PAX_SEGMEXEC;
56112+#endif
56113+
56114+#ifdef CONFIG_PAX_EMUTRAMP
56115+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
56116+ pax_flags |= MF_PAX_EMUTRAMP;
56117+#endif
56118+
56119+#ifdef CONFIG_PAX_MPROTECT
56120+ if (pax_flags_softmode & MF_PAX_MPROTECT)
56121+ pax_flags |= MF_PAX_MPROTECT;
56122+#endif
56123+
56124+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56125+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
56126+ pax_flags |= MF_PAX_RANDMMAP;
56127+#endif
56128+
56129+ return pax_flags;
56130+}
56131+#endif
56132+
56133+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
56134+{
56135+ unsigned long pax_flags = 0UL;
56136+
56137+#ifdef CONFIG_PAX_PAGEEXEC
56138+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
56139+ pax_flags |= MF_PAX_PAGEEXEC;
56140+#endif
56141+
56142+#ifdef CONFIG_PAX_SEGMEXEC
56143+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
56144+ pax_flags |= MF_PAX_SEGMEXEC;
56145+#endif
56146+
56147+#ifdef CONFIG_PAX_EMUTRAMP
56148+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
56149+ pax_flags |= MF_PAX_EMUTRAMP;
56150+#endif
56151+
56152+#ifdef CONFIG_PAX_MPROTECT
56153+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
56154+ pax_flags |= MF_PAX_MPROTECT;
56155+#endif
56156+
56157+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
56158+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
56159+ pax_flags |= MF_PAX_RANDMMAP;
56160+#endif
56161+
56162+ return pax_flags;
56163+}
56164+#endif
56165+
56166+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56167+static unsigned long pax_parse_defaults(void)
56168+{
56169+ unsigned long pax_flags = 0UL;
56170+
56171+#ifdef CONFIG_PAX_SOFTMODE
56172+ if (pax_softmode)
56173+ return pax_flags;
56174+#endif
56175+
56176+#ifdef CONFIG_PAX_PAGEEXEC
56177+ pax_flags |= MF_PAX_PAGEEXEC;
56178+#endif
56179+
56180+#ifdef CONFIG_PAX_SEGMEXEC
56181+ pax_flags |= MF_PAX_SEGMEXEC;
56182+#endif
56183+
56184+#ifdef CONFIG_PAX_MPROTECT
56185+ pax_flags |= MF_PAX_MPROTECT;
56186+#endif
56187+
56188+#ifdef CONFIG_PAX_RANDMMAP
56189+ if (randomize_va_space)
56190+ pax_flags |= MF_PAX_RANDMMAP;
56191+#endif
56192+
56193+ return pax_flags;
56194+}
56195+
56196+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
56197+{
56198+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
56199+
56200+#ifdef CONFIG_PAX_EI_PAX
56201+
56202+#ifdef CONFIG_PAX_SOFTMODE
56203+ if (pax_softmode)
56204+ return pax_flags;
56205+#endif
56206+
56207+ pax_flags = 0UL;
56208+
56209+#ifdef CONFIG_PAX_PAGEEXEC
56210+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
56211+ pax_flags |= MF_PAX_PAGEEXEC;
56212+#endif
56213+
56214+#ifdef CONFIG_PAX_SEGMEXEC
56215+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
56216+ pax_flags |= MF_PAX_SEGMEXEC;
56217+#endif
56218+
56219+#ifdef CONFIG_PAX_EMUTRAMP
56220+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
56221+ pax_flags |= MF_PAX_EMUTRAMP;
56222+#endif
56223+
56224+#ifdef CONFIG_PAX_MPROTECT
56225+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
56226+ pax_flags |= MF_PAX_MPROTECT;
56227+#endif
56228+
56229+#ifdef CONFIG_PAX_ASLR
56230+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
56231+ pax_flags |= MF_PAX_RANDMMAP;
56232+#endif
56233+
56234+#endif
56235+
56236+ return pax_flags;
56237+
56238+}
56239+
56240+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
56241+{
56242+
56243+#ifdef CONFIG_PAX_PT_PAX_FLAGS
56244+ unsigned long i;
56245+
56246+ for (i = 0UL; i < elf_ex->e_phnum; i++)
56247+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
56248+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
56249+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
56250+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
56251+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
56252+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
56253+ return PAX_PARSE_FLAGS_FALLBACK;
56254+
56255+#ifdef CONFIG_PAX_SOFTMODE
56256+ if (pax_softmode)
56257+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
56258+ else
56259+#endif
56260+
56261+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
56262+ break;
56263+ }
56264+#endif
56265+
56266+ return PAX_PARSE_FLAGS_FALLBACK;
56267+}
56268+
56269+static unsigned long pax_parse_xattr_pax(struct file * const file)
56270+{
56271+
56272+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
56273+ ssize_t xattr_size, i;
56274+ unsigned char xattr_value[sizeof("pemrs") - 1];
56275+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
56276+
56277+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
56278+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
56279+ return PAX_PARSE_FLAGS_FALLBACK;
56280+
56281+ for (i = 0; i < xattr_size; i++)
56282+ switch (xattr_value[i]) {
56283+ default:
56284+ return PAX_PARSE_FLAGS_FALLBACK;
56285+
56286+#define parse_flag(option1, option2, flag) \
56287+ case option1: \
56288+ if (pax_flags_hardmode & MF_PAX_##flag) \
56289+ return PAX_PARSE_FLAGS_FALLBACK;\
56290+ pax_flags_hardmode |= MF_PAX_##flag; \
56291+ break; \
56292+ case option2: \
56293+ if (pax_flags_softmode & MF_PAX_##flag) \
56294+ return PAX_PARSE_FLAGS_FALLBACK;\
56295+ pax_flags_softmode |= MF_PAX_##flag; \
56296+ break;
56297+
56298+ parse_flag('p', 'P', PAGEEXEC);
56299+ parse_flag('e', 'E', EMUTRAMP);
56300+ parse_flag('m', 'M', MPROTECT);
56301+ parse_flag('r', 'R', RANDMMAP);
56302+ parse_flag('s', 'S', SEGMEXEC);
56303+
56304+#undef parse_flag
56305+ }
56306+
56307+ if (pax_flags_hardmode & pax_flags_softmode)
56308+ return PAX_PARSE_FLAGS_FALLBACK;
56309+
56310+#ifdef CONFIG_PAX_SOFTMODE
56311+ if (pax_softmode)
56312+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
56313+ else
56314+#endif
56315+
56316+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
56317+#else
56318+ return PAX_PARSE_FLAGS_FALLBACK;
56319+#endif
56320+
56321+}
56322+
56323+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
56324+{
56325+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
56326+
56327+ pax_flags = pax_parse_defaults();
56328+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
56329+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
56330+ xattr_pax_flags = pax_parse_xattr_pax(file);
56331+
56332+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
56333+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
56334+ pt_pax_flags != xattr_pax_flags)
56335+ return -EINVAL;
56336+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
56337+ pax_flags = xattr_pax_flags;
56338+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
56339+ pax_flags = pt_pax_flags;
56340+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
56341+ pax_flags = ei_pax_flags;
56342+
56343+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
56344+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
56345+ if ((__supported_pte_mask & _PAGE_NX))
56346+ pax_flags &= ~MF_PAX_SEGMEXEC;
56347+ else
56348+ pax_flags &= ~MF_PAX_PAGEEXEC;
56349+ }
56350+#endif
56351+
56352+ if (0 > pax_check_flags(&pax_flags))
56353+ return -EINVAL;
56354+
56355+ current->mm->pax_flags = pax_flags;
56356+ return 0;
56357+}
56358+#endif
56359+
56360 /*
56361 * These are the functions used to load ELF style executables and shared
56362 * libraries. There is no binary dependent code anywhere else.
56363@@ -554,6 +915,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
56364 {
56365 unsigned int random_variable = 0;
56366
56367+#ifdef CONFIG_PAX_RANDUSTACK
56368+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
56369+ return stack_top - current->mm->delta_stack;
56370+#endif
56371+
56372 if ((current->flags & PF_RANDOMIZE) &&
56373 !(current->personality & ADDR_NO_RANDOMIZE)) {
56374 random_variable = get_random_int() & STACK_RND_MASK;
56375@@ -572,7 +938,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
56376 unsigned long load_addr = 0, load_bias = 0;
56377 int load_addr_set = 0;
56378 char * elf_interpreter = NULL;
56379- unsigned long error;
56380+ unsigned long error = 0;
56381 struct elf_phdr *elf_ppnt, *elf_phdata;
56382 unsigned long elf_bss, elf_brk;
56383 int retval, i;
56384@@ -582,12 +948,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
56385 unsigned long start_code, end_code, start_data, end_data;
56386 unsigned long reloc_func_desc __maybe_unused = 0;
56387 int executable_stack = EXSTACK_DEFAULT;
56388- unsigned long def_flags = 0;
56389 struct pt_regs *regs = current_pt_regs();
56390 struct {
56391 struct elfhdr elf_ex;
56392 struct elfhdr interp_elf_ex;
56393 } *loc;
56394+ unsigned long pax_task_size;
56395
56396 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
56397 if (!loc) {
56398@@ -723,11 +1089,82 @@ static int load_elf_binary(struct linux_binprm *bprm)
56399 goto out_free_dentry;
56400
56401 /* OK, This is the point of no return */
56402- current->mm->def_flags = def_flags;
56403+ current->mm->def_flags = 0;
56404
56405 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
56406 may depend on the personality. */
56407 SET_PERSONALITY(loc->elf_ex);
56408+
56409+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56410+ current->mm->pax_flags = 0UL;
56411+#endif
56412+
56413+#ifdef CONFIG_PAX_DLRESOLVE
56414+ current->mm->call_dl_resolve = 0UL;
56415+#endif
56416+
56417+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56418+ current->mm->call_syscall = 0UL;
56419+#endif
56420+
56421+#ifdef CONFIG_PAX_ASLR
56422+ current->mm->delta_mmap = 0UL;
56423+ current->mm->delta_stack = 0UL;
56424+#endif
56425+
56426+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56427+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
56428+ send_sig(SIGKILL, current, 0);
56429+ goto out_free_dentry;
56430+ }
56431+#endif
56432+
56433+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56434+ pax_set_initial_flags(bprm);
56435+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56436+ if (pax_set_initial_flags_func)
56437+ (pax_set_initial_flags_func)(bprm);
56438+#endif
56439+
56440+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56441+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
56442+ current->mm->context.user_cs_limit = PAGE_SIZE;
56443+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
56444+ }
56445+#endif
56446+
56447+#ifdef CONFIG_PAX_SEGMEXEC
56448+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
56449+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
56450+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
56451+ pax_task_size = SEGMEXEC_TASK_SIZE;
56452+ current->mm->def_flags |= VM_NOHUGEPAGE;
56453+ } else
56454+#endif
56455+
56456+ pax_task_size = TASK_SIZE;
56457+
56458+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
56459+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
56460+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
56461+ put_cpu();
56462+ }
56463+#endif
56464+
56465+#ifdef CONFIG_PAX_ASLR
56466+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
56467+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
56468+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
56469+ }
56470+#endif
56471+
56472+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
56473+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
56474+ executable_stack = EXSTACK_DISABLE_X;
56475+ current->personality &= ~READ_IMPLIES_EXEC;
56476+ } else
56477+#endif
56478+
56479 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
56480 current->personality |= READ_IMPLIES_EXEC;
56481
56482@@ -817,6 +1254,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
56483 #else
56484 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
56485 #endif
56486+
56487+#ifdef CONFIG_PAX_RANDMMAP
56488+ /* PaX: randomize base address at the default exe base if requested */
56489+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
56490+#ifdef CONFIG_SPARC64
56491+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
56492+#else
56493+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
56494+#endif
56495+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
56496+ elf_flags |= MAP_FIXED;
56497+ }
56498+#endif
56499+
56500 }
56501
56502 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
56503@@ -849,9 +1300,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
56504 * allowed task size. Note that p_filesz must always be
56505 * <= p_memsz so it is only necessary to check p_memsz.
56506 */
56507- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
56508- elf_ppnt->p_memsz > TASK_SIZE ||
56509- TASK_SIZE - elf_ppnt->p_memsz < k) {
56510+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
56511+ elf_ppnt->p_memsz > pax_task_size ||
56512+ pax_task_size - elf_ppnt->p_memsz < k) {
56513 /* set_brk can never work. Avoid overflows. */
56514 send_sig(SIGKILL, current, 0);
56515 retval = -EINVAL;
56516@@ -890,17 +1341,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
56517 goto out_free_dentry;
56518 }
56519 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
56520- send_sig(SIGSEGV, current, 0);
56521- retval = -EFAULT; /* Nobody gets to see this, but.. */
56522- goto out_free_dentry;
56523+ /*
56524+ * This bss-zeroing can fail if the ELF
56525+ * file specifies odd protections. So
56526+ * we don't check the return value.
56527+ */
56528 }
56529
56530+#ifdef CONFIG_PAX_RANDMMAP
56531+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
56532+ unsigned long start, size, flags;
56533+ vm_flags_t vm_flags;
56534+
56535+ start = ELF_PAGEALIGN(elf_brk);
56536+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
56537+ flags = MAP_FIXED | MAP_PRIVATE;
56538+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
56539+
56540+ down_write(&current->mm->mmap_sem);
56541+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
56542+ retval = -ENOMEM;
56543+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
56544+// if (current->personality & ADDR_NO_RANDOMIZE)
56545+// vm_flags |= VM_READ | VM_MAYREAD;
56546+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
56547+ retval = IS_ERR_VALUE(start) ? start : 0;
56548+ }
56549+ up_write(&current->mm->mmap_sem);
56550+ if (retval == 0)
56551+ retval = set_brk(start + size, start + size + PAGE_SIZE);
56552+ if (retval < 0) {
56553+ send_sig(SIGKILL, current, 0);
56554+ goto out_free_dentry;
56555+ }
56556+ }
56557+#endif
56558+
56559 if (elf_interpreter) {
56560- unsigned long interp_map_addr = 0;
56561-
56562 elf_entry = load_elf_interp(&loc->interp_elf_ex,
56563 interpreter,
56564- &interp_map_addr,
56565 load_bias);
56566 if (!IS_ERR((void *)elf_entry)) {
56567 /*
56568@@ -1122,7 +1601,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
56569 * Decide what to dump of a segment, part, all or none.
56570 */
56571 static unsigned long vma_dump_size(struct vm_area_struct *vma,
56572- unsigned long mm_flags)
56573+ unsigned long mm_flags, long signr)
56574 {
56575 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
56576
56577@@ -1160,7 +1639,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
56578 if (vma->vm_file == NULL)
56579 return 0;
56580
56581- if (FILTER(MAPPED_PRIVATE))
56582+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
56583 goto whole;
56584
56585 /*
56586@@ -1367,9 +1846,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
56587 {
56588 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
56589 int i = 0;
56590- do
56591+ do {
56592 i += 2;
56593- while (auxv[i - 2] != AT_NULL);
56594+ } while (auxv[i - 2] != AT_NULL);
56595 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
56596 }
56597
56598@@ -1378,7 +1857,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
56599 {
56600 mm_segment_t old_fs = get_fs();
56601 set_fs(KERNEL_DS);
56602- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
56603+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
56604 set_fs(old_fs);
56605 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
56606 }
56607@@ -2002,14 +2481,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
56608 }
56609
56610 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
56611- unsigned long mm_flags)
56612+ struct coredump_params *cprm)
56613 {
56614 struct vm_area_struct *vma;
56615 size_t size = 0;
56616
56617 for (vma = first_vma(current, gate_vma); vma != NULL;
56618 vma = next_vma(vma, gate_vma))
56619- size += vma_dump_size(vma, mm_flags);
56620+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
56621 return size;
56622 }
56623
56624@@ -2100,7 +2579,7 @@ static int elf_core_dump(struct coredump_params *cprm)
56625
56626 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
56627
56628- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
56629+ offset += elf_core_vma_data_size(gate_vma, cprm);
56630 offset += elf_core_extra_data_size();
56631 e_shoff = offset;
56632
56633@@ -2128,7 +2607,7 @@ static int elf_core_dump(struct coredump_params *cprm)
56634 phdr.p_offset = offset;
56635 phdr.p_vaddr = vma->vm_start;
56636 phdr.p_paddr = 0;
56637- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
56638+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
56639 phdr.p_memsz = vma->vm_end - vma->vm_start;
56640 offset += phdr.p_filesz;
56641 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
56642@@ -2161,7 +2640,7 @@ static int elf_core_dump(struct coredump_params *cprm)
56643 unsigned long addr;
56644 unsigned long end;
56645
56646- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
56647+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
56648
56649 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
56650 struct page *page;
56651@@ -2202,6 +2681,167 @@ out:
56652
56653 #endif /* CONFIG_ELF_CORE */
56654
56655+#ifdef CONFIG_PAX_MPROTECT
56656+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
56657+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
56658+ * we'll remove VM_MAYWRITE for good on RELRO segments.
56659+ *
56660+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
56661+ * The checks favour ld-linux.so behaviour, which operates on a per-ELF-segment
56662+ */
56663+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
56664+{
56665+ struct elfhdr elf_h;
56666+ struct elf_phdr elf_p;
56667+ unsigned long i;
56668+ unsigned long oldflags;
56669+ bool is_textrel_rw, is_textrel_rx, is_relro;
56670+
56671+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
56672+ return;
56673+
56674+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
56675+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
56676+
56677+#ifdef CONFIG_PAX_ELFRELOCS
56678+ /* possible TEXTREL */
56679+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
56680+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
56681+#else
56682+ is_textrel_rw = false;
56683+ is_textrel_rx = false;
56684+#endif
56685+
56686+ /* possible RELRO */
56687+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
56688+
56689+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
56690+ return;
56691+
56692+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
56693+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
56694+
56695+#ifdef CONFIG_PAX_ETEXECRELOCS
56696+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
56697+#else
56698+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
56699+#endif
56700+
56701+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
56702+ !elf_check_arch(&elf_h) ||
56703+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
56704+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
56705+ return;
56706+
56707+ for (i = 0UL; i < elf_h.e_phnum; i++) {
56708+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
56709+ return;
56710+ switch (elf_p.p_type) {
56711+ case PT_DYNAMIC:
56712+ if (!is_textrel_rw && !is_textrel_rx)
56713+ continue;
56714+ i = 0UL;
56715+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
56716+ elf_dyn dyn;
56717+
56718+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
56719+ break;
56720+ if (dyn.d_tag == DT_NULL)
56721+ break;
56722+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
56723+ gr_log_textrel(vma);
56724+ if (is_textrel_rw)
56725+ vma->vm_flags |= VM_MAYWRITE;
56726+ else
56727+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
56728+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
56729+ break;
56730+ }
56731+ i++;
56732+ }
56733+ is_textrel_rw = false;
56734+ is_textrel_rx = false;
56735+ continue;
56736+
56737+ case PT_GNU_RELRO:
56738+ if (!is_relro)
56739+ continue;
56740+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
56741+ vma->vm_flags &= ~VM_MAYWRITE;
56742+ is_relro = false;
56743+ continue;
56744+
56745+#ifdef CONFIG_PAX_PT_PAX_FLAGS
56746+ case PT_PAX_FLAGS: {
56747+ const char *msg_mprotect = "", *msg_emutramp = "";
56748+ char *buffer_lib, *buffer_exe;
56749+
56750+ if (elf_p.p_flags & PF_NOMPROTECT)
56751+ msg_mprotect = "MPROTECT disabled";
56752+
56753+#ifdef CONFIG_PAX_EMUTRAMP
56754+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
56755+ msg_emutramp = "EMUTRAMP enabled";
56756+#endif
56757+
56758+ if (!msg_mprotect[0] && !msg_emutramp[0])
56759+ continue;
56760+
56761+ if (!printk_ratelimit())
56762+ continue;
56763+
56764+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
56765+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
56766+ if (buffer_lib && buffer_exe) {
56767+ char *path_lib, *path_exe;
56768+
56769+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
56770+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
56771+
56772+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
56773+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
56774+
56775+ }
56776+ free_page((unsigned long)buffer_exe);
56777+ free_page((unsigned long)buffer_lib);
56778+ continue;
56779+ }
56780+#endif
56781+
56782+ }
56783+ }
56784+}
56785+#endif
56786+
56787+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
56788+
56789+extern int grsec_enable_log_rwxmaps;
56790+
56791+static void elf_handle_mmap(struct file *file)
56792+{
56793+ struct elfhdr elf_h;
56794+ struct elf_phdr elf_p;
56795+ unsigned long i;
56796+
56797+ if (!grsec_enable_log_rwxmaps)
56798+ return;
56799+
56800+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
56801+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
56802+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
56803+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
56804+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
56805+ return;
56806+
56807+ for (i = 0UL; i < elf_h.e_phnum; i++) {
56808+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
56809+ return;
56810+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
56811+ gr_log_ptgnustack(file);
56812+ }
56813+}
56814+#endif
56815+
56816 static int __init init_elf_binfmt(void)
56817 {
56818 register_binfmt(&elf_format);
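The bulk of the binfmt_elf additions is the flag-resolution pipeline: compiled-in defaults, then EI_PAX header bits, then a PT_PAX_FLAGS program header, then the user.pax.flags xattr, with a conflict between PT and xattr sources rejected as -EINVAL and later sources otherwise overriding earlier ones. The xattr itself is just a string over the alphabet "pemrs": lowercase characters populate the hardmode mask, uppercase the softmode mask, and duplicates or unknown characters invalidate the whole attribute. A standalone sketch of that string parser, reusing the patch's macro trick (the mask values are invented):

    #include <stdio.h>
    #include <string.h>

    #define MF_PAGEEXEC 0x01
    #define MF_EMUTRAMP 0x02
    #define MF_MPROTECT 0x04
    #define MF_RANDMMAP 0x08
    #define MF_SEGMEXEC 0x10
    #define PARSE_FALLBACK ~0UL

    /* Parse a "pemrs"/"PEMRS" flag string: lowercase fills the hardmode
     * mask, uppercase the softmode mask; duplicates, unknown characters,
     * or setting one flag both ways invalidate the attribute. */
    static unsigned long parse_pax_xattr(const char *val, size_t len,
                                         unsigned long *hard, unsigned long *soft)
    {
        size_t i;

        *hard = *soft = 0UL;
        for (i = 0; i < len; i++)
            switch (val[i]) {
            default:
                return PARSE_FALLBACK;

    #define parse_flag(lo, up, flag)                         \
            case lo:                                         \
                if (*hard & flag) return PARSE_FALLBACK;     \
                *hard |= flag; break;                        \
            case up:                                         \
                if (*soft & flag) return PARSE_FALLBACK;     \
                *soft |= flag; break;

            parse_flag('p', 'P', MF_PAGEEXEC)
            parse_flag('e', 'E', MF_EMUTRAMP)
            parse_flag('m', 'M', MF_MPROTECT)
            parse_flag('r', 'R', MF_RANDMMAP)
            parse_flag('s', 'S', MF_SEGMEXEC)
    #undef parse_flag
            }

        if (*hard & *soft) /* same flag requested both ways: reject */
            return PARSE_FALLBACK;
        return 0UL;
    }

    int main(void)
    {
        unsigned long hard, soft;
        const char *attr = "PmRs";

        if (parse_pax_xattr(attr, strlen(attr), &hard, &soft) == PARSE_FALLBACK)
            puts("invalid pax xattr");
        else
            printf("hard=%#lx soft=%#lx\n", hard, soft);
        return 0;
    }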
56819diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
56820index d50bbe5..af3b649 100644
56821--- a/fs/binfmt_flat.c
56822+++ b/fs/binfmt_flat.c
56823@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
56824 realdatastart = (unsigned long) -ENOMEM;
56825 printk("Unable to allocate RAM for process data, errno %d\n",
56826 (int)-realdatastart);
56827+ down_write(&current->mm->mmap_sem);
56828 vm_munmap(textpos, text_len);
56829+ up_write(&current->mm->mmap_sem);
56830 ret = realdatastart;
56831 goto err;
56832 }
56833@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
56834 }
56835 if (IS_ERR_VALUE(result)) {
56836 printk("Unable to read data+bss, errno %d\n", (int)-result);
56837+ down_write(&current->mm->mmap_sem);
56838 vm_munmap(textpos, text_len);
56839 vm_munmap(realdatastart, len);
56840+ up_write(&current->mm->mmap_sem);
56841 ret = result;
56842 goto err;
56843 }
56844@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
56845 }
56846 if (IS_ERR_VALUE(result)) {
56847 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
56848+ down_write(&current->mm->mmap_sem);
56849 vm_munmap(textpos, text_len + data_len + extra +
56850 MAX_SHARED_LIBS * sizeof(unsigned long));
56851+ up_write(&current->mm->mmap_sem);
56852 ret = result;
56853 goto err;
56854 }
56855diff --git a/fs/bio.c b/fs/bio.c
56856index 33d79a4..c3c9893 100644
56857--- a/fs/bio.c
56858+++ b/fs/bio.c
56859@@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
56860 /*
56861 * Overflow, abort
56862 */
56863- if (end < start)
56864+ if (end < start || end - start > INT_MAX - nr_pages)
56865 return ERR_PTR(-EINVAL);
56866
56867 nr_pages += end - start;
56868@@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
56869 /*
56870 * Overflow, abort
56871 */
56872- if (end < start)
56873+ if (end < start || end - start > INT_MAX - nr_pages)
56874 return ERR_PTR(-EINVAL);
56875
56876 nr_pages += end - start;
56877@@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
56878 const int read = bio_data_dir(bio) == READ;
56879 struct bio_map_data *bmd = bio->bi_private;
56880 int i;
56881- char *p = bmd->sgvecs[0].iov_base;
56882+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
56883
56884 bio_for_each_segment_all(bvec, bio, i) {
56885 char *addr = page_address(bvec->bv_page);
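Both bio.c hunks harden the same computation: `nr_pages += end - start`, where end and start are page numbers derived from user-supplied iovecs. Checking only `end < start` still lets the addition overflow the int accumulator, so the patch also bounds the increment against INT_MAX - nr_pages. A small standalone demonstration of the pattern:

    #include <limits.h>
    #include <stdio.h>

    /* Accumulate a page count from [start, end) ranges without letting
     * the int total wrap; mirrors the added bio.c guard. */
    static int add_range_pages(int *nr_pages, unsigned long start, unsigned long end)
    {
        if (end < start ||
            end - start > (unsigned long)(INT_MAX - *nr_pages))
            return -1; /* reject instead of wrapping */
        *nr_pages += (int)(end - start);
        return 0;
    }

    int main(void)
    {
        int nr_pages = 10;

        if (add_range_pages(&nr_pages, 0, 0x7fffffffUL))
            puts("overflow rejected"); /* 10 + 0x7fffffff would wrap */
        else
            printf("nr_pages=%d\n", nr_pages);
        return 0;
    }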
56886diff --git a/fs/block_dev.c b/fs/block_dev.c
56887index 1e86823..8e34695 100644
56888--- a/fs/block_dev.c
56889+++ b/fs/block_dev.c
56890@@ -637,7 +637,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
56891 else if (bdev->bd_contains == bdev)
56892 return true; /* is a whole device which isn't held */
56893
56894- else if (whole->bd_holder == bd_may_claim)
56895+ else if (whole->bd_holder == (void *)bd_may_claim)
56896 return true; /* is a partition of a device that is being partitioned */
56897 else if (whole->bd_holder != NULL)
56898 return false; /* is a partition of a held device */
56899diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
56900index 3de01b4..6547c39 100644
56901--- a/fs/btrfs/ctree.c
56902+++ b/fs/btrfs/ctree.c
56903@@ -1217,9 +1217,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
56904 free_extent_buffer(buf);
56905 add_root_to_dirty_list(root);
56906 } else {
56907- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
56908- parent_start = parent->start;
56909- else
56910+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
56911+ if (parent)
56912+ parent_start = parent->start;
56913+ else
56914+ parent_start = 0;
56915+ } else
56916 parent_start = 0;
56917
56918 WARN_ON(trans->transid != btrfs_header_generation(parent));
56919diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
56920index 8d292fb..bc205c2 100644
56921--- a/fs/btrfs/delayed-inode.c
56922+++ b/fs/btrfs/delayed-inode.c
56923@@ -459,7 +459,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
56924
56925 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
56926 {
56927- int seq = atomic_inc_return(&delayed_root->items_seq);
56928+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
56929 if ((atomic_dec_return(&delayed_root->items) <
56930 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
56931 waitqueue_active(&delayed_root->wait))
56932@@ -1379,7 +1379,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
56933 static int refs_newer(struct btrfs_delayed_root *delayed_root,
56934 int seq, int count)
56935 {
56936- int val = atomic_read(&delayed_root->items_seq);
56937+ int val = atomic_read_unchecked(&delayed_root->items_seq);
56938
56939 if (val < seq || val >= seq + count)
56940 return 1;
56941@@ -1396,7 +1396,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
56942 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
56943 return;
56944
56945- seq = atomic_read(&delayed_root->items_seq);
56946+ seq = atomic_read_unchecked(&delayed_root->items_seq);
56947
56948 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
56949 int ret;
56950diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
56951index a4b38f9..f86a509 100644
56952--- a/fs/btrfs/delayed-inode.h
56953+++ b/fs/btrfs/delayed-inode.h
56954@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
56955 */
56956 struct list_head prepare_list;
56957 atomic_t items; /* for delayed items */
56958- atomic_t items_seq; /* for delayed items */
56959+ atomic_unchecked_t items_seq; /* for delayed items */
56960 int nodes; /* for delayed nodes */
56961 wait_queue_head_t wait;
56962 };
56963@@ -87,7 +87,7 @@ static inline void btrfs_init_delayed_root(
56964 struct btrfs_delayed_root *delayed_root)
56965 {
56966 atomic_set(&delayed_root->items, 0);
56967- atomic_set(&delayed_root->items_seq, 0);
56968+ atomic_set_unchecked(&delayed_root->items_seq, 0);
56969 delayed_root->nodes = 0;
56970 spin_lock_init(&delayed_root->lock);
56971 init_waitqueue_head(&delayed_root->wait);
56972diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
56973index 9f831bb..14afde5 100644
56974--- a/fs/btrfs/ioctl.c
56975+++ b/fs/btrfs/ioctl.c
56976@@ -3457,9 +3457,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
56977 for (i = 0; i < num_types; i++) {
56978 struct btrfs_space_info *tmp;
56979
56980+ /* Don't copy in more than we allocated */
56981 if (!slot_count)
56982 break;
56983
56984+ slot_count--;
56985+
56986 info = NULL;
56987 rcu_read_lock();
56988 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
56989@@ -3481,10 +3484,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
56990 memcpy(dest, &space, sizeof(space));
56991 dest++;
56992 space_args.total_spaces++;
56993- slot_count--;
56994 }
56995- if (!slot_count)
56996- break;
56997 }
56998 up_read(&info->groups_sem);
56999 }
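The space_info fix reorders the budget bookkeeping: slot_count tracks how many entries the user's buffer can still take, and charging it at the top of each outer iteration, before anything is produced, guarantees the copy can never run ahead of the allocation. A generic, hedged sketch of the reserve-before-write pattern (the struct and sizes are invented):

    #include <stdio.h>

    struct space { int type; long total; };

    /* Fill at most `slots` entries of `dest`: charge the budget before
     * producing an entry so a miscounted producer can't overrun it. */
    static unsigned fill_spaces(struct space *dest, unsigned slots)
    {
        unsigned produced = 0;
        int type;

        for (type = 0; type < 8; type++) {
            if (!slots)  /* budget exhausted: stop, don't overrun */
                break;
            slots--;     /* reserve the slot up front */

            dest[produced].type  = type;
            dest[produced].total = 4096L * (type + 1);
            produced++;
        }
        return produced;
    }

    int main(void)
    {
        struct space buf[3];
        unsigned n = fill_spaces(buf, 3);

        printf("copied %u entries (capped by the allocation)\n", n);
        return 0;
    }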
57000diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
57001index d71a11d..384e2c4 100644
57002--- a/fs/btrfs/super.c
57003+++ b/fs/btrfs/super.c
57004@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
57005 function, line, errstr);
57006 return;
57007 }
57008- ACCESS_ONCE(trans->transaction->aborted) = errno;
57009+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
57010 /* Wake up anybody who may be waiting on this transaction */
57011 wake_up(&root->fs_info->transaction_wait);
57012 wake_up(&root->fs_info->transaction_blocked_wait);
57013diff --git a/fs/buffer.c b/fs/buffer.c
57014index aeeea65..7651d590 100644
57015--- a/fs/buffer.c
57016+++ b/fs/buffer.c
57017@@ -3428,7 +3428,7 @@ void __init buffer_init(void)
57018 bh_cachep = kmem_cache_create("buffer_head",
57019 sizeof(struct buffer_head), 0,
57020 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
57021- SLAB_MEM_SPREAD),
57022+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
57023 NULL);
57024
57025 /*
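SLAB_NO_SANITIZE is the opt-out for grsecurity's slab sanitization, which scrubs objects as they are freed so stale data cannot be recovered from recycled memory; buffer_head churn is hot enough that this cache is exempted for performance. A hedged sketch of free-time scrubbing with an opt-out flag; the allocator internals below are invented:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define SLAB_NO_SANITIZE 0x1u

    struct demo_cache {
        size_t   obj_size;
        unsigned flags;
    };

    /* Free an object through the cache: unless the cache opted out,
     * wipe it first so freed memory never carries old contents. */
    static void cache_free(struct demo_cache *c, void *obj)
    {
        if (!(c->flags & SLAB_NO_SANITIZE))
            memset(obj, 0, c->obj_size); /* the sanitize pass */
        free(obj);
    }

    int main(void)
    {
        struct demo_cache sanitized = { 128, 0 };
        struct demo_cache fast_path = { 128, SLAB_NO_SANITIZE };

        cache_free(&sanitized, calloc(1, 128)); /* scrubbed on free */
        cache_free(&fast_path, calloc(1, 128)); /* skipped for speed */
        puts("ok");
        return 0;
    }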
57026diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
57027index 622f469..e8d2d55 100644
57028--- a/fs/cachefiles/bind.c
57029+++ b/fs/cachefiles/bind.c
57030@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
57031 args);
57032
57033 /* start by checking things over */
57034- ASSERT(cache->fstop_percent >= 0 &&
57035- cache->fstop_percent < cache->fcull_percent &&
57036+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
57037 cache->fcull_percent < cache->frun_percent &&
57038 cache->frun_percent < 100);
57039
57040- ASSERT(cache->bstop_percent >= 0 &&
57041- cache->bstop_percent < cache->bcull_percent &&
57042+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
57043 cache->bcull_percent < cache->brun_percent &&
57044 cache->brun_percent < 100);
57045
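
The dropped halves of these assertions were vacuous: the *_percent fields are plain unsigned (see the internal.h hunk further down), so "x >= 0" is always true and merely triggers -Wtype-limits. Only the upper bound carries information:

/* Illustrative only: with an unsigned percentage, the lower bound is
 * implicit and the upper bound is the whole range check. */
static int check_percent(unsigned int pct, unsigned int cull_pct)
{
	return pct < cull_pct ? 0 : -ERANGE;
}
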
57046diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
57047index 0a1467b..6a53245 100644
57048--- a/fs/cachefiles/daemon.c
57049+++ b/fs/cachefiles/daemon.c
57050@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
57051 if (n > buflen)
57052 return -EMSGSIZE;
57053
57054- if (copy_to_user(_buffer, buffer, n) != 0)
57055+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
57056 return -EFAULT;
57057
57058 return n;
57059@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
57060 if (test_bit(CACHEFILES_DEAD, &cache->flags))
57061 return -EIO;
57062
57063- if (datalen < 0 || datalen > PAGE_SIZE - 1)
57064+ if (datalen > PAGE_SIZE - 1)
57065 return -EOPNOTSUPP;
57066
57067 /* drag the command string into the kernel so we can parse it */
57068@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
57069 if (args[0] != '%' || args[1] != '\0')
57070 return -EINVAL;
57071
57072- if (fstop < 0 || fstop >= cache->fcull_percent)
57073+ if (fstop >= cache->fcull_percent)
57074 return cachefiles_daemon_range_error(cache, args);
57075
57076 cache->fstop_percent = fstop;
57077@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
57078 if (args[0] != '%' || args[1] != '\0')
57079 return -EINVAL;
57080
57081- if (bstop < 0 || bstop >= cache->bcull_percent)
57082+ if (bstop >= cache->bcull_percent)
57083 return cachefiles_daemon_range_error(cache, args);
57084
57085 cache->bstop_percent = bstop;
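
The daemon_read change above is belt-and-braces hardening: the copy length is re-checked against the on-stack buffer right at the copy site, so no future change to how n is computed can leak stack memory. The shape of the pattern, sketched with hypothetical names:

static ssize_t status_read(char __user *ubuf, size_t buflen, bool culling)
{
	char buffer[64];	/* hypothetical on-stack reply */
	size_t n;

	n = scnprintf(buffer, sizeof(buffer), "cull=%c\n", culling ? '1' : '0');
	if (n > buflen)
		return -EMSGSIZE;
	/* re-check against the real buffer size where the copy happens */
	if (n > sizeof(buffer) || copy_to_user(ubuf, buffer, n))
		return -EFAULT;
	return n;
}
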
57086diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
57087index 5349473..d6c0b93 100644
57088--- a/fs/cachefiles/internal.h
57089+++ b/fs/cachefiles/internal.h
57090@@ -59,7 +59,7 @@ struct cachefiles_cache {
57091 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
57092 struct rb_root active_nodes; /* active nodes (can't be culled) */
57093 rwlock_t active_lock; /* lock for active_nodes */
57094- atomic_t gravecounter; /* graveyard uniquifier */
57095+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
57096 unsigned frun_percent; /* when to stop culling (% files) */
57097 unsigned fcull_percent; /* when to start culling (% files) */
57098 unsigned fstop_percent; /* when to stop allocating (% files) */
57099@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
57100 * proc.c
57101 */
57102 #ifdef CONFIG_CACHEFILES_HISTOGRAM
57103-extern atomic_t cachefiles_lookup_histogram[HZ];
57104-extern atomic_t cachefiles_mkdir_histogram[HZ];
57105-extern atomic_t cachefiles_create_histogram[HZ];
57106+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
57107+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
57108+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
57109
57110 extern int __init cachefiles_proc_init(void);
57111 extern void cachefiles_proc_cleanup(void);
57112 static inline
57113-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
57114+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
57115 {
57116 unsigned long jif = jiffies - start_jif;
57117 if (jif >= HZ)
57118 jif = HZ - 1;
57119- atomic_inc(&histogram[jif]);
57120+ atomic_inc_unchecked(&histogram[jif]);
57121 }
57122
57123 #else
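
The atomic_t to atomic_unchecked_t conversions that recur throughout this patch follow one rule: counters whose wraparound is harmless (statistics, histograms, sequence uniquifiers like gravecounter) use the unchecked type so the PAX_REFCOUNT overflow trap never fires on them as a false positive, while genuine reference counts stay atomic_t and keep the saturation check. In sketch form:

/* Hypothetical pair of counters showing the split. */
static atomic_t foo_refcount = ATOMIC_INIT(1);		/* overflow exploitable: keep checked */
static atomic_unchecked_t foo_events = ATOMIC_INIT(0);	/* overflow harmless: stat only */

static void foo_get_and_count(void)
{
	atomic_inc(&foo_refcount);		/* trapped on overflow under PAX_REFCOUNT */
	atomic_inc_unchecked(&foo_events);	/* free-running, may wrap */
}
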
57124diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
57125index ca65f39..48921e3 100644
57126--- a/fs/cachefiles/namei.c
57127+++ b/fs/cachefiles/namei.c
57128@@ -317,7 +317,7 @@ try_again:
57129 /* first step is to make up a grave dentry in the graveyard */
57130 sprintf(nbuffer, "%08x%08x",
57131 (uint32_t) get_seconds(),
57132- (uint32_t) atomic_inc_return(&cache->gravecounter));
57133+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
57134
57135 /* do the multiway lock magic */
57136 trap = lock_rename(cache->graveyard, dir);
57137diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
57138index eccd339..4c1d995 100644
57139--- a/fs/cachefiles/proc.c
57140+++ b/fs/cachefiles/proc.c
57141@@ -14,9 +14,9 @@
57142 #include <linux/seq_file.h>
57143 #include "internal.h"
57144
57145-atomic_t cachefiles_lookup_histogram[HZ];
57146-atomic_t cachefiles_mkdir_histogram[HZ];
57147-atomic_t cachefiles_create_histogram[HZ];
57148+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
57149+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
57150+atomic_unchecked_t cachefiles_create_histogram[HZ];
57151
57152 /*
57153 * display the latency histogram
57154@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
57155 return 0;
57156 default:
57157 index = (unsigned long) v - 3;
57158- x = atomic_read(&cachefiles_lookup_histogram[index]);
57159- y = atomic_read(&cachefiles_mkdir_histogram[index]);
57160- z = atomic_read(&cachefiles_create_histogram[index]);
57161+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
57162+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
57163+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
57164 if (x == 0 && y == 0 && z == 0)
57165 return 0;
57166
57167diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
57168index ebaff36..7e3ea26 100644
57169--- a/fs/cachefiles/rdwr.c
57170+++ b/fs/cachefiles/rdwr.c
57171@@ -950,7 +950,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
57172 old_fs = get_fs();
57173 set_fs(KERNEL_DS);
57174 ret = file->f_op->write(
57175- file, (const void __user *) data, len, &pos);
57176+ file, (const void __force_user *) data, len, &pos);
57177 set_fs(old_fs);
57178 kunmap(page);
57179 file_end_write(file);
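
The __force_user cast above documents an intentional address-space pun: inside a set_fs(KERNEL_DS) window, ->write() legitimately receives a kernel pointer where a __user one is declared, and the force annotation keeps sparse and this patch's checker quiet about exactly this one site. The pattern in isolation:

/* Sketch of the KERNEL_DS window pattern used above. */
static ssize_t write_kernel_buf(struct file *file, const void *kbuf,
				size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);	/* uaccess checks now accept kernel addresses */
	ret = file->f_op->write(file, (const void __force_user *)kbuf, len, pos);
	set_fs(old_fs);		/* restore before returning to normal context */
	return ret;
}
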
57180diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
57181index 2a0bcae..34ec24e 100644
57182--- a/fs/ceph/dir.c
57183+++ b/fs/ceph/dir.c
57184@@ -240,7 +240,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
57185 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
57186 struct ceph_mds_client *mdsc = fsc->mdsc;
57187 unsigned frag = fpos_frag(ctx->pos);
57188- int off = fpos_off(ctx->pos);
57189+ unsigned int off = fpos_off(ctx->pos);
57190 int err;
57191 u32 ftype;
57192 struct ceph_mds_reply_info_parsed *rinfo;
57193diff --git a/fs/ceph/super.c b/fs/ceph/super.c
57194index 6a0951e..03fac6d 100644
57195--- a/fs/ceph/super.c
57196+++ b/fs/ceph/super.c
57197@@ -870,7 +870,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
57198 /*
57199 * construct our own bdi so we can control readahead, etc.
57200 */
57201-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
57202+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
57203
57204 static int ceph_register_bdi(struct super_block *sb,
57205 struct ceph_fs_client *fsc)
57206@@ -887,7 +887,7 @@ static int ceph_register_bdi(struct super_block *sb,
57207 default_backing_dev_info.ra_pages;
57208
57209 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
57210- atomic_long_inc_return(&bdi_seq));
57211+ atomic_long_inc_return_unchecked(&bdi_seq));
57212 if (!err)
57213 sb->s_bdi = &fsc->backing_dev_info;
57214 return err;
57215diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
57216index f3ac415..3d2420c 100644
57217--- a/fs/cifs/cifs_debug.c
57218+++ b/fs/cifs/cifs_debug.c
57219@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
57220
57221 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
57222 #ifdef CONFIG_CIFS_STATS2
57223- atomic_set(&totBufAllocCount, 0);
57224- atomic_set(&totSmBufAllocCount, 0);
57225+ atomic_set_unchecked(&totBufAllocCount, 0);
57226+ atomic_set_unchecked(&totSmBufAllocCount, 0);
57227 #endif /* CONFIG_CIFS_STATS2 */
57228 spin_lock(&cifs_tcp_ses_lock);
57229 list_for_each(tmp1, &cifs_tcp_ses_list) {
57230@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
57231 tcon = list_entry(tmp3,
57232 struct cifs_tcon,
57233 tcon_list);
57234- atomic_set(&tcon->num_smbs_sent, 0);
57235+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
57236 if (server->ops->clear_stats)
57237 server->ops->clear_stats(tcon);
57238 }
57239@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
57240 smBufAllocCount.counter, cifs_min_small);
57241 #ifdef CONFIG_CIFS_STATS2
57242 seq_printf(m, "Total Large %d Small %d Allocations\n",
57243- atomic_read(&totBufAllocCount),
57244- atomic_read(&totSmBufAllocCount));
57245+ atomic_read_unchecked(&totBufAllocCount),
57246+ atomic_read_unchecked(&totSmBufAllocCount));
57247 #endif /* CONFIG_CIFS_STATS2 */
57248
57249 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
57250@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
57251 if (tcon->need_reconnect)
57252 seq_puts(m, "\tDISCONNECTED ");
57253 seq_printf(m, "\nSMBs: %d",
57254- atomic_read(&tcon->num_smbs_sent));
57255+ atomic_read_unchecked(&tcon->num_smbs_sent));
57256 if (server->ops->print_stats)
57257 server->ops->print_stats(m, tcon);
57258 }
57259diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
57260index 849f613..eae6dec 100644
57261--- a/fs/cifs/cifsfs.c
57262+++ b/fs/cifs/cifsfs.c
57263@@ -1056,7 +1056,7 @@ cifs_init_request_bufs(void)
57264 */
57265 cifs_req_cachep = kmem_cache_create("cifs_request",
57266 CIFSMaxBufSize + max_hdr_size, 0,
57267- SLAB_HWCACHE_ALIGN, NULL);
57268+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
57269 if (cifs_req_cachep == NULL)
57270 return -ENOMEM;
57271
57272@@ -1083,7 +1083,7 @@ cifs_init_request_bufs(void)
57273 efficient to alloc 1 per page off the slab compared to 17K (5page)
57274 alloc of large cifs buffers even when page debugging is on */
57275 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
57276- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
57277+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
57278 NULL);
57279 if (cifs_sm_req_cachep == NULL) {
57280 mempool_destroy(cifs_req_poolp);
57281@@ -1168,8 +1168,8 @@ init_cifs(void)
57282 atomic_set(&bufAllocCount, 0);
57283 atomic_set(&smBufAllocCount, 0);
57284 #ifdef CONFIG_CIFS_STATS2
57285- atomic_set(&totBufAllocCount, 0);
57286- atomic_set(&totSmBufAllocCount, 0);
57287+ atomic_set_unchecked(&totBufAllocCount, 0);
57288+ atomic_set_unchecked(&totSmBufAllocCount, 0);
57289 #endif /* CONFIG_CIFS_STATS2 */
57290
57291 atomic_set(&midCount, 0);
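
SLAB_USERCOPY is the whitelist side of this patch's PAX_USERCOPY feature: only caches flagged this way may serve as the source or destination of copy_to_user()/copy_from_user(), and the CIFS request buffers qualify because SMB payloads are copied between them and user space by design. Marking a cache, schematically (name and size illustrative):

static struct kmem_cache *req_cachep;

static int __init req_cache_init(void)
{
	req_cachep = kmem_cache_create("proto_request", 16384, 0,
				       SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
				       NULL);
	return req_cachep ? 0 : -ENOMEM;
}
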
57292diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
57293index 579c6d5..95b6d03353 100644
57294--- a/fs/cifs/cifsglob.h
57295+++ b/fs/cifs/cifsglob.h
57296@@ -797,35 +797,35 @@ struct cifs_tcon {
57297 __u16 Flags; /* optional support bits */
57298 enum statusEnum tidStatus;
57299 #ifdef CONFIG_CIFS_STATS
57300- atomic_t num_smbs_sent;
57301+ atomic_unchecked_t num_smbs_sent;
57302 union {
57303 struct {
57304- atomic_t num_writes;
57305- atomic_t num_reads;
57306- atomic_t num_flushes;
57307- atomic_t num_oplock_brks;
57308- atomic_t num_opens;
57309- atomic_t num_closes;
57310- atomic_t num_deletes;
57311- atomic_t num_mkdirs;
57312- atomic_t num_posixopens;
57313- atomic_t num_posixmkdirs;
57314- atomic_t num_rmdirs;
57315- atomic_t num_renames;
57316- atomic_t num_t2renames;
57317- atomic_t num_ffirst;
57318- atomic_t num_fnext;
57319- atomic_t num_fclose;
57320- atomic_t num_hardlinks;
57321- atomic_t num_symlinks;
57322- atomic_t num_locks;
57323- atomic_t num_acl_get;
57324- atomic_t num_acl_set;
57325+ atomic_unchecked_t num_writes;
57326+ atomic_unchecked_t num_reads;
57327+ atomic_unchecked_t num_flushes;
57328+ atomic_unchecked_t num_oplock_brks;
57329+ atomic_unchecked_t num_opens;
57330+ atomic_unchecked_t num_closes;
57331+ atomic_unchecked_t num_deletes;
57332+ atomic_unchecked_t num_mkdirs;
57333+ atomic_unchecked_t num_posixopens;
57334+ atomic_unchecked_t num_posixmkdirs;
57335+ atomic_unchecked_t num_rmdirs;
57336+ atomic_unchecked_t num_renames;
57337+ atomic_unchecked_t num_t2renames;
57338+ atomic_unchecked_t num_ffirst;
57339+ atomic_unchecked_t num_fnext;
57340+ atomic_unchecked_t num_fclose;
57341+ atomic_unchecked_t num_hardlinks;
57342+ atomic_unchecked_t num_symlinks;
57343+ atomic_unchecked_t num_locks;
57344+ atomic_unchecked_t num_acl_get;
57345+ atomic_unchecked_t num_acl_set;
57346 } cifs_stats;
57347 #ifdef CONFIG_CIFS_SMB2
57348 struct {
57349- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
57350- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
57351+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
57352+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
57353 } smb2_stats;
57354 #endif /* CONFIG_CIFS_SMB2 */
57355 } stats;
57356@@ -1155,7 +1155,7 @@ convert_delimiter(char *path, char delim)
57357 }
57358
57359 #ifdef CONFIG_CIFS_STATS
57360-#define cifs_stats_inc atomic_inc
57361+#define cifs_stats_inc atomic_inc_unchecked
57362
57363 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
57364 unsigned int bytes)
57365@@ -1521,8 +1521,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
57366 /* Various Debug counters */
57367 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
57368 #ifdef CONFIG_CIFS_STATS2
57369-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
57370-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
57371+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
57372+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
57373 #endif
57374 GLOBAL_EXTERN atomic_t smBufAllocCount;
57375 GLOBAL_EXTERN atomic_t midCount;
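
Because every statistics bump in CIFS funnels through the cifs_stats_inc() wrapper, retargeting that one macro converts all call sites at once; individual callers need no edits, as in this sketch:

#ifdef CONFIG_CIFS_STATS
#define cifs_stats_inc atomic_inc_unchecked	/* was: atomic_inc */
#endif

/* unchanged call site, now routed to the unchecked increment */
static void note_open(struct cifs_tcon *tcon)
{
	cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
}
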
57376diff --git a/fs/cifs/file.c b/fs/cifs/file.c
57377index a1c9ead..63e4c62 100644
57378--- a/fs/cifs/file.c
57379+++ b/fs/cifs/file.c
57380@@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
57381 index = mapping->writeback_index; /* Start from prev offset */
57382 end = -1;
57383 } else {
57384- index = wbc->range_start >> PAGE_CACHE_SHIFT;
57385- end = wbc->range_end >> PAGE_CACHE_SHIFT;
57386- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
57387+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
57388 range_whole = true;
57389+ index = 0;
57390+ end = ULONG_MAX;
57391+ } else {
57392+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
57393+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
57394+ }
57395 scanned = true;
57396 }
57397 retry:
57398diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
57399index 2f9f379..43f8025 100644
57400--- a/fs/cifs/misc.c
57401+++ b/fs/cifs/misc.c
57402@@ -170,7 +170,7 @@ cifs_buf_get(void)
57403 memset(ret_buf, 0, buf_size + 3);
57404 atomic_inc(&bufAllocCount);
57405 #ifdef CONFIG_CIFS_STATS2
57406- atomic_inc(&totBufAllocCount);
57407+ atomic_inc_unchecked(&totBufAllocCount);
57408 #endif /* CONFIG_CIFS_STATS2 */
57409 }
57410
57411@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
57412 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
57413 atomic_inc(&smBufAllocCount);
57414 #ifdef CONFIG_CIFS_STATS2
57415- atomic_inc(&totSmBufAllocCount);
57416+ atomic_inc_unchecked(&totSmBufAllocCount);
57417 #endif /* CONFIG_CIFS_STATS2 */
57418
57419 }
57420diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
57421index ffc9ef9..b3c992b 100644
57422--- a/fs/cifs/smb1ops.c
57423+++ b/fs/cifs/smb1ops.c
57424@@ -609,27 +609,27 @@ static void
57425 cifs_clear_stats(struct cifs_tcon *tcon)
57426 {
57427 #ifdef CONFIG_CIFS_STATS
57428- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
57429- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
57430- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
57431- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
57432- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
57433- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
57434- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
57435- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
57436- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
57437- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
57438- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
57439- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
57440- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
57441- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
57442- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
57443- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
57444- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
57445- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
57446- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
57447- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
57448- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
57449+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
57450+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
57451+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
57452+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
57453+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
57454+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
57455+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
57456+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
57457+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
57458+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
57459+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
57460+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
57461+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
57462+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
57463+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
57464+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
57465+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
57466+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
57467+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
57468+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
57469+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
57470 #endif
57471 }
57472
57473@@ -638,36 +638,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
57474 {
57475 #ifdef CONFIG_CIFS_STATS
57476 seq_printf(m, " Oplocks breaks: %d",
57477- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
57478+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
57479 seq_printf(m, "\nReads: %d Bytes: %llu",
57480- atomic_read(&tcon->stats.cifs_stats.num_reads),
57481+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
57482 (long long)(tcon->bytes_read));
57483 seq_printf(m, "\nWrites: %d Bytes: %llu",
57484- atomic_read(&tcon->stats.cifs_stats.num_writes),
57485+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
57486 (long long)(tcon->bytes_written));
57487 seq_printf(m, "\nFlushes: %d",
57488- atomic_read(&tcon->stats.cifs_stats.num_flushes));
57489+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
57490 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
57491- atomic_read(&tcon->stats.cifs_stats.num_locks),
57492- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
57493- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
57494+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
57495+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
57496+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
57497 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
57498- atomic_read(&tcon->stats.cifs_stats.num_opens),
57499- atomic_read(&tcon->stats.cifs_stats.num_closes),
57500- atomic_read(&tcon->stats.cifs_stats.num_deletes));
57501+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
57502+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
57503+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
57504 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
57505- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
57506- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
57507+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
57508+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
57509 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
57510- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
57511- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
57512+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
57513+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
57514 seq_printf(m, "\nRenames: %d T2 Renames %d",
57515- atomic_read(&tcon->stats.cifs_stats.num_renames),
57516- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
57517+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
57518+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
57519 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
57520- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
57521- atomic_read(&tcon->stats.cifs_stats.num_fnext),
57522- atomic_read(&tcon->stats.cifs_stats.num_fclose));
57523+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
57524+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
57525+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
57526 #endif
57527 }
57528
57529diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
57530index 192f51a..539307e 100644
57531--- a/fs/cifs/smb2ops.c
57532+++ b/fs/cifs/smb2ops.c
57533@@ -364,8 +364,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
57534 #ifdef CONFIG_CIFS_STATS
57535 int i;
57536 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
57537- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
57538- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
57539+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
57540+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
57541 }
57542 #endif
57543 }
57544@@ -405,65 +405,65 @@ static void
57545 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
57546 {
57547 #ifdef CONFIG_CIFS_STATS
57548- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
57549- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
57550+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
57551+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
57552 seq_printf(m, "\nNegotiates: %d sent %d failed",
57553- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
57554- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
57555+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
57556+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
57557 seq_printf(m, "\nSessionSetups: %d sent %d failed",
57558- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
57559- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
57560+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
57561+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
57562 seq_printf(m, "\nLogoffs: %d sent %d failed",
57563- atomic_read(&sent[SMB2_LOGOFF_HE]),
57564- atomic_read(&failed[SMB2_LOGOFF_HE]));
57565+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
57566+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
57567 seq_printf(m, "\nTreeConnects: %d sent %d failed",
57568- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
57569- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
57570+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
57571+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
57572 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
57573- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
57574- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
57575+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
57576+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
57577 seq_printf(m, "\nCreates: %d sent %d failed",
57578- atomic_read(&sent[SMB2_CREATE_HE]),
57579- atomic_read(&failed[SMB2_CREATE_HE]));
57580+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
57581+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
57582 seq_printf(m, "\nCloses: %d sent %d failed",
57583- atomic_read(&sent[SMB2_CLOSE_HE]),
57584- atomic_read(&failed[SMB2_CLOSE_HE]));
57585+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
57586+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
57587 seq_printf(m, "\nFlushes: %d sent %d failed",
57588- atomic_read(&sent[SMB2_FLUSH_HE]),
57589- atomic_read(&failed[SMB2_FLUSH_HE]));
57590+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
57591+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
57592 seq_printf(m, "\nReads: %d sent %d failed",
57593- atomic_read(&sent[SMB2_READ_HE]),
57594- atomic_read(&failed[SMB2_READ_HE]));
57595+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
57596+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
57597 seq_printf(m, "\nWrites: %d sent %d failed",
57598- atomic_read(&sent[SMB2_WRITE_HE]),
57599- atomic_read(&failed[SMB2_WRITE_HE]));
57600+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
57601+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
57602 seq_printf(m, "\nLocks: %d sent %d failed",
57603- atomic_read(&sent[SMB2_LOCK_HE]),
57604- atomic_read(&failed[SMB2_LOCK_HE]));
57605+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
57606+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
57607 seq_printf(m, "\nIOCTLs: %d sent %d failed",
57608- atomic_read(&sent[SMB2_IOCTL_HE]),
57609- atomic_read(&failed[SMB2_IOCTL_HE]));
57610+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
57611+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
57612 seq_printf(m, "\nCancels: %d sent %d failed",
57613- atomic_read(&sent[SMB2_CANCEL_HE]),
57614- atomic_read(&failed[SMB2_CANCEL_HE]));
57615+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
57616+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
57617 seq_printf(m, "\nEchos: %d sent %d failed",
57618- atomic_read(&sent[SMB2_ECHO_HE]),
57619- atomic_read(&failed[SMB2_ECHO_HE]));
57620+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
57621+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
57622 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
57623- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
57624- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
57625+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
57626+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
57627 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
57628- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
57629- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
57630+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
57631+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
57632 seq_printf(m, "\nQueryInfos: %d sent %d failed",
57633- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
57634- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
57635+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
57636+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
57637 seq_printf(m, "\nSetInfos: %d sent %d failed",
57638- atomic_read(&sent[SMB2_SET_INFO_HE]),
57639- atomic_read(&failed[SMB2_SET_INFO_HE]));
57640+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
57641+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
57642 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
57643- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
57644- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
57645+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
57646+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
57647 #endif
57648 }
57649
57650diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
57651index 787e171..31dcd0a 100644
57652--- a/fs/cifs/smb2pdu.c
57653+++ b/fs/cifs/smb2pdu.c
57654@@ -2093,8 +2093,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
57655 default:
57656 cifs_dbg(VFS, "info level %u isn't supported\n",
57657 srch_inf->info_level);
57658- rc = -EINVAL;
57659- goto qdir_exit;
57660+ return -EINVAL;
57661 }
57662
57663 req->FileIndex = cpu_to_le32(index);
57664diff --git a/fs/coda/cache.c b/fs/coda/cache.c
57665index 1da168c..8bc7ff6 100644
57666--- a/fs/coda/cache.c
57667+++ b/fs/coda/cache.c
57668@@ -24,7 +24,7 @@
57669 #include "coda_linux.h"
57670 #include "coda_cache.h"
57671
57672-static atomic_t permission_epoch = ATOMIC_INIT(0);
57673+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
57674
57675 /* replace or extend an acl cache hit */
57676 void coda_cache_enter(struct inode *inode, int mask)
57677@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
57678 struct coda_inode_info *cii = ITOC(inode);
57679
57680 spin_lock(&cii->c_lock);
57681- cii->c_cached_epoch = atomic_read(&permission_epoch);
57682+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
57683 if (!uid_eq(cii->c_uid, current_fsuid())) {
57684 cii->c_uid = current_fsuid();
57685 cii->c_cached_perm = mask;
57686@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
57687 {
57688 struct coda_inode_info *cii = ITOC(inode);
57689 spin_lock(&cii->c_lock);
57690- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
57691+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
57692 spin_unlock(&cii->c_lock);
57693 }
57694
57695 /* remove all acl caches */
57696 void coda_cache_clear_all(struct super_block *sb)
57697 {
57698- atomic_inc(&permission_epoch);
57699+ atomic_inc_unchecked(&permission_epoch);
57700 }
57701
57702
57703@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
57704 spin_lock(&cii->c_lock);
57705 hit = (mask & cii->c_cached_perm) == mask &&
57706 uid_eq(cii->c_uid, current_fsuid()) &&
57707- cii->c_cached_epoch == atomic_read(&permission_epoch);
57708+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
57709 spin_unlock(&cii->c_lock);
57710
57711 return hit;
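
The coda permission cache is a textbook epoch scheme: every cached entry is stamped with the global permission_epoch, a hit requires the stamps to match, and bumping the epoch invalidates every entry in O(1) without walking them (clearing a single inode just stamps it epoch - 1). A generic sketch of the technique, with hypothetical names:

static atomic_unchecked_t cache_epoch = ATOMIC_INIT(0);

struct cached_perm {
	int perm;
	int epoch;	/* value of cache_epoch when stored */
};

static void cache_store(struct cached_perm *c, int perm)
{
	c->perm = perm;
	c->epoch = atomic_read_unchecked(&cache_epoch);
}

static bool cache_valid(const struct cached_perm *c)
{
	return c->epoch == atomic_read_unchecked(&cache_epoch);
}

static void cache_flush_all(void)
{
	atomic_inc_unchecked(&cache_epoch);	/* O(1) global invalidation */
}
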
57712diff --git a/fs/compat.c b/fs/compat.c
57713index 6af20de..fec3fbb 100644
57714--- a/fs/compat.c
57715+++ b/fs/compat.c
57716@@ -54,7 +54,7 @@
57717 #include <asm/ioctls.h>
57718 #include "internal.h"
57719
57720-int compat_log = 1;
57721+int compat_log = 0;
57722
57723 int compat_printk(const char *fmt, ...)
57724 {
57725@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
57726
57727 set_fs(KERNEL_DS);
57728 /* The __user pointer cast is valid because of the set_fs() */
57729- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
57730+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
57731 set_fs(oldfs);
57732 /* truncating is ok because it's a user address */
57733 if (!ret)
57734@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
57735 goto out;
57736
57737 ret = -EINVAL;
57738- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
57739+ if (nr_segs > UIO_MAXIOV)
57740 goto out;
57741 if (nr_segs > fast_segs) {
57742 ret = -ENOMEM;
57743@@ -834,6 +834,7 @@ struct compat_old_linux_dirent {
57744 struct compat_readdir_callback {
57745 struct dir_context ctx;
57746 struct compat_old_linux_dirent __user *dirent;
57747+ struct file * file;
57748 int result;
57749 };
57750
57751@@ -851,6 +852,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
57752 buf->result = -EOVERFLOW;
57753 return -EOVERFLOW;
57754 }
57755+
57756+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57757+ return 0;
57758+
57759 buf->result++;
57760 dirent = buf->dirent;
57761 if (!access_ok(VERIFY_WRITE, dirent,
57762@@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
57763 if (!f.file)
57764 return -EBADF;
57765
57766+ buf.file = f.file;
57767 error = iterate_dir(f.file, &buf.ctx);
57768 if (buf.result)
57769 error = buf.result;
57770@@ -901,6 +907,7 @@ struct compat_getdents_callback {
57771 struct dir_context ctx;
57772 struct compat_linux_dirent __user *current_dir;
57773 struct compat_linux_dirent __user *previous;
57774+ struct file * file;
57775 int count;
57776 int error;
57777 };
57778@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
57779 buf->error = -EOVERFLOW;
57780 return -EOVERFLOW;
57781 }
57782+
57783+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57784+ return 0;
57785+
57786 dirent = buf->previous;
57787 if (dirent) {
57788 if (__put_user(offset, &dirent->d_off))
57789@@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
57790 if (!f.file)
57791 return -EBADF;
57792
57793+ buf.file = f.file;
57794 error = iterate_dir(f.file, &buf.ctx);
57795 if (error >= 0)
57796 error = buf.error;
57797@@ -987,6 +999,7 @@ struct compat_getdents_callback64 {
57798 struct dir_context ctx;
57799 struct linux_dirent64 __user *current_dir;
57800 struct linux_dirent64 __user *previous;
57801+ struct file * file;
57802 int count;
57803 int error;
57804 };
57805@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
57806 buf->error = -EINVAL; /* only used if we fail.. */
57807 if (reclen > buf->count)
57808 return -EINVAL;
57809+
57810+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
57811+ return 0;
57812+
57813 dirent = buf->previous;
57814
57815 if (dirent) {
57816@@ -1052,6 +1069,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
57817 if (!f.file)
57818 return -EBADF;
57819
57820+ buf.file = f.file;
57821 error = iterate_dir(f.file, &buf.ctx);
57822 if (error >= 0)
57823 error = buf.error;
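
All three compat readdir paths above get the same treatment: the caller stashes the directory's struct file in the callback context, and the filldir callback asks the grsecurity RBAC hook whether each entry may be shown, silently skipping hidden ones. The shape of the filtering callback, sketched generically (the hook name is from this patch; the context struct is illustrative):

struct filtered_readdir_ctx {
	struct dir_context ctx;
	struct file *file;	/* set by the caller before iterate_dir() */
};

static int filtered_fillonedir(void *__buf, const char *name, int namlen,
			       loff_t offset, u64 ino, unsigned int d_type)
{
	struct filtered_readdir_ctx *buf = __buf;

	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
		return 0;	/* policy says hide it; keep iterating */

	/* ... emit the entry to the user buffer as the real callbacks do ... */
	return 0;
}
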
57824diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
57825index a81147e..20bf2b5 100644
57826--- a/fs/compat_binfmt_elf.c
57827+++ b/fs/compat_binfmt_elf.c
57828@@ -30,11 +30,13 @@
57829 #undef elf_phdr
57830 #undef elf_shdr
57831 #undef elf_note
57832+#undef elf_dyn
57833 #undef elf_addr_t
57834 #define elfhdr elf32_hdr
57835 #define elf_phdr elf32_phdr
57836 #define elf_shdr elf32_shdr
57837 #define elf_note elf32_note
57838+#define elf_dyn Elf32_Dyn
57839 #define elf_addr_t Elf32_Addr
57840
57841 /*
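
fs/compat_binfmt_elf.c works by re-including fs/binfmt_elf.c after redefining the ELF type macros to their 32-bit forms; adding elf_dyn to that shim means the PaX code in the shared loader that walks PT_DYNAMIC (added elsewhere in this patch) sees Elf32_Dyn in the compat build. Schematically:

/* The compat shim pattern: swap the type, then build the shared code. */
#undef	elf_dyn
#define	elf_dyn		Elf32_Dyn
#include "binfmt_elf.c"	/* same loader source, now 32-bit flavoured */
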
57842diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
57843index dc52e13..ec61057 100644
57844--- a/fs/compat_ioctl.c
57845+++ b/fs/compat_ioctl.c
57846@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
57847 return -EFAULT;
57848 if (__get_user(udata, &ss32->iomem_base))
57849 return -EFAULT;
57850- ss.iomem_base = compat_ptr(udata);
57851+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
57852 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
57853 __get_user(ss.port_high, &ss32->port_high))
57854 return -EFAULT;
57855@@ -702,8 +702,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
57856 for (i = 0; i < nmsgs; i++) {
57857 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
57858 return -EFAULT;
57859- if (get_user(datap, &umsgs[i].buf) ||
57860- put_user(compat_ptr(datap), &tmsgs[i].buf))
57861+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
57862+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
57863 return -EFAULT;
57864 }
57865 return sys_ioctl(fd, cmd, (unsigned long)tdata);
57866@@ -796,7 +796,7 @@ static int compat_ioctl_preallocate(struct file *file,
57867 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
57868 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
57869 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
57870- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
57871+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
57872 return -EFAULT;
57873
57874 return ioctl_preallocate(file, p);
57875@@ -1616,8 +1616,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
57876 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
57877 {
57878 unsigned int a, b;
57879- a = *(unsigned int *)p;
57880- b = *(unsigned int *)q;
57881+ a = *(const unsigned int *)p;
57882+ b = *(const unsigned int *)q;
57883 if (a > b)
57884 return 1;
57885 if (a < b)
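
The comparator tweak above is const-correctness only: sort() hands the comparator pointers it must not write through, and casting via const lets the compiler enforce that. The same comparator can also be written branch-light, as an illustrative alternative to the if-chain:

static int __init cmp_uint(const void *p, const void *q)
{
	unsigned int a = *(const unsigned int *)p;
	unsigned int b = *(const unsigned int *)q;

	return (a > b) - (a < b);	/* -1, 0 or 1 without overflow */
}
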
57886diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
57887index e081acb..911df21 100644
57888--- a/fs/configfs/dir.c
57889+++ b/fs/configfs/dir.c
57890@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
57891 }
57892 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
57893 struct configfs_dirent *next;
57894- const char *name;
57895+ const unsigned char * name;
57896+ char d_name[sizeof(next->s_dentry->d_iname)];
57897 int len;
57898 struct inode *inode = NULL;
57899
57900@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
57901 continue;
57902
57903 name = configfs_get_name(next);
57904- len = strlen(name);
57905+ if (next->s_dentry && name == next->s_dentry->d_iname) {
57906+ len = next->s_dentry->d_name.len;
57907+ memcpy(d_name, name, len);
57908+ name = d_name;
57909+ } else
57910+ len = strlen(name);
57911
57912 /*
57913 * We'll have a dentry and an inode for
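
The configfs change above defends against a rename racing with readdir: when the name lives in the dentry's inline buffer (d_iname), a concurrent rename can rewrite it mid-strlen(), so the code snapshots d_name.len first and memcpy()s at most sizeof(d_iname) bytes into a local copy. The core of the pattern:

/* Sketch: bounded snapshot of a possibly-changing inline dentry name.
 * Assumes the caller has already verified the name is the inline one. */
static int snapshot_inline_name(const struct dentry *dentry,
				char buf[DNAME_INLINE_LEN])
{
	int len = dentry->d_name.len;		/* read the length once */

	memcpy(buf, dentry->d_iname, len);	/* bounded by the inline buffer */
	return len;
}
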
57914diff --git a/fs/coredump.c b/fs/coredump.c
57915index bc3fbcd..6031650 100644
57916--- a/fs/coredump.c
57917+++ b/fs/coredump.c
57918@@ -438,8 +438,8 @@ static void wait_for_dump_helpers(struct file *file)
57919 struct pipe_inode_info *pipe = file->private_data;
57920
57921 pipe_lock(pipe);
57922- pipe->readers++;
57923- pipe->writers--;
57924+ atomic_inc(&pipe->readers);
57925+ atomic_dec(&pipe->writers);
57926 wake_up_interruptible_sync(&pipe->wait);
57927 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
57928 pipe_unlock(pipe);
57929@@ -448,11 +448,11 @@ static void wait_for_dump_helpers(struct file *file)
57930 * We actually want wait_event_freezable() but then we need
57931 * to clear TIF_SIGPENDING and improve dump_interrupted().
57932 */
57933- wait_event_interruptible(pipe->wait, pipe->readers == 1);
57934+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
57935
57936 pipe_lock(pipe);
57937- pipe->readers--;
57938- pipe->writers++;
57939+ atomic_dec(&pipe->readers);
57940+ atomic_inc(&pipe->writers);
57941 pipe_unlock(pipe);
57942 }
57943
57944@@ -499,7 +499,9 @@ void do_coredump(const siginfo_t *siginfo)
57945 struct files_struct *displaced;
57946 bool need_nonrelative = false;
57947 bool core_dumped = false;
57948- static atomic_t core_dump_count = ATOMIC_INIT(0);
57949+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
57950+ long signr = siginfo->si_signo;
57951+ int dumpable;
57952 struct coredump_params cprm = {
57953 .siginfo = siginfo,
57954 .regs = signal_pt_regs(),
57955@@ -512,12 +514,17 @@ void do_coredump(const siginfo_t *siginfo)
57956 .mm_flags = mm->flags,
57957 };
57958
57959- audit_core_dumps(siginfo->si_signo);
57960+ audit_core_dumps(signr);
57961+
57962+ dumpable = __get_dumpable(cprm.mm_flags);
57963+
57964+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
57965+ gr_handle_brute_attach(dumpable);
57966
57967 binfmt = mm->binfmt;
57968 if (!binfmt || !binfmt->core_dump)
57969 goto fail;
57970- if (!__get_dumpable(cprm.mm_flags))
57971+ if (!dumpable)
57972 goto fail;
57973
57974 cred = prepare_creds();
57975@@ -536,7 +543,7 @@ void do_coredump(const siginfo_t *siginfo)
57976 need_nonrelative = true;
57977 }
57978
57979- retval = coredump_wait(siginfo->si_signo, &core_state);
57980+ retval = coredump_wait(signr, &core_state);
57981 if (retval < 0)
57982 goto fail_creds;
57983
57984@@ -579,7 +586,7 @@ void do_coredump(const siginfo_t *siginfo)
57985 }
57986 cprm.limit = RLIM_INFINITY;
57987
57988- dump_count = atomic_inc_return(&core_dump_count);
57989+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
57990 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
57991 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
57992 task_tgid_vnr(current), current->comm);
57993@@ -611,6 +618,8 @@ void do_coredump(const siginfo_t *siginfo)
57994 } else {
57995 struct inode *inode;
57996
57997+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
57998+
57999 if (cprm.limit < binfmt->min_coredump)
58000 goto fail_unlock;
58001
58002@@ -669,7 +678,7 @@ close_fail:
58003 filp_close(cprm.file, NULL);
58004 fail_dropcount:
58005 if (ispipe)
58006- atomic_dec(&core_dump_count);
58007+ atomic_dec_unchecked(&core_dump_count);
58008 fail_unlock:
58009 kfree(cn.corename);
58010 coredump_finish(mm, core_dumped);
58011@@ -690,6 +699,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
58012 struct file *file = cprm->file;
58013 loff_t pos = file->f_pos;
58014 ssize_t n;
58015+
58016+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
58017 if (cprm->written + nr > cprm->limit)
58018 return 0;
58019 while (nr) {
58020diff --git a/fs/dcache.c b/fs/dcache.c
58021index fdbe230..ba17c1f 100644
58022--- a/fs/dcache.c
58023+++ b/fs/dcache.c
58024@@ -1495,7 +1495,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
58025 */
58026 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
58027 if (name->len > DNAME_INLINE_LEN-1) {
58028- dname = kmalloc(name->len + 1, GFP_KERNEL);
58029+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
58030 if (!dname) {
58031 kmem_cache_free(dentry_cache, dentry);
58032 return NULL;
58033@@ -3428,7 +3428,8 @@ void __init vfs_caches_init(unsigned long mempages)
58034 mempages -= reserve;
58035
58036 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
58037- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
58038+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
58039+ SLAB_NO_SANITIZE, NULL);
58040
58041 dcache_init();
58042 inode_init();
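
Two independent changes above: names_cache gains the SLAB_USERCOPY whitelist (getname() buffers are copied from user space by design) plus SLAB_NO_SANITIZE, and out-of-line dentry names are now padded to a word boundary, since the name-comparison fast path reads word-at-a-time and must own every byte of the last word it can touch. The padding, in isolation (helper name hypothetical):

/* Sketch: allocate an out-of-line name padded to a full word. */
static char *alloc_dname(const char *name, size_t len)
{
	char *dname = kmalloc(round_up(len + 1, sizeof(unsigned long)),
			      GFP_KERNEL);

	if (dname)
		memcpy(dname, name, len + 1);	/* includes the NUL */
	return dname;
}
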
58043diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
58044index 9c0444c..628490c 100644
58045--- a/fs/debugfs/inode.c
58046+++ b/fs/debugfs/inode.c
58047@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
58048 */
58049 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
58050 {
58051+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
58052+ return __create_file(name, S_IFDIR | S_IRWXU,
58053+#else
58054 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
58055+#endif
58056 parent, NULL, NULL);
58057 }
58058 EXPORT_SYMBOL_GPL(debugfs_create_dir);
58059diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
58060index c36c448..fc96710 100644
58061--- a/fs/ecryptfs/inode.c
58062+++ b/fs/ecryptfs/inode.c
58063@@ -675,7 +675,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
58064 old_fs = get_fs();
58065 set_fs(get_ds());
58066 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
58067- (char __user *)lower_buf,
58068+ (char __force_user *)lower_buf,
58069 PATH_MAX);
58070 set_fs(old_fs);
58071 if (rc < 0)
58072diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
58073index e4141f2..d8263e8 100644
58074--- a/fs/ecryptfs/miscdev.c
58075+++ b/fs/ecryptfs/miscdev.c
58076@@ -304,7 +304,7 @@ check_list:
58077 goto out_unlock_msg_ctx;
58078 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
58079 if (msg_ctx->msg) {
58080- if (copy_to_user(&buf[i], packet_length, packet_length_size))
58081+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
58082 goto out_unlock_msg_ctx;
58083 i += packet_length_size;
58084 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
58085diff --git a/fs/exec.c b/fs/exec.c
58086index 7ea097f..0158d8a 100644
58087--- a/fs/exec.c
58088+++ b/fs/exec.c
58089@@ -55,8 +55,20 @@
58090 #include <linux/pipe_fs_i.h>
58091 #include <linux/oom.h>
58092 #include <linux/compat.h>
58093+#include <linux/random.h>
58094+#include <linux/seq_file.h>
58095+#include <linux/coredump.h>
58096+#include <linux/mman.h>
58097+
58098+#ifdef CONFIG_PAX_REFCOUNT
58099+#include <linux/kallsyms.h>
58100+#include <linux/kdebug.h>
58101+#endif
58102+
58103+#include <trace/events/fs.h>
58104
58105 #include <asm/uaccess.h>
58106+#include <asm/sections.h>
58107 #include <asm/mmu_context.h>
58108 #include <asm/tlb.h>
58109
58110@@ -66,19 +78,34 @@
58111
58112 #include <trace/events/sched.h>
58113
58114+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58115+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
58116+{
58117+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
58118+}
58119+#endif
58120+
58121+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
58122+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
58123+EXPORT_SYMBOL(pax_set_initial_flags_func);
58124+#endif
58125+
58126 int suid_dumpable = 0;
58127
58128 static LIST_HEAD(formats);
58129 static DEFINE_RWLOCK(binfmt_lock);
58130
58131+extern int gr_process_kernel_exec_ban(void);
58132+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
58133+
58134 void __register_binfmt(struct linux_binfmt * fmt, int insert)
58135 {
58136 BUG_ON(!fmt);
58137 if (WARN_ON(!fmt->load_binary))
58138 return;
58139 write_lock(&binfmt_lock);
58140- insert ? list_add(&fmt->lh, &formats) :
58141- list_add_tail(&fmt->lh, &formats);
58142+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
58143+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
58144 write_unlock(&binfmt_lock);
58145 }
58146
58147@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
58148 void unregister_binfmt(struct linux_binfmt * fmt)
58149 {
58150 write_lock(&binfmt_lock);
58151- list_del(&fmt->lh);
58152+ pax_list_del((struct list_head *)&fmt->lh);
58153 write_unlock(&binfmt_lock);
58154 }
58155
58156@@ -181,18 +208,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
58157 int write)
58158 {
58159 struct page *page;
58160- int ret;
58161
58162-#ifdef CONFIG_STACK_GROWSUP
58163- if (write) {
58164- ret = expand_downwards(bprm->vma, pos);
58165- if (ret < 0)
58166- return NULL;
58167- }
58168-#endif
58169- ret = get_user_pages(current, bprm->mm, pos,
58170- 1, write, 1, &page, NULL);
58171- if (ret <= 0)
58172+ if (0 > expand_downwards(bprm->vma, pos))
58173+ return NULL;
58174+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
58175 return NULL;
58176
58177 if (write) {
58178@@ -208,6 +227,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
58179 if (size <= ARG_MAX)
58180 return page;
58181
58182+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58183+ // only allow 512KB for argv+env on suid/sgid binaries
58184+ // to prevent easy ASLR exhaustion
58185+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
58186+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
58187+ (size > (512 * 1024))) {
58188+ put_page(page);
58189+ return NULL;
58190+ }
58191+#endif
58192+
58193 /*
58194 * Limit to 1/4-th the stack size for the argv+env strings.
58195 * This ensures that:
58196@@ -267,6 +297,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
58197 vma->vm_end = STACK_TOP_MAX;
58198 vma->vm_start = vma->vm_end - PAGE_SIZE;
58199 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
58200+
58201+#ifdef CONFIG_PAX_SEGMEXEC
58202+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
58203+#endif
58204+
58205 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
58206 INIT_LIST_HEAD(&vma->anon_vma_chain);
58207
58208@@ -277,6 +312,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
58209 mm->stack_vm = mm->total_vm = 1;
58210 up_write(&mm->mmap_sem);
58211 bprm->p = vma->vm_end - sizeof(void *);
58212+
58213+#ifdef CONFIG_PAX_RANDUSTACK
58214+ if (randomize_va_space)
58215+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
58216+#endif
58217+
58218 return 0;
58219 err:
58220 up_write(&mm->mmap_sem);
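
Two things happen in the hunks above: get_arg_page() drops its CONFIG_STACK_GROWSUP special case and always expands the stack VMA before pinning the page, and PAX_RANDUSTACK perturbs the initial argument-stack pointer by a random sub-page offset. The randomization keeps only the page-offset bits, so the pointer stays inside the same page (helper name hypothetical):

static unsigned long randomize_arg_pointer(unsigned long p)
{
	/* ~PAGE_MASK keeps bits below PAGE_SIZE: same page, random offset */
	if (randomize_va_space)
		p ^= prandom_u32() & ~PAGE_MASK;
	return p;
}
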
58221@@ -397,7 +438,7 @@ struct user_arg_ptr {
58222 } ptr;
58223 };
58224
58225-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
58226+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
58227 {
58228 const char __user *native;
58229
58230@@ -406,14 +447,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
58231 compat_uptr_t compat;
58232
58233 if (get_user(compat, argv.ptr.compat + nr))
58234- return ERR_PTR(-EFAULT);
58235+ return (const char __force_user *)ERR_PTR(-EFAULT);
58236
58237 return compat_ptr(compat);
58238 }
58239 #endif
58240
58241 if (get_user(native, argv.ptr.native + nr))
58242- return ERR_PTR(-EFAULT);
58243+ return (const char __force_user *)ERR_PTR(-EFAULT);
58244
58245 return native;
58246 }
58247@@ -432,7 +473,7 @@ static int count(struct user_arg_ptr argv, int max)
58248 if (!p)
58249 break;
58250
58251- if (IS_ERR(p))
58252+ if (IS_ERR((const char __force_kernel *)p))
58253 return -EFAULT;
58254
58255 if (i >= max)
58256@@ -467,7 +508,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
58257
58258 ret = -EFAULT;
58259 str = get_user_arg_ptr(argv, argc);
58260- if (IS_ERR(str))
58261+ if (IS_ERR((const char __force_kernel *)str))
58262 goto out;
58263
58264 len = strnlen_user(str, MAX_ARG_STRLEN);
58265@@ -549,7 +590,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
58266 int r;
58267 mm_segment_t oldfs = get_fs();
58268 struct user_arg_ptr argv = {
58269- .ptr.native = (const char __user *const __user *)__argv,
58270+ .ptr.native = (const char __user * const __force_user *)__argv,
58271 };
58272
58273 set_fs(KERNEL_DS);
58274@@ -584,7 +625,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
58275 unsigned long new_end = old_end - shift;
58276 struct mmu_gather tlb;
58277
58278- BUG_ON(new_start > new_end);
58279+ if (new_start >= new_end || new_start < mmap_min_addr)
58280+ return -ENOMEM;
58281
58282 /*
58283 * ensure there are no vmas between where we want to go
58284@@ -593,6 +635,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
58285 if (vma != find_vma(mm, new_start))
58286 return -EFAULT;
58287
58288+#ifdef CONFIG_PAX_SEGMEXEC
58289+ BUG_ON(pax_find_mirror_vma(vma));
58290+#endif
58291+
58292 /*
58293 * cover the whole range: [new_start, old_end)
58294 */
58295@@ -673,10 +719,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
58296 stack_top = arch_align_stack(stack_top);
58297 stack_top = PAGE_ALIGN(stack_top);
58298
58299- if (unlikely(stack_top < mmap_min_addr) ||
58300- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
58301- return -ENOMEM;
58302-
58303 stack_shift = vma->vm_end - stack_top;
58304
58305 bprm->p -= stack_shift;
58306@@ -688,8 +730,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
58307 bprm->exec -= stack_shift;
58308
58309 down_write(&mm->mmap_sem);
58310+
58311+ /* Move stack pages down in memory. */
58312+ if (stack_shift) {
58313+ ret = shift_arg_pages(vma, stack_shift);
58314+ if (ret)
58315+ goto out_unlock;
58316+ }
58317+
58318 vm_flags = VM_STACK_FLAGS;
58319
58320+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58321+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58322+ vm_flags &= ~VM_EXEC;
58323+
58324+#ifdef CONFIG_PAX_MPROTECT
58325+ if (mm->pax_flags & MF_PAX_MPROTECT)
58326+ vm_flags &= ~VM_MAYEXEC;
58327+#endif
58328+
58329+ }
58330+#endif
58331+
58332 /*
58333 * Adjust stack execute permissions; explicitly enable for
58334 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
58335@@ -708,13 +770,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
58336 goto out_unlock;
58337 BUG_ON(prev != vma);
58338
58339- /* Move stack pages down in memory. */
58340- if (stack_shift) {
58341- ret = shift_arg_pages(vma, stack_shift);
58342- if (ret)
58343- goto out_unlock;
58344- }
58345-
58346 /* mprotect_fixup is overkill to remove the temporary stack flags */
58347 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
58348
58349@@ -738,6 +793,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
58350 #endif
58351 current->mm->start_stack = bprm->p;
58352 ret = expand_stack(vma, stack_base);
58353+
58354+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
58355+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
58356+ unsigned long size;
58357+ vm_flags_t vm_flags;
58358+
58359+ size = STACK_TOP - vma->vm_end;
58360+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
58361+
58362+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
58363+
58364+#ifdef CONFIG_X86
58365+ if (!ret) {
58366+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
58367+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
58368+ }
58369+#endif
58370+
58371+ }
58372+#endif
58373+
58374 if (ret)
58375 ret = -EFAULT;
58376
58377@@ -774,6 +850,8 @@ struct file *open_exec(const char *name)
58378
58379 fsnotify_open(file);
58380
58381+ trace_open_exec(name);
58382+
58383 err = deny_write_access(file);
58384 if (err)
58385 goto exit;
58386@@ -797,7 +875,7 @@ int kernel_read(struct file *file, loff_t offset,
58387 old_fs = get_fs();
58388 set_fs(get_ds());
58389 /* The cast to a user pointer is valid due to the set_fs() */
58390- result = vfs_read(file, (void __user *)addr, count, &pos);
58391+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
58392 set_fs(old_fs);
58393 return result;
58394 }
58395@@ -1253,7 +1331,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
58396 }
58397 rcu_read_unlock();
58398
58399- if (p->fs->users > n_fs) {
58400+ if (atomic_read(&p->fs->users) > n_fs) {
58401 bprm->unsafe |= LSM_UNSAFE_SHARE;
58402 } else {
58403 res = -EAGAIN;
58404@@ -1443,6 +1521,31 @@ static int exec_binprm(struct linux_binprm *bprm)
58405 return ret;
58406 }
58407
58408+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58409+static DEFINE_PER_CPU(u64, exec_counter);
58410+static int __init init_exec_counters(void)
58411+{
58412+ unsigned int cpu;
58413+
58414+ for_each_possible_cpu(cpu) {
58415+ per_cpu(exec_counter, cpu) = (u64)cpu;
58416+ }
58417+
58418+ return 0;
58419+}
58420+early_initcall(init_exec_counters);
58421+static inline void increment_exec_counter(void)
58422+{
58423+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
58424+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
58425+}
58426+#else
58427+static inline void increment_exec_counter(void) {}
58428+#endif
58429+
58430+extern void gr_handle_exec_args(struct linux_binprm *bprm,
58431+ struct user_arg_ptr argv);
58432+
58433 /*
58434 * sys_execve() executes a new program.
58435 */
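
The per-cpu exec counter above hands out globally unique exec_ids without any atomic traffic: each CPU's counter is seeded with its own cpu number and stepped by 1 << 16, so the low 16 bits always identify the CPU and the high bits count execs on it, with BUILD_BUG_ON guaranteeing that 16 bits cover NR_CPUS. Worked example: CPU 3 issues 3, 0x10003, 0x20003, ... and can never collide with CPU 5's 5, 0x10005, ...

/* Sketch: the ID of the k-th exec on cpu c is (k << 16) | c. */
static inline u64 exec_id_for(unsigned int cpu, u64 nth_exec)
{
	return (nth_exec << 16) | cpu;
}
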
58436@@ -1450,12 +1553,19 @@ static int do_execve_common(const char *filename,
58437 struct user_arg_ptr argv,
58438 struct user_arg_ptr envp)
58439 {
58440+#ifdef CONFIG_GRKERNSEC
58441+ struct file *old_exec_file;
58442+ struct acl_subject_label *old_acl;
58443+ struct rlimit old_rlim[RLIM_NLIMITS];
58444+#endif
58445 struct linux_binprm *bprm;
58446 struct file *file;
58447 struct files_struct *displaced;
58448 bool clear_in_exec;
58449 int retval;
58450
58451+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
58452+
58453 /*
58454 * We move the actual failure in case of RLIMIT_NPROC excess from
58455 * set*uid() to execve() because too many poorly written programs
58456@@ -1496,12 +1606,22 @@ static int do_execve_common(const char *filename,
58457 if (IS_ERR(file))
58458 goto out_unmark;
58459
58460+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
58461+ retval = -EPERM;
58462+ goto out_file;
58463+ }
58464+
58465 sched_exec();
58466
58467 bprm->file = file;
58468 bprm->filename = filename;
58469 bprm->interp = filename;
58470
58471+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
58472+ retval = -EACCES;
58473+ goto out_file;
58474+ }
58475+
58476 retval = bprm_mm_init(bprm);
58477 if (retval)
58478 goto out_file;
58479@@ -1518,24 +1638,70 @@ static int do_execve_common(const char *filename,
58480 if (retval < 0)
58481 goto out;
58482
58483+#ifdef CONFIG_GRKERNSEC
58484+ old_acl = current->acl;
58485+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
58486+ old_exec_file = current->exec_file;
58487+ get_file(file);
58488+ current->exec_file = file;
58489+#endif
58490+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
58491+ /* limit suid stack to 8MB
58492+ * we saved the old limits above and will restore them if this exec fails
58493+ */
58494+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
58495+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
58496+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
58497+#endif
58498+
58499+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
58500+ retval = -EPERM;
58501+ goto out_fail;
58502+ }
58503+
58504+ if (!gr_tpe_allow(file)) {
58505+ retval = -EACCES;
58506+ goto out_fail;
58507+ }
58508+
58509+ if (gr_check_crash_exec(file)) {
58510+ retval = -EACCES;
58511+ goto out_fail;
58512+ }
58513+
58514+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
58515+ bprm->unsafe);
58516+ if (retval < 0)
58517+ goto out_fail;
58518+
58519 retval = copy_strings_kernel(1, &bprm->filename, bprm);
58520 if (retval < 0)
58521- goto out;
58522+ goto out_fail;
58523
58524 bprm->exec = bprm->p;
58525 retval = copy_strings(bprm->envc, envp, bprm);
58526 if (retval < 0)
58527- goto out;
58528+ goto out_fail;
58529
58530 retval = copy_strings(bprm->argc, argv, bprm);
58531 if (retval < 0)
58532- goto out;
58533+ goto out_fail;
58534+
58535+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
58536+
58537+ gr_handle_exec_args(bprm, argv);
58538
58539 retval = exec_binprm(bprm);
58540 if (retval < 0)
58541- goto out;
58542+ goto out_fail;
58543+#ifdef CONFIG_GRKERNSEC
58544+ if (old_exec_file)
58545+ fput(old_exec_file);
58546+#endif
58547
58548 /* execve succeeded */
58549+
58550+ increment_exec_counter();
58551 current->fs->in_exec = 0;
58552 current->in_execve = 0;
58553 acct_update_integrals(current);
58554@@ -1545,6 +1711,14 @@ static int do_execve_common(const char *filename,
58555 put_files_struct(displaced);
58556 return retval;
58557
58558+out_fail:
58559+#ifdef CONFIG_GRKERNSEC
58560+ current->acl = old_acl;
58561+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
58562+ fput(current->exec_file);
58563+ current->exec_file = old_exec_file;
58564+#endif
58565+
58566 out:
58567 if (bprm->mm) {
58568 acct_arg_size(bprm, 0);
58569@@ -1699,3 +1873,295 @@ asmlinkage long compat_sys_execve(const char __user * filename,
58570 return error;
58571 }
58572 #endif
58573+
58574+int pax_check_flags(unsigned long *flags)
58575+{
58576+ int retval = 0;
58577+
58578+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
58579+ if (*flags & MF_PAX_SEGMEXEC)
58580+ {
58581+ *flags &= ~MF_PAX_SEGMEXEC;
58582+ retval = -EINVAL;
58583+ }
58584+#endif
58585+
58586+ if ((*flags & MF_PAX_PAGEEXEC)
58587+
58588+#ifdef CONFIG_PAX_PAGEEXEC
58589+ && (*flags & MF_PAX_SEGMEXEC)
58590+#endif
58591+
58592+ )
58593+ {
58594+ *flags &= ~MF_PAX_PAGEEXEC;
58595+ retval = -EINVAL;
58596+ }
58597+
58598+ if ((*flags & MF_PAX_MPROTECT)
58599+
58600+#ifdef CONFIG_PAX_MPROTECT
58601+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
58602+#endif
58603+
58604+ )
58605+ {
58606+ *flags &= ~MF_PAX_MPROTECT;
58607+ retval = -EINVAL;
58608+ }
58609+
58610+ if ((*flags & MF_PAX_EMUTRAMP)
58611+
58612+#ifdef CONFIG_PAX_EMUTRAMP
58613+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
58614+#endif
58615+
58616+ )
58617+ {
58618+ *flags &= ~MF_PAX_EMUTRAMP;
58619+ retval = -EINVAL;
58620+ }
58621+
58622+ return retval;
58623+}
58624+
58625+EXPORT_SYMBOL(pax_check_flags);
58626+
58627+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58628+char *pax_get_path(const struct path *path, char *buf, int buflen)
58629+{
58630+ char *pathname = d_path(path, buf, buflen);
58631+
58632+ if (IS_ERR(pathname))
58633+ goto toolong;
58634+
58635+ pathname = mangle_path(buf, pathname, "\t\n\\");
58636+ if (!pathname)
58637+ goto toolong;
58638+
58639+ *pathname = 0;
58640+ return buf;
58641+
58642+toolong:
58643+ return "<path too long>";
58644+}
58645+EXPORT_SYMBOL(pax_get_path);
58646+
58647+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
58648+{
58649+ struct task_struct *tsk = current;
58650+ struct mm_struct *mm = current->mm;
58651+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
58652+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
58653+ char *path_exec = NULL;
58654+ char *path_fault = NULL;
58655+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
58656+ siginfo_t info = { };
58657+
58658+ if (buffer_exec && buffer_fault) {
58659+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
58660+
58661+ down_read(&mm->mmap_sem);
58662+ vma = mm->mmap;
58663+ while (vma && (!vma_exec || !vma_fault)) {
58664+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
58665+ vma_exec = vma;
58666+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
58667+ vma_fault = vma;
58668+ vma = vma->vm_next;
58669+ }
58670+ if (vma_exec)
58671+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
58672+ if (vma_fault) {
58673+ start = vma_fault->vm_start;
58674+ end = vma_fault->vm_end;
58675+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
58676+ if (vma_fault->vm_file)
58677+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
58678+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
58679+ path_fault = "<heap>";
58680+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
58681+ path_fault = "<stack>";
58682+ else
58683+ path_fault = "<anonymous mapping>";
58684+ }
58685+ up_read(&mm->mmap_sem);
58686+ }
58687+ if (tsk->signal->curr_ip)
58688+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
58689+ else
58690+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
58691+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
58692+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
58693+ free_page((unsigned long)buffer_exec);
58694+ free_page((unsigned long)buffer_fault);
58695+ pax_report_insns(regs, pc, sp);
58696+ info.si_signo = SIGKILL;
58697+ info.si_errno = 0;
58698+ info.si_code = SI_KERNEL;
58699+ info.si_pid = 0;
58700+ info.si_uid = 0;
58701+ do_coredump(&info);
58702+}
58703+#endif
58704+
58705+#ifdef CONFIG_PAX_REFCOUNT
58706+void pax_report_refcount_overflow(struct pt_regs *regs)
58707+{
58708+ if (current->signal->curr_ip)
58709+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
58710+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
58711+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
58712+ else
58713+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
58714+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
58715+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
58716+ preempt_disable();
58717+ show_regs(regs);
58718+ preempt_enable();
58719+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
58720+}
58721+#endif
58722+
58723+#ifdef CONFIG_PAX_USERCOPY
58724+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
58725+static noinline int check_stack_object(const void *obj, unsigned long len)
58726+{
58727+ const void * const stack = task_stack_page(current);
58728+ const void * const stackend = stack + THREAD_SIZE;
58729+
58730+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
58731+ const void *frame = NULL;
58732+ const void *oldframe;
58733+#endif
58734+
58735+ if (obj + len < obj)
58736+ return -1;
58737+
58738+ if (obj + len <= stack || stackend <= obj)
58739+ return 0;
58740+
58741+ if (obj < stack || stackend < obj + len)
58742+ return -1;
58743+
58744+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
58745+ oldframe = __builtin_frame_address(1);
58746+ if (oldframe)
58747+ frame = __builtin_frame_address(2);
58748+ /*
58749+ low ----------------------------------------------> high
58750+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
58751+ ^----------------^
58752+ allow copies only within here
58753+ */
58754+ while (stack <= frame && frame < stackend) {
58755+ /* if obj + len extends past the last frame, this
58756+ check won't pass and the next frame will be 0,
58757+ causing us to bail out and correctly report
58758+ the copy as invalid
58759+ */
58760+ if (obj + len <= frame)
58761+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
58762+ oldframe = frame;
58763+ frame = *(const void * const *)frame;
58764+ }
58765+ return -1;
58766+#else
58767+ return 1;
58768+#endif
58769+}
58770+
58771+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
58772+{
58773+ if (current->signal->curr_ip)
58774+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
58775+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
58776+ else
58777+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
58778+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
58779+ dump_stack();
58780+ gr_handle_kernel_exploit();
58781+ do_group_exit(SIGKILL);
58782+}
58783+#endif
58784+
58785+#ifdef CONFIG_PAX_USERCOPY
58786+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
58787+{
58788+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
58789+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
58790+#ifdef CONFIG_MODULES
58791+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
58792+#else
58793+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
58794+#endif
58795+
58796+#else
58797+ unsigned long textlow = (unsigned long)_stext;
58798+ unsigned long texthigh = (unsigned long)_etext;
58799+
58800+#ifdef CONFIG_X86_64
58801+ /* check against linear mapping as well */
58802+ if (high > (unsigned long)__va(__pa(textlow)) &&
58803+ low <= (unsigned long)__va(__pa(texthigh)))
58804+ return true;
58805+#endif
58806+
58807+#endif
58808+
58809+ if (high <= textlow || low > texthigh)
58810+ return false;
58811+ else
58812+ return true;
58813+}
58814+#endif
58815+
58816+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
58817+{
58818+
58819+#ifdef CONFIG_PAX_USERCOPY
58820+ const char *type;
58821+
58822+ if (!n)
58823+ return;
58824+
58825+ type = check_heap_object(ptr, n);
58826+ if (!type) {
58827+ int ret = check_stack_object(ptr, n);
58828+ if (ret == 1 || ret == 2)
58829+ return;
58830+ if (ret == 0) {
58831+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
58832+ type = "<kernel text>";
58833+ else
58834+ return;
58835+ } else
58836+ type = "<process stack>";
58837+ }
58838+
58839+ pax_report_usercopy(ptr, n, to_user, type);
58840+#endif
58841+
58842+}
58843+EXPORT_SYMBOL(__check_object_size);
58844+
58845+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
58846+void pax_track_stack(void)
58847+{
58848+ unsigned long sp = (unsigned long)&sp;
58849+ if (sp < current_thread_info()->lowest_stack &&
58850+ sp > (unsigned long)task_stack_page(current))
58851+ current_thread_info()->lowest_stack = sp;
58852+}
58853+EXPORT_SYMBOL(pax_track_stack);
58854+#endif
58855+
58856+#ifdef CONFIG_PAX_SIZE_OVERFLOW
58857+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
58858+{
58859+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
58860+ dump_stack();
58861+ do_group_exit(SIGKILL);
58862+}
58863+EXPORT_SYMBOL(report_size_overflow);
58864+#endif
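/* Editor's note -- hedged usage sketch, not part of the patch: with
 * CONFIG_PAX_USERCOPY enabled, the per-arch uaccess paths are expected to
 * feed the kernel-side pointer and length through __check_object_size()
 * before copying. The *_checked wrapper names below are invented for
 * illustration; the real hooks live in the architecture uaccess headers.
 */
static inline unsigned long
copy_to_user_checked(void __user *to, const void *from, unsigned long n)
{
	__check_object_size(from, n, true);	/* kernel -> user: leak check */
	return copy_to_user(to, from, n);
}

static inline unsigned long
copy_from_user_checked(void *to, const void __user *from, unsigned long n)
{
	__check_object_size(to, n, false);	/* user -> kernel: overwrite check */
	return copy_from_user(to, from, n);
}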
58865diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
58866index 9f9992b..8b59411 100644
58867--- a/fs/ext2/balloc.c
58868+++ b/fs/ext2/balloc.c
58869@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
58870
58871 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
58872 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
58873- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
58874+ if (free_blocks < root_blocks + 1 &&
58875 !uid_eq(sbi->s_resuid, current_fsuid()) &&
58876 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
58877- !in_group_p (sbi->s_resgid))) {
58878+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
58879 return 0;
58880 }
58881 return 1;
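/* Editor's note -- schematic only (uid_ok/gid_ok are placeholders): the
 * ext2/ext3/ext4 reservation hunks all make the same two-part change.
 * capable() becomes capable_nolog() so a routine reserved-block probe does
 * not land in the grsec audit log, and the test moves to the end of the
 * &&-chain so short-circuit evaluation skips the capability lookup
 * whenever the cheaper uid/gid checks already decide the outcome:
 *
 *	if (free_blocks < root_blocks + 1 &&
 *	    !uid_ok && !gid_ok &&
 *	    !capable_nolog(CAP_SYS_RESOURCE))	// evaluated last, never logged
 *		return 0;
 */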
58882diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
58883index 2d7557d..14e38f94 100644
58884--- a/fs/ext2/xattr.c
58885+++ b/fs/ext2/xattr.c
58886@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
58887 struct buffer_head *bh = NULL;
58888 struct ext2_xattr_entry *entry;
58889 char *end;
58890- size_t rest = buffer_size;
58891+ size_t rest = buffer_size, total_size = 0;
58892 int error;
58893
58894 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
58895@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
58896 buffer += size;
58897 }
58898 rest -= size;
58899+ total_size += size;
58900 }
58901 }
58902- error = buffer_size - rest; /* total size */
58903+ error = total_size;
58904
58905 cleanup:
58906 brelse(bh);
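/* Editor's note -- worked example: the old return value reconstructed the
 * total by subtraction, which depends on deliberate size_t wraparound in
 * the size-query case (listxattr with a NULL buffer, buffer_size == 0):
 *
 *	buffer_size = 0, entries total 30 bytes
 *	rest   = 0 - 30			-> wraps to SIZE_MAX - 29
 *	return buffer_size - rest	-> wraps back to 30
 *
 * That is well-defined for unsigned arithmetic, but it is exactly the
 * pattern the PaX size_overflow plugin instruments, so the patch counts
 * total_size forward instead. The ext3 and ext4 xattr hunks below apply
 * the same fix.
 */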
58907diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
58908index 22548f5..41521d8 100644
58909--- a/fs/ext3/balloc.c
58910+++ b/fs/ext3/balloc.c
58911@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
58912
58913 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
58914 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
58915- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
58916+ if (free_blocks < root_blocks + 1 &&
58917 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
58918 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
58919- !in_group_p (sbi->s_resgid))) {
58920+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
58921 return 0;
58922 }
58923 return 1;
58924diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
58925index b1fc963..881228c 100644
58926--- a/fs/ext3/xattr.c
58927+++ b/fs/ext3/xattr.c
58928@@ -330,7 +330,7 @@ static int
58929 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
58930 char *buffer, size_t buffer_size)
58931 {
58932- size_t rest = buffer_size;
58933+ size_t rest = buffer_size, total_size = 0;
58934
58935 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
58936 const struct xattr_handler *handler =
58937@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
58938 buffer += size;
58939 }
58940 rest -= size;
58941+ total_size += size;
58942 }
58943 }
58944- return buffer_size - rest;
58945+ return total_size;
58946 }
58947
58948 static int
58949diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
58950index 6ea7b14..8fa16d9 100644
58951--- a/fs/ext4/balloc.c
58952+++ b/fs/ext4/balloc.c
58953@@ -534,8 +534,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
58954 /* Hm, nope. Are (enough) root reserved clusters available? */
58955 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
58956 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
58957- capable(CAP_SYS_RESOURCE) ||
58958- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
58959+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
58960+ capable_nolog(CAP_SYS_RESOURCE)) {
58961
58962 if (free_clusters >= (nclusters + dirty_clusters +
58963 resv_clusters))
58964diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
58965index d3a534f..242c50a 100644
58966--- a/fs/ext4/ext4.h
58967+++ b/fs/ext4/ext4.h
58968@@ -1269,19 +1269,19 @@ struct ext4_sb_info {
58969 unsigned long s_mb_last_start;
58970
58971 /* stats for buddy allocator */
58972- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
58973- atomic_t s_bal_success; /* we found long enough chunks */
58974- atomic_t s_bal_allocated; /* in blocks */
58975- atomic_t s_bal_ex_scanned; /* total extents scanned */
58976- atomic_t s_bal_goals; /* goal hits */
58977- atomic_t s_bal_breaks; /* too long searches */
58978- atomic_t s_bal_2orders; /* 2^order hits */
58979+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
58980+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
58981+ atomic_unchecked_t s_bal_allocated; /* in blocks */
58982+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
58983+ atomic_unchecked_t s_bal_goals; /* goal hits */
58984+ atomic_unchecked_t s_bal_breaks; /* too long searches */
58985+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
58986 spinlock_t s_bal_lock;
58987 unsigned long s_mb_buddies_generated;
58988 unsigned long long s_mb_generation_time;
58989- atomic_t s_mb_lost_chunks;
58990- atomic_t s_mb_preallocated;
58991- atomic_t s_mb_discarded;
58992+ atomic_unchecked_t s_mb_lost_chunks;
58993+ atomic_unchecked_t s_mb_preallocated;
58994+ atomic_unchecked_t s_mb_discarded;
58995 atomic_t s_lock_busy;
58996
58997 /* locality groups */
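/* Editor's note -- rough shape, simplified from the PaX infrastructure
 * this patch adds elsewhere: under CONFIG_PAX_REFCOUNT, plain atomic_t
 * arithmetic traps on overflow to catch reference-count bugs. Pure
 * statistics counters like the mballoc fields above may legitimately
 * wrap, so they are converted to atomic_unchecked_t, which keeps
 * ordinary wrapping semantics. On x86 the unchecked ops look roughly
 * like this:
 */
typedef struct {
	int counter;
} atomic_unchecked_t;

#define atomic_read_unchecked(v)	(*(volatile int *)&(v)->counter)

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain wrapping increment: no overflow trap, on purpose */
	asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));
}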
58998diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
58999index 04a5c75..09894fa 100644
59000--- a/fs/ext4/mballoc.c
59001+++ b/fs/ext4/mballoc.c
59002@@ -1880,7 +1880,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
59003 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
59004
59005 if (EXT4_SB(sb)->s_mb_stats)
59006- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
59007+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
59008
59009 break;
59010 }
59011@@ -2189,7 +2189,7 @@ repeat:
59012 ac->ac_status = AC_STATUS_CONTINUE;
59013 ac->ac_flags |= EXT4_MB_HINT_FIRST;
59014 cr = 3;
59015- atomic_inc(&sbi->s_mb_lost_chunks);
59016+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
59017 goto repeat;
59018 }
59019 }
59020@@ -2697,25 +2697,25 @@ int ext4_mb_release(struct super_block *sb)
59021 if (sbi->s_mb_stats) {
59022 ext4_msg(sb, KERN_INFO,
59023 "mballoc: %u blocks %u reqs (%u success)",
59024- atomic_read(&sbi->s_bal_allocated),
59025- atomic_read(&sbi->s_bal_reqs),
59026- atomic_read(&sbi->s_bal_success));
59027+ atomic_read_unchecked(&sbi->s_bal_allocated),
59028+ atomic_read_unchecked(&sbi->s_bal_reqs),
59029+ atomic_read_unchecked(&sbi->s_bal_success));
59030 ext4_msg(sb, KERN_INFO,
59031 "mballoc: %u extents scanned, %u goal hits, "
59032 "%u 2^N hits, %u breaks, %u lost",
59033- atomic_read(&sbi->s_bal_ex_scanned),
59034- atomic_read(&sbi->s_bal_goals),
59035- atomic_read(&sbi->s_bal_2orders),
59036- atomic_read(&sbi->s_bal_breaks),
59037- atomic_read(&sbi->s_mb_lost_chunks));
59038+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
59039+ atomic_read_unchecked(&sbi->s_bal_goals),
59040+ atomic_read_unchecked(&sbi->s_bal_2orders),
59041+ atomic_read_unchecked(&sbi->s_bal_breaks),
59042+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
59043 ext4_msg(sb, KERN_INFO,
59044 "mballoc: %lu generated and it took %Lu",
59045 sbi->s_mb_buddies_generated,
59046 sbi->s_mb_generation_time);
59047 ext4_msg(sb, KERN_INFO,
59048 "mballoc: %u preallocated, %u discarded",
59049- atomic_read(&sbi->s_mb_preallocated),
59050- atomic_read(&sbi->s_mb_discarded));
59051+ atomic_read_unchecked(&sbi->s_mb_preallocated),
59052+ atomic_read_unchecked(&sbi->s_mb_discarded));
59053 }
59054
59055 free_percpu(sbi->s_locality_groups);
59056@@ -3169,16 +3169,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
59057 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
59058
59059 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
59060- atomic_inc(&sbi->s_bal_reqs);
59061- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
59062+ atomic_inc_unchecked(&sbi->s_bal_reqs);
59063+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
59064 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
59065- atomic_inc(&sbi->s_bal_success);
59066- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
59067+ atomic_inc_unchecked(&sbi->s_bal_success);
59068+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
59069 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
59070 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
59071- atomic_inc(&sbi->s_bal_goals);
59072+ atomic_inc_unchecked(&sbi->s_bal_goals);
59073 if (ac->ac_found > sbi->s_mb_max_to_scan)
59074- atomic_inc(&sbi->s_bal_breaks);
59075+ atomic_inc_unchecked(&sbi->s_bal_breaks);
59076 }
59077
59078 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
59079@@ -3583,7 +3583,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
59080 trace_ext4_mb_new_inode_pa(ac, pa);
59081
59082 ext4_mb_use_inode_pa(ac, pa);
59083- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
59084+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
59085
59086 ei = EXT4_I(ac->ac_inode);
59087 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
59088@@ -3643,7 +3643,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
59089 trace_ext4_mb_new_group_pa(ac, pa);
59090
59091 ext4_mb_use_group_pa(ac, pa);
59092- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
59093+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
59094
59095 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
59096 lg = ac->ac_lg;
59097@@ -3732,7 +3732,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
59098 * from the bitmap and continue.
59099 */
59100 }
59101- atomic_add(free, &sbi->s_mb_discarded);
59102+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
59103
59104 return err;
59105 }
59106@@ -3750,7 +3750,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
59107 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
59108 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
59109 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
59110- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
59111+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
59112 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
59113
59114 return 0;
59115diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
59116index 04434ad..6404663 100644
59117--- a/fs/ext4/mmp.c
59118+++ b/fs/ext4/mmp.c
59119@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
59120 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
59121 const char *function, unsigned int line, const char *msg)
59122 {
59123- __ext4_warning(sb, function, line, msg);
59124+ __ext4_warning(sb, function, line, "%s", msg);
59125 __ext4_warning(sb, function, line,
59126 "MMP failure info: last update time: %llu, last update "
59127 "node: %s, last update device: %s\n",
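/* Editor's note -- userspace analogue (runnable, hedged): passing a
 * caller-supplied string as the format argument lets any '%' in it be
 * parsed as a conversion that reads nonexistent varargs; the "%s" form
 * added above prints it verbatim.
 */
#include <stdio.h>

int main(void)
{
	const char *msg = "volume 90% full";	/* caller-controlled text */

	/* printf(msg) would misparse "% f" as a conversion and read a
	 * bogus double argument; this prints msg literally instead.
	 */
	printf("%s\n", msg);
	return 0;
}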
59128diff --git a/fs/ext4/super.c b/fs/ext4/super.c
59129index 710fed2..a82e4e8 100644
59130--- a/fs/ext4/super.c
59131+++ b/fs/ext4/super.c
59132@@ -1270,7 +1270,7 @@ static ext4_fsblk_t get_sb_block(void **data)
59133 }
59134
59135 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
59136-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
59137+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
59138 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
59139
59140 #ifdef CONFIG_QUOTA
59141@@ -2450,7 +2450,7 @@ struct ext4_attr {
59142 int offset;
59143 int deprecated_val;
59144 } u;
59145-};
59146+} __do_const;
59147
59148 static int parse_strtoull(const char *buf,
59149 unsigned long long max, unsigned long long *value)
59150diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
59151index 1423c48..9c0c6dc 100644
59152--- a/fs/ext4/xattr.c
59153+++ b/fs/ext4/xattr.c
59154@@ -381,7 +381,7 @@ static int
59155 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
59156 char *buffer, size_t buffer_size)
59157 {
59158- size_t rest = buffer_size;
59159+ size_t rest = buffer_size, total_size = 0;
59160
59161 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
59162 const struct xattr_handler *handler =
59163@@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
59164 buffer += size;
59165 }
59166 rest -= size;
59167+ total_size += size;
59168 }
59169 }
59170- return buffer_size - rest;
59171+ return total_size;
59172 }
59173
59174 static int
59175diff --git a/fs/fcntl.c b/fs/fcntl.c
59176index ef68665..5deacdc 100644
59177--- a/fs/fcntl.c
59178+++ b/fs/fcntl.c
59179@@ -106,6 +106,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
59180 if (err)
59181 return err;
59182
59183+ if (gr_handle_chroot_fowner(pid, type))
59184+ return -ENOENT;
59185+ if (gr_check_protected_task_fowner(pid, type))
59186+ return -EACCES;
59187+
59188 f_modown(filp, pid, type, force);
59189 return 0;
59190 }
59191diff --git a/fs/fhandle.c b/fs/fhandle.c
59192index 999ff5c..41f4109 100644
59193--- a/fs/fhandle.c
59194+++ b/fs/fhandle.c
59195@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
59196 } else
59197 retval = 0;
59198 /* copy the mount id */
59199- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
59200- sizeof(*mnt_id)) ||
59201+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
59202 copy_to_user(ufh, handle,
59203 sizeof(struct file_handle) + handle_bytes))
59204 retval = -EFAULT;
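/* Editor's note -- sketch: for a single scalar, put_user() is both
 * simpler and tighter than copy_to_user(): it takes the value rather
 * than a kernel pointer/length pair and expands to one correctly sized,
 * access_ok-checked store. Standalone form (mnt_id_user is an invented
 * name for the int __user * destination):
 *
 *	int mnt_id = real_mount(path->mnt)->mnt_id;
 *
 *	if (put_user(mnt_id, mnt_id_user))
 *		return -EFAULT;
 */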
59205diff --git a/fs/file.c b/fs/file.c
59206index 9de2026..8e334ca 100644
59207--- a/fs/file.c
59208+++ b/fs/file.c
59209@@ -16,6 +16,7 @@
59210 #include <linux/slab.h>
59211 #include <linux/vmalloc.h>
59212 #include <linux/file.h>
59213+#include <linux/security.h>
59214 #include <linux/fdtable.h>
59215 #include <linux/bitops.h>
59216 #include <linux/interrupt.h>
59217@@ -141,7 +142,7 @@ out:
59218 * Return <0 error code on error; 1 on successful completion.
59219 * The files->file_lock should be held on entry, and will be held on exit.
59220 */
59221-static int expand_fdtable(struct files_struct *files, int nr)
59222+static int expand_fdtable(struct files_struct *files, unsigned int nr)
59223 __releases(files->file_lock)
59224 __acquires(files->file_lock)
59225 {
59226@@ -186,7 +187,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
59227 * expanded and execution may have blocked.
59228 * The files->file_lock should be held on entry, and will be held on exit.
59229 */
59230-static int expand_files(struct files_struct *files, int nr)
59231+static int expand_files(struct files_struct *files, unsigned int nr)
59232 {
59233 struct fdtable *fdt;
59234
59235@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
59236 if (!file)
59237 return __close_fd(files, fd);
59238
59239+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
59240 if (fd >= rlimit(RLIMIT_NOFILE))
59241 return -EBADF;
59242
59243@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
59244 if (unlikely(oldfd == newfd))
59245 return -EINVAL;
59246
59247+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
59248 if (newfd >= rlimit(RLIMIT_NOFILE))
59249 return -EBADF;
59250
59251@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
59252 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
59253 {
59254 int err;
59255+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
59256 if (from >= rlimit(RLIMIT_NOFILE))
59257 return -EINVAL;
59258 err = alloc_fd(from, flags);
59259diff --git a/fs/filesystems.c b/fs/filesystems.c
59260index 92567d9..fcd8cbf 100644
59261--- a/fs/filesystems.c
59262+++ b/fs/filesystems.c
59263@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
59264 int len = dot ? dot - name : strlen(name);
59265
59266 fs = __get_fs_type(name, len);
59267+#ifdef CONFIG_GRKERNSEC_MODHARDEN
59268+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
59269+#else
59270 if (!fs && (request_module("fs-%.*s", len, name) == 0))
59271+#endif
59272 fs = __get_fs_type(name, len);
59273
59274 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
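/* Editor's note -- worked example: for "mount -t ext4", the module alias
 * requested is "fs-ext4" (filesystems declare it via MODULE_ALIAS_FS()),
 * and the plain path hands it to userspace modprobe on behalf of whoever
 * attempted the mount -- i.e. unprivileged users can steer module
 * auto-loading. Under MODHARDEN the same alias goes through
 * ___request_module() tagged "grsec_modharden_fs", letting policy
 * distinguish and deny auto-loads triggered by untrusted callers.
 */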
59275diff --git a/fs/fs_struct.c b/fs/fs_struct.c
59276index 7dca743..543d620 100644
59277--- a/fs/fs_struct.c
59278+++ b/fs/fs_struct.c
59279@@ -4,6 +4,7 @@
59280 #include <linux/path.h>
59281 #include <linux/slab.h>
59282 #include <linux/fs_struct.h>
59283+#include <linux/grsecurity.h>
59284 #include "internal.h"
59285
59286 /*
59287@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
59288 write_seqcount_begin(&fs->seq);
59289 old_root = fs->root;
59290 fs->root = *path;
59291+ gr_set_chroot_entries(current, path);
59292 write_seqcount_end(&fs->seq);
59293 spin_unlock(&fs->lock);
59294 if (old_root.dentry)
59295@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
59296 int hits = 0;
59297 spin_lock(&fs->lock);
59298 write_seqcount_begin(&fs->seq);
59299+ /* this root replacement is only done by pivot_root,
59300+ leave grsec's chroot tagging alone for this task
59301+ so that a pivoted root isn't treated as a chroot
59302+ */
59303 hits += replace_path(&fs->root, old_root, new_root);
59304 hits += replace_path(&fs->pwd, old_root, new_root);
59305 write_seqcount_end(&fs->seq);
59306@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
59307 task_lock(tsk);
59308 spin_lock(&fs->lock);
59309 tsk->fs = NULL;
59310- kill = !--fs->users;
59311+ gr_clear_chroot_entries(tsk);
59312+ kill = !atomic_dec_return(&fs->users);
59313 spin_unlock(&fs->lock);
59314 task_unlock(tsk);
59315 if (kill)
59316@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
59317 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
59318 /* We don't need to lock fs - think why ;-) */
59319 if (fs) {
59320- fs->users = 1;
59321+ atomic_set(&fs->users, 1);
59322 fs->in_exec = 0;
59323 spin_lock_init(&fs->lock);
59324 seqcount_init(&fs->seq);
59325@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
59326 spin_lock(&old->lock);
59327 fs->root = old->root;
59328 path_get(&fs->root);
59329+ /* instead of calling gr_set_chroot_entries here,
59330+ we call it from every caller of this function
59331+ */
59332 fs->pwd = old->pwd;
59333 path_get(&fs->pwd);
59334 spin_unlock(&old->lock);
59335@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
59336
59337 task_lock(current);
59338 spin_lock(&fs->lock);
59339- kill = !--fs->users;
59340+ kill = !atomic_dec_return(&fs->users);
59341 current->fs = new_fs;
59342+ gr_set_chroot_entries(current, &new_fs->root);
59343 spin_unlock(&fs->lock);
59344 task_unlock(current);
59345
59346@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
59347
59348 int current_umask(void)
59349 {
59350- return current->fs->umask;
59351+ return current->fs->umask | gr_acl_umask();
59352 }
59353 EXPORT_SYMBOL(current_umask);
59354
59355 /* to be mentioned only in INIT_TASK */
59356 struct fs_struct init_fs = {
59357- .users = 1,
59358+ .users = ATOMIC_INIT(1),
59359 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
59360 .seq = SEQCNT_ZERO(init_fs.seq),
59361 .umask = 0022,
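/* Editor's note -- userspace analogue of the atomic refcount change
 * above: making the decrement and the "was I the last holder?" test one
 * indivisible step means two tasks exiting concurrently cannot both see
 * the count hit zero and double-free. A minimal C11 sketch:
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool put_ref(atomic_int *users)
{
	/* fetch_sub returns the value *before* the decrement, so exactly
	 * one caller -- the final one -- sees 1, mirroring
	 * kill = !atomic_dec_return(&fs->users) in the hunk above.
	 */
	return atomic_fetch_sub(users, 1) == 1;
}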
59362diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
59363index 29d7feb..303644d 100644
59364--- a/fs/fscache/cookie.c
59365+++ b/fs/fscache/cookie.c
59366@@ -19,7 +19,7 @@
59367
59368 struct kmem_cache *fscache_cookie_jar;
59369
59370-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
59371+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
59372
59373 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
59374 static int fscache_alloc_object(struct fscache_cache *cache,
59375@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
59376 parent ? (char *) parent->def->name : "<no-parent>",
59377 def->name, netfs_data, enable);
59378
59379- fscache_stat(&fscache_n_acquires);
59380+ fscache_stat_unchecked(&fscache_n_acquires);
59381
59382 /* if there's no parent cookie, then we don't create one here either */
59383 if (!parent) {
59384- fscache_stat(&fscache_n_acquires_null);
59385+ fscache_stat_unchecked(&fscache_n_acquires_null);
59386 _leave(" [no parent]");
59387 return NULL;
59388 }
59389@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
59390 /* allocate and initialise a cookie */
59391 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
59392 if (!cookie) {
59393- fscache_stat(&fscache_n_acquires_oom);
59394+ fscache_stat_unchecked(&fscache_n_acquires_oom);
59395 _leave(" [ENOMEM]");
59396 return NULL;
59397 }
59398@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
59399
59400 switch (cookie->def->type) {
59401 case FSCACHE_COOKIE_TYPE_INDEX:
59402- fscache_stat(&fscache_n_cookie_index);
59403+ fscache_stat_unchecked(&fscache_n_cookie_index);
59404 break;
59405 case FSCACHE_COOKIE_TYPE_DATAFILE:
59406- fscache_stat(&fscache_n_cookie_data);
59407+ fscache_stat_unchecked(&fscache_n_cookie_data);
59408 break;
59409 default:
59410- fscache_stat(&fscache_n_cookie_special);
59411+ fscache_stat_unchecked(&fscache_n_cookie_special);
59412 break;
59413 }
59414
59415@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
59416 } else {
59417 atomic_dec(&parent->n_children);
59418 __fscache_cookie_put(cookie);
59419- fscache_stat(&fscache_n_acquires_nobufs);
59420+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
59421 _leave(" = NULL");
59422 return NULL;
59423 }
59424@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
59425 }
59426 }
59427
59428- fscache_stat(&fscache_n_acquires_ok);
59429+ fscache_stat_unchecked(&fscache_n_acquires_ok);
59430 _leave(" = %p", cookie);
59431 return cookie;
59432 }
59433@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
59434 cache = fscache_select_cache_for_object(cookie->parent);
59435 if (!cache) {
59436 up_read(&fscache_addremove_sem);
59437- fscache_stat(&fscache_n_acquires_no_cache);
59438+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
59439 _leave(" = -ENOMEDIUM [no cache]");
59440 return -ENOMEDIUM;
59441 }
59442@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
59443 object = cache->ops->alloc_object(cache, cookie);
59444 fscache_stat_d(&fscache_n_cop_alloc_object);
59445 if (IS_ERR(object)) {
59446- fscache_stat(&fscache_n_object_no_alloc);
59447+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
59448 ret = PTR_ERR(object);
59449 goto error;
59450 }
59451
59452- fscache_stat(&fscache_n_object_alloc);
59453+ fscache_stat_unchecked(&fscache_n_object_alloc);
59454
59455- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
59456+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
59457
59458 _debug("ALLOC OBJ%x: %s {%lx}",
59459 object->debug_id, cookie->def->name, object->events);
59460@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
59461
59462 _enter("{%s}", cookie->def->name);
59463
59464- fscache_stat(&fscache_n_invalidates);
59465+ fscache_stat_unchecked(&fscache_n_invalidates);
59466
59467 /* Only permit invalidation of data files. Invalidating an index will
59468 * require the caller to release all its attachments to the tree rooted
59469@@ -477,10 +477,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
59470 {
59471 struct fscache_object *object;
59472
59473- fscache_stat(&fscache_n_updates);
59474+ fscache_stat_unchecked(&fscache_n_updates);
59475
59476 if (!cookie) {
59477- fscache_stat(&fscache_n_updates_null);
59478+ fscache_stat_unchecked(&fscache_n_updates_null);
59479 _leave(" [no cookie]");
59480 return;
59481 }
59482@@ -581,12 +581,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
59483 */
59484 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
59485 {
59486- fscache_stat(&fscache_n_relinquishes);
59487+ fscache_stat_unchecked(&fscache_n_relinquishes);
59488 if (retire)
59489- fscache_stat(&fscache_n_relinquishes_retire);
59490+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
59491
59492 if (!cookie) {
59493- fscache_stat(&fscache_n_relinquishes_null);
59494+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
59495 _leave(" [no cookie]");
59496 return;
59497 }
59498@@ -687,7 +687,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
59499 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
59500 goto inconsistent;
59501
59502- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
59503+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
59504
59505 __fscache_use_cookie(cookie);
59506 if (fscache_submit_op(object, op) < 0)
59507diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
59508index 4226f66..0fb3f45 100644
59509--- a/fs/fscache/internal.h
59510+++ b/fs/fscache/internal.h
59511@@ -133,8 +133,8 @@ extern void fscache_operation_gc(struct work_struct *);
59512 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
59513 extern int fscache_wait_for_operation_activation(struct fscache_object *,
59514 struct fscache_operation *,
59515- atomic_t *,
59516- atomic_t *,
59517+ atomic_unchecked_t *,
59518+ atomic_unchecked_t *,
59519 void (*)(struct fscache_operation *));
59520 extern void fscache_invalidate_writes(struct fscache_cookie *);
59521
59522@@ -153,101 +153,101 @@ extern void fscache_proc_cleanup(void);
59523 * stats.c
59524 */
59525 #ifdef CONFIG_FSCACHE_STATS
59526-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
59527-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
59528+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
59529+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
59530
59531-extern atomic_t fscache_n_op_pend;
59532-extern atomic_t fscache_n_op_run;
59533-extern atomic_t fscache_n_op_enqueue;
59534-extern atomic_t fscache_n_op_deferred_release;
59535-extern atomic_t fscache_n_op_release;
59536-extern atomic_t fscache_n_op_gc;
59537-extern atomic_t fscache_n_op_cancelled;
59538-extern atomic_t fscache_n_op_rejected;
59539+extern atomic_unchecked_t fscache_n_op_pend;
59540+extern atomic_unchecked_t fscache_n_op_run;
59541+extern atomic_unchecked_t fscache_n_op_enqueue;
59542+extern atomic_unchecked_t fscache_n_op_deferred_release;
59543+extern atomic_unchecked_t fscache_n_op_release;
59544+extern atomic_unchecked_t fscache_n_op_gc;
59545+extern atomic_unchecked_t fscache_n_op_cancelled;
59546+extern atomic_unchecked_t fscache_n_op_rejected;
59547
59548-extern atomic_t fscache_n_attr_changed;
59549-extern atomic_t fscache_n_attr_changed_ok;
59550-extern atomic_t fscache_n_attr_changed_nobufs;
59551-extern atomic_t fscache_n_attr_changed_nomem;
59552-extern atomic_t fscache_n_attr_changed_calls;
59553+extern atomic_unchecked_t fscache_n_attr_changed;
59554+extern atomic_unchecked_t fscache_n_attr_changed_ok;
59555+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
59556+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
59557+extern atomic_unchecked_t fscache_n_attr_changed_calls;
59558
59559-extern atomic_t fscache_n_allocs;
59560-extern atomic_t fscache_n_allocs_ok;
59561-extern atomic_t fscache_n_allocs_wait;
59562-extern atomic_t fscache_n_allocs_nobufs;
59563-extern atomic_t fscache_n_allocs_intr;
59564-extern atomic_t fscache_n_allocs_object_dead;
59565-extern atomic_t fscache_n_alloc_ops;
59566-extern atomic_t fscache_n_alloc_op_waits;
59567+extern atomic_unchecked_t fscache_n_allocs;
59568+extern atomic_unchecked_t fscache_n_allocs_ok;
59569+extern atomic_unchecked_t fscache_n_allocs_wait;
59570+extern atomic_unchecked_t fscache_n_allocs_nobufs;
59571+extern atomic_unchecked_t fscache_n_allocs_intr;
59572+extern atomic_unchecked_t fscache_n_allocs_object_dead;
59573+extern atomic_unchecked_t fscache_n_alloc_ops;
59574+extern atomic_unchecked_t fscache_n_alloc_op_waits;
59575
59576-extern atomic_t fscache_n_retrievals;
59577-extern atomic_t fscache_n_retrievals_ok;
59578-extern atomic_t fscache_n_retrievals_wait;
59579-extern atomic_t fscache_n_retrievals_nodata;
59580-extern atomic_t fscache_n_retrievals_nobufs;
59581-extern atomic_t fscache_n_retrievals_intr;
59582-extern atomic_t fscache_n_retrievals_nomem;
59583-extern atomic_t fscache_n_retrievals_object_dead;
59584-extern atomic_t fscache_n_retrieval_ops;
59585-extern atomic_t fscache_n_retrieval_op_waits;
59586+extern atomic_unchecked_t fscache_n_retrievals;
59587+extern atomic_unchecked_t fscache_n_retrievals_ok;
59588+extern atomic_unchecked_t fscache_n_retrievals_wait;
59589+extern atomic_unchecked_t fscache_n_retrievals_nodata;
59590+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
59591+extern atomic_unchecked_t fscache_n_retrievals_intr;
59592+extern atomic_unchecked_t fscache_n_retrievals_nomem;
59593+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
59594+extern atomic_unchecked_t fscache_n_retrieval_ops;
59595+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
59596
59597-extern atomic_t fscache_n_stores;
59598-extern atomic_t fscache_n_stores_ok;
59599-extern atomic_t fscache_n_stores_again;
59600-extern atomic_t fscache_n_stores_nobufs;
59601-extern atomic_t fscache_n_stores_oom;
59602-extern atomic_t fscache_n_store_ops;
59603-extern atomic_t fscache_n_store_calls;
59604-extern atomic_t fscache_n_store_pages;
59605-extern atomic_t fscache_n_store_radix_deletes;
59606-extern atomic_t fscache_n_store_pages_over_limit;
59607+extern atomic_unchecked_t fscache_n_stores;
59608+extern atomic_unchecked_t fscache_n_stores_ok;
59609+extern atomic_unchecked_t fscache_n_stores_again;
59610+extern atomic_unchecked_t fscache_n_stores_nobufs;
59611+extern atomic_unchecked_t fscache_n_stores_oom;
59612+extern atomic_unchecked_t fscache_n_store_ops;
59613+extern atomic_unchecked_t fscache_n_store_calls;
59614+extern atomic_unchecked_t fscache_n_store_pages;
59615+extern atomic_unchecked_t fscache_n_store_radix_deletes;
59616+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
59617
59618-extern atomic_t fscache_n_store_vmscan_not_storing;
59619-extern atomic_t fscache_n_store_vmscan_gone;
59620-extern atomic_t fscache_n_store_vmscan_busy;
59621-extern atomic_t fscache_n_store_vmscan_cancelled;
59622-extern atomic_t fscache_n_store_vmscan_wait;
59623+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
59624+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
59625+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
59626+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
59627+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
59628
59629-extern atomic_t fscache_n_marks;
59630-extern atomic_t fscache_n_uncaches;
59631+extern atomic_unchecked_t fscache_n_marks;
59632+extern atomic_unchecked_t fscache_n_uncaches;
59633
59634-extern atomic_t fscache_n_acquires;
59635-extern atomic_t fscache_n_acquires_null;
59636-extern atomic_t fscache_n_acquires_no_cache;
59637-extern atomic_t fscache_n_acquires_ok;
59638-extern atomic_t fscache_n_acquires_nobufs;
59639-extern atomic_t fscache_n_acquires_oom;
59640+extern atomic_unchecked_t fscache_n_acquires;
59641+extern atomic_unchecked_t fscache_n_acquires_null;
59642+extern atomic_unchecked_t fscache_n_acquires_no_cache;
59643+extern atomic_unchecked_t fscache_n_acquires_ok;
59644+extern atomic_unchecked_t fscache_n_acquires_nobufs;
59645+extern atomic_unchecked_t fscache_n_acquires_oom;
59646
59647-extern atomic_t fscache_n_invalidates;
59648-extern atomic_t fscache_n_invalidates_run;
59649+extern atomic_unchecked_t fscache_n_invalidates;
59650+extern atomic_unchecked_t fscache_n_invalidates_run;
59651
59652-extern atomic_t fscache_n_updates;
59653-extern atomic_t fscache_n_updates_null;
59654-extern atomic_t fscache_n_updates_run;
59655+extern atomic_unchecked_t fscache_n_updates;
59656+extern atomic_unchecked_t fscache_n_updates_null;
59657+extern atomic_unchecked_t fscache_n_updates_run;
59658
59659-extern atomic_t fscache_n_relinquishes;
59660-extern atomic_t fscache_n_relinquishes_null;
59661-extern atomic_t fscache_n_relinquishes_waitcrt;
59662-extern atomic_t fscache_n_relinquishes_retire;
59663+extern atomic_unchecked_t fscache_n_relinquishes;
59664+extern atomic_unchecked_t fscache_n_relinquishes_null;
59665+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
59666+extern atomic_unchecked_t fscache_n_relinquishes_retire;
59667
59668-extern atomic_t fscache_n_cookie_index;
59669-extern atomic_t fscache_n_cookie_data;
59670-extern atomic_t fscache_n_cookie_special;
59671+extern atomic_unchecked_t fscache_n_cookie_index;
59672+extern atomic_unchecked_t fscache_n_cookie_data;
59673+extern atomic_unchecked_t fscache_n_cookie_special;
59674
59675-extern atomic_t fscache_n_object_alloc;
59676-extern atomic_t fscache_n_object_no_alloc;
59677-extern atomic_t fscache_n_object_lookups;
59678-extern atomic_t fscache_n_object_lookups_negative;
59679-extern atomic_t fscache_n_object_lookups_positive;
59680-extern atomic_t fscache_n_object_lookups_timed_out;
59681-extern atomic_t fscache_n_object_created;
59682-extern atomic_t fscache_n_object_avail;
59683-extern atomic_t fscache_n_object_dead;
59684+extern atomic_unchecked_t fscache_n_object_alloc;
59685+extern atomic_unchecked_t fscache_n_object_no_alloc;
59686+extern atomic_unchecked_t fscache_n_object_lookups;
59687+extern atomic_unchecked_t fscache_n_object_lookups_negative;
59688+extern atomic_unchecked_t fscache_n_object_lookups_positive;
59689+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
59690+extern atomic_unchecked_t fscache_n_object_created;
59691+extern atomic_unchecked_t fscache_n_object_avail;
59692+extern atomic_unchecked_t fscache_n_object_dead;
59693
59694-extern atomic_t fscache_n_checkaux_none;
59695-extern atomic_t fscache_n_checkaux_okay;
59696-extern atomic_t fscache_n_checkaux_update;
59697-extern atomic_t fscache_n_checkaux_obsolete;
59698+extern atomic_unchecked_t fscache_n_checkaux_none;
59699+extern atomic_unchecked_t fscache_n_checkaux_okay;
59700+extern atomic_unchecked_t fscache_n_checkaux_update;
59701+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
59702
59703 extern atomic_t fscache_n_cop_alloc_object;
59704 extern atomic_t fscache_n_cop_lookup_object;
59705@@ -272,6 +272,11 @@ static inline void fscache_stat(atomic_t *stat)
59706 atomic_inc(stat);
59707 }
59708
59709+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
59710+{
59711+ atomic_inc_unchecked(stat);
59712+}
59713+
59714 static inline void fscache_stat_d(atomic_t *stat)
59715 {
59716 atomic_dec(stat);
59717@@ -284,6 +289,7 @@ extern const struct file_operations fscache_stats_fops;
59718
59719 #define __fscache_stat(stat) (NULL)
59720 #define fscache_stat(stat) do {} while (0)
59721+#define fscache_stat_unchecked(stat) do {} while (0)
59722 #define fscache_stat_d(stat) do {} while (0)
59723 #endif
59724
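/* Editor's note -- why the two definitions above must stay in sync:
 * fscache_stat_unchecked() mirrors fscache_stat() in both configurations,
 * a real inline under CONFIG_FSCACHE_STATS and a do {} while (0) macro
 * otherwise, so that every call site still parses as a single statement
 * and compiles away to nothing, e.g.:
 *
 *	if (xpage)
 *		fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
 *	else
 *		fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
 */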
59725diff --git a/fs/fscache/object.c b/fs/fscache/object.c
59726index 53d35c5..5d68ed4 100644
59727--- a/fs/fscache/object.c
59728+++ b/fs/fscache/object.c
59729@@ -451,7 +451,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
59730 _debug("LOOKUP \"%s\" in \"%s\"",
59731 cookie->def->name, object->cache->tag->name);
59732
59733- fscache_stat(&fscache_n_object_lookups);
59734+ fscache_stat_unchecked(&fscache_n_object_lookups);
59735 fscache_stat(&fscache_n_cop_lookup_object);
59736 ret = object->cache->ops->lookup_object(object);
59737 fscache_stat_d(&fscache_n_cop_lookup_object);
59738@@ -461,7 +461,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
59739 if (ret == -ETIMEDOUT) {
59740 /* probably stuck behind another object, so move this one to
59741 * the back of the queue */
59742- fscache_stat(&fscache_n_object_lookups_timed_out);
59743+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
59744 _leave(" [timeout]");
59745 return NO_TRANSIT;
59746 }
59747@@ -489,7 +489,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
59748 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
59749
59750 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
59751- fscache_stat(&fscache_n_object_lookups_negative);
59752+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
59753
59754 /* Allow write requests to begin stacking up and read requests to begin
59755 * returning ENODATA.
59756@@ -524,7 +524,7 @@ void fscache_obtained_object(struct fscache_object *object)
59757 /* if we were still looking up, then we must have a positive lookup
59758 * result, in which case there may be data available */
59759 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
59760- fscache_stat(&fscache_n_object_lookups_positive);
59761+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
59762
59763 /* We do (presumably) have data */
59764 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
59765@@ -536,7 +536,7 @@ void fscache_obtained_object(struct fscache_object *object)
59766 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
59767 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
59768 } else {
59769- fscache_stat(&fscache_n_object_created);
59770+ fscache_stat_unchecked(&fscache_n_object_created);
59771 }
59772
59773 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
59774@@ -572,7 +572,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
59775 fscache_stat_d(&fscache_n_cop_lookup_complete);
59776
59777 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
59778- fscache_stat(&fscache_n_object_avail);
59779+ fscache_stat_unchecked(&fscache_n_object_avail);
59780
59781 _leave("");
59782 return transit_to(JUMPSTART_DEPS);
59783@@ -719,7 +719,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
59784
59785 /* this just shifts the object release to the work processor */
59786 fscache_put_object(object);
59787- fscache_stat(&fscache_n_object_dead);
59788+ fscache_stat_unchecked(&fscache_n_object_dead);
59789
59790 _leave("");
59791 return transit_to(OBJECT_DEAD);
59792@@ -884,7 +884,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
59793 enum fscache_checkaux result;
59794
59795 if (!object->cookie->def->check_aux) {
59796- fscache_stat(&fscache_n_checkaux_none);
59797+ fscache_stat_unchecked(&fscache_n_checkaux_none);
59798 return FSCACHE_CHECKAUX_OKAY;
59799 }
59800
59801@@ -893,17 +893,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
59802 switch (result) {
59803 /* entry okay as is */
59804 case FSCACHE_CHECKAUX_OKAY:
59805- fscache_stat(&fscache_n_checkaux_okay);
59806+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
59807 break;
59808
59809 /* entry requires update */
59810 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
59811- fscache_stat(&fscache_n_checkaux_update);
59812+ fscache_stat_unchecked(&fscache_n_checkaux_update);
59813 break;
59814
59815 /* entry requires deletion */
59816 case FSCACHE_CHECKAUX_OBSOLETE:
59817- fscache_stat(&fscache_n_checkaux_obsolete);
59818+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
59819 break;
59820
59821 default:
59822@@ -989,7 +989,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
59823 {
59824 const struct fscache_state *s;
59825
59826- fscache_stat(&fscache_n_invalidates_run);
59827+ fscache_stat_unchecked(&fscache_n_invalidates_run);
59828 fscache_stat(&fscache_n_cop_invalidate_object);
59829 s = _fscache_invalidate_object(object, event);
59830 fscache_stat_d(&fscache_n_cop_invalidate_object);
59831@@ -1004,7 +1004,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
59832 {
59833 _enter("{OBJ%x},%d", object->debug_id, event);
59834
59835- fscache_stat(&fscache_n_updates_run);
59836+ fscache_stat_unchecked(&fscache_n_updates_run);
59837 fscache_stat(&fscache_n_cop_update_object);
59838 object->cache->ops->update_object(object);
59839 fscache_stat_d(&fscache_n_cop_update_object);
59840diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
59841index 318071a..379938b 100644
59842--- a/fs/fscache/operation.c
59843+++ b/fs/fscache/operation.c
59844@@ -17,7 +17,7 @@
59845 #include <linux/slab.h>
59846 #include "internal.h"
59847
59848-atomic_t fscache_op_debug_id;
59849+atomic_unchecked_t fscache_op_debug_id;
59850 EXPORT_SYMBOL(fscache_op_debug_id);
59851
59852 /**
59853@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
59854 ASSERTCMP(atomic_read(&op->usage), >, 0);
59855 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
59856
59857- fscache_stat(&fscache_n_op_enqueue);
59858+ fscache_stat_unchecked(&fscache_n_op_enqueue);
59859 switch (op->flags & FSCACHE_OP_TYPE) {
59860 case FSCACHE_OP_ASYNC:
59861 _debug("queue async");
59862@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
59863 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
59864 if (op->processor)
59865 fscache_enqueue_operation(op);
59866- fscache_stat(&fscache_n_op_run);
59867+ fscache_stat_unchecked(&fscache_n_op_run);
59868 }
59869
59870 /*
59871@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
59872 if (object->n_in_progress > 0) {
59873 atomic_inc(&op->usage);
59874 list_add_tail(&op->pend_link, &object->pending_ops);
59875- fscache_stat(&fscache_n_op_pend);
59876+ fscache_stat_unchecked(&fscache_n_op_pend);
59877 } else if (!list_empty(&object->pending_ops)) {
59878 atomic_inc(&op->usage);
59879 list_add_tail(&op->pend_link, &object->pending_ops);
59880- fscache_stat(&fscache_n_op_pend);
59881+ fscache_stat_unchecked(&fscache_n_op_pend);
59882 fscache_start_operations(object);
59883 } else {
59884 ASSERTCMP(object->n_in_progress, ==, 0);
59885@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
59886 object->n_exclusive++; /* reads and writes must wait */
59887 atomic_inc(&op->usage);
59888 list_add_tail(&op->pend_link, &object->pending_ops);
59889- fscache_stat(&fscache_n_op_pend);
59890+ fscache_stat_unchecked(&fscache_n_op_pend);
59891 ret = 0;
59892 } else {
59893 /* If we're in any other state, there must have been an I/O
59894@@ -212,11 +212,11 @@ int fscache_submit_op(struct fscache_object *object,
59895 if (object->n_exclusive > 0) {
59896 atomic_inc(&op->usage);
59897 list_add_tail(&op->pend_link, &object->pending_ops);
59898- fscache_stat(&fscache_n_op_pend);
59899+ fscache_stat_unchecked(&fscache_n_op_pend);
59900 } else if (!list_empty(&object->pending_ops)) {
59901 atomic_inc(&op->usage);
59902 list_add_tail(&op->pend_link, &object->pending_ops);
59903- fscache_stat(&fscache_n_op_pend);
59904+ fscache_stat_unchecked(&fscache_n_op_pend);
59905 fscache_start_operations(object);
59906 } else {
59907 ASSERTCMP(object->n_exclusive, ==, 0);
59908@@ -228,10 +228,10 @@ int fscache_submit_op(struct fscache_object *object,
59909 object->n_ops++;
59910 atomic_inc(&op->usage);
59911 list_add_tail(&op->pend_link, &object->pending_ops);
59912- fscache_stat(&fscache_n_op_pend);
59913+ fscache_stat_unchecked(&fscache_n_op_pend);
59914 ret = 0;
59915 } else if (fscache_object_is_dying(object)) {
59916- fscache_stat(&fscache_n_op_rejected);
59917+ fscache_stat_unchecked(&fscache_n_op_rejected);
59918 op->state = FSCACHE_OP_ST_CANCELLED;
59919 ret = -ENOBUFS;
59920 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
59921@@ -310,7 +310,7 @@ int fscache_cancel_op(struct fscache_operation *op,
59922 ret = -EBUSY;
59923 if (op->state == FSCACHE_OP_ST_PENDING) {
59924 ASSERT(!list_empty(&op->pend_link));
59925- fscache_stat(&fscache_n_op_cancelled);
59926+ fscache_stat_unchecked(&fscache_n_op_cancelled);
59927 list_del_init(&op->pend_link);
59928 if (do_cancel)
59929 do_cancel(op);
59930@@ -342,7 +342,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
59931 while (!list_empty(&object->pending_ops)) {
59932 op = list_entry(object->pending_ops.next,
59933 struct fscache_operation, pend_link);
59934- fscache_stat(&fscache_n_op_cancelled);
59935+ fscache_stat_unchecked(&fscache_n_op_cancelled);
59936 list_del_init(&op->pend_link);
59937
59938 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
59939@@ -414,7 +414,7 @@ void fscache_put_operation(struct fscache_operation *op)
59940 op->state, ==, FSCACHE_OP_ST_CANCELLED);
59941 op->state = FSCACHE_OP_ST_DEAD;
59942
59943- fscache_stat(&fscache_n_op_release);
59944+ fscache_stat_unchecked(&fscache_n_op_release);
59945
59946 if (op->release) {
59947 op->release(op);
59948@@ -433,7 +433,7 @@ void fscache_put_operation(struct fscache_operation *op)
59949 * lock, and defer it otherwise */
59950 if (!spin_trylock(&object->lock)) {
59951 _debug("defer put");
59952- fscache_stat(&fscache_n_op_deferred_release);
59953+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
59954
59955 cache = object->cache;
59956 spin_lock(&cache->op_gc_list_lock);
59957@@ -486,7 +486,7 @@ void fscache_operation_gc(struct work_struct *work)
59958
59959 _debug("GC DEFERRED REL OBJ%x OP%x",
59960 object->debug_id, op->debug_id);
59961- fscache_stat(&fscache_n_op_gc);
59962+ fscache_stat_unchecked(&fscache_n_op_gc);
59963
59964 ASSERTCMP(atomic_read(&op->usage), ==, 0);
59965 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
59966diff --git a/fs/fscache/page.c b/fs/fscache/page.c
59967index 7f5c658..6c1e164 100644
59968--- a/fs/fscache/page.c
59969+++ b/fs/fscache/page.c
59970@@ -61,7 +61,7 @@ try_again:
59971 val = radix_tree_lookup(&cookie->stores, page->index);
59972 if (!val) {
59973 rcu_read_unlock();
59974- fscache_stat(&fscache_n_store_vmscan_not_storing);
59975+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
59976 __fscache_uncache_page(cookie, page);
59977 return true;
59978 }
59979@@ -91,11 +91,11 @@ try_again:
59980 spin_unlock(&cookie->stores_lock);
59981
59982 if (xpage) {
59983- fscache_stat(&fscache_n_store_vmscan_cancelled);
59984- fscache_stat(&fscache_n_store_radix_deletes);
59985+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
59986+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
59987 ASSERTCMP(xpage, ==, page);
59988 } else {
59989- fscache_stat(&fscache_n_store_vmscan_gone);
59990+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
59991 }
59992
59993 wake_up_bit(&cookie->flags, 0);
59994@@ -110,11 +110,11 @@ page_busy:
59995 * sleeping on memory allocation, so we may need to impose a timeout
59996 * too. */
59997 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
59998- fscache_stat(&fscache_n_store_vmscan_busy);
59999+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
60000 return false;
60001 }
60002
60003- fscache_stat(&fscache_n_store_vmscan_wait);
60004+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
60005 __fscache_wait_on_page_write(cookie, page);
60006 gfp &= ~__GFP_WAIT;
60007 goto try_again;
60008@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
60009 FSCACHE_COOKIE_STORING_TAG);
60010 if (!radix_tree_tag_get(&cookie->stores, page->index,
60011 FSCACHE_COOKIE_PENDING_TAG)) {
60012- fscache_stat(&fscache_n_store_radix_deletes);
60013+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
60014 xpage = radix_tree_delete(&cookie->stores, page->index);
60015 }
60016 spin_unlock(&cookie->stores_lock);
60017@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
60018
60019 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
60020
60021- fscache_stat(&fscache_n_attr_changed_calls);
60022+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
60023
60024 if (fscache_object_is_active(object)) {
60025 fscache_stat(&fscache_n_cop_attr_changed);
60026@@ -188,11 +188,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
60027
60028 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
60029
60030- fscache_stat(&fscache_n_attr_changed);
60031+ fscache_stat_unchecked(&fscache_n_attr_changed);
60032
60033 op = kzalloc(sizeof(*op), GFP_KERNEL);
60034 if (!op) {
60035- fscache_stat(&fscache_n_attr_changed_nomem);
60036+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
60037 _leave(" = -ENOMEM");
60038 return -ENOMEM;
60039 }
60040@@ -214,7 +214,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
60041 if (fscache_submit_exclusive_op(object, op) < 0)
60042 goto nobufs;
60043 spin_unlock(&cookie->lock);
60044- fscache_stat(&fscache_n_attr_changed_ok);
60045+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
60046 fscache_put_operation(op);
60047 _leave(" = 0");
60048 return 0;
60049@@ -225,7 +225,7 @@ nobufs:
60050 kfree(op);
60051 if (wake_cookie)
60052 __fscache_wake_unused_cookie(cookie);
60053- fscache_stat(&fscache_n_attr_changed_nobufs);
60054+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
60055 _leave(" = %d", -ENOBUFS);
60056 return -ENOBUFS;
60057 }
60058@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
60059 /* allocate a retrieval operation and attempt to submit it */
60060 op = kzalloc(sizeof(*op), GFP_NOIO);
60061 if (!op) {
60062- fscache_stat(&fscache_n_retrievals_nomem);
60063+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
60064 return NULL;
60065 }
60066
60067@@ -294,13 +294,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
60068 return 0;
60069 }
60070
60071- fscache_stat(&fscache_n_retrievals_wait);
60072+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
60073
60074 jif = jiffies;
60075 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
60076 fscache_wait_bit_interruptible,
60077 TASK_INTERRUPTIBLE) != 0) {
60078- fscache_stat(&fscache_n_retrievals_intr);
60079+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
60080 _leave(" = -ERESTARTSYS");
60081 return -ERESTARTSYS;
60082 }
60083@@ -329,8 +329,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
60084 */
60085 int fscache_wait_for_operation_activation(struct fscache_object *object,
60086 struct fscache_operation *op,
60087- atomic_t *stat_op_waits,
60088- atomic_t *stat_object_dead,
60089+ atomic_unchecked_t *stat_op_waits,
60090+ atomic_unchecked_t *stat_object_dead,
60091 void (*do_cancel)(struct fscache_operation *))
60092 {
60093 int ret;
60094@@ -340,7 +340,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
60095
60096 _debug(">>> WT");
60097 if (stat_op_waits)
60098- fscache_stat(stat_op_waits);
60099+ fscache_stat_unchecked(stat_op_waits);
60100 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
60101 fscache_wait_bit_interruptible,
60102 TASK_INTERRUPTIBLE) != 0) {
60103@@ -358,7 +358,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
60104 check_if_dead:
60105 if (op->state == FSCACHE_OP_ST_CANCELLED) {
60106 if (stat_object_dead)
60107- fscache_stat(stat_object_dead);
60108+ fscache_stat_unchecked(stat_object_dead);
60109 _leave(" = -ENOBUFS [cancelled]");
60110 return -ENOBUFS;
60111 }
60112@@ -366,7 +366,7 @@ check_if_dead:
60113 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
60114 fscache_cancel_op(op, do_cancel);
60115 if (stat_object_dead)
60116- fscache_stat(stat_object_dead);
60117+ fscache_stat_unchecked(stat_object_dead);
60118 return -ENOBUFS;
60119 }
60120 return 0;
60121@@ -394,7 +394,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
60122
60123 _enter("%p,%p,,,", cookie, page);
60124
60125- fscache_stat(&fscache_n_retrievals);
60126+ fscache_stat_unchecked(&fscache_n_retrievals);
60127
60128 if (hlist_empty(&cookie->backing_objects))
60129 goto nobufs;
60130@@ -436,7 +436,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
60131 goto nobufs_unlock_dec;
60132 spin_unlock(&cookie->lock);
60133
60134- fscache_stat(&fscache_n_retrieval_ops);
60135+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
60136
60137 /* pin the netfs read context in case we need to do the actual netfs
60138 * read because we've encountered a cache read failure */
60139@@ -467,15 +467,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
60140
60141 error:
60142 if (ret == -ENOMEM)
60143- fscache_stat(&fscache_n_retrievals_nomem);
60144+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
60145 else if (ret == -ERESTARTSYS)
60146- fscache_stat(&fscache_n_retrievals_intr);
60147+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
60148 else if (ret == -ENODATA)
60149- fscache_stat(&fscache_n_retrievals_nodata);
60150+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
60151 else if (ret < 0)
60152- fscache_stat(&fscache_n_retrievals_nobufs);
60153+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60154 else
60155- fscache_stat(&fscache_n_retrievals_ok);
60156+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
60157
60158 fscache_put_retrieval(op);
60159 _leave(" = %d", ret);
60160@@ -490,7 +490,7 @@ nobufs_unlock:
60161 __fscache_wake_unused_cookie(cookie);
60162 kfree(op);
60163 nobufs:
60164- fscache_stat(&fscache_n_retrievals_nobufs);
60165+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60166 _leave(" = -ENOBUFS");
60167 return -ENOBUFS;
60168 }
60169@@ -529,7 +529,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
60170
60171 _enter("%p,,%d,,,", cookie, *nr_pages);
60172
60173- fscache_stat(&fscache_n_retrievals);
60174+ fscache_stat_unchecked(&fscache_n_retrievals);
60175
60176 if (hlist_empty(&cookie->backing_objects))
60177 goto nobufs;
60178@@ -567,7 +567,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
60179 goto nobufs_unlock_dec;
60180 spin_unlock(&cookie->lock);
60181
60182- fscache_stat(&fscache_n_retrieval_ops);
60183+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
60184
60185 /* pin the netfs read context in case we need to do the actual netfs
60186 * read because we've encountered a cache read failure */
60187@@ -598,15 +598,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
60188
60189 error:
60190 if (ret == -ENOMEM)
60191- fscache_stat(&fscache_n_retrievals_nomem);
60192+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
60193 else if (ret == -ERESTARTSYS)
60194- fscache_stat(&fscache_n_retrievals_intr);
60195+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
60196 else if (ret == -ENODATA)
60197- fscache_stat(&fscache_n_retrievals_nodata);
60198+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
60199 else if (ret < 0)
60200- fscache_stat(&fscache_n_retrievals_nobufs);
60201+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60202 else
60203- fscache_stat(&fscache_n_retrievals_ok);
60204+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
60205
60206 fscache_put_retrieval(op);
60207 _leave(" = %d", ret);
60208@@ -621,7 +621,7 @@ nobufs_unlock:
60209 if (wake_cookie)
60210 __fscache_wake_unused_cookie(cookie);
60211 nobufs:
60212- fscache_stat(&fscache_n_retrievals_nobufs);
60213+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
60214 _leave(" = -ENOBUFS");
60215 return -ENOBUFS;
60216 }
60217@@ -646,7 +646,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
60218
60219 _enter("%p,%p,,,", cookie, page);
60220
60221- fscache_stat(&fscache_n_allocs);
60222+ fscache_stat_unchecked(&fscache_n_allocs);
60223
60224 if (hlist_empty(&cookie->backing_objects))
60225 goto nobufs;
60226@@ -680,7 +680,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
60227 goto nobufs_unlock_dec;
60228 spin_unlock(&cookie->lock);
60229
60230- fscache_stat(&fscache_n_alloc_ops);
60231+ fscache_stat_unchecked(&fscache_n_alloc_ops);
60232
60233 ret = fscache_wait_for_operation_activation(
60234 object, &op->op,
60235@@ -697,11 +697,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
60236
60237 error:
60238 if (ret == -ERESTARTSYS)
60239- fscache_stat(&fscache_n_allocs_intr);
60240+ fscache_stat_unchecked(&fscache_n_allocs_intr);
60241 else if (ret < 0)
60242- fscache_stat(&fscache_n_allocs_nobufs);
60243+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
60244 else
60245- fscache_stat(&fscache_n_allocs_ok);
60246+ fscache_stat_unchecked(&fscache_n_allocs_ok);
60247
60248 fscache_put_retrieval(op);
60249 _leave(" = %d", ret);
60250@@ -715,7 +715,7 @@ nobufs_unlock:
60251 if (wake_cookie)
60252 __fscache_wake_unused_cookie(cookie);
60253 nobufs:
60254- fscache_stat(&fscache_n_allocs_nobufs);
60255+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
60256 _leave(" = -ENOBUFS");
60257 return -ENOBUFS;
60258 }
60259@@ -791,7 +791,7 @@ static void fscache_write_op(struct fscache_operation *_op)
60260
60261 spin_lock(&cookie->stores_lock);
60262
60263- fscache_stat(&fscache_n_store_calls);
60264+ fscache_stat_unchecked(&fscache_n_store_calls);
60265
60266 /* find a page to store */
60267 page = NULL;
60268@@ -802,7 +802,7 @@ static void fscache_write_op(struct fscache_operation *_op)
60269 page = results[0];
60270 _debug("gang %d [%lx]", n, page->index);
60271 if (page->index > op->store_limit) {
60272- fscache_stat(&fscache_n_store_pages_over_limit);
60273+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
60274 goto superseded;
60275 }
60276
60277@@ -814,7 +814,7 @@ static void fscache_write_op(struct fscache_operation *_op)
60278 spin_unlock(&cookie->stores_lock);
60279 spin_unlock(&object->lock);
60280
60281- fscache_stat(&fscache_n_store_pages);
60282+ fscache_stat_unchecked(&fscache_n_store_pages);
60283 fscache_stat(&fscache_n_cop_write_page);
60284 ret = object->cache->ops->write_page(op, page);
60285 fscache_stat_d(&fscache_n_cop_write_page);
60286@@ -918,7 +918,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
60287 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
60288 ASSERT(PageFsCache(page));
60289
60290- fscache_stat(&fscache_n_stores);
60291+ fscache_stat_unchecked(&fscache_n_stores);
60292
60293 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
60294 _leave(" = -ENOBUFS [invalidating]");
60295@@ -977,7 +977,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
60296 spin_unlock(&cookie->stores_lock);
60297 spin_unlock(&object->lock);
60298
60299- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
60300+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60301 op->store_limit = object->store_limit;
60302
60303 __fscache_use_cookie(cookie);
60304@@ -986,8 +986,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
60305
60306 spin_unlock(&cookie->lock);
60307 radix_tree_preload_end();
60308- fscache_stat(&fscache_n_store_ops);
60309- fscache_stat(&fscache_n_stores_ok);
60310+ fscache_stat_unchecked(&fscache_n_store_ops);
60311+ fscache_stat_unchecked(&fscache_n_stores_ok);
60312
60313 /* the work queue now carries its own ref on the object */
60314 fscache_put_operation(&op->op);
60315@@ -995,14 +995,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
60316 return 0;
60317
60318 already_queued:
60319- fscache_stat(&fscache_n_stores_again);
60320+ fscache_stat_unchecked(&fscache_n_stores_again);
60321 already_pending:
60322 spin_unlock(&cookie->stores_lock);
60323 spin_unlock(&object->lock);
60324 spin_unlock(&cookie->lock);
60325 radix_tree_preload_end();
60326 kfree(op);
60327- fscache_stat(&fscache_n_stores_ok);
60328+ fscache_stat_unchecked(&fscache_n_stores_ok);
60329 _leave(" = 0");
60330 return 0;
60331
60332@@ -1024,14 +1024,14 @@ nobufs:
60333 kfree(op);
60334 if (wake_cookie)
60335 __fscache_wake_unused_cookie(cookie);
60336- fscache_stat(&fscache_n_stores_nobufs);
60337+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
60338 _leave(" = -ENOBUFS");
60339 return -ENOBUFS;
60340
60341 nomem_free:
60342 kfree(op);
60343 nomem:
60344- fscache_stat(&fscache_n_stores_oom);
60345+ fscache_stat_unchecked(&fscache_n_stores_oom);
60346 _leave(" = -ENOMEM");
60347 return -ENOMEM;
60348 }
60349@@ -1049,7 +1049,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
60350 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
60351 ASSERTCMP(page, !=, NULL);
60352
60353- fscache_stat(&fscache_n_uncaches);
60354+ fscache_stat_unchecked(&fscache_n_uncaches);
60355
60356 /* cache withdrawal may beat us to it */
60357 if (!PageFsCache(page))
60358@@ -1100,7 +1100,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
60359 struct fscache_cookie *cookie = op->op.object->cookie;
60360
60361 #ifdef CONFIG_FSCACHE_STATS
60362- atomic_inc(&fscache_n_marks);
60363+ atomic_inc_unchecked(&fscache_n_marks);
60364 #endif
60365
60366 _debug("- mark %p{%lx}", page, page->index);
60367diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
60368index 40d13c7..ddf52b9 100644
60369--- a/fs/fscache/stats.c
60370+++ b/fs/fscache/stats.c
60371@@ -18,99 +18,99 @@
60372 /*
60373 * operation counters
60374 */
60375-atomic_t fscache_n_op_pend;
60376-atomic_t fscache_n_op_run;
60377-atomic_t fscache_n_op_enqueue;
60378-atomic_t fscache_n_op_requeue;
60379-atomic_t fscache_n_op_deferred_release;
60380-atomic_t fscache_n_op_release;
60381-atomic_t fscache_n_op_gc;
60382-atomic_t fscache_n_op_cancelled;
60383-atomic_t fscache_n_op_rejected;
60384+atomic_unchecked_t fscache_n_op_pend;
60385+atomic_unchecked_t fscache_n_op_run;
60386+atomic_unchecked_t fscache_n_op_enqueue;
60387+atomic_unchecked_t fscache_n_op_requeue;
60388+atomic_unchecked_t fscache_n_op_deferred_release;
60389+atomic_unchecked_t fscache_n_op_release;
60390+atomic_unchecked_t fscache_n_op_gc;
60391+atomic_unchecked_t fscache_n_op_cancelled;
60392+atomic_unchecked_t fscache_n_op_rejected;
60393
60394-atomic_t fscache_n_attr_changed;
60395-atomic_t fscache_n_attr_changed_ok;
60396-atomic_t fscache_n_attr_changed_nobufs;
60397-atomic_t fscache_n_attr_changed_nomem;
60398-atomic_t fscache_n_attr_changed_calls;
60399+atomic_unchecked_t fscache_n_attr_changed;
60400+atomic_unchecked_t fscache_n_attr_changed_ok;
60401+atomic_unchecked_t fscache_n_attr_changed_nobufs;
60402+atomic_unchecked_t fscache_n_attr_changed_nomem;
60403+atomic_unchecked_t fscache_n_attr_changed_calls;
60404
60405-atomic_t fscache_n_allocs;
60406-atomic_t fscache_n_allocs_ok;
60407-atomic_t fscache_n_allocs_wait;
60408-atomic_t fscache_n_allocs_nobufs;
60409-atomic_t fscache_n_allocs_intr;
60410-atomic_t fscache_n_allocs_object_dead;
60411-atomic_t fscache_n_alloc_ops;
60412-atomic_t fscache_n_alloc_op_waits;
60413+atomic_unchecked_t fscache_n_allocs;
60414+atomic_unchecked_t fscache_n_allocs_ok;
60415+atomic_unchecked_t fscache_n_allocs_wait;
60416+atomic_unchecked_t fscache_n_allocs_nobufs;
60417+atomic_unchecked_t fscache_n_allocs_intr;
60418+atomic_unchecked_t fscache_n_allocs_object_dead;
60419+atomic_unchecked_t fscache_n_alloc_ops;
60420+atomic_unchecked_t fscache_n_alloc_op_waits;
60421
60422-atomic_t fscache_n_retrievals;
60423-atomic_t fscache_n_retrievals_ok;
60424-atomic_t fscache_n_retrievals_wait;
60425-atomic_t fscache_n_retrievals_nodata;
60426-atomic_t fscache_n_retrievals_nobufs;
60427-atomic_t fscache_n_retrievals_intr;
60428-atomic_t fscache_n_retrievals_nomem;
60429-atomic_t fscache_n_retrievals_object_dead;
60430-atomic_t fscache_n_retrieval_ops;
60431-atomic_t fscache_n_retrieval_op_waits;
60432+atomic_unchecked_t fscache_n_retrievals;
60433+atomic_unchecked_t fscache_n_retrievals_ok;
60434+atomic_unchecked_t fscache_n_retrievals_wait;
60435+atomic_unchecked_t fscache_n_retrievals_nodata;
60436+atomic_unchecked_t fscache_n_retrievals_nobufs;
60437+atomic_unchecked_t fscache_n_retrievals_intr;
60438+atomic_unchecked_t fscache_n_retrievals_nomem;
60439+atomic_unchecked_t fscache_n_retrievals_object_dead;
60440+atomic_unchecked_t fscache_n_retrieval_ops;
60441+atomic_unchecked_t fscache_n_retrieval_op_waits;
60442
60443-atomic_t fscache_n_stores;
60444-atomic_t fscache_n_stores_ok;
60445-atomic_t fscache_n_stores_again;
60446-atomic_t fscache_n_stores_nobufs;
60447-atomic_t fscache_n_stores_oom;
60448-atomic_t fscache_n_store_ops;
60449-atomic_t fscache_n_store_calls;
60450-atomic_t fscache_n_store_pages;
60451-atomic_t fscache_n_store_radix_deletes;
60452-atomic_t fscache_n_store_pages_over_limit;
60453+atomic_unchecked_t fscache_n_stores;
60454+atomic_unchecked_t fscache_n_stores_ok;
60455+atomic_unchecked_t fscache_n_stores_again;
60456+atomic_unchecked_t fscache_n_stores_nobufs;
60457+atomic_unchecked_t fscache_n_stores_oom;
60458+atomic_unchecked_t fscache_n_store_ops;
60459+atomic_unchecked_t fscache_n_store_calls;
60460+atomic_unchecked_t fscache_n_store_pages;
60461+atomic_unchecked_t fscache_n_store_radix_deletes;
60462+atomic_unchecked_t fscache_n_store_pages_over_limit;
60463
60464-atomic_t fscache_n_store_vmscan_not_storing;
60465-atomic_t fscache_n_store_vmscan_gone;
60466-atomic_t fscache_n_store_vmscan_busy;
60467-atomic_t fscache_n_store_vmscan_cancelled;
60468-atomic_t fscache_n_store_vmscan_wait;
60469+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
60470+atomic_unchecked_t fscache_n_store_vmscan_gone;
60471+atomic_unchecked_t fscache_n_store_vmscan_busy;
60472+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
60473+atomic_unchecked_t fscache_n_store_vmscan_wait;
60474
60475-atomic_t fscache_n_marks;
60476-atomic_t fscache_n_uncaches;
60477+atomic_unchecked_t fscache_n_marks;
60478+atomic_unchecked_t fscache_n_uncaches;
60479
60480-atomic_t fscache_n_acquires;
60481-atomic_t fscache_n_acquires_null;
60482-atomic_t fscache_n_acquires_no_cache;
60483-atomic_t fscache_n_acquires_ok;
60484-atomic_t fscache_n_acquires_nobufs;
60485-atomic_t fscache_n_acquires_oom;
60486+atomic_unchecked_t fscache_n_acquires;
60487+atomic_unchecked_t fscache_n_acquires_null;
60488+atomic_unchecked_t fscache_n_acquires_no_cache;
60489+atomic_unchecked_t fscache_n_acquires_ok;
60490+atomic_unchecked_t fscache_n_acquires_nobufs;
60491+atomic_unchecked_t fscache_n_acquires_oom;
60492
60493-atomic_t fscache_n_invalidates;
60494-atomic_t fscache_n_invalidates_run;
60495+atomic_unchecked_t fscache_n_invalidates;
60496+atomic_unchecked_t fscache_n_invalidates_run;
60497
60498-atomic_t fscache_n_updates;
60499-atomic_t fscache_n_updates_null;
60500-atomic_t fscache_n_updates_run;
60501+atomic_unchecked_t fscache_n_updates;
60502+atomic_unchecked_t fscache_n_updates_null;
60503+atomic_unchecked_t fscache_n_updates_run;
60504
60505-atomic_t fscache_n_relinquishes;
60506-atomic_t fscache_n_relinquishes_null;
60507-atomic_t fscache_n_relinquishes_waitcrt;
60508-atomic_t fscache_n_relinquishes_retire;
60509+atomic_unchecked_t fscache_n_relinquishes;
60510+atomic_unchecked_t fscache_n_relinquishes_null;
60511+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
60512+atomic_unchecked_t fscache_n_relinquishes_retire;
60513
60514-atomic_t fscache_n_cookie_index;
60515-atomic_t fscache_n_cookie_data;
60516-atomic_t fscache_n_cookie_special;
60517+atomic_unchecked_t fscache_n_cookie_index;
60518+atomic_unchecked_t fscache_n_cookie_data;
60519+atomic_unchecked_t fscache_n_cookie_special;
60520
60521-atomic_t fscache_n_object_alloc;
60522-atomic_t fscache_n_object_no_alloc;
60523-atomic_t fscache_n_object_lookups;
60524-atomic_t fscache_n_object_lookups_negative;
60525-atomic_t fscache_n_object_lookups_positive;
60526-atomic_t fscache_n_object_lookups_timed_out;
60527-atomic_t fscache_n_object_created;
60528-atomic_t fscache_n_object_avail;
60529-atomic_t fscache_n_object_dead;
60530+atomic_unchecked_t fscache_n_object_alloc;
60531+atomic_unchecked_t fscache_n_object_no_alloc;
60532+atomic_unchecked_t fscache_n_object_lookups;
60533+atomic_unchecked_t fscache_n_object_lookups_negative;
60534+atomic_unchecked_t fscache_n_object_lookups_positive;
60535+atomic_unchecked_t fscache_n_object_lookups_timed_out;
60536+atomic_unchecked_t fscache_n_object_created;
60537+atomic_unchecked_t fscache_n_object_avail;
60538+atomic_unchecked_t fscache_n_object_dead;
60539
60540-atomic_t fscache_n_checkaux_none;
60541-atomic_t fscache_n_checkaux_okay;
60542-atomic_t fscache_n_checkaux_update;
60543-atomic_t fscache_n_checkaux_obsolete;
60544+atomic_unchecked_t fscache_n_checkaux_none;
60545+atomic_unchecked_t fscache_n_checkaux_okay;
60546+atomic_unchecked_t fscache_n_checkaux_update;
60547+atomic_unchecked_t fscache_n_checkaux_obsolete;
60548
60549 atomic_t fscache_n_cop_alloc_object;
60550 atomic_t fscache_n_cop_lookup_object;
60551@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
60552 seq_puts(m, "FS-Cache statistics\n");
60553
60554 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
60555- atomic_read(&fscache_n_cookie_index),
60556- atomic_read(&fscache_n_cookie_data),
60557- atomic_read(&fscache_n_cookie_special));
60558+ atomic_read_unchecked(&fscache_n_cookie_index),
60559+ atomic_read_unchecked(&fscache_n_cookie_data),
60560+ atomic_read_unchecked(&fscache_n_cookie_special));
60561
60562 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
60563- atomic_read(&fscache_n_object_alloc),
60564- atomic_read(&fscache_n_object_no_alloc),
60565- atomic_read(&fscache_n_object_avail),
60566- atomic_read(&fscache_n_object_dead));
60567+ atomic_read_unchecked(&fscache_n_object_alloc),
60568+ atomic_read_unchecked(&fscache_n_object_no_alloc),
60569+ atomic_read_unchecked(&fscache_n_object_avail),
60570+ atomic_read_unchecked(&fscache_n_object_dead));
60571 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
60572- atomic_read(&fscache_n_checkaux_none),
60573- atomic_read(&fscache_n_checkaux_okay),
60574- atomic_read(&fscache_n_checkaux_update),
60575- atomic_read(&fscache_n_checkaux_obsolete));
60576+ atomic_read_unchecked(&fscache_n_checkaux_none),
60577+ atomic_read_unchecked(&fscache_n_checkaux_okay),
60578+ atomic_read_unchecked(&fscache_n_checkaux_update),
60579+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
60580
60581 seq_printf(m, "Pages : mrk=%u unc=%u\n",
60582- atomic_read(&fscache_n_marks),
60583- atomic_read(&fscache_n_uncaches));
60584+ atomic_read_unchecked(&fscache_n_marks),
60585+ atomic_read_unchecked(&fscache_n_uncaches));
60586
60587 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
60588 " oom=%u\n",
60589- atomic_read(&fscache_n_acquires),
60590- atomic_read(&fscache_n_acquires_null),
60591- atomic_read(&fscache_n_acquires_no_cache),
60592- atomic_read(&fscache_n_acquires_ok),
60593- atomic_read(&fscache_n_acquires_nobufs),
60594- atomic_read(&fscache_n_acquires_oom));
60595+ atomic_read_unchecked(&fscache_n_acquires),
60596+ atomic_read_unchecked(&fscache_n_acquires_null),
60597+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
60598+ atomic_read_unchecked(&fscache_n_acquires_ok),
60599+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
60600+ atomic_read_unchecked(&fscache_n_acquires_oom));
60601
60602 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
60603- atomic_read(&fscache_n_object_lookups),
60604- atomic_read(&fscache_n_object_lookups_negative),
60605- atomic_read(&fscache_n_object_lookups_positive),
60606- atomic_read(&fscache_n_object_created),
60607- atomic_read(&fscache_n_object_lookups_timed_out));
60608+ atomic_read_unchecked(&fscache_n_object_lookups),
60609+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
60610+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
60611+ atomic_read_unchecked(&fscache_n_object_created),
60612+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
60613
60614 seq_printf(m, "Invals : n=%u run=%u\n",
60615- atomic_read(&fscache_n_invalidates),
60616- atomic_read(&fscache_n_invalidates_run));
60617+ atomic_read_unchecked(&fscache_n_invalidates),
60618+ atomic_read_unchecked(&fscache_n_invalidates_run));
60619
60620 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
60621- atomic_read(&fscache_n_updates),
60622- atomic_read(&fscache_n_updates_null),
60623- atomic_read(&fscache_n_updates_run));
60624+ atomic_read_unchecked(&fscache_n_updates),
60625+ atomic_read_unchecked(&fscache_n_updates_null),
60626+ atomic_read_unchecked(&fscache_n_updates_run));
60627
60628 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
60629- atomic_read(&fscache_n_relinquishes),
60630- atomic_read(&fscache_n_relinquishes_null),
60631- atomic_read(&fscache_n_relinquishes_waitcrt),
60632- atomic_read(&fscache_n_relinquishes_retire));
60633+ atomic_read_unchecked(&fscache_n_relinquishes),
60634+ atomic_read_unchecked(&fscache_n_relinquishes_null),
60635+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
60636+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
60637
60638 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
60639- atomic_read(&fscache_n_attr_changed),
60640- atomic_read(&fscache_n_attr_changed_ok),
60641- atomic_read(&fscache_n_attr_changed_nobufs),
60642- atomic_read(&fscache_n_attr_changed_nomem),
60643- atomic_read(&fscache_n_attr_changed_calls));
60644+ atomic_read_unchecked(&fscache_n_attr_changed),
60645+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
60646+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
60647+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
60648+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
60649
60650 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
60651- atomic_read(&fscache_n_allocs),
60652- atomic_read(&fscache_n_allocs_ok),
60653- atomic_read(&fscache_n_allocs_wait),
60654- atomic_read(&fscache_n_allocs_nobufs),
60655- atomic_read(&fscache_n_allocs_intr));
60656+ atomic_read_unchecked(&fscache_n_allocs),
60657+ atomic_read_unchecked(&fscache_n_allocs_ok),
60658+ atomic_read_unchecked(&fscache_n_allocs_wait),
60659+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
60660+ atomic_read_unchecked(&fscache_n_allocs_intr));
60661 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
60662- atomic_read(&fscache_n_alloc_ops),
60663- atomic_read(&fscache_n_alloc_op_waits),
60664- atomic_read(&fscache_n_allocs_object_dead));
60665+ atomic_read_unchecked(&fscache_n_alloc_ops),
60666+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
60667+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
60668
60669 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
60670 " int=%u oom=%u\n",
60671- atomic_read(&fscache_n_retrievals),
60672- atomic_read(&fscache_n_retrievals_ok),
60673- atomic_read(&fscache_n_retrievals_wait),
60674- atomic_read(&fscache_n_retrievals_nodata),
60675- atomic_read(&fscache_n_retrievals_nobufs),
60676- atomic_read(&fscache_n_retrievals_intr),
60677- atomic_read(&fscache_n_retrievals_nomem));
60678+ atomic_read_unchecked(&fscache_n_retrievals),
60679+ atomic_read_unchecked(&fscache_n_retrievals_ok),
60680+ atomic_read_unchecked(&fscache_n_retrievals_wait),
60681+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
60682+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
60683+ atomic_read_unchecked(&fscache_n_retrievals_intr),
60684+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
60685 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
60686- atomic_read(&fscache_n_retrieval_ops),
60687- atomic_read(&fscache_n_retrieval_op_waits),
60688- atomic_read(&fscache_n_retrievals_object_dead));
60689+ atomic_read_unchecked(&fscache_n_retrieval_ops),
60690+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
60691+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
60692
60693 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
60694- atomic_read(&fscache_n_stores),
60695- atomic_read(&fscache_n_stores_ok),
60696- atomic_read(&fscache_n_stores_again),
60697- atomic_read(&fscache_n_stores_nobufs),
60698- atomic_read(&fscache_n_stores_oom));
60699+ atomic_read_unchecked(&fscache_n_stores),
60700+ atomic_read_unchecked(&fscache_n_stores_ok),
60701+ atomic_read_unchecked(&fscache_n_stores_again),
60702+ atomic_read_unchecked(&fscache_n_stores_nobufs),
60703+ atomic_read_unchecked(&fscache_n_stores_oom));
60704 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
60705- atomic_read(&fscache_n_store_ops),
60706- atomic_read(&fscache_n_store_calls),
60707- atomic_read(&fscache_n_store_pages),
60708- atomic_read(&fscache_n_store_radix_deletes),
60709- atomic_read(&fscache_n_store_pages_over_limit));
60710+ atomic_read_unchecked(&fscache_n_store_ops),
60711+ atomic_read_unchecked(&fscache_n_store_calls),
60712+ atomic_read_unchecked(&fscache_n_store_pages),
60713+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
60714+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
60715
60716 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
60717- atomic_read(&fscache_n_store_vmscan_not_storing),
60718- atomic_read(&fscache_n_store_vmscan_gone),
60719- atomic_read(&fscache_n_store_vmscan_busy),
60720- atomic_read(&fscache_n_store_vmscan_cancelled),
60721- atomic_read(&fscache_n_store_vmscan_wait));
60722+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
60723+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
60724+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
60725+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
60726+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
60727
60728 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
60729- atomic_read(&fscache_n_op_pend),
60730- atomic_read(&fscache_n_op_run),
60731- atomic_read(&fscache_n_op_enqueue),
60732- atomic_read(&fscache_n_op_cancelled),
60733- atomic_read(&fscache_n_op_rejected));
60734+ atomic_read_unchecked(&fscache_n_op_pend),
60735+ atomic_read_unchecked(&fscache_n_op_run),
60736+ atomic_read_unchecked(&fscache_n_op_enqueue),
60737+ atomic_read_unchecked(&fscache_n_op_cancelled),
60738+ atomic_read_unchecked(&fscache_n_op_rejected));
60739 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
60740- atomic_read(&fscache_n_op_deferred_release),
60741- atomic_read(&fscache_n_op_release),
60742- atomic_read(&fscache_n_op_gc));
60743+ atomic_read_unchecked(&fscache_n_op_deferred_release),
60744+ atomic_read_unchecked(&fscache_n_op_release),
60745+ atomic_read_unchecked(&fscache_n_op_gc));
60746
60747 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
60748 atomic_read(&fscache_n_cop_alloc_object),
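
The fscache hunks above all apply one pattern that recurs throughout this patch: PaX's REFCOUNT feature instruments atomic_t so that overflowing increments trap, which is wrong for counters that are purely statistical and may legitimately wrap. Those are moved to a parallel atomic_unchecked_t whose helpers skip the overflow check. A minimal sketch of the uninstrumented type, assuming an x86 target and modeled on (not copied from) the arch headers changed elsewhere in this patch:

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	/* plain load; no overflow instrumentation wanted for statistics */
	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return *(volatile const int *)&v->counter;
	}

	/* "lock incl" without the REFCOUNT overflow trap appended to it */
	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		asm volatile("lock incl %0" : "+m" (v->counter));
	}

Since fscache_stat() is essentially atomic_inc() on a named counter, fscache_stat_unchecked() is the same one-liner over the unchecked type, and the /proc dump in stats.c switches to atomic_read_unchecked() so the types agree.
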
60749diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
60750index b96a49b..9bfdc47 100644
60751--- a/fs/fuse/cuse.c
60752+++ b/fs/fuse/cuse.c
60753@@ -606,10 +606,12 @@ static int __init cuse_init(void)
60754 INIT_LIST_HEAD(&cuse_conntbl[i]);
60755
60756 /* inherit and extend fuse_dev_operations */
60757- cuse_channel_fops = fuse_dev_operations;
60758- cuse_channel_fops.owner = THIS_MODULE;
60759- cuse_channel_fops.open = cuse_channel_open;
60760- cuse_channel_fops.release = cuse_channel_release;
60761+ pax_open_kernel();
60762+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
60763+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
60764+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
60765+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
60766+ pax_close_kernel();
60767
60768 cuse_class = class_create(THIS_MODULE, "cuse");
60769 if (IS_ERR(cuse_class))
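
The cuse_init() hunk shows the companion idiom for KERNEXEC/constify: file_operations instances become read-only, so the old field-by-field assignment would neither compile against the const-qualified struct nor write to the protected page. Updates are instead bracketed by pax_open_kernel()/pax_close_kernel() and performed through casts such as *(void **)&cuse_channel_fops.owner, which strip the const qualifier for the store. A rough sketch of what the bracket amounts to on x86, assuming CR0.WP-based write protection (the patch's real helpers live in the arch headers and also cover non-x86):

	static inline void sketch_pax_open_kernel(void)
	{
		preempt_disable();
		barrier();
		write_cr0(read_cr0() & ~X86_CR0_WP);	/* allow writes to RO kernel pages */
	}

	static inline void sketch_pax_close_kernel(void)
	{
		write_cr0(read_cr0() | X86_CR0_WP);	/* restore write protection */
		barrier();
		preempt_enable();
	}

Disabling preemption keeps the window in which write protection is down confined to the current CPU and as short as possible.
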
60770diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
60771index fa8cb4b..4acb935 100644
60772--- a/fs/fuse/dev.c
60773+++ b/fs/fuse/dev.c
60774@@ -1323,7 +1323,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
60775 ret = 0;
60776 pipe_lock(pipe);
60777
60778- if (!pipe->readers) {
60779+ if (!atomic_read(&pipe->readers)) {
60780 send_sig(SIGPIPE, current, 0);
60781 if (!ret)
60782 ret = -EPIPE;
60783@@ -1352,7 +1352,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
60784 page_nr++;
60785 ret += buf->len;
60786
60787- if (pipe->files)
60788+ if (atomic_read(&pipe->files))
60789 do_wakeup = 1;
60790 }
60791
60792diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
60793index c3eb2c4..98007d4 100644
60794--- a/fs/fuse/dir.c
60795+++ b/fs/fuse/dir.c
60796@@ -1408,7 +1408,7 @@ static char *read_link(struct dentry *dentry)
60797 return link;
60798 }
60799
60800-static void free_link(char *link)
60801+static void free_link(const char *link)
60802 {
60803 if (!IS_ERR(link))
60804 free_page((unsigned long) link);
60805diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
60806index db23ce1..9e6cd9d 100644
60807--- a/fs/hostfs/hostfs_kern.c
60808+++ b/fs/hostfs/hostfs_kern.c
60809@@ -895,7 +895,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
60810
60811 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
60812 {
60813- char *s = nd_get_link(nd);
60814+ const char *s = nd_get_link(nd);
60815 if (!IS_ERR(s))
60816 __putname(s);
60817 }
60818diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
60819index d19b30a..ef89c36 100644
60820--- a/fs/hugetlbfs/inode.c
60821+++ b/fs/hugetlbfs/inode.c
60822@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
60823 struct mm_struct *mm = current->mm;
60824 struct vm_area_struct *vma;
60825 struct hstate *h = hstate_file(file);
60826+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
60827 struct vm_unmapped_area_info info;
60828
60829 if (len & ~huge_page_mask(h))
60830@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
60831 return addr;
60832 }
60833
60834+#ifdef CONFIG_PAX_RANDMMAP
60835+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
60836+#endif
60837+
60838 if (addr) {
60839 addr = ALIGN(addr, huge_page_size(h));
60840 vma = find_vma(mm, addr);
60841- if (TASK_SIZE - len >= addr &&
60842- (!vma || addr + len <= vma->vm_start))
60843+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
60844 return addr;
60845 }
60846
60847 info.flags = 0;
60848 info.length = len;
60849 info.low_limit = TASK_UNMAPPED_BASE;
60850+
60851+#ifdef CONFIG_PAX_RANDMMAP
60852+ if (mm->pax_flags & MF_PAX_RANDMMAP)
60853+ info.low_limit += mm->delta_mmap;
60854+#endif
60855+
60856 info.high_limit = TASK_SIZE;
60857 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
60858 info.align_offset = 0;
60859@@ -908,7 +918,7 @@ static struct file_system_type hugetlbfs_fs_type = {
60860 };
60861 MODULE_ALIAS_FS("hugetlbfs");
60862
60863-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
60864+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
60865
60866 static int can_do_hugetlb_shm(void)
60867 {
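
The hugetlb_get_unmapped_area() changes mirror what this patch does to every get_unmapped_area variant: under PAX_RANDMMAP the caller's address hint is ignored, the search floor is shifted by the per-mm delta_mmap randomization, and a candidate address must also pass check_heap_stack_gap(), which rejects placements that would sit flush against the next VMA (gr_rand_threadstack_offset() supplies the randomized slack used as the gap). A conceptual sketch of the gap test with a simplified helper; the real check_heap_stack_gap(), defined in the mm hunks of this patch, additionally handles growing stacks and a configurable gap size:

	static bool fits_with_gap(const struct vm_area_struct *vma,
				  unsigned long addr, unsigned long len,
				  unsigned long offset)
	{
		if (!vma)		/* no following VMA: fits by definition */
			return true;
		/* demand `offset` bytes of slack before the next mapping */
		return addr + len + offset <= vma->vm_start;
	}
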
60868diff --git a/fs/inode.c b/fs/inode.c
60869index 4bcdad3..1883822 100644
60870--- a/fs/inode.c
60871+++ b/fs/inode.c
60872@@ -841,8 +841,8 @@ unsigned int get_next_ino(void)
60873
60874 #ifdef CONFIG_SMP
60875 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
60876- static atomic_t shared_last_ino;
60877- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
60878+ static atomic_unchecked_t shared_last_ino;
60879+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
60880
60881 res = next - LAST_INO_BATCH;
60882 }
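
get_next_ino() only touches the shared counter when a CPU exhausts its private block of LAST_INO_BATCH numbers, and wrapping that counter is harmless for inode numbering, which is exactly the profile atomic_unchecked_t targets. The batching scheme itself, restated as a small userspace sketch with C11 atomics and a thread-local variable standing in for the per-CPU one:

	#include <stdatomic.h>

	#define LAST_INO_BATCH 1024U

	static atomic_uint shared_last_ino;
	static _Thread_local unsigned int last_ino;

	unsigned int get_next_ino_demo(void)
	{
		unsigned int res = last_ino;

		if ((res & (LAST_INO_BATCH - 1)) == 0) {
			/* slow path: claim a whole batch from the shared counter */
			unsigned int next = atomic_fetch_add(&shared_last_ino,
							     LAST_INO_BATCH)
					    + LAST_INO_BATCH;
			res = next - LAST_INO_BATCH;
		}
		last_ino = ++res;
		return res;
	}

One shared atomic operation per 1024 allocations keeps cross-CPU traffic negligible while the fast path stays a private increment.
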
60883diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
60884index 4a6cf28..d3a29d3 100644
60885--- a/fs/jffs2/erase.c
60886+++ b/fs/jffs2/erase.c
60887@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
60888 struct jffs2_unknown_node marker = {
60889 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
60890 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
60891- .totlen = cpu_to_je32(c->cleanmarker_size)
60892+ .totlen = cpu_to_je32(c->cleanmarker_size),
60893+ .hdr_crc = cpu_to_je32(0)
60894 };
60895
60896 jffs2_prealloc_raw_node_refs(c, jeb, 1);
60897diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
60898index a6597d6..41b30ec 100644
60899--- a/fs/jffs2/wbuf.c
60900+++ b/fs/jffs2/wbuf.c
60901@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
60902 {
60903 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
60904 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
60905- .totlen = constant_cpu_to_je32(8)
60906+ .totlen = constant_cpu_to_je32(8),
60907+ .hdr_crc = constant_cpu_to_je32(0)
60908 };
60909
60910 /*
60911diff --git a/fs/jfs/super.c b/fs/jfs/super.c
60912index 6669aa2..36b033d 100644
60913--- a/fs/jfs/super.c
60914+++ b/fs/jfs/super.c
60915@@ -882,7 +882,7 @@ static int __init init_jfs_fs(void)
60916
60917 jfs_inode_cachep =
60918 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
60919- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
60920+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
60921 init_once);
60922 if (jfs_inode_cachep == NULL)
60923 return -ENOMEM;
60924diff --git a/fs/libfs.c b/fs/libfs.c
60925index a184424..944ddce 100644
60926--- a/fs/libfs.c
60927+++ b/fs/libfs.c
60928@@ -159,6 +159,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
60929
60930 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
60931 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
60932+ char d_name[sizeof(next->d_iname)];
60933+ const unsigned char *name;
60934+
60935 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
60936 if (!simple_positive(next)) {
60937 spin_unlock(&next->d_lock);
60938@@ -167,7 +170,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
60939
60940 spin_unlock(&next->d_lock);
60941 spin_unlock(&dentry->d_lock);
60942- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
60943+ name = next->d_name.name;
60944+ if (name == next->d_iname) {
60945+ memcpy(d_name, name, next->d_name.len);
60946+ name = d_name;
60947+ }
60948+ if (!dir_emit(ctx, name, next->d_name.len,
60949 next->d_inode->i_ino, dt_type(next->d_inode)))
60950 return 0;
60951 spin_lock(&dentry->d_lock);
60952@@ -999,7 +1007,7 @@ EXPORT_SYMBOL(noop_fsync);
60953 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
60954 void *cookie)
60955 {
60956- char *s = nd_get_link(nd);
60957+ const char *s = nd_get_link(nd);
60958 if (!IS_ERR(s))
60959 kfree(s);
60960 }
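
The dcache_readdir() change targets names stored inline in the dentry: d_iname can be rewritten by a concurrent rename once d_lock is dropped, so the patch snapshots short names into an on-stack buffer before handing them to dir_emit(), which may sleep while copying to userspace. The idiom in isolation, assuming a dentry `next` as in the loop above:

	char stable[sizeof(next->d_iname)];
	const unsigned char *name = next->d_name.name;

	if (name == next->d_iname) {	/* inline name: may change under us */
		memcpy(stable, name, next->d_name.len);
		name = stable;
	}
	dir_emit(ctx, name, next->d_name.len, ino, dtype);

Long names live in a separately allocated buffer rather than inside the dentry, so only the inline case needs the copy.
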
60961diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
60962index acd3947..1f896e2 100644
60963--- a/fs/lockd/clntproc.c
60964+++ b/fs/lockd/clntproc.c
60965@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
60966 /*
60967 * Cookie counter for NLM requests
60968 */
60969-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
60970+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
60971
60972 void nlmclnt_next_cookie(struct nlm_cookie *c)
60973 {
60974- u32 cookie = atomic_inc_return(&nlm_cookie);
60975+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
60976
60977 memcpy(c->data, &cookie, 4);
60978 c->len=4;
60979diff --git a/fs/locks.c b/fs/locks.c
60980index 92a0f0a..45a48f0 100644
60981--- a/fs/locks.c
60982+++ b/fs/locks.c
60983@@ -2219,16 +2219,16 @@ void locks_remove_flock(struct file *filp)
60984 return;
60985
60986 if (filp->f_op->flock) {
60987- struct file_lock fl = {
60988+ struct file_lock flock = {
60989 .fl_pid = current->tgid,
60990 .fl_file = filp,
60991 .fl_flags = FL_FLOCK,
60992 .fl_type = F_UNLCK,
60993 .fl_end = OFFSET_MAX,
60994 };
60995- filp->f_op->flock(filp, F_SETLKW, &fl);
60996- if (fl.fl_ops && fl.fl_ops->fl_release_private)
60997- fl.fl_ops->fl_release_private(&fl);
60998+ filp->f_op->flock(filp, F_SETLKW, &flock);
60999+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
61000+ flock.fl_ops->fl_release_private(&flock);
61001 }
61002
61003 spin_lock(&inode->i_lock);
61004diff --git a/fs/mount.h b/fs/mount.h
61005index a17458c..e69fb5b 100644
61006--- a/fs/mount.h
61007+++ b/fs/mount.h
61008@@ -11,7 +11,7 @@ struct mnt_namespace {
61009 u64 seq; /* Sequence number to prevent loops */
61010 wait_queue_head_t poll;
61011 int event;
61012-};
61013+} __randomize_layout;
61014
61015 struct mnt_pcp {
61016 int mnt_count;
61017@@ -57,7 +57,7 @@ struct mount {
61018 int mnt_expiry_mark; /* true if marked for expiry */
61019 int mnt_pinned;
61020 struct path mnt_ex_mountpoint;
61021-};
61022+} __randomize_layout;
61023
61024 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
61025
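
Marking struct mnt_namespace and struct mount __randomize_layout hands them to the randstruct gcc plugin added elsewhere in this patch: member order is shuffled at build time from a per-build seed, so exploits can no longer rely on fixed field offsets inside these frequently targeted VFS structures. The annotation itself is just an attribute the plugin recognizes; a sketch of the supporting definitions, assumed to match the compiler header hunks not shown here:

	#ifdef RANDSTRUCT_PLUGIN
	#define __randomize_layout	__attribute__((randomize_layout))
	#define __no_randomize_layout	__attribute__((no_randomize_layout))
	#else
	#define __randomize_layout
	#define __no_randomize_layout
	#endif
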
61026diff --git a/fs/namei.c b/fs/namei.c
61027index cfe6608..f9deefc 100644
61028--- a/fs/namei.c
61029+++ b/fs/namei.c
61030@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
61031 if (ret != -EACCES)
61032 return ret;
61033
61034+#ifdef CONFIG_GRKERNSEC
61035+ /* we'll block if we have to log due to a denied capability use */
61036+ if (mask & MAY_NOT_BLOCK)
61037+ return -ECHILD;
61038+#endif
61039+
61040 if (S_ISDIR(inode->i_mode)) {
61041 /* DACs are overridable for directories */
61042- if (inode_capable(inode, CAP_DAC_OVERRIDE))
61043- return 0;
61044 if (!(mask & MAY_WRITE))
61045- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
61046+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
61047+ inode_capable(inode, CAP_DAC_READ_SEARCH))
61048 return 0;
61049+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
61050+ return 0;
61051 return -EACCES;
61052 }
61053 /*
61054+ * Searching includes executable on directories, else just read.
61055+ */
61056+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
61057+ if (mask == MAY_READ)
61058+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
61059+ inode_capable(inode, CAP_DAC_READ_SEARCH))
61060+ return 0;
61061+
61062+ /*
61063 * Read/write DACs are always overridable.
61064 * Executable DACs are overridable when there is
61065 * at least one exec bit set.
61066@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
61067 if (inode_capable(inode, CAP_DAC_OVERRIDE))
61068 return 0;
61069
61070- /*
61071- * Searching includes executable on directories, else just read.
61072- */
61073- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
61074- if (mask == MAY_READ)
61075- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
61076- return 0;
61077-
61078 return -EACCES;
61079 }
61080
61081@@ -810,7 +818,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
61082 {
61083 struct dentry *dentry = link->dentry;
61084 int error;
61085- char *s;
61086+ const char *s;
61087
61088 BUG_ON(nd->flags & LOOKUP_RCU);
61089
61090@@ -831,6 +839,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
61091 if (error)
61092 goto out_put_nd_path;
61093
61094+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
61095+ dentry->d_inode, dentry, nd->path.mnt)) {
61096+ error = -EACCES;
61097+ goto out_put_nd_path;
61098+ }
61099+
61100 nd->last_type = LAST_BIND;
61101 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
61102 error = PTR_ERR(*p);
61103@@ -1582,6 +1596,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
61104 if (res)
61105 break;
61106 res = walk_component(nd, path, LOOKUP_FOLLOW);
61107+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
61108+ res = -EACCES;
61109 put_link(nd, &link, cookie);
61110 } while (res > 0);
61111
61112@@ -1655,7 +1671,7 @@ EXPORT_SYMBOL(full_name_hash);
61113 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
61114 {
61115 unsigned long a, b, adata, bdata, mask, hash, len;
61116- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
61117+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
61118
61119 hash = a = 0;
61120 len = -sizeof(unsigned long);
61121@@ -1939,6 +1955,8 @@ static int path_lookupat(int dfd, const char *name,
61122 if (err)
61123 break;
61124 err = lookup_last(nd, &path);
61125+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
61126+ err = -EACCES;
61127 put_link(nd, &link, cookie);
61128 }
61129 }
61130@@ -1946,6 +1964,13 @@ static int path_lookupat(int dfd, const char *name,
61131 if (!err)
61132 err = complete_walk(nd);
61133
61134+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
61135+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
61136+ path_put(&nd->path);
61137+ err = -ENOENT;
61138+ }
61139+ }
61140+
61141 if (!err && nd->flags & LOOKUP_DIRECTORY) {
61142 if (!d_is_directory(nd->path.dentry)) {
61143 path_put(&nd->path);
61144@@ -1973,8 +1998,15 @@ static int filename_lookup(int dfd, struct filename *name,
61145 retval = path_lookupat(dfd, name->name,
61146 flags | LOOKUP_REVAL, nd);
61147
61148- if (likely(!retval))
61149+ if (likely(!retval)) {
61150 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
61151+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
61152+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
61153+ path_put(&nd->path);
61154+ return -ENOENT;
61155+ }
61156+ }
61157+ }
61158 return retval;
61159 }
61160
61161@@ -2548,6 +2580,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
61162 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
61163 return -EPERM;
61164
61165+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
61166+ return -EPERM;
61167+ if (gr_handle_rawio(inode))
61168+ return -EPERM;
61169+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
61170+ return -EACCES;
61171+
61172 return 0;
61173 }
61174
61175@@ -2779,7 +2818,7 @@ looked_up:
61176 * cleared otherwise prior to returning.
61177 */
61178 static int lookup_open(struct nameidata *nd, struct path *path,
61179- struct file *file,
61180+ struct path *link, struct file *file,
61181 const struct open_flags *op,
61182 bool got_write, int *opened)
61183 {
61184@@ -2814,6 +2853,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
61185 /* Negative dentry, just create the file */
61186 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
61187 umode_t mode = op->mode;
61188+
61189+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
61190+ error = -EACCES;
61191+ goto out_dput;
61192+ }
61193+
61194+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
61195+ error = -EACCES;
61196+ goto out_dput;
61197+ }
61198+
61199 if (!IS_POSIXACL(dir->d_inode))
61200 mode &= ~current_umask();
61201 /*
61202@@ -2835,6 +2885,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
61203 nd->flags & LOOKUP_EXCL);
61204 if (error)
61205 goto out_dput;
61206+ else
61207+ gr_handle_create(dentry, nd->path.mnt);
61208 }
61209 out_no_open:
61210 path->dentry = dentry;
61211@@ -2849,7 +2901,7 @@ out_dput:
61212 /*
61213 * Handle the last step of open()
61214 */
61215-static int do_last(struct nameidata *nd, struct path *path,
61216+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
61217 struct file *file, const struct open_flags *op,
61218 int *opened, struct filename *name)
61219 {
61220@@ -2899,6 +2951,15 @@ static int do_last(struct nameidata *nd, struct path *path,
61221 if (error)
61222 return error;
61223
61224+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
61225+ error = -ENOENT;
61226+ goto out;
61227+ }
61228+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
61229+ error = -EACCES;
61230+ goto out;
61231+ }
61232+
61233 audit_inode(name, dir, LOOKUP_PARENT);
61234 error = -EISDIR;
61235 /* trailing slashes? */
61236@@ -2918,7 +2979,7 @@ retry_lookup:
61237 */
61238 }
61239 mutex_lock(&dir->d_inode->i_mutex);
61240- error = lookup_open(nd, path, file, op, got_write, opened);
61241+ error = lookup_open(nd, path, link, file, op, got_write, opened);
61242 mutex_unlock(&dir->d_inode->i_mutex);
61243
61244 if (error <= 0) {
61245@@ -2942,11 +3003,28 @@ retry_lookup:
61246 goto finish_open_created;
61247 }
61248
61249+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
61250+ error = -ENOENT;
61251+ goto exit_dput;
61252+ }
61253+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
61254+ error = -EACCES;
61255+ goto exit_dput;
61256+ }
61257+
61258 /*
61259 * create/update audit record if it already exists.
61260 */
61261- if (d_is_positive(path->dentry))
61262+ if (d_is_positive(path->dentry)) {
61263+ /* only check if O_CREAT is specified, all other checks need to go
61264+ into may_open */
61265+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
61266+ error = -EACCES;
61267+ goto exit_dput;
61268+ }
61269+
61270 audit_inode(name, path->dentry, 0);
61271+ }
61272
61273 /*
61274 * If atomic_open() acquired write access it is dropped now due to
61275@@ -2987,6 +3065,11 @@ finish_lookup:
61276 }
61277 }
61278 BUG_ON(inode != path->dentry->d_inode);
61279+ /* if we're resolving a symlink to another symlink */
61280+ if (link && gr_handle_symlink_owner(link, inode)) {
61281+ error = -EACCES;
61282+ goto out;
61283+ }
61284 return 1;
61285 }
61286
61287@@ -2996,7 +3079,6 @@ finish_lookup:
61288 save_parent.dentry = nd->path.dentry;
61289 save_parent.mnt = mntget(path->mnt);
61290 nd->path.dentry = path->dentry;
61291-
61292 }
61293 nd->inode = inode;
61294 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
61295@@ -3006,7 +3088,18 @@ finish_open:
61296 path_put(&save_parent);
61297 return error;
61298 }
61299+
61300+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
61301+ error = -ENOENT;
61302+ goto out;
61303+ }
61304+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
61305+ error = -EACCES;
61306+ goto out;
61307+ }
61308+
61309 audit_inode(name, nd->path.dentry, 0);
61310+
61311 error = -EISDIR;
61312 if ((open_flag & O_CREAT) &&
61313 (d_is_directory(nd->path.dentry) || d_is_autodir(nd->path.dentry)))
61314@@ -3170,7 +3263,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
61315 if (unlikely(error))
61316 goto out;
61317
61318- error = do_last(nd, &path, file, op, &opened, pathname);
61319+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
61320 while (unlikely(error > 0)) { /* trailing symlink */
61321 struct path link = path;
61322 void *cookie;
61323@@ -3188,7 +3281,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
61324 error = follow_link(&link, nd, &cookie);
61325 if (unlikely(error))
61326 break;
61327- error = do_last(nd, &path, file, op, &opened, pathname);
61328+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
61329 put_link(nd, &link, cookie);
61330 }
61331 out:
61332@@ -3288,9 +3381,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
61333 goto unlock;
61334
61335 error = -EEXIST;
61336- if (d_is_positive(dentry))
61337+ if (d_is_positive(dentry)) {
61338+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
61339+ error = -ENOENT;
61340 goto fail;
61341-
61342+ }
61343 /*
61344 * Special case - lookup gave negative, but... we had foo/bar/
61345 * From the vfs_mknod() POV we just have a negative dentry -
61346@@ -3342,6 +3437,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
61347 }
61348 EXPORT_SYMBOL(user_path_create);
61349
61350+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
61351+{
61352+ struct filename *tmp = getname(pathname);
61353+ struct dentry *res;
61354+ if (IS_ERR(tmp))
61355+ return ERR_CAST(tmp);
61356+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
61357+ if (IS_ERR(res))
61358+ putname(tmp);
61359+ else
61360+ *to = tmp;
61361+ return res;
61362+}
61363+
61364 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
61365 {
61366 int error = may_create(dir, dentry);
61367@@ -3404,6 +3513,17 @@ retry:
61368
61369 if (!IS_POSIXACL(path.dentry->d_inode))
61370 mode &= ~current_umask();
61371+
61372+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
61373+ error = -EPERM;
61374+ goto out;
61375+ }
61376+
61377+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
61378+ error = -EACCES;
61379+ goto out;
61380+ }
61381+
61382 error = security_path_mknod(&path, dentry, mode, dev);
61383 if (error)
61384 goto out;
61385@@ -3420,6 +3540,8 @@ retry:
61386 break;
61387 }
61388 out:
61389+ if (!error)
61390+ gr_handle_create(dentry, path.mnt);
61391 done_path_create(&path, dentry);
61392 if (retry_estale(error, lookup_flags)) {
61393 lookup_flags |= LOOKUP_REVAL;
61394@@ -3472,9 +3594,16 @@ retry:
61395
61396 if (!IS_POSIXACL(path.dentry->d_inode))
61397 mode &= ~current_umask();
61398+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
61399+ error = -EACCES;
61400+ goto out;
61401+ }
61402 error = security_path_mkdir(&path, dentry, mode);
61403 if (!error)
61404 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
61405+ if (!error)
61406+ gr_handle_create(dentry, path.mnt);
61407+out:
61408 done_path_create(&path, dentry);
61409 if (retry_estale(error, lookup_flags)) {
61410 lookup_flags |= LOOKUP_REVAL;
61411@@ -3555,6 +3684,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
61412 struct filename *name;
61413 struct dentry *dentry;
61414 struct nameidata nd;
61415+ ino_t saved_ino = 0;
61416+ dev_t saved_dev = 0;
61417 unsigned int lookup_flags = 0;
61418 retry:
61419 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
61420@@ -3587,10 +3718,21 @@ retry:
61421 error = -ENOENT;
61422 goto exit3;
61423 }
61424+
61425+ saved_ino = dentry->d_inode->i_ino;
61426+ saved_dev = gr_get_dev_from_dentry(dentry);
61427+
61428+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
61429+ error = -EACCES;
61430+ goto exit3;
61431+ }
61432+
61433 error = security_path_rmdir(&nd.path, dentry);
61434 if (error)
61435 goto exit3;
61436 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
61437+ if (!error && (saved_dev || saved_ino))
61438+ gr_handle_delete(saved_ino, saved_dev);
61439 exit3:
61440 dput(dentry);
61441 exit2:
61442@@ -3680,6 +3822,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
61443 struct nameidata nd;
61444 struct inode *inode = NULL;
61445 struct inode *delegated_inode = NULL;
61446+ ino_t saved_ino = 0;
61447+ dev_t saved_dev = 0;
61448 unsigned int lookup_flags = 0;
61449 retry:
61450 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
61451@@ -3706,10 +3850,22 @@ retry_deleg:
61452 if (d_is_negative(dentry))
61453 goto slashes;
61454 ihold(inode);
61455+
61456+ if (inode->i_nlink <= 1) {
61457+ saved_ino = inode->i_ino;
61458+ saved_dev = gr_get_dev_from_dentry(dentry);
61459+ }
61460+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
61461+ error = -EACCES;
61462+ goto exit2;
61463+ }
61464+
61465 error = security_path_unlink(&nd.path, dentry);
61466 if (error)
61467 goto exit2;
61468 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
61469+ if (!error && (saved_ino || saved_dev))
61470+ gr_handle_delete(saved_ino, saved_dev);
61471 exit2:
61472 dput(dentry);
61473 }
61474@@ -3797,9 +3953,17 @@ retry:
61475 if (IS_ERR(dentry))
61476 goto out_putname;
61477
61478+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
61479+ error = -EACCES;
61480+ goto out;
61481+ }
61482+
61483 error = security_path_symlink(&path, dentry, from->name);
61484 if (!error)
61485 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
61486+ if (!error)
61487+ gr_handle_create(dentry, path.mnt);
61488+out:
61489 done_path_create(&path, dentry);
61490 if (retry_estale(error, lookup_flags)) {
61491 lookup_flags |= LOOKUP_REVAL;
61492@@ -3902,6 +4066,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
61493 struct dentry *new_dentry;
61494 struct path old_path, new_path;
61495 struct inode *delegated_inode = NULL;
61496+ struct filename *to = NULL;
61497 int how = 0;
61498 int error;
61499
61500@@ -3925,7 +4090,7 @@ retry:
61501 if (error)
61502 return error;
61503
61504- new_dentry = user_path_create(newdfd, newname, &new_path,
61505+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
61506 (how & LOOKUP_REVAL));
61507 error = PTR_ERR(new_dentry);
61508 if (IS_ERR(new_dentry))
61509@@ -3937,11 +4102,28 @@ retry:
61510 error = may_linkat(&old_path);
61511 if (unlikely(error))
61512 goto out_dput;
61513+
61514+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
61515+ old_path.dentry->d_inode,
61516+ old_path.dentry->d_inode->i_mode, to)) {
61517+ error = -EACCES;
61518+ goto out_dput;
61519+ }
61520+
61521+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
61522+ old_path.dentry, old_path.mnt, to)) {
61523+ error = -EACCES;
61524+ goto out_dput;
61525+ }
61526+
61527 error = security_path_link(old_path.dentry, &new_path, new_dentry);
61528 if (error)
61529 goto out_dput;
61530 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
61531+ if (!error)
61532+ gr_handle_create(new_dentry, new_path.mnt);
61533 out_dput:
61534+ putname(to);
61535 done_path_create(&new_path, new_dentry);
61536 if (delegated_inode) {
61537 error = break_deleg_wait(&delegated_inode);
61538@@ -4228,6 +4410,12 @@ retry_deleg:
61539 if (new_dentry == trap)
61540 goto exit5;
61541
61542+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
61543+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
61544+ to);
61545+ if (error)
61546+ goto exit5;
61547+
61548 error = security_path_rename(&oldnd.path, old_dentry,
61549 &newnd.path, new_dentry);
61550 if (error)
61551@@ -4235,6 +4423,9 @@ retry_deleg:
61552 error = vfs_rename(old_dir->d_inode, old_dentry,
61553 new_dir->d_inode, new_dentry,
61554 &delegated_inode);
61555+ if (!error)
61556+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
61557+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
61558 exit5:
61559 dput(new_dentry);
61560 exit4:
61561@@ -4271,6 +4462,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
61562
61563 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
61564 {
61565+ char tmpbuf[64];
61566+ const char *newlink;
61567 int len;
61568
61569 len = PTR_ERR(link);
61570@@ -4280,7 +4473,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
61571 len = strlen(link);
61572 if (len > (unsigned) buflen)
61573 len = buflen;
61574- if (copy_to_user(buffer, link, len))
61575+
61576+ if (len < sizeof(tmpbuf)) {
61577+ memcpy(tmpbuf, link, len);
61578+ newlink = tmpbuf;
61579+ } else
61580+ newlink = link;
61581+
61582+ if (copy_to_user(buffer, newlink, len))
61583 len = -EFAULT;
61584 out:
61585 return len;
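
The vfs_readlink() hunk above bounces short link targets through a 64-byte stack buffer before the user copy, so copy_to_user() never reads directly out of the object holding the symlink body (a useful property under PaX USERCOPY-style slab checks); longer targets still go out in place. A minimal userspace sketch of the same bounce-buffer pattern, with copy_to_user() modeled by memcpy() and all names illustrative:

/* Bounce-buffer copy: short strings are staged in a small stack
 * buffer and only the staging copy is handed to the user copy.
 * copy_to_user() is stood in by memcpy(); sizes are arbitrary. */
#include <stdio.h>
#include <string.h>

static long copy_link_out(char *dst, const char *link, long buflen)
{
    char tmpbuf[64];
    const char *src = link;
    long len = (long)strlen(link);

    if (len > buflen)
        len = buflen;

    if (len < (long)sizeof(tmpbuf)) {   /* bounce short strings */
        memcpy(tmpbuf, link, len);
        src = tmpbuf;
    }
    memcpy(dst, src, len);              /* the user copy */
    return len;
}

int main(void)
{
    char out[128];
    long n = copy_link_out(out, "/tmp/target", sizeof(out));

    printf("%.*s (%ld bytes)\n", (int)n, out, n);
    return 0;
}
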
61586diff --git a/fs/namespace.c b/fs/namespace.c
61587index be32ebc..c595734 100644
61588--- a/fs/namespace.c
61589+++ b/fs/namespace.c
61590@@ -1293,6 +1293,9 @@ static int do_umount(struct mount *mnt, int flags)
61591 if (!(sb->s_flags & MS_RDONLY))
61592 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
61593 up_write(&sb->s_umount);
61594+
61595+ gr_log_remount(mnt->mnt_devname, retval);
61596+
61597 return retval;
61598 }
61599
61600@@ -1315,6 +1318,9 @@ static int do_umount(struct mount *mnt, int flags)
61601 }
61602 unlock_mount_hash();
61603 namespace_unlock();
61604+
61605+ gr_log_unmount(mnt->mnt_devname, retval);
61606+
61607 return retval;
61608 }
61609
61610@@ -1334,7 +1340,7 @@ static inline bool may_mount(void)
61611 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
61612 */
61613
61614-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
61615+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
61616 {
61617 struct path path;
61618 struct mount *mnt;
61619@@ -1376,7 +1382,7 @@ out:
61620 /*
61621 * The 2.0 compatible umount. No flags.
61622 */
61623-SYSCALL_DEFINE1(oldumount, char __user *, name)
61624+SYSCALL_DEFINE1(oldumount, const char __user *, name)
61625 {
61626 return sys_umount(name, 0);
61627 }
61628@@ -2379,6 +2385,16 @@ long do_mount(const char *dev_name, const char *dir_name,
61629 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
61630 MS_STRICTATIME);
61631
61632+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
61633+ retval = -EPERM;
61634+ goto dput_out;
61635+ }
61636+
61637+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
61638+ retval = -EPERM;
61639+ goto dput_out;
61640+ }
61641+
61642 if (flags & MS_REMOUNT)
61643 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
61644 data_page);
61645@@ -2393,6 +2409,9 @@ long do_mount(const char *dev_name, const char *dir_name,
61646 dev_name, data_page);
61647 dput_out:
61648 path_put(&path);
61649+
61650+ gr_log_mount(dev_name, dir_name, retval);
61651+
61652 return retval;
61653 }
61654
61655@@ -2410,7 +2429,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
61656 * number incrementing at 10Ghz will take 12,427 years to wrap which
61657 * is effectively never, so we can ignore the possibility.
61658 */
61659-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
61660+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
61661
61662 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
61663 {
61664@@ -2425,7 +2444,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
61665 kfree(new_ns);
61666 return ERR_PTR(ret);
61667 }
61668- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
61669+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
61670 atomic_set(&new_ns->count, 1);
61671 new_ns->root = NULL;
61672 INIT_LIST_HEAD(&new_ns->list);
61673@@ -2435,7 +2454,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
61674 return new_ns;
61675 }
61676
61677-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
61678+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
61679 struct user_namespace *user_ns, struct fs_struct *new_fs)
61680 {
61681 struct mnt_namespace *new_ns;
61682@@ -2556,8 +2575,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
61683 }
61684 EXPORT_SYMBOL(mount_subtree);
61685
61686-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
61687- char __user *, type, unsigned long, flags, void __user *, data)
61688+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
61689+ const char __user *, type, unsigned long, flags, void __user *, data)
61690 {
61691 int ret;
61692 char *kernel_type;
61693@@ -2670,6 +2689,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
61694 if (error)
61695 goto out2;
61696
61697+ if (gr_handle_chroot_pivot()) {
61698+ error = -EPERM;
61699+ goto out2;
61700+ }
61701+
61702 get_fs_root(current->fs, &root);
61703 old_mp = lock_mount(&old);
61704 error = PTR_ERR(old_mp);
61705@@ -2930,7 +2954,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
61706 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
61707 return -EPERM;
61708
61709- if (fs->users != 1)
61710+ if (atomic_read(&fs->users) != 1)
61711 return -EINVAL;
61712
61713 get_mnt_ns(mnt_ns);
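
mnt_ns_seq above becomes atomic64_unchecked_t, and the same conversion recurs below for the nfs attribute-generation counter, the fsnotify sync cookie, and the ocfs2 allocation statistics. Under the PaX REFCOUNT feature, ordinary atomic increments trap on overflow to stop reference-count wraps; counters that may legitimately wrap (sequence numbers, statistics) opt out through the *_unchecked variants. A C11-atomic sketch of the distinction, with the checked behaviour modeled by an explicit saturation test rather than PaX's trap:

/* A sequence number may wrap harmlessly (the unchecked case),
 * while a real reference count must refuse to (the checked case,
 * modeled here by hand). */
#include <limits.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_fast64_t mnt_ns_seq = 1;  /* wrapping is harmless   */
static atomic_int refcount = 1;              /* wrapping would be a bug */

static uint64_t next_seq(void)
{
    /* *_unchecked in the patch: no overflow policing at all */
    return atomic_fetch_add(&mnt_ns_seq, 1) + 1;
}

static int ref_get(void)
{
    int old = atomic_fetch_add(&refcount, 1);

    if (old < 0 || old == INT_MAX) {         /* model the checked variant */
        atomic_fetch_sub(&refcount, 1);
        return -1;                           /* refuse to wrap */
    }
    return 0;
}

int main(void)
{
    printf("seq=%llu ref_ok=%d\n",
           (unsigned long long)next_seq(), ref_get());
    return 0;
}
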
61714diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
61715index f4ccfe6..a5cf064 100644
61716--- a/fs/nfs/callback_xdr.c
61717+++ b/fs/nfs/callback_xdr.c
61718@@ -51,7 +51,7 @@ struct callback_op {
61719 callback_decode_arg_t decode_args;
61720 callback_encode_res_t encode_res;
61721 long res_maxsize;
61722-};
61723+} __do_const;
61724
61725 static struct callback_op callback_ops[];
61726
61727diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
61728index 5d94c02..630214f 100644
61729--- a/fs/nfs/inode.c
61730+++ b/fs/nfs/inode.c
61731@@ -1153,16 +1153,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
61732 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
61733 }
61734
61735-static atomic_long_t nfs_attr_generation_counter;
61736+static atomic_long_unchecked_t nfs_attr_generation_counter;
61737
61738 static unsigned long nfs_read_attr_generation_counter(void)
61739 {
61740- return atomic_long_read(&nfs_attr_generation_counter);
61741+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
61742 }
61743
61744 unsigned long nfs_inc_attr_generation_counter(void)
61745 {
61746- return atomic_long_inc_return(&nfs_attr_generation_counter);
61747+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
61748 }
61749
61750 void nfs_fattr_init(struct nfs_fattr *fattr)
61751diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
61752index 419572f..5414a23 100644
61753--- a/fs/nfsd/nfs4proc.c
61754+++ b/fs/nfsd/nfs4proc.c
61755@@ -1168,7 +1168,7 @@ struct nfsd4_operation {
61756 nfsd4op_rsize op_rsize_bop;
61757 stateid_getter op_get_currentstateid;
61758 stateid_setter op_set_currentstateid;
61759-};
61760+} __do_const;
61761
61762 static struct nfsd4_operation nfsd4_ops[];
61763
61764diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
61765index ee7237f..e3ae60a 100644
61766--- a/fs/nfsd/nfs4xdr.c
61767+++ b/fs/nfsd/nfs4xdr.c
61768@@ -1523,7 +1523,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
61769
61770 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
61771
61772-static nfsd4_dec nfsd4_dec_ops[] = {
61773+static const nfsd4_dec nfsd4_dec_ops[] = {
61774 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
61775 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
61776 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
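
The __do_const tags on callback_op and nfsd4_operation, and the const added to nfsd4_dec_ops above, apply the PaX constify idea: tables of function pointers are moved into read-only data so a kernel write primitive cannot redirect them. A plain-C illustration of the pattern (names invented for the example):

/* Declaring the ops table const places it in .rodata; the
 * pointers it holds can no longer be overwritten at runtime. */
#include <stdio.h>

typedef int (*decoder_fn)(const char *buf);

static int decode_access(const char *buf) { return buf[0]; }
static int decode_close(const char *buf)  { return buf[1]; }

/* const: the pointer table itself becomes read-only data */
static const decoder_fn dec_ops[] = {
    decode_access,
    decode_close,
};

int main(void)
{
    const char msg[] = { 7, 9 };

    printf("%d %d\n", dec_ops[0](msg), dec_ops[1](msg));
    return 0;
}
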
61777diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
61778index b6af150..f6ec5e3 100644
61779--- a/fs/nfsd/nfscache.c
61780+++ b/fs/nfsd/nfscache.c
61781@@ -547,14 +547,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
61782 {
61783 struct svc_cacherep *rp = rqstp->rq_cacherep;
61784 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
61785- int len;
61786+ long len;
61787 size_t bufsize = 0;
61788
61789 if (!rp)
61790 return;
61791
61792- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
61793- len >>= 2;
61794+ if (statp) {
61795+ len = (char*)statp - (char*)resv->iov_base;
61796+ len = resv->iov_len - len;
61797+ len >>= 2;
61798+ }
61799
61800 /* Don't cache excessive amounts of data and XDR failures */
61801 if (!statp || len > (256 >> 2)) {
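
The nfsd_cache_update() rework above only computes the reply length when statp is non-NULL and widens it from int to long, so no pointer difference is taken against a missing status word and a very large reply cannot overflow the length before the 256-byte cacheability test. A standalone sketch of the reworked computation (harness and names illustrative):

/* Length of the XDR words after the status pointer, computed
 * only when that pointer exists, in a long to avoid truncation. */
#include <stdio.h>

static long cacheable_words(const char *base, const char *statp,
                            long iov_len)
{
    long len = 0;

    if (statp) {
        len = statp - base;     /* offset of the status word */
        len = iov_len - len;    /* bytes after it            */
        len >>= 2;              /* XDR words                 */
    }
    return len;
}

int main(void)
{
    char buf[64];

    printf("%ld\n", cacheable_words(buf, buf + 4, sizeof(buf)));
    return 0;
}
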
61802diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
61803index 7eea63c..a35f4fb 100644
61804--- a/fs/nfsd/vfs.c
61805+++ b/fs/nfsd/vfs.c
61806@@ -993,7 +993,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
61807 } else {
61808 oldfs = get_fs();
61809 set_fs(KERNEL_DS);
61810- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
61811+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
61812 set_fs(oldfs);
61813 }
61814
61815@@ -1084,7 +1084,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
61816
61817 /* Write the data. */
61818 oldfs = get_fs(); set_fs(KERNEL_DS);
61819- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
61820+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
61821 set_fs(oldfs);
61822 if (host_err < 0)
61823 goto out_nfserr;
61824@@ -1629,7 +1629,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
61825 */
61826
61827 oldfs = get_fs(); set_fs(KERNEL_DS);
61828- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
61829+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
61830 set_fs(oldfs);
61831
61832 if (host_err < 0)
61833diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
61834index fea6bd5..8ee9d81 100644
61835--- a/fs/nls/nls_base.c
61836+++ b/fs/nls/nls_base.c
61837@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
61838
61839 int register_nls(struct nls_table * nls)
61840 {
61841- struct nls_table ** tmp = &tables;
61842+ struct nls_table *tmp = tables;
61843
61844 if (nls->next)
61845 return -EBUSY;
61846
61847 spin_lock(&nls_lock);
61848- while (*tmp) {
61849- if (nls == *tmp) {
61850+ while (tmp) {
61851+ if (nls == tmp) {
61852 spin_unlock(&nls_lock);
61853 return -EBUSY;
61854 }
61855- tmp = &(*tmp)->next;
61856+ tmp = tmp->next;
61857 }
61858- nls->next = tables;
61859+ pax_open_kernel();
61860+ *(struct nls_table **)&nls->next = tables;
61861+ pax_close_kernel();
61862 tables = nls;
61863 spin_unlock(&nls_lock);
61864 return 0;
61865@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
61866
61867 int unregister_nls(struct nls_table * nls)
61868 {
61869- struct nls_table ** tmp = &tables;
61870+ struct nls_table * const * tmp = &tables;
61871
61872 spin_lock(&nls_lock);
61873 while (*tmp) {
61874 if (nls == *tmp) {
61875- *tmp = nls->next;
61876+ pax_open_kernel();
61877+ *(struct nls_table **)tmp = nls->next;
61878+ pax_close_kernel();
61879 spin_unlock(&nls_lock);
61880 return 0;
61881 }
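
register_nls() above, and the NLS table fix-ups in the next two files, write through pax_open_kernel()/pax_close_kernel() because constification leaves the nls_table link fields read-only. The kernel briefly lifts write protection for a controlled store and restores it immediately; the sketch below models that in userspace with mprotect() on an anonymous page (POSIX-only, purely illustrative):

/* "Open" a read-only region, perform one controlled update,
 * "close" it again.  mprotect() stands in for the kernel's
 * write-protection toggle. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    char *tbl = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (tbl == MAP_FAILED)
        return 1;
    strcpy(tbl, "cp932");
    mprotect(tbl, pg, PROT_READ);              /* table is now r/o  */

    mprotect(tbl, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
    strcpy(tbl, "koi8-u");                     /* controlled update */
    mprotect(tbl, pg, PROT_READ);              /* pax_close_kernel()*/

    puts(tbl);
    return 0;
}
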
61882diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
61883index 7424929..35f6be5 100644
61884--- a/fs/nls/nls_euc-jp.c
61885+++ b/fs/nls/nls_euc-jp.c
61886@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
61887 p_nls = load_nls("cp932");
61888
61889 if (p_nls) {
61890- table.charset2upper = p_nls->charset2upper;
61891- table.charset2lower = p_nls->charset2lower;
61892+ pax_open_kernel();
61893+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
61894+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
61895+ pax_close_kernel();
61896 return register_nls(&table);
61897 }
61898
61899diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
61900index e7bc1d7..06bd4bb 100644
61901--- a/fs/nls/nls_koi8-ru.c
61902+++ b/fs/nls/nls_koi8-ru.c
61903@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
61904 p_nls = load_nls("koi8-u");
61905
61906 if (p_nls) {
61907- table.charset2upper = p_nls->charset2upper;
61908- table.charset2lower = p_nls->charset2lower;
61909+ pax_open_kernel();
61910+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
61911+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
61912+ pax_close_kernel();
61913 return register_nls(&table);
61914 }
61915
61916diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
61917index 6663511..7668ca4 100644
61918--- a/fs/notify/fanotify/fanotify_user.c
61919+++ b/fs/notify/fanotify/fanotify_user.c
61920@@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
61921
61922 fd = fanotify_event_metadata.fd;
61923 ret = -EFAULT;
61924- if (copy_to_user(buf, &fanotify_event_metadata,
61925- fanotify_event_metadata.event_len))
61926+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
61927+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
61928 goto out_close_fd;
61929
61930 ret = prepare_for_access_response(group, event, fd);
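
The fanotify change above validates event_len against the size of the kernel-side metadata structure before using it as the copy length, so a corrupted or oversized length field cannot leak the stack memory next to the event to userspace. A minimal sketch of the guarded copy, with copy_to_user() stood in by memcpy() and the structure reduced to two fields:

/* The length that drives the user copy is checked against the
 * source object's size first; anything larger is rejected. */
#include <stddef.h>
#include <string.h>

struct event_metadata {
    size_t event_len;
    int fd;
};

static int copy_event(void *ubuf, const struct event_metadata *ev)
{
    if (ev->event_len > sizeof(*ev))   /* reject before copying  */
        return -1;                     /* -EFAULT in the kernel  */
    memcpy(ubuf, ev, ev->event_len);   /* stands in for copy_to_user() */
    return 0;
}

int main(void)
{
    struct event_metadata ev = { sizeof(ev), 3 };
    char buf[sizeof(ev)];

    return copy_event(buf, &ev) ? 1 : 0;
}
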
61931diff --git a/fs/notify/notification.c b/fs/notify/notification.c
61932index 7b51b05..5ea5ef6 100644
61933--- a/fs/notify/notification.c
61934+++ b/fs/notify/notification.c
61935@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
61936 * get set to 0 so it will never get 'freed'
61937 */
61938 static struct fsnotify_event *q_overflow_event;
61939-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
61940+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
61941
61942 /**
61943 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
61944@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
61945 */
61946 u32 fsnotify_get_cookie(void)
61947 {
61948- return atomic_inc_return(&fsnotify_sync_cookie);
61949+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
61950 }
61951 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
61952
61953diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
61954index 9e38daf..5727cae 100644
61955--- a/fs/ntfs/dir.c
61956+++ b/fs/ntfs/dir.c
61957@@ -1310,7 +1310,7 @@ find_next_index_buffer:
61958 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
61959 ~(s64)(ndir->itype.index.block_size - 1)));
61960 /* Bounds checks. */
61961- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
61962+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
61963 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
61964 "inode 0x%lx or driver bug.", vdir->i_ino);
61965 goto err_out;
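
The ntfs directory bounds check above gains a !kaddr test: if the mapped page address were ever NULL, the ia-versus-kaddr comparisons could pass even though the pointer is garbage, so the NULL case is now rejected first. A toy version of the NULL-first bounds predicate (illustrative only):

/* Check the base pointer before doing arithmetic against it. */
#include <stdint.h>
#include <stdio.h>

static int in_bounds(const uint8_t *base, const uint8_t *p, size_t size)
{
    if (!base)                       /* the new !kaddr test comes first */
        return 0;
    return p >= base && p <= base + size;
}

int main(void)
{
    uint8_t page[4096];

    printf("%d %d\n", in_bounds(page, page + 16, sizeof(page)),
                      in_bounds(NULL, page, sizeof(page)));
    return 0;
}
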
61966diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
61967index ea4ba9d..1e13d34 100644
61968--- a/fs/ntfs/file.c
61969+++ b/fs/ntfs/file.c
61970@@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
61971 char *addr;
61972 size_t total = 0;
61973 unsigned len;
61974- int left;
61975+ unsigned left;
61976
61977 do {
61978 len = PAGE_CACHE_SIZE - ofs;
61979diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
61980index 82650d5..db37dcf 100644
61981--- a/fs/ntfs/super.c
61982+++ b/fs/ntfs/super.c
61983@@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
61984 if (!silent)
61985 ntfs_error(sb, "Primary boot sector is invalid.");
61986 } else if (!silent)
61987- ntfs_error(sb, read_err_str, "primary");
61988+ ntfs_error(sb, read_err_str, "%s", "primary");
61989 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
61990 if (bh_primary)
61991 brelse(bh_primary);
61992@@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
61993 goto hotfix_primary_boot_sector;
61994 brelse(bh_backup);
61995 } else if (!silent)
61996- ntfs_error(sb, read_err_str, "backup");
61997+ ntfs_error(sb, read_err_str, "%s", "backup");
61998 /* Try to read NT3.51- backup boot sector. */
61999 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
62000 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
62001@@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
62002 "sector.");
62003 brelse(bh_backup);
62004 } else if (!silent)
62005- ntfs_error(sb, read_err_str, "backup");
62006+ ntfs_error(sb, read_err_str, "%s", "backup");
62007 /* We failed. Cleanup and return. */
62008 if (bh_primary)
62009 brelse(bh_primary);
62010diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
62011index cd5496b..26a1055 100644
62012--- a/fs/ocfs2/localalloc.c
62013+++ b/fs/ocfs2/localalloc.c
62014@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
62015 goto bail;
62016 }
62017
62018- atomic_inc(&osb->alloc_stats.moves);
62019+ atomic_inc_unchecked(&osb->alloc_stats.moves);
62020
62021 bail:
62022 if (handle)
62023diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
62024index 3a90347..c40bef8 100644
62025--- a/fs/ocfs2/ocfs2.h
62026+++ b/fs/ocfs2/ocfs2.h
62027@@ -235,11 +235,11 @@ enum ocfs2_vol_state
62028
62029 struct ocfs2_alloc_stats
62030 {
62031- atomic_t moves;
62032- atomic_t local_data;
62033- atomic_t bitmap_data;
62034- atomic_t bg_allocs;
62035- atomic_t bg_extends;
62036+ atomic_unchecked_t moves;
62037+ atomic_unchecked_t local_data;
62038+ atomic_unchecked_t bitmap_data;
62039+ atomic_unchecked_t bg_allocs;
62040+ atomic_unchecked_t bg_extends;
62041 };
62042
62043 enum ocfs2_local_alloc_state
62044diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
62045index 2c91452..77a3cd2 100644
62046--- a/fs/ocfs2/suballoc.c
62047+++ b/fs/ocfs2/suballoc.c
62048@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
62049 mlog_errno(status);
62050 goto bail;
62051 }
62052- atomic_inc(&osb->alloc_stats.bg_extends);
62053+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
62054
62055 /* You should never ask for this much metadata */
62056 BUG_ON(bits_wanted >
62057@@ -2000,7 +2000,7 @@ int ocfs2_claim_metadata(handle_t *handle,
62058 mlog_errno(status);
62059 goto bail;
62060 }
62061- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62062+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62063
62064 *suballoc_loc = res.sr_bg_blkno;
62065 *suballoc_bit_start = res.sr_bit_offset;
62066@@ -2164,7 +2164,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
62067 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
62068 res->sr_bits);
62069
62070- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62071+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62072
62073 BUG_ON(res->sr_bits != 1);
62074
62075@@ -2206,7 +2206,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
62076 mlog_errno(status);
62077 goto bail;
62078 }
62079- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62080+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
62081
62082 BUG_ON(res.sr_bits != 1);
62083
62084@@ -2310,7 +2310,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
62085 cluster_start,
62086 num_clusters);
62087 if (!status)
62088- atomic_inc(&osb->alloc_stats.local_data);
62089+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
62090 } else {
62091 if (min_clusters > (osb->bitmap_cpg - 1)) {
62092 /* The only paths asking for contiguousness
62093@@ -2336,7 +2336,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
62094 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
62095 res.sr_bg_blkno,
62096 res.sr_bit_offset);
62097- atomic_inc(&osb->alloc_stats.bitmap_data);
62098+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
62099 *num_clusters = res.sr_bits;
62100 }
62101 }
62102diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
62103index c414929..5c9ee542 100644
62104--- a/fs/ocfs2/super.c
62105+++ b/fs/ocfs2/super.c
62106@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
62107 "%10s => GlobalAllocs: %d LocalAllocs: %d "
62108 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
62109 "Stats",
62110- atomic_read(&osb->alloc_stats.bitmap_data),
62111- atomic_read(&osb->alloc_stats.local_data),
62112- atomic_read(&osb->alloc_stats.bg_allocs),
62113- atomic_read(&osb->alloc_stats.moves),
62114- atomic_read(&osb->alloc_stats.bg_extends));
62115+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
62116+ atomic_read_unchecked(&osb->alloc_stats.local_data),
62117+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
62118+ atomic_read_unchecked(&osb->alloc_stats.moves),
62119+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
62120
62121 out += snprintf(buf + out, len - out,
62122 "%10s => State: %u Descriptor: %llu Size: %u bits "
62123@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
62124 spin_lock_init(&osb->osb_xattr_lock);
62125 ocfs2_init_steal_slots(osb);
62126
62127- atomic_set(&osb->alloc_stats.moves, 0);
62128- atomic_set(&osb->alloc_stats.local_data, 0);
62129- atomic_set(&osb->alloc_stats.bitmap_data, 0);
62130- atomic_set(&osb->alloc_stats.bg_allocs, 0);
62131- atomic_set(&osb->alloc_stats.bg_extends, 0);
62132+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
62133+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
62134+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
62135+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
62136+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
62137
62138 /* Copy the blockcheck stats from the superblock probe */
62139 osb->osb_ecc_stats = *stats;
62140diff --git a/fs/open.c b/fs/open.c
62141index 4b3e1ed..1c84599 100644
62142--- a/fs/open.c
62143+++ b/fs/open.c
62144@@ -32,6 +32,8 @@
62145 #include <linux/dnotify.h>
62146 #include <linux/compat.h>
62147
62148+#define CREATE_TRACE_POINTS
62149+#include <trace/events/fs.h>
62150 #include "internal.h"
62151
62152 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
62153@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
62154 error = locks_verify_truncate(inode, NULL, length);
62155 if (!error)
62156 error = security_path_truncate(path);
62157+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
62158+ error = -EACCES;
62159 if (!error)
62160 error = do_truncate(path->dentry, length, 0, NULL);
62161
62162@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
62163 error = locks_verify_truncate(inode, f.file, length);
62164 if (!error)
62165 error = security_path_truncate(&f.file->f_path);
62166+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
62167+ error = -EACCES;
62168 if (!error)
62169 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
62170 sb_end_write(inode->i_sb);
62171@@ -361,6 +367,9 @@ retry:
62172 if (__mnt_is_readonly(path.mnt))
62173 res = -EROFS;
62174
62175+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
62176+ res = -EACCES;
62177+
62178 out_path_release:
62179 path_put(&path);
62180 if (retry_estale(res, lookup_flags)) {
62181@@ -392,6 +401,8 @@ retry:
62182 if (error)
62183 goto dput_and_out;
62184
62185+ gr_log_chdir(path.dentry, path.mnt);
62186+
62187 set_fs_pwd(current->fs, &path);
62188
62189 dput_and_out:
62190@@ -421,6 +432,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
62191 goto out_putf;
62192
62193 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
62194+
62195+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
62196+ error = -EPERM;
62197+
62198+ if (!error)
62199+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
62200+
62201 if (!error)
62202 set_fs_pwd(current->fs, &f.file->f_path);
62203 out_putf:
62204@@ -450,7 +468,13 @@ retry:
62205 if (error)
62206 goto dput_and_out;
62207
62208+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
62209+ goto dput_and_out;
62210+
62211 set_fs_root(current->fs, &path);
62212+
62213+ gr_handle_chroot_chdir(&path);
62214+
62215 error = 0;
62216 dput_and_out:
62217 path_put(&path);
62218@@ -474,6 +498,16 @@ static int chmod_common(struct path *path, umode_t mode)
62219 return error;
62220 retry_deleg:
62221 mutex_lock(&inode->i_mutex);
62222+
62223+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
62224+ error = -EACCES;
62225+ goto out_unlock;
62226+ }
62227+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
62228+ error = -EACCES;
62229+ goto out_unlock;
62230+ }
62231+
62232 error = security_path_chmod(path, mode);
62233 if (error)
62234 goto out_unlock;
62235@@ -539,6 +573,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
62236 uid = make_kuid(current_user_ns(), user);
62237 gid = make_kgid(current_user_ns(), group);
62238
62239+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
62240+ return -EACCES;
62241+
62242 newattrs.ia_valid = ATTR_CTIME;
62243 if (user != (uid_t) -1) {
62244 if (!uid_valid(uid))
62245@@ -990,6 +1027,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
62246 } else {
62247 fsnotify_open(f);
62248 fd_install(fd, f);
62249+ trace_do_sys_open(tmp->name, flags, mode);
62250 }
62251 }
62252 putname(tmp);
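
The fs/open.c hooks above all share one shape: the gr_* check runs only after the usual DAC and LSM checks have succeeded, and it can only convert a success into -EACCES or -EPERM, never grant anything. A toy model of that restrict-only layering (the policy and every name here are invented for the example):

/* The second check can only tighten the first one's verdict. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int dac_check(int mode)    { return (mode & 4) ? 0 : -EACCES; }
static bool rbac_allows(int mode) { return mode != 06; } /* toy policy */

static int access_path(int mode)
{
    int err = dac_check(mode);        /* traditional UNIX check first */

    if (!err && !rbac_allows(mode))   /* RBAC may only tighten it     */
        err = -EACCES;
    return err;
}

int main(void)
{
    printf("%d %d\n", access_path(4), access_path(06));
    return 0;
}
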
62253diff --git a/fs/pipe.c b/fs/pipe.c
62254index 0e0752e..7cfdd50 100644
62255--- a/fs/pipe.c
62256+++ b/fs/pipe.c
62257@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
62258
62259 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
62260 {
62261- if (pipe->files)
62262+ if (atomic_read(&pipe->files))
62263 mutex_lock_nested(&pipe->mutex, subclass);
62264 }
62265
62266@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
62267
62268 void pipe_unlock(struct pipe_inode_info *pipe)
62269 {
62270- if (pipe->files)
62271+ if (atomic_read(&pipe->files))
62272 mutex_unlock(&pipe->mutex);
62273 }
62274 EXPORT_SYMBOL(pipe_unlock);
62275@@ -449,9 +449,9 @@ redo:
62276 }
62277 if (bufs) /* More to do? */
62278 continue;
62279- if (!pipe->writers)
62280+ if (!atomic_read(&pipe->writers))
62281 break;
62282- if (!pipe->waiting_writers) {
62283+ if (!atomic_read(&pipe->waiting_writers)) {
62284 /* syscall merging: Usually we must not sleep
62285 * if O_NONBLOCK is set, or if we got some data.
62286 * But if a writer sleeps in kernel space, then
62287@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
62288 ret = 0;
62289 __pipe_lock(pipe);
62290
62291- if (!pipe->readers) {
62292+ if (!atomic_read(&pipe->readers)) {
62293 send_sig(SIGPIPE, current, 0);
62294 ret = -EPIPE;
62295 goto out;
62296@@ -562,7 +562,7 @@ redo1:
62297 for (;;) {
62298 int bufs;
62299
62300- if (!pipe->readers) {
62301+ if (!atomic_read(&pipe->readers)) {
62302 send_sig(SIGPIPE, current, 0);
62303 if (!ret)
62304 ret = -EPIPE;
62305@@ -653,9 +653,9 @@ redo2:
62306 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
62307 do_wakeup = 0;
62308 }
62309- pipe->waiting_writers++;
62310+ atomic_inc(&pipe->waiting_writers);
62311 pipe_wait(pipe);
62312- pipe->waiting_writers--;
62313+ atomic_dec(&pipe->waiting_writers);
62314 }
62315 out:
62316 __pipe_unlock(pipe);
62317@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
62318 mask = 0;
62319 if (filp->f_mode & FMODE_READ) {
62320 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
62321- if (!pipe->writers && filp->f_version != pipe->w_counter)
62322+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
62323 mask |= POLLHUP;
62324 }
62325
62326@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
62327 * Most Unices do not set POLLERR for FIFOs but on Linux they
62328 * behave exactly like pipes for poll().
62329 */
62330- if (!pipe->readers)
62331+ if (!atomic_read(&pipe->readers))
62332 mask |= POLLERR;
62333 }
62334
62335@@ -731,7 +731,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
62336 int kill = 0;
62337
62338 spin_lock(&inode->i_lock);
62339- if (!--pipe->files) {
62340+ if (atomic_dec_and_test(&pipe->files)) {
62341 inode->i_pipe = NULL;
62342 kill = 1;
62343 }
62344@@ -748,11 +748,11 @@ pipe_release(struct inode *inode, struct file *file)
62345
62346 __pipe_lock(pipe);
62347 if (file->f_mode & FMODE_READ)
62348- pipe->readers--;
62349+ atomic_dec(&pipe->readers);
62350 if (file->f_mode & FMODE_WRITE)
62351- pipe->writers--;
62352+ atomic_dec(&pipe->writers);
62353
62354- if (pipe->readers || pipe->writers) {
62355+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
62356 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
62357 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
62358 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
62359@@ -817,7 +817,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
62360 kfree(pipe);
62361 }
62362
62363-static struct vfsmount *pipe_mnt __read_mostly;
62364+struct vfsmount *pipe_mnt __read_mostly;
62365
62366 /*
62367 * pipefs_dname() is called from d_path().
62368@@ -847,8 +847,9 @@ static struct inode * get_pipe_inode(void)
62369 goto fail_iput;
62370
62371 inode->i_pipe = pipe;
62372- pipe->files = 2;
62373- pipe->readers = pipe->writers = 1;
62374+ atomic_set(&pipe->files, 2);
62375+ atomic_set(&pipe->readers, 1);
62376+ atomic_set(&pipe->writers, 1);
62377 inode->i_fop = &pipefifo_fops;
62378
62379 /*
62380@@ -1027,17 +1028,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
62381 spin_lock(&inode->i_lock);
62382 if (inode->i_pipe) {
62383 pipe = inode->i_pipe;
62384- pipe->files++;
62385+ atomic_inc(&pipe->files);
62386 spin_unlock(&inode->i_lock);
62387 } else {
62388 spin_unlock(&inode->i_lock);
62389 pipe = alloc_pipe_info();
62390 if (!pipe)
62391 return -ENOMEM;
62392- pipe->files = 1;
62393+ atomic_set(&pipe->files, 1);
62394 spin_lock(&inode->i_lock);
62395 if (unlikely(inode->i_pipe)) {
62396- inode->i_pipe->files++;
62397+ atomic_inc(&inode->i_pipe->files);
62398 spin_unlock(&inode->i_lock);
62399 free_pipe_info(pipe);
62400 pipe = inode->i_pipe;
62401@@ -1062,10 +1063,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
62402 * opened, even when there is no process writing the FIFO.
62403 */
62404 pipe->r_counter++;
62405- if (pipe->readers++ == 0)
62406+ if (atomic_inc_return(&pipe->readers) == 1)
62407 wake_up_partner(pipe);
62408
62409- if (!is_pipe && !pipe->writers) {
62410+ if (!is_pipe && !atomic_read(&pipe->writers)) {
62411 if ((filp->f_flags & O_NONBLOCK)) {
62412 /* suppress POLLHUP until we have
62413 * seen a writer */
62414@@ -1084,14 +1085,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
62415 * errno=ENXIO when there is no process reading the FIFO.
62416 */
62417 ret = -ENXIO;
62418- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
62419+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
62420 goto err;
62421
62422 pipe->w_counter++;
62423- if (!pipe->writers++)
62424+ if (atomic_inc_return(&pipe->writers) == 1)
62425 wake_up_partner(pipe);
62426
62427- if (!is_pipe && !pipe->readers) {
62428+ if (!is_pipe && !atomic_read(&pipe->readers)) {
62429 if (wait_for_partner(pipe, &pipe->r_counter))
62430 goto err_wr;
62431 }
62432@@ -1105,11 +1106,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
62433 * the process can at least talk to itself.
62434 */
62435
62436- pipe->readers++;
62437- pipe->writers++;
62438+ atomic_inc(&pipe->readers);
62439+ atomic_inc(&pipe->writers);
62440 pipe->r_counter++;
62441 pipe->w_counter++;
62442- if (pipe->readers == 1 || pipe->writers == 1)
62443+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
62444 wake_up_partner(pipe);
62445 break;
62446
62447@@ -1123,13 +1124,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
62448 return 0;
62449
62450 err_rd:
62451- if (!--pipe->readers)
62452+ if (atomic_dec_and_test(&pipe->readers))
62453 wake_up_interruptible(&pipe->wait);
62454 ret = -ERESTARTSYS;
62455 goto err;
62456
62457 err_wr:
62458- if (!--pipe->writers)
62459+ if (atomic_dec_and_test(&pipe->writers))
62460 wake_up_interruptible(&pipe->wait);
62461 ret = -ERESTARTSYS;
62462 goto err;
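
The fs/pipe.c hunk converts the plain int files/readers/writers/waiting_writers counters to atomic_t, bringing them under PaX REFCOUNT overflow checking so a wrapped counter cannot make put_pipe_info() free a pipe that still has users. The open/release paths map onto the usual atomic idioms, sketched here with C11 atomics (the trap-on-overflow itself is not reproduced):

/* atomic_inc_return(&x) == 1 becomes fetch_add(&x,1) == 0, and
 * atomic_dec_and_test(&x) becomes fetch_sub(&x,1) == 1. */
#include <stdatomic.h>
#include <stdio.h>

struct pipe { atomic_int readers, writers; };

static void open_reader(struct pipe *p)
{
    if (atomic_fetch_add(&p->readers, 1) == 0)  /* inc_return == 1 */
        puts("first reader: wake partner");
}

static void release_reader(struct pipe *p)
{
    if (atomic_fetch_sub(&p->readers, 1) == 1)  /* dec_and_test    */
        puts("last reader gone");
}

int main(void)
{
    struct pipe p = { 0, 0 };

    open_reader(&p);
    release_reader(&p);
    return 0;
}
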
62463diff --git a/fs/posix_acl.c b/fs/posix_acl.c
62464index 8bd2135..eab9adb 100644
62465--- a/fs/posix_acl.c
62466+++ b/fs/posix_acl.c
62467@@ -19,6 +19,7 @@
62468 #include <linux/sched.h>
62469 #include <linux/posix_acl.h>
62470 #include <linux/export.h>
62471+#include <linux/grsecurity.h>
62472
62473 #include <linux/errno.h>
62474
62475@@ -183,7 +184,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
62476 }
62477 }
62478 if (mode_p)
62479- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
62480+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
62481 return not_equiv;
62482 }
62483
62484@@ -331,7 +332,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
62485 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
62486 }
62487
62488- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
62489+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
62490 return not_equiv;
62491 }
62492
62493@@ -389,6 +390,8 @@ posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
62494 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
62495 int err = -ENOMEM;
62496 if (clone) {
62497+ *mode_p &= ~gr_acl_umask();
62498+
62499 err = posix_acl_create_masq(clone, mode_p);
62500 if (err < 0) {
62501 posix_acl_release(clone);
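
In posix_acl.c the mode derived from the ACL is additionally masked with ~gr_acl_umask(), letting RBAC policy impose a mandatory umask that no ACL can widen past. A standalone sketch of the masking arithmetic, with gr_acl_umask() stood in by a fixed policy value:

/* Whatever mode the ACL machinery computes, the policy umask
 * bits are stripped afterwards. */
#include <stdio.h>

#define S_IRWXUGO 0777

static unsigned policy_umask(void) { return 0027; } /* stands in for gr_acl_umask() */

static unsigned apply_acl_mode(unsigned mode_p, unsigned acl_mode)
{
    return ((mode_p & ~S_IRWXUGO) | acl_mode) & ~policy_umask();
}

int main(void)
{
    /* a fully permissive ACL still yields 0750 group/other bits */
    printf("%04o\n", apply_acl_mode(0100666, 0777)); /* 100750 */
    return 0;
}
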
62502diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
62503index 2183fcf..3c32a98 100644
62504--- a/fs/proc/Kconfig
62505+++ b/fs/proc/Kconfig
62506@@ -30,7 +30,7 @@ config PROC_FS
62507
62508 config PROC_KCORE
62509 bool "/proc/kcore support" if !ARM
62510- depends on PROC_FS && MMU
62511+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
62512 help
62513 Provides a virtual ELF core file of the live kernel. This can
62514 be read with gdb and other ELF tools. No modifications can be
62515@@ -38,8 +38,8 @@ config PROC_KCORE
62516
62517 config PROC_VMCORE
62518 bool "/proc/vmcore support"
62519- depends on PROC_FS && CRASH_DUMP
62520- default y
62521+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
62522+ default n
62523 help
62524 Exports the dump image of crashed kernel in ELF format.
62525
62526@@ -63,8 +63,8 @@ config PROC_SYSCTL
62527 limited in memory.
62528
62529 config PROC_PAGE_MONITOR
62530- default y
62531- depends on PROC_FS && MMU
62532+ default n
62533+ depends on PROC_FS && MMU && !GRKERNSEC
62534 bool "Enable /proc page monitoring" if EXPERT
62535 help
62536 Various /proc files exist to monitor process memory utilization:
62537diff --git a/fs/proc/array.c b/fs/proc/array.c
62538index 1bd2077..2f7cfd5 100644
62539--- a/fs/proc/array.c
62540+++ b/fs/proc/array.c
62541@@ -60,6 +60,7 @@
62542 #include <linux/tty.h>
62543 #include <linux/string.h>
62544 #include <linux/mman.h>
62545+#include <linux/grsecurity.h>
62546 #include <linux/proc_fs.h>
62547 #include <linux/ioport.h>
62548 #include <linux/uaccess.h>
62549@@ -365,6 +366,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
62550 seq_putc(m, '\n');
62551 }
62552
62553+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62554+static inline void task_pax(struct seq_file *m, struct task_struct *p)
62555+{
62556+ if (p->mm)
62557+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
62558+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
62559+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
62560+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
62561+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
62562+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
62563+ else
62564+ seq_printf(m, "PaX:\t-----\n");
62565+}
62566+#endif
62567+
62568 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
62569 struct pid *pid, struct task_struct *task)
62570 {
62571@@ -383,9 +399,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
62572 task_cpus_allowed(m, task);
62573 cpuset_task_status_allowed(m, task);
62574 task_context_switch_counts(m, task);
62575+
62576+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62577+ task_pax(m, task);
62578+#endif
62579+
62580+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
62581+ task_grsec_rbac(m, task);
62582+#endif
62583+
62584 return 0;
62585 }
62586
62587+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62588+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
62589+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
62590+ _mm->pax_flags & MF_PAX_SEGMEXEC))
62591+#endif
62592+
62593 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62594 struct pid *pid, struct task_struct *task, int whole)
62595 {
62596@@ -407,6 +438,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62597 char tcomm[sizeof(task->comm)];
62598 unsigned long flags;
62599
62600+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62601+ if (current->exec_id != m->exec_id) {
62602+ gr_log_badprocpid("stat");
62603+ return 0;
62604+ }
62605+#endif
62606+
62607 state = *get_task_state(task);
62608 vsize = eip = esp = 0;
62609 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
62610@@ -478,6 +516,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62611 gtime = task_gtime(task);
62612 }
62613
62614+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62615+ if (PAX_RAND_FLAGS(mm)) {
62616+ eip = 0;
62617+ esp = 0;
62618+ wchan = 0;
62619+ }
62620+#endif
62621+#ifdef CONFIG_GRKERNSEC_HIDESYM
62622+ wchan = 0;
62623+ eip = 0;
62624+ esp = 0;
62625+#endif
62626+
62627 /* scale priority and nice values from timeslices to -20..20 */
62628 /* to make it look like a "normal" Unix priority/nice value */
62629 priority = task_prio(task);
62630@@ -514,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62631 seq_put_decimal_ull(m, ' ', vsize);
62632 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
62633 seq_put_decimal_ull(m, ' ', rsslim);
62634+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62635+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
62636+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
62637+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
62638+#else
62639 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
62640 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
62641 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
62642+#endif
62643 seq_put_decimal_ull(m, ' ', esp);
62644 seq_put_decimal_ull(m, ' ', eip);
62645 /* The signal information here is obsolete.
62646@@ -538,7 +595,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
62647 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
62648 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
62649
62650- if (mm && permitted) {
62651+ if (mm && permitted
62652+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62653+ && !PAX_RAND_FLAGS(mm)
62654+#endif
62655+ ) {
62656 seq_put_decimal_ull(m, ' ', mm->start_data);
62657 seq_put_decimal_ull(m, ' ', mm->end_data);
62658 seq_put_decimal_ull(m, ' ', mm->start_brk);
62659@@ -576,8 +637,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
62660 struct pid *pid, struct task_struct *task)
62661 {
62662 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
62663- struct mm_struct *mm = get_task_mm(task);
62664+ struct mm_struct *mm;
62665
62666+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62667+ if (current->exec_id != m->exec_id) {
62668+ gr_log_badprocpid("statm");
62669+ return 0;
62670+ }
62671+#endif
62672+ mm = get_task_mm(task);
62673 if (mm) {
62674 size = task_statm(mm, &shared, &text, &data, &resident);
62675 mmput(mm);
62676@@ -600,6 +668,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
62677 return 0;
62678 }
62679
62680+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
62681+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
62682+{
62683+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
62684+}
62685+#endif
62686+
62687 #ifdef CONFIG_CHECKPOINT_RESTORE
62688 static struct pid *
62689 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
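
The exec_id comparisons added above to the stat and statm show routines (and below to the mem and environ readers in fs/proc/base.c) close a classic hole: open /proc/<pid>/stat or mem, let the task exec a more privileged image, and keep reading through the stale descriptor. The seq_file records which exec generation opened it, and mismatched reads return nothing. A toy model of the gate:

/* A descriptor opened before an exec carries the old generation
 * number and reads back empty afterwards. */
#include <stdio.h>

struct task    { unsigned long exec_id; };
struct seqfile { unsigned long exec_id; };

static int show_stat(struct seqfile *m, struct task *cur)
{
    if (cur->exec_id != m->exec_id) {
        /* gr_log_badprocpid("stat") in the patch */
        return 0;                  /* pretend the file is empty */
    }
    puts("...stat fields...");
    return 0;
}

int main(void)
{
    struct task t = { 2 };
    struct seqfile m = { 1 };      /* opened before the exec */

    return show_stat(&m, &t);
}
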
62690diff --git a/fs/proc/base.c b/fs/proc/base.c
62691index b59a34b..68a79e8 100644
62692--- a/fs/proc/base.c
62693+++ b/fs/proc/base.c
62694@@ -113,6 +113,14 @@ struct pid_entry {
62695 union proc_op op;
62696 };
62697
62698+struct getdents_callback {
62699+ struct linux_dirent __user * current_dir;
62700+ struct linux_dirent __user * previous;
62701+ struct file * file;
62702+ int count;
62703+ int error;
62704+};
62705+
62706 #define NOD(NAME, MODE, IOP, FOP, OP) { \
62707 .name = (NAME), \
62708 .len = sizeof(NAME) - 1, \
62709@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
62710 if (!mm->arg_end)
62711 goto out_mm; /* Shh! No looking before we're done */
62712
62713+ if (gr_acl_handle_procpidmem(task))
62714+ goto out_mm;
62715+
62716 len = mm->arg_end - mm->arg_start;
62717
62718 if (len > PAGE_SIZE)
62719@@ -237,12 +248,28 @@ out:
62720 return res;
62721 }
62722
62723+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62724+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
62725+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
62726+ _mm->pax_flags & MF_PAX_SEGMEXEC))
62727+#endif
62728+
62729 static int proc_pid_auxv(struct task_struct *task, char *buffer)
62730 {
62731 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
62732 int res = PTR_ERR(mm);
62733 if (mm && !IS_ERR(mm)) {
62734 unsigned int nwords = 0;
62735+
62736+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62737+ /* allow if we're currently ptracing this task */
62738+ if (PAX_RAND_FLAGS(mm) &&
62739+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
62740+ mmput(mm);
62741+ return 0;
62742+ }
62743+#endif
62744+
62745 do {
62746 nwords += 2;
62747 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
62748@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
62749 }
62750
62751
62752-#ifdef CONFIG_KALLSYMS
62753+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62754 /*
62755 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
62756 * Returns the resolved symbol. If that fails, simply return the address.
62757@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
62758 mutex_unlock(&task->signal->cred_guard_mutex);
62759 }
62760
62761-#ifdef CONFIG_STACKTRACE
62762+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62763
62764 #define MAX_STACK_TRACE_DEPTH 64
62765
62766@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
62767 return count;
62768 }
62769
62770-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
62771+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
62772 static int proc_pid_syscall(struct task_struct *task, char *buffer)
62773 {
62774 long nr;
62775@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
62776 /************************************************************************/
62777
62778 /* permission checks */
62779-static int proc_fd_access_allowed(struct inode *inode)
62780+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
62781 {
62782 struct task_struct *task;
62783 int allowed = 0;
62784@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
62785 */
62786 task = get_proc_task(inode);
62787 if (task) {
62788- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
62789+ if (log)
62790+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
62791+ else
62792+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
62793 put_task_struct(task);
62794 }
62795 return allowed;
62796@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
62797 struct task_struct *task,
62798 int hide_pid_min)
62799 {
62800+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
62801+ return false;
62802+
62803+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62804+ rcu_read_lock();
62805+ {
62806+ const struct cred *tmpcred = current_cred();
62807+ const struct cred *cred = __task_cred(task);
62808+
62809+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
62810+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62811+ || in_group_p(grsec_proc_gid)
62812+#endif
62813+ ) {
62814+ rcu_read_unlock();
62815+ return true;
62816+ }
62817+ }
62818+ rcu_read_unlock();
62819+
62820+ if (!pid->hide_pid)
62821+ return false;
62822+#endif
62823+
62824 if (pid->hide_pid < hide_pid_min)
62825 return true;
62826 if (in_group_p(pid->pid_gid))
62827 return true;
62828+
62829 return ptrace_may_access(task, PTRACE_MODE_READ);
62830 }
62831
62832@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
62833 put_task_struct(task);
62834
62835 if (!has_perms) {
62836+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62837+ {
62838+#else
62839 if (pid->hide_pid == 2) {
62840+#endif
62841 /*
62842 * Let's make getdents(), stat(), and open()
62843 * consistent with each other. If a process
62844@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
62845 if (!task)
62846 return -ESRCH;
62847
62848+ if (gr_acl_handle_procpidmem(task)) {
62849+ put_task_struct(task);
62850+ return -EPERM;
62851+ }
62852+
62853 mm = mm_access(task, mode);
62854 put_task_struct(task);
62855
62856@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
62857
62858 file->private_data = mm;
62859
62860+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62861+ file->f_version = current->exec_id;
62862+#endif
62863+
62864 return 0;
62865 }
62866
62867@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
62868 ssize_t copied;
62869 char *page;
62870
62871+#ifdef CONFIG_GRKERNSEC
62872+ if (write)
62873+ return -EPERM;
62874+#endif
62875+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62876+ if (file->f_version != current->exec_id) {
62877+ gr_log_badprocpid("mem");
62878+ return 0;
62879+ }
62880+#endif
62881+
62882 if (!mm)
62883 return 0;
62884
62885@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
62886 goto free;
62887
62888 while (count > 0) {
62889- int this_len = min_t(int, count, PAGE_SIZE);
62890+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
62891
62892 if (write && copy_from_user(page, buf, this_len)) {
62893 copied = -EFAULT;
62894@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
62895 if (!mm)
62896 return 0;
62897
62898+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62899+ if (file->f_version != current->exec_id) {
62900+ gr_log_badprocpid("environ");
62901+ return 0;
62902+ }
62903+#endif
62904+
62905 page = (char *)__get_free_page(GFP_TEMPORARY);
62906 if (!page)
62907 return -ENOMEM;
62908@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
62909 goto free;
62910 while (count > 0) {
62911 size_t this_len, max_len;
62912- int retval;
62913+ ssize_t retval;
62914
62915 if (src >= (mm->env_end - mm->env_start))
62916 break;
62917@@ -1467,7 +1553,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
62918 int error = -EACCES;
62919
62920 /* Are we allowed to snoop on the tasks file descriptors? */
62921- if (!proc_fd_access_allowed(inode))
62922+ if (!proc_fd_access_allowed(inode, 0))
62923 goto out;
62924
62925 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
62926@@ -1511,8 +1597,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
62927 struct path path;
62928
62929 /* Are we allowed to snoop on the tasks file descriptors? */
62930- if (!proc_fd_access_allowed(inode))
62931- goto out;
62932+ /* Logging this is needed for learning mode on chromium to work
62933+ properly, but we don't want to flood the logs from 'ps', which does a
62934+ readlink on /proc/fd/2 of every task in the listing, nor do we want
62935+ 'ps' to learn CAP_SYS_PTRACE, as that isn't necessary for its basic
62936+ functionality. */
62937+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
62938+ if (!proc_fd_access_allowed(inode, 0))
62939+ goto out;
62940+ } else {
62941+ if (!proc_fd_access_allowed(inode, 1))
62942+ goto out;
62943+ }
62944
62945 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
62946 if (error)
62947@@ -1562,7 +1658,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
62948 rcu_read_lock();
62949 cred = __task_cred(task);
62950 inode->i_uid = cred->euid;
62951+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62952+ inode->i_gid = grsec_proc_gid;
62953+#else
62954 inode->i_gid = cred->egid;
62955+#endif
62956 rcu_read_unlock();
62957 }
62958 security_task_to_inode(task, inode);
62959@@ -1598,10 +1698,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
62960 return -ENOENT;
62961 }
62962 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
62963+#ifdef CONFIG_GRKERNSEC_PROC_USER
62964+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
62965+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62966+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
62967+#endif
62968 task_dumpable(task)) {
62969 cred = __task_cred(task);
62970 stat->uid = cred->euid;
62971+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62972+ stat->gid = grsec_proc_gid;
62973+#else
62974 stat->gid = cred->egid;
62975+#endif
62976 }
62977 }
62978 rcu_read_unlock();
62979@@ -1639,11 +1748,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
62980
62981 if (task) {
62982 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
62983+#ifdef CONFIG_GRKERNSEC_PROC_USER
62984+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
62985+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62986+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
62987+#endif
62988 task_dumpable(task)) {
62989 rcu_read_lock();
62990 cred = __task_cred(task);
62991 inode->i_uid = cred->euid;
62992+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
62993+ inode->i_gid = grsec_proc_gid;
62994+#else
62995 inode->i_gid = cred->egid;
62996+#endif
62997 rcu_read_unlock();
62998 } else {
62999 inode->i_uid = GLOBAL_ROOT_UID;
63000@@ -2173,6 +2291,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
63001 if (!task)
63002 goto out_no_task;
63003
63004+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63005+ goto out;
63006+
63007 /*
63008 * Yes, it does not scale. And it should not. Don't add
63009 * new entries into /proc/<tgid>/ without very good reasons.
63010@@ -2203,6 +2324,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
63011 if (!task)
63012 return -ENOENT;
63013
63014+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63015+ goto out;
63016+
63017 if (!dir_emit_dots(file, ctx))
63018 goto out;
63019
63020@@ -2592,7 +2716,7 @@ static const struct pid_entry tgid_base_stuff[] = {
63021 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
63022 #endif
63023 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
63024-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
63025+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
63026 INF("syscall", S_IRUGO, proc_pid_syscall),
63027 #endif
63028 INF("cmdline", S_IRUGO, proc_pid_cmdline),
63029@@ -2617,10 +2741,10 @@ static const struct pid_entry tgid_base_stuff[] = {
63030 #ifdef CONFIG_SECURITY
63031 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
63032 #endif
63033-#ifdef CONFIG_KALLSYMS
63034+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63035 INF("wchan", S_IRUGO, proc_pid_wchan),
63036 #endif
63037-#ifdef CONFIG_STACKTRACE
63038+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63039 ONE("stack", S_IRUGO, proc_pid_stack),
63040 #endif
63041 #ifdef CONFIG_SCHEDSTATS
63042@@ -2654,6 +2778,9 @@ static const struct pid_entry tgid_base_stuff[] = {
63043 #ifdef CONFIG_HARDWALL
63044 INF("hardwall", S_IRUGO, proc_pid_hardwall),
63045 #endif
63046+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
63047+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
63048+#endif
63049 #ifdef CONFIG_USER_NS
63050 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
63051 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
63052@@ -2784,7 +2911,14 @@ static int proc_pid_instantiate(struct inode *dir,
63053 if (!inode)
63054 goto out;
63055
63056+#ifdef CONFIG_GRKERNSEC_PROC_USER
63057+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
63058+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63059+ inode->i_gid = grsec_proc_gid;
63060+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
63061+#else
63062 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
63063+#endif
63064 inode->i_op = &proc_tgid_base_inode_operations;
63065 inode->i_fop = &proc_tgid_base_operations;
63066 inode->i_flags|=S_IMMUTABLE;
63067@@ -2822,7 +2956,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
63068 if (!task)
63069 goto out;
63070
63071+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
63072+ goto out_put_task;
63073+
63074 result = proc_pid_instantiate(dir, dentry, task, NULL);
63075+out_put_task:
63076 put_task_struct(task);
63077 out:
63078 return ERR_PTR(result);
63079@@ -2928,7 +3066,7 @@ static const struct pid_entry tid_base_stuff[] = {
63080 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
63081 #endif
63082 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
63083-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
63084+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
63085 INF("syscall", S_IRUGO, proc_pid_syscall),
63086 #endif
63087 INF("cmdline", S_IRUGO, proc_pid_cmdline),
63088@@ -2955,10 +3093,10 @@ static const struct pid_entry tid_base_stuff[] = {
63089 #ifdef CONFIG_SECURITY
63090 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
63091 #endif
63092-#ifdef CONFIG_KALLSYMS
63093+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63094 INF("wchan", S_IRUGO, proc_pid_wchan),
63095 #endif
63096-#ifdef CONFIG_STACKTRACE
63097+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
63098 ONE("stack", S_IRUGO, proc_pid_stack),
63099 #endif
63100 #ifdef CONFIG_SCHEDSTATS
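
Note on the tgid/tid table hunks above: besides gating syscall/wchan/stack entries behind GRKERNSEC options, proc_pid_instantiate() now picks the /proc/PID directory mode from the configuration: 0500 under GRKERNSEC_PROC_USER, 0550 (group grsec_proc_gid) under GRKERNSEC_PROC_USERGROUP, and the stock 0555 otherwise. A minimal userspace sketch, not part of the patch, to observe which variant a running kernel chose:

#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
    char path[64];
    struct stat st;
    long pid = (argc > 1) ? strtol(argv[1], NULL, 10) : 1;

    snprintf(path, sizeof(path), "/proc/%ld", pid);
    if (stat(path, &st) != 0) {
        perror("stat");
        return 1;
    }
    /* 0555 = stock, 0500 = PROC_USER, 0550 = PROC_USERGROUP */
    printf("%s mode: %04o\n", path, (unsigned int)(st.st_mode & 07777));
    return 0;
}
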
63101diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
63102index 82676e3..5f8518a 100644
63103--- a/fs/proc/cmdline.c
63104+++ b/fs/proc/cmdline.c
63105@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
63106
63107 static int __init proc_cmdline_init(void)
63108 {
63109+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63110+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
63111+#else
63112 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
63113+#endif
63114 return 0;
63115 }
63116 module_init(proc_cmdline_init);
63117diff --git a/fs/proc/devices.c b/fs/proc/devices.c
63118index b143471..bb105e5 100644
63119--- a/fs/proc/devices.c
63120+++ b/fs/proc/devices.c
63121@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
63122
63123 static int __init proc_devices_init(void)
63124 {
63125+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63126+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
63127+#else
63128 proc_create("devices", 0, NULL, &proc_devinfo_operations);
63129+#endif
63130 return 0;
63131 }
63132 module_init(proc_devices_init);
63133diff --git a/fs/proc/fd.c b/fs/proc/fd.c
63134index 985ea88..d118a0a 100644
63135--- a/fs/proc/fd.c
63136+++ b/fs/proc/fd.c
63137@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
63138 if (!task)
63139 return -ENOENT;
63140
63141- files = get_files_struct(task);
63142+ if (!gr_acl_handle_procpidmem(task))
63143+ files = get_files_struct(task);
63144 put_task_struct(task);
63145
63146 if (files) {
63147@@ -283,11 +284,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
63148 */
63149 int proc_fd_permission(struct inode *inode, int mask)
63150 {
63151+ struct task_struct *task;
63152 int rv = generic_permission(inode, mask);
63153- if (rv == 0)
63154- return 0;
63155+
63156 if (task_tgid(current) == proc_pid(inode))
63157 rv = 0;
63158+
63159+ task = get_proc_task(inode);
63160+ if (task == NULL)
63161+ return rv;
63162+
63163+ if (gr_acl_handle_procpidmem(task))
63164+ rv = -EACCES;
63165+
63166+ put_task_struct(task);
63167+
63168 return rv;
63169 }
63170
63171diff --git a/fs/proc/inode.c b/fs/proc/inode.c
63172index 124fc43..8afbb02 100644
63173--- a/fs/proc/inode.c
63174+++ b/fs/proc/inode.c
63175@@ -23,11 +23,17 @@
63176 #include <linux/slab.h>
63177 #include <linux/mount.h>
63178 #include <linux/magic.h>
63179+#include <linux/grsecurity.h>
63180
63181 #include <asm/uaccess.h>
63182
63183 #include "internal.h"
63184
63185+#ifdef CONFIG_PROC_SYSCTL
63186+extern const struct inode_operations proc_sys_inode_operations;
63187+extern const struct inode_operations proc_sys_dir_operations;
63188+#endif
63189+
63190 static void proc_evict_inode(struct inode *inode)
63191 {
63192 struct proc_dir_entry *de;
63193@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
63194 ns = PROC_I(inode)->ns.ns;
63195 if (ns_ops && ns)
63196 ns_ops->put(ns);
63197+
63198+#ifdef CONFIG_PROC_SYSCTL
63199+ if (inode->i_op == &proc_sys_inode_operations ||
63200+ inode->i_op == &proc_sys_dir_operations)
63201+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
63202+#endif
63203+
63204 }
63205
63206 static struct kmem_cache * proc_inode_cachep;
63207@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
63208 if (de->mode) {
63209 inode->i_mode = de->mode;
63210 inode->i_uid = de->uid;
63211+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63212+ inode->i_gid = grsec_proc_gid;
63213+#else
63214 inode->i_gid = de->gid;
63215+#endif
63216 }
63217 if (de->size)
63218 inode->i_size = de->size;
63219diff --git a/fs/proc/internal.h b/fs/proc/internal.h
63220index 651d09a..3d7f0bf 100644
63221--- a/fs/proc/internal.h
63222+++ b/fs/proc/internal.h
63223@@ -48,7 +48,7 @@ struct proc_dir_entry {
63224 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
63225 u8 namelen;
63226 char name[];
63227-};
63228+} __randomize_layout;
63229
63230 union proc_op {
63231 int (*proc_get_link)(struct dentry *, struct path *);
63232@@ -67,7 +67,7 @@ struct proc_inode {
63233 struct ctl_table *sysctl_entry;
63234 struct proc_ns ns;
63235 struct inode vfs_inode;
63236-};
63237+} __randomize_layout;
63238
63239 /*
63240 * General functions
63241@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
63242 struct pid *, struct task_struct *);
63243 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
63244 struct pid *, struct task_struct *);
63245+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
63246+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
63247+#endif
63248
63249 /*
63250 * base.c
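
The __randomize_layout annotations on proc_dir_entry and proc_inode above feed GCC's randstruct plugin, which permutes member order at build time so a leaked structure address reveals less about neighbouring fields. Plain C cannot reproduce the plugin, but it can show the quantity randomization perturbs, the member offsets; the struct below is a hypothetical stand-in for the real one:

#include <stdio.h>
#include <stddef.h>

/* hypothetical stand-in for the real proc_dir_entry */
struct pde_like {
    unsigned int   low_ino;
    unsigned short mode;
    char           name[16];
};

int main(void)
{
    /* with randstruct these offsets would differ from build to build;
     * without it they follow declaration order */
    printf("low_ino@%zu mode@%zu name@%zu\n",
           offsetof(struct pde_like, low_ino),
           offsetof(struct pde_like, mode),
           offsetof(struct pde_like, name));
    return 0;
}
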
63251diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
63252index 05029c0..7ea1987 100644
63253--- a/fs/proc/interrupts.c
63254+++ b/fs/proc/interrupts.c
63255@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
63256
63257 static int __init proc_interrupts_init(void)
63258 {
63259+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63260+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
63261+#else
63262 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
63263+#endif
63264 return 0;
63265 }
63266 module_init(proc_interrupts_init);
63267diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
63268index 5ed0e52..a1c1f2e 100644
63269--- a/fs/proc/kcore.c
63270+++ b/fs/proc/kcore.c
63271@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
63272 * the addresses in the elf_phdr on our list.
63273 */
63274 start = kc_offset_to_vaddr(*fpos - elf_buflen);
63275- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
63276+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
63277+ if (tsz > buflen)
63278 tsz = buflen;
63279-
63280+
63281 while (buflen) {
63282 struct kcore_list *m;
63283
63284@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
63285 kfree(elf_buf);
63286 } else {
63287 if (kern_addr_valid(start)) {
63288- unsigned long n;
63289+ char *elf_buf;
63290+ mm_segment_t oldfs;
63291
63292- n = copy_to_user(buffer, (char *)start, tsz);
63293- /*
63294- * We cannot distinguish between fault on source
63295- * and fault on destination. When this happens
63296- * we clear too and hope it will trigger the
63297- * EFAULT again.
63298- */
63299- if (n) {
63300- if (clear_user(buffer + tsz - n,
63301- n))
63302+ elf_buf = kmalloc(tsz, GFP_KERNEL);
63303+ if (!elf_buf)
63304+ return -ENOMEM;
63305+ oldfs = get_fs();
63306+ set_fs(KERNEL_DS);
63307+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
63308+ set_fs(oldfs);
63309+ if (copy_to_user(buffer, elf_buf, tsz)) {
63310+ kfree(elf_buf);
63311 return -EFAULT;
63312+ }
63313 }
63314+ set_fs(oldfs);
63315+ kfree(elf_buf);
63316 } else {
63317 if (clear_user(buffer, tsz))
63318 return -EFAULT;
63319@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
63320
63321 static int open_kcore(struct inode *inode, struct file *filp)
63322 {
63323+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
63324+ return -EPERM;
63325+#endif
63326 if (!capable(CAP_SYS_RAWIO))
63327 return -EPERM;
63328 if (kcore_need_update)
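
The read_kcore() rewrite above replaces the old direct copy_to_user() from a kernel virtual address with a two-stage copy through a kmalloc'd bounce buffer under set_fs(KERNEL_DS), so a fault while reading the source is cleanly distinguished from a fault while writing the user buffer (the ambiguity the deleted comment apologized for). A runnable userspace sketch of the same staging idea:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* memcpy stands in for __copy_from_user()/copy_to_user(); the point is the
 * intermediate heap buffer that separates the two fault domains */
static int bounce_copy(char *dst, const char *src, size_t len)
{
    char *tmp = malloc(len);

    if (!tmp)
        return -1;             /* -ENOMEM in the kernel version */
    memcpy(tmp, src, len);     /* stage 1: a fault here implicates the source */
    memcpy(dst, tmp, len);     /* stage 2: a fault here implicates the destination */
    free(tmp);
    return 0;
}

int main(void)
{
    char dst[16];

    if (bounce_copy(dst, "hello, kcore", 13) == 0)
        printf("copied: %s\n", dst);
    return 0;
}
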
63329diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
63330index a77d2b2..a9153f0 100644
63331--- a/fs/proc/meminfo.c
63332+++ b/fs/proc/meminfo.c
63333@@ -150,7 +150,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
63334 vmi.used >> 10,
63335 vmi.largest_chunk >> 10
63336 #ifdef CONFIG_MEMORY_FAILURE
63337- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
63338+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
63339 #endif
63340 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
63341 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
63342diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
63343index 5f9bc8a..5c35f08 100644
63344--- a/fs/proc/nommu.c
63345+++ b/fs/proc/nommu.c
63346@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
63347
63348 if (file) {
63349 seq_pad(m, ' ');
63350- seq_path(m, &file->f_path, "");
63351+ seq_path(m, &file->f_path, "\n\\");
63352 }
63353
63354 seq_putc(m, '\n');
63355diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
63356index 4677bb7..408e936 100644
63357--- a/fs/proc/proc_net.c
63358+++ b/fs/proc/proc_net.c
63359@@ -23,6 +23,7 @@
63360 #include <linux/nsproxy.h>
63361 #include <net/net_namespace.h>
63362 #include <linux/seq_file.h>
63363+#include <linux/grsecurity.h>
63364
63365 #include "internal.h"
63366
63367@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
63368 struct task_struct *task;
63369 struct nsproxy *ns;
63370 struct net *net = NULL;
63371+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63372+ const struct cred *cred = current_cred();
63373+#endif
63374+
63375+#ifdef CONFIG_GRKERNSEC_PROC_USER
63376+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
63377+ return net;
63378+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63379+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
63380+ return net;
63381+#endif
63382
63383 rcu_read_lock();
63384 task = pid_task(proc_pid(dir), PIDTYPE_PID);
63385diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
63386index 7129046..6914844 100644
63387--- a/fs/proc/proc_sysctl.c
63388+++ b/fs/proc/proc_sysctl.c
63389@@ -11,13 +11,21 @@
63390 #include <linux/namei.h>
63391 #include <linux/mm.h>
63392 #include <linux/module.h>
63393+#include <linux/nsproxy.h>
63394+#ifdef CONFIG_GRKERNSEC
63395+#include <net/net_namespace.h>
63396+#endif
63397 #include "internal.h"
63398
63399+extern int gr_handle_chroot_sysctl(const int op);
63400+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
63401+ const int op);
63402+
63403 static const struct dentry_operations proc_sys_dentry_operations;
63404 static const struct file_operations proc_sys_file_operations;
63405-static const struct inode_operations proc_sys_inode_operations;
63406+const struct inode_operations proc_sys_inode_operations;
63407 static const struct file_operations proc_sys_dir_file_operations;
63408-static const struct inode_operations proc_sys_dir_operations;
63409+const struct inode_operations proc_sys_dir_operations;
63410
63411 void proc_sys_poll_notify(struct ctl_table_poll *poll)
63412 {
63413@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
63414
63415 err = NULL;
63416 d_set_d_op(dentry, &proc_sys_dentry_operations);
63417+
63418+ gr_handle_proc_create(dentry, inode);
63419+
63420 d_add(dentry, inode);
63421
63422 out:
63423@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
63424 struct inode *inode = file_inode(filp);
63425 struct ctl_table_header *head = grab_header(inode);
63426 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
63427+ int op = write ? MAY_WRITE : MAY_READ;
63428 ssize_t error;
63429 size_t res;
63430
63431@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
63432 * and won't be until we finish.
63433 */
63434 error = -EPERM;
63435- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
63436+ if (sysctl_perm(head, table, op))
63437 goto out;
63438
63439 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
63440@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
63441 if (!table->proc_handler)
63442 goto out;
63443
63444+#ifdef CONFIG_GRKERNSEC
63445+ error = -EPERM;
63446+ if (gr_handle_chroot_sysctl(op))
63447+ goto out;
63448+ dget(filp->f_path.dentry);
63449+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
63450+ dput(filp->f_path.dentry);
63451+ goto out;
63452+ }
63453+ dput(filp->f_path.dentry);
63454+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
63455+ goto out;
63456+ if (write) {
63457+ if (current->nsproxy->net_ns != table->extra2) {
63458+ if (!capable(CAP_SYS_ADMIN))
63459+ goto out;
63460+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
63461+ goto out;
63462+ }
63463+#endif
63464+
63465 /* careful: calling conventions are nasty here */
63466 res = count;
63467 error = table->proc_handler(table, write, buf, &res, ppos);
63468@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
63469 return false;
63470 } else {
63471 d_set_d_op(child, &proc_sys_dentry_operations);
63472+
63473+ gr_handle_proc_create(child, inode);
63474+
63475 d_add(child, inode);
63476 }
63477 } else {
63478@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
63479 if ((*pos)++ < ctx->pos)
63480 return true;
63481
63482+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
63483+ return 0;
63484+
63485 if (unlikely(S_ISLNK(table->mode)))
63486 res = proc_sys_link_fill_cache(file, ctx, head, table);
63487 else
63488@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
63489 if (IS_ERR(head))
63490 return PTR_ERR(head);
63491
63492+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
63493+ return -ENOENT;
63494+
63495 generic_fillattr(inode, stat);
63496 if (table)
63497 stat->mode = (stat->mode & S_IFMT) | table->mode;
63498@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
63499 .llseek = generic_file_llseek,
63500 };
63501
63502-static const struct inode_operations proc_sys_inode_operations = {
63503+const struct inode_operations proc_sys_inode_operations = {
63504 .permission = proc_sys_permission,
63505 .setattr = proc_sys_setattr,
63506 .getattr = proc_sys_getattr,
63507 };
63508
63509-static const struct inode_operations proc_sys_dir_operations = {
63510+const struct inode_operations proc_sys_dir_operations = {
63511 .lookup = proc_sys_lookup,
63512 .permission = proc_sys_permission,
63513 .setattr = proc_sys_setattr,
63514@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
63515 static struct ctl_dir *new_dir(struct ctl_table_set *set,
63516 const char *name, int namelen)
63517 {
63518- struct ctl_table *table;
63519+ ctl_table_no_const *table;
63520 struct ctl_dir *new;
63521 struct ctl_node *node;
63522 char *new_name;
63523@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
63524 return NULL;
63525
63526 node = (struct ctl_node *)(new + 1);
63527- table = (struct ctl_table *)(node + 1);
63528+ table = (ctl_table_no_const *)(node + 1);
63529 new_name = (char *)(table + 2);
63530 memcpy(new_name, name, namelen);
63531 new_name[namelen] = '\0';
63532@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
63533 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
63534 struct ctl_table_root *link_root)
63535 {
63536- struct ctl_table *link_table, *entry, *link;
63537+ ctl_table_no_const *link_table, *link;
63538+ struct ctl_table *entry;
63539 struct ctl_table_header *links;
63540 struct ctl_node *node;
63541 char *link_name;
63542@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
63543 return NULL;
63544
63545 node = (struct ctl_node *)(links + 1);
63546- link_table = (struct ctl_table *)(node + nr_entries);
63547+ link_table = (ctl_table_no_const *)(node + nr_entries);
63548 link_name = (char *)&link_table[nr_entries + 1];
63549
63550 for (link = link_table, entry = table; entry->procname; link++, entry++) {
63551@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
63552 struct ctl_table_header ***subheader, struct ctl_table_set *set,
63553 struct ctl_table *table)
63554 {
63555- struct ctl_table *ctl_table_arg = NULL;
63556- struct ctl_table *entry, *files;
63557+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
63558+ struct ctl_table *entry;
63559 int nr_files = 0;
63560 int nr_dirs = 0;
63561 int err = -ENOMEM;
63562@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
63563 nr_files++;
63564 }
63565
63566- files = table;
63567 /* If there are mixed files and directories we need a new table */
63568 if (nr_dirs && nr_files) {
63569- struct ctl_table *new;
63570+ ctl_table_no_const *new;
63571 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
63572 GFP_KERNEL);
63573 if (!files)
63574@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
63575 /* Register everything except a directory full of subdirectories */
63576 if (nr_files || !nr_dirs) {
63577 struct ctl_table_header *header;
63578- header = __register_sysctl_table(set, path, files);
63579+ header = __register_sysctl_table(set, path, files ? files : table);
63580 if (!header) {
63581 kfree(ctl_table_arg);
63582 goto out;
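
The ctl_table_no_const substitutions in this file serve PaX's constify plugin, under which struct ctl_table objects are made read-only by default; code paths that genuinely build tables at runtime, as new_dir() and new_links() do, must use a non-const alias. A rough standalone illustration of the convention (the real mechanism is a compiler plugin, not plain C):

#include <stdio.h>
#include <string.h>

struct ctl_entry { char name[16]; int value; };

/* under the plugin the bare struct type would be treated as const by
 * default; the _no_const alias opts a declaration out of that treatment */
typedef struct ctl_entry ctl_entry_no_const;

static const struct ctl_entry fixed = { "kernel", 1 };  /* can sit in .rodata */

int main(void)
{
    ctl_entry_no_const dynamic;           /* built and filled at runtime */

    memcpy(&dynamic, &fixed, sizeof(dynamic));
    dynamic.value = 2;
    printf("%s = %d\n", dynamic.name, dynamic.value);
    return 0;
}
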
63583diff --git a/fs/proc/root.c b/fs/proc/root.c
63584index 87dbcbe..55e1b4d 100644
63585--- a/fs/proc/root.c
63586+++ b/fs/proc/root.c
63587@@ -186,7 +186,15 @@ void __init proc_root_init(void)
63588 #ifdef CONFIG_PROC_DEVICETREE
63589 proc_device_tree_init();
63590 #endif
63591+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63592+#ifdef CONFIG_GRKERNSEC_PROC_USER
63593+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
63594+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63595+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
63596+#endif
63597+#else
63598 proc_mkdir("bus", NULL);
63599+#endif
63600 proc_sys_init();
63601 }
63602
63603diff --git a/fs/proc/stat.c b/fs/proc/stat.c
63604index 1cf86c0..0ee1ca5 100644
63605--- a/fs/proc/stat.c
63606+++ b/fs/proc/stat.c
63607@@ -11,6 +11,7 @@
63608 #include <linux/irqnr.h>
63609 #include <asm/cputime.h>
63610 #include <linux/tick.h>
63611+#include <linux/grsecurity.h>
63612
63613 #ifndef arch_irq_stat_cpu
63614 #define arch_irq_stat_cpu(cpu) 0
63615@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
63616 u64 sum_softirq = 0;
63617 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
63618 struct timespec boottime;
63619+ int unrestricted = 1;
63620+
63621+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63622+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63623+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
63624+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
63625+ && !in_group_p(grsec_proc_gid)
63626+#endif
63627+ )
63628+ unrestricted = 0;
63629+#endif
63630+#endif
63631
63632 user = nice = system = idle = iowait =
63633 irq = softirq = steal = 0;
63634@@ -94,6 +107,7 @@ static int show_stat(struct seq_file *p, void *v)
63635 getboottime(&boottime);
63636 jif = boottime.tv_sec;
63637
63638+ if (unrestricted) {
63639 for_each_possible_cpu(i) {
63640 user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
63641 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
63642@@ -116,6 +130,7 @@ static int show_stat(struct seq_file *p, void *v)
63643 }
63644 }
63645 sum += arch_irq_stat();
63646+ }
63647
63648 seq_puts(p, "cpu ");
63649 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
63650@@ -131,6 +146,7 @@ static int show_stat(struct seq_file *p, void *v)
63651 seq_putc(p, '\n');
63652
63653 for_each_online_cpu(i) {
63654+ if (unrestricted) {
63655 /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
63656 user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
63657 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
63658@@ -142,6 +158,7 @@ static int show_stat(struct seq_file *p, void *v)
63659 steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
63660 guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
63661 guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
63662+ }
63663 seq_printf(p, "cpu%d", i);
63664 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
63665 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
63666@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
63667
63668 /* sum again ? it could be updated? */
63669 for_each_irq_nr(j)
63670- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
63671+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
63672
63673 seq_printf(p,
63674 "\nctxt %llu\n"
63675@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
63676 "processes %lu\n"
63677 "procs_running %lu\n"
63678 "procs_blocked %lu\n",
63679- nr_context_switches(),
63680+ unrestricted ? nr_context_switches() : 0ULL,
63681 (unsigned long)jif,
63682- total_forks,
63683- nr_running(),
63684- nr_iowait());
63685+ unrestricted ? total_forks : 0UL,
63686+ unrestricted ? nr_running() : 0UL,
63687+ unrestricted ? nr_iowait() : 0UL);
63688
63689 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
63690
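
The show_stat() changes compute per-CPU and global counters only when `unrestricted` is set and print zeros otherwise, so unprivileged readers of /proc/stat learn nothing while the file keeps its format for parsers. A userspace sketch of that degrade-to-zeros pattern; is_privileged() is a stand-in for the uid/grsec_proc_gid test in the hunk:

#include <stdio.h>
#include <unistd.h>

static int is_privileged(void)
{
    /* the real test also accepts members of grsec_proc_gid */
    return geteuid() == 0;
}

int main(void)
{
    int unrestricted = is_privileged();
    unsigned long long ctxt = unrestricted ? 123456789ULL : 0ULL;

    printf("ctxt %llu\n", ctxt);          /* zeros for unprivileged readers */
    return 0;
}
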
63691diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
63692index fb52b54..5fc7c14 100644
63693--- a/fs/proc/task_mmu.c
63694+++ b/fs/proc/task_mmu.c
63695@@ -12,12 +12,19 @@
63696 #include <linux/swap.h>
63697 #include <linux/swapops.h>
63698 #include <linux/mmu_notifier.h>
63699+#include <linux/grsecurity.h>
63700
63701 #include <asm/elf.h>
63702 #include <asm/uaccess.h>
63703 #include <asm/tlbflush.h>
63704 #include "internal.h"
63705
63706+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63707+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
63708+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
63709+ _mm->pax_flags & MF_PAX_SEGMEXEC))
63710+#endif
63711+
63712 void task_mem(struct seq_file *m, struct mm_struct *mm)
63713 {
63714 unsigned long data, text, lib, swap;
63715@@ -53,8 +60,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63716 "VmExe:\t%8lu kB\n"
63717 "VmLib:\t%8lu kB\n"
63718 "VmPTE:\t%8lu kB\n"
63719- "VmSwap:\t%8lu kB\n",
63720- hiwater_vm << (PAGE_SHIFT-10),
63721+ "VmSwap:\t%8lu kB\n"
63722+
63723+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63724+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
63725+#endif
63726+
63727+ ,hiwater_vm << (PAGE_SHIFT-10),
63728 total_vm << (PAGE_SHIFT-10),
63729 mm->locked_vm << (PAGE_SHIFT-10),
63730 mm->pinned_vm << (PAGE_SHIFT-10),
63731@@ -64,7 +76,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63732 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
63733 (PTRS_PER_PTE * sizeof(pte_t) *
63734 atomic_long_read(&mm->nr_ptes)) >> 10,
63735- swap << (PAGE_SHIFT-10));
63736+ swap << (PAGE_SHIFT-10)
63737+
63738+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63739+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63740+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
63741+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
63742+#else
63743+ , mm->context.user_cs_base
63744+ , mm->context.user_cs_limit
63745+#endif
63746+#endif
63747+
63748+ );
63749 }
63750
63751 unsigned long task_vsize(struct mm_struct *mm)
63752@@ -270,13 +294,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63753 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
63754 }
63755
63756- /* We don't show the stack guard page in /proc/maps */
63757+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63758+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
63759+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
63760+#else
63761 start = vma->vm_start;
63762- if (stack_guard_page_start(vma, start))
63763- start += PAGE_SIZE;
63764 end = vma->vm_end;
63765- if (stack_guard_page_end(vma, end))
63766- end -= PAGE_SIZE;
63767+#endif
63768
63769 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
63770 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
63771@@ -286,7 +310,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63772 flags & VM_WRITE ? 'w' : '-',
63773 flags & VM_EXEC ? 'x' : '-',
63774 flags & VM_MAYSHARE ? 's' : 'p',
63775+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63776+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
63777+#else
63778 pgoff,
63779+#endif
63780 MAJOR(dev), MINOR(dev), ino);
63781
63782 /*
63783@@ -295,7 +323,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63784 */
63785 if (file) {
63786 seq_pad(m, ' ');
63787- seq_path(m, &file->f_path, "\n");
63788+ seq_path(m, &file->f_path, "\n\\");
63789 goto done;
63790 }
63791
63792@@ -321,8 +349,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
63793 * Thread stack in /proc/PID/task/TID/maps or
63794 * the main process stack.
63795 */
63796- if (!is_pid || (vma->vm_start <= mm->start_stack &&
63797- vma->vm_end >= mm->start_stack)) {
63798+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
63799+ (vma->vm_start <= mm->start_stack &&
63800+ vma->vm_end >= mm->start_stack)) {
63801 name = "[stack]";
63802 } else {
63803 /* Thread stack in /proc/PID/maps */
63804@@ -346,6 +375,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
63805 struct proc_maps_private *priv = m->private;
63806 struct task_struct *task = priv->task;
63807
63808+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63809+ if (current->exec_id != m->exec_id) {
63810+ gr_log_badprocpid("maps");
63811+ return 0;
63812+ }
63813+#endif
63814+
63815 show_map_vma(m, vma, is_pid);
63816
63817 if (m->count < m->size) /* vma is copied successfully */
63818@@ -586,12 +622,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
63819 .private = &mss,
63820 };
63821
63822+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63823+ if (current->exec_id != m->exec_id) {
63824+ gr_log_badprocpid("smaps");
63825+ return 0;
63826+ }
63827+#endif
63828 memset(&mss, 0, sizeof mss);
63829- mss.vma = vma;
63830- /* mmap_sem is held in m_start */
63831- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
63832- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
63833-
63834+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63835+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
63836+#endif
63837+ mss.vma = vma;
63838+ /* mmap_sem is held in m_start */
63839+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
63840+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
63841+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63842+ }
63843+#endif
63844 show_map_vma(m, vma, is_pid);
63845
63846 seq_printf(m,
63847@@ -609,7 +656,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
63848 "KernelPageSize: %8lu kB\n"
63849 "MMUPageSize: %8lu kB\n"
63850 "Locked: %8lu kB\n",
63851+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63852+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
63853+#else
63854 (vma->vm_end - vma->vm_start) >> 10,
63855+#endif
63856 mss.resident >> 10,
63857 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
63858 mss.shared_clean >> 10,
63859@@ -1387,6 +1438,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
63860 char buffer[64];
63861 int nid;
63862
63863+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63864+ if (current->exec_id != m->exec_id) {
63865+ gr_log_badprocpid("numa_maps");
63866+ return 0;
63867+ }
63868+#endif
63869+
63870 if (!mm)
63871 return 0;
63872
63873@@ -1404,11 +1462,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
63874 mpol_to_str(buffer, sizeof(buffer), pol);
63875 mpol_cond_put(pol);
63876
63877+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63878+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
63879+#else
63880 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
63881+#endif
63882
63883 if (file) {
63884 seq_printf(m, " file=");
63885- seq_path(m, &file->f_path, "\n\t= ");
63886+ seq_path(m, &file->f_path, "\n\t\\= ");
63887 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
63888 seq_printf(m, " heap");
63889 } else {
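
The task_mmu.c hunks repeatedly apply one rule: when CONFIG_GRKERNSEC_PROC_MEMMAP is set and the target mm is randomized (PAX_RAND_FLAGS()) and not the caller's own, report 0 instead of real addresses, sizes, and offsets, so maps/smaps/numa_maps cannot be used to defeat ASLR. A compact sketch of the masking rule; rand_flags() is a stand-in for the real macro:

#include <stdio.h>

/* stand-in for PAX_RAND_FLAGS(): true when the target mm is randomized
 * and is not the caller's own */
static int rand_flags(int is_own_mm)
{
    return !is_own_mm;
}

static void show_vma(unsigned long start, unsigned long end, int is_own_mm)
{
    unsigned long s = rand_flags(is_own_mm) ? 0UL : start;
    unsigned long e = rand_flags(is_own_mm) ? 0UL : end;

    printf("%08lx-%08lx\n", s, e);
}

int main(void)
{
    show_vma(0x400000UL, 0x401000UL, 1);  /* own mm: real range */
    show_vma(0x400000UL, 0x401000UL, 0);  /* foreign randomized mm: zeros */
    return 0;
}
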
63890diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
63891index 678455d..ebd3245 100644
63892--- a/fs/proc/task_nommu.c
63893+++ b/fs/proc/task_nommu.c
63894@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
63895 else
63896 bytes += kobjsize(mm);
63897
63898- if (current->fs && current->fs->users > 1)
63899+ if (current->fs && atomic_read(&current->fs->users) > 1)
63900 sbytes += kobjsize(current->fs);
63901 else
63902 bytes += kobjsize(current->fs);
63903@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
63904
63905 if (file) {
63906 seq_pad(m, ' ');
63907- seq_path(m, &file->f_path, "");
63908+ seq_path(m, &file->f_path, "\n\\");
63909 } else if (mm) {
63910 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
63911
63912diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
63913index 9100d69..51cd925 100644
63914--- a/fs/proc/vmcore.c
63915+++ b/fs/proc/vmcore.c
63916@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
63917 nr_bytes = count;
63918
63919 /* If pfn is not ram, return zeros for sparse dump files */
63920- if (pfn_is_ram(pfn) == 0)
63921- memset(buf, 0, nr_bytes);
63922- else {
63923+ if (pfn_is_ram(pfn) == 0) {
63924+ if (userbuf) {
63925+ if (clear_user((char __force_user *)buf, nr_bytes))
63926+ return -EFAULT;
63927+ } else
63928+ memset(buf, 0, nr_bytes);
63929+ } else {
63930 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
63931 offset, userbuf);
63932 if (tmp < 0)
63933@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
63934 static int copy_to(void *target, void *src, size_t size, int userbuf)
63935 {
63936 if (userbuf) {
63937- if (copy_to_user((char __user *) target, src, size))
63938+ if (copy_to_user((char __force_user *) target, src, size))
63939 return -EFAULT;
63940 } else {
63941 memcpy(target, src, size);
63942@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
63943 if (*fpos < m->offset + m->size) {
63944 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
63945 start = m->paddr + *fpos - m->offset;
63946- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
63947+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
63948 if (tmp < 0)
63949 return tmp;
63950 buflen -= tsz;
63951@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
63952 static ssize_t read_vmcore(struct file *file, char __user *buffer,
63953 size_t buflen, loff_t *fpos)
63954 {
63955- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
63956+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
63957 }
63958
63959 /*
63960diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
63961index b00fcc9..e0c6381 100644
63962--- a/fs/qnx6/qnx6.h
63963+++ b/fs/qnx6/qnx6.h
63964@@ -74,7 +74,7 @@ enum {
63965 BYTESEX_BE,
63966 };
63967
63968-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
63969+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
63970 {
63971 if (sbi->s_bytesex == BYTESEX_LE)
63972 return le64_to_cpu((__force __le64)n);
63973@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
63974 return (__force __fs64)cpu_to_be64(n);
63975 }
63976
63977-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
63978+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
63979 {
63980 if (sbi->s_bytesex == BYTESEX_LE)
63981 return le32_to_cpu((__force __le32)n);
63982diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
63983index 72d2917..c917c12 100644
63984--- a/fs/quota/netlink.c
63985+++ b/fs/quota/netlink.c
63986@@ -45,7 +45,7 @@ static struct genl_family quota_genl_family = {
63987 void quota_send_warning(struct kqid qid, dev_t dev,
63988 const char warntype)
63989 {
63990- static atomic_t seq;
63991+ static atomic_unchecked_t seq;
63992 struct sk_buff *skb;
63993 void *msg_head;
63994 int ret;
63995@@ -61,7 +61,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
63996 "VFS: Not enough memory to send quota warning.\n");
63997 return;
63998 }
63999- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
64000+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
64001 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
64002 if (!msg_head) {
64003 printk(KERN_ERR
64004diff --git a/fs/read_write.c b/fs/read_write.c
64005index cfa18df..c110979 100644
64006--- a/fs/read_write.c
64007+++ b/fs/read_write.c
64008@@ -438,7 +438,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
64009
64010 old_fs = get_fs();
64011 set_fs(get_ds());
64012- p = (__force const char __user *)buf;
64013+ p = (const char __force_user *)buf;
64014 if (count > MAX_RW_COUNT)
64015 count = MAX_RW_COUNT;
64016 if (file->f_op->write)
64017diff --git a/fs/readdir.c b/fs/readdir.c
64018index 5b53d99..a6c3049 100644
64019--- a/fs/readdir.c
64020+++ b/fs/readdir.c
64021@@ -17,6 +17,7 @@
64022 #include <linux/security.h>
64023 #include <linux/syscalls.h>
64024 #include <linux/unistd.h>
64025+#include <linux/namei.h>
64026
64027 #include <asm/uaccess.h>
64028
64029@@ -69,6 +70,7 @@ struct old_linux_dirent {
64030 struct readdir_callback {
64031 struct dir_context ctx;
64032 struct old_linux_dirent __user * dirent;
64033+ struct file * file;
64034 int result;
64035 };
64036
64037@@ -86,6 +88,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
64038 buf->result = -EOVERFLOW;
64039 return -EOVERFLOW;
64040 }
64041+
64042+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
64043+ return 0;
64044+
64045 buf->result++;
64046 dirent = buf->dirent;
64047 if (!access_ok(VERIFY_WRITE, dirent,
64048@@ -117,6 +123,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
64049 if (!f.file)
64050 return -EBADF;
64051
64052+ buf.file = f.file;
64053 error = iterate_dir(f.file, &buf.ctx);
64054 if (buf.result)
64055 error = buf.result;
64056@@ -142,6 +149,7 @@ struct getdents_callback {
64057 struct dir_context ctx;
64058 struct linux_dirent __user * current_dir;
64059 struct linux_dirent __user * previous;
64060+ struct file * file;
64061 int count;
64062 int error;
64063 };
64064@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
64065 buf->error = -EOVERFLOW;
64066 return -EOVERFLOW;
64067 }
64068+
64069+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
64070+ return 0;
64071+
64072 dirent = buf->previous;
64073 if (dirent) {
64074 if (__put_user(offset, &dirent->d_off))
64075@@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
64076 if (!f.file)
64077 return -EBADF;
64078
64079+ buf.file = f.file;
64080 error = iterate_dir(f.file, &buf.ctx);
64081 if (error >= 0)
64082 error = buf.error;
64083@@ -226,6 +239,7 @@ struct getdents_callback64 {
64084 struct dir_context ctx;
64085 struct linux_dirent64 __user * current_dir;
64086 struct linux_dirent64 __user * previous;
64087+ struct file *file;
64088 int count;
64089 int error;
64090 };
64091@@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
64092 buf->error = -EINVAL; /* only used if we fail.. */
64093 if (reclen > buf->count)
64094 return -EINVAL;
64095+
64096+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
64097+ return 0;
64098+
64099 dirent = buf->previous;
64100 if (dirent) {
64101 if (__put_user(offset, &dirent->d_off))
64102@@ -288,6 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
64103 if (!f.file)
64104 return -EBADF;
64105
64106+ buf.file = f.file;
64107 error = iterate_dir(f.file, &buf.ctx);
64108 if (error >= 0)
64109 error = buf.error;
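
All three readdir callbacks above gain the same veto: when gr_acl_handle_filldir() refuses an entry, the fill function returns 0, which skips that entry but lets iteration continue, so hidden files simply never appear in getdents output. The same skip-silently shape in a runnable userspace loop; allowed() is a hypothetical stand-in for the ACL check:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

static int allowed(const char *name)
{
    return strcmp(name, "secret") != 0;   /* hypothetical ACL stand-in */
}

int main(void)
{
    DIR *d = opendir(".");
    struct dirent *e;

    if (!d)
        return 1;
    while ((e = readdir(d)) != NULL) {
        if (!allowed(e->d_name))
            continue;                     /* skip silently, keep iterating */
        puts(e->d_name);
    }
    closedir(d);
    return 0;
}
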
64110diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
64111index 2b7882b..1c5ef48 100644
64112--- a/fs/reiserfs/do_balan.c
64113+++ b/fs/reiserfs/do_balan.c
64114@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
64115 return;
64116 }
64117
64118- atomic_inc(&(fs_generation(tb->tb_sb)));
64119+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
64120 do_balance_starts(tb);
64121
64122 /* balance leaf returns 0 except if combining L R and S into
64123diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
64124index ee382ef..f4eb6eb5 100644
64125--- a/fs/reiserfs/item_ops.c
64126+++ b/fs/reiserfs/item_ops.c
64127@@ -725,18 +725,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
64128 }
64129
64130 static struct item_operations errcatch_ops = {
64131- errcatch_bytes_number,
64132- errcatch_decrement_key,
64133- errcatch_is_left_mergeable,
64134- errcatch_print_item,
64135- errcatch_check_item,
64136+ .bytes_number = errcatch_bytes_number,
64137+ .decrement_key = errcatch_decrement_key,
64138+ .is_left_mergeable = errcatch_is_left_mergeable,
64139+ .print_item = errcatch_print_item,
64140+ .check_item = errcatch_check_item,
64141
64142- errcatch_create_vi,
64143- errcatch_check_left,
64144- errcatch_check_right,
64145- errcatch_part_size,
64146- errcatch_unit_num,
64147- errcatch_print_vi
64148+ .create_vi = errcatch_create_vi,
64149+ .check_left = errcatch_check_left,
64150+ .check_right = errcatch_check_right,
64151+ .part_size = errcatch_part_size,
64152+ .unit_num = errcatch_unit_num,
64153+ .print_vi = errcatch_print_vi
64154 };
64155
64156 //////////////////////////////////////////////////////////////////////////////
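
The errcatch_ops hunk converts a positional struct initializer to C99 designated initializers, which bind by field name and therefore stay correct if struct item_operations gains or reorders members, and which make it obvious that no slot was skipped. A minimal standalone example of the difference:

#include <stdio.h>

struct item_ops_like {
    int  (*bytes_number)(int);
    void (*print_item)(void);
};

static int  my_bytes(int n) { return n * 2; }
static void my_print(void)  { puts("item"); }

/* designators bind by name, so this initializer survives reordering or
 * insertion of struct members -- the point of the errcatch_ops conversion */
static struct item_ops_like ops = {
    .bytes_number = my_bytes,
    .print_item   = my_print,
};

int main(void)
{
    printf("%d\n", ops.bytes_number(21));
    ops.print_item();
    return 0;
}

The conversion changes no behaviour; it only removes the fragile dependence on member order.
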
64157diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
64158index a958444..42b2323 100644
64159--- a/fs/reiserfs/procfs.c
64160+++ b/fs/reiserfs/procfs.c
64161@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
64162 "SMALL_TAILS " : "NO_TAILS ",
64163 replay_only(sb) ? "REPLAY_ONLY " : "",
64164 convert_reiserfs(sb) ? "CONV " : "",
64165- atomic_read(&r->s_generation_counter),
64166+ atomic_read_unchecked(&r->s_generation_counter),
64167 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
64168 SF(s_do_balance), SF(s_unneeded_left_neighbor),
64169 SF(s_good_search_by_key_reada), SF(s_bmaps),
64170diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
64171index f8adaee..0eeeeca 100644
64172--- a/fs/reiserfs/reiserfs.h
64173+++ b/fs/reiserfs/reiserfs.h
64174@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
64175 /* Comment? -Hans */
64176 wait_queue_head_t s_wait;
64177 /* To be obsoleted soon by per buffer seals.. -Hans */
64178- atomic_t s_generation_counter; // increased by one every time the
64179+ atomic_unchecked_t s_generation_counter; // increased by one every time the
64180 // tree gets re-balanced
64181 unsigned long s_properties; /* File system properties. Currently holds
64182 on-disk FS format */
64183@@ -1982,7 +1982,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
64184 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
64185
64186 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
64187-#define get_generation(s) atomic_read (&fs_generation(s))
64188+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
64189 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
64190 #define __fs_changed(gen,s) (gen != get_generation (s))
64191 #define fs_changed(gen,s) \
64192diff --git a/fs/select.c b/fs/select.c
64193index 467bb1c..cf9d65a 100644
64194--- a/fs/select.c
64195+++ b/fs/select.c
64196@@ -20,6 +20,7 @@
64197 #include <linux/export.h>
64198 #include <linux/slab.h>
64199 #include <linux/poll.h>
64200+#include <linux/security.h>
64201 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
64202 #include <linux/file.h>
64203 #include <linux/fdtable.h>
64204@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
64205 struct poll_list *walk = head;
64206 unsigned long todo = nfds;
64207
64208+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
64209 if (nfds > rlimit(RLIMIT_NOFILE))
64210 return -EINVAL;
64211
64212diff --git a/fs/seq_file.c b/fs/seq_file.c
64213index 1d641bb..e600623 100644
64214--- a/fs/seq_file.c
64215+++ b/fs/seq_file.c
64216@@ -10,6 +10,7 @@
64217 #include <linux/seq_file.h>
64218 #include <linux/slab.h>
64219 #include <linux/cred.h>
64220+#include <linux/sched.h>
64221
64222 #include <asm/uaccess.h>
64223 #include <asm/page.h>
64224@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
64225 #ifdef CONFIG_USER_NS
64226 p->user_ns = file->f_cred->user_ns;
64227 #endif
64228+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64229+ p->exec_id = current->exec_id;
64230+#endif
64231
64232 /*
64233 * Wrappers around seq_open(e.g. swaps_open) need to be
64234@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
64235 return 0;
64236 }
64237 if (!m->buf) {
64238- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
64239+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
64240 if (!m->buf)
64241 return -ENOMEM;
64242 }
64243@@ -137,7 +141,7 @@ Eoverflow:
64244 m->op->stop(m, p);
64245 kfree(m->buf);
64246 m->count = 0;
64247- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
64248+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
64249 return !m->buf ? -ENOMEM : -EAGAIN;
64250 }
64251
64252@@ -153,7 +157,7 @@ Eoverflow:
64253 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
64254 {
64255 struct seq_file *m = file->private_data;
64256- size_t copied = 0;
64257+ ssize_t copied = 0;
64258 loff_t pos;
64259 size_t n;
64260 void *p;
64261@@ -192,7 +196,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
64262
64263 /* grab buffer if we didn't have one */
64264 if (!m->buf) {
64265- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
64266+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
64267 if (!m->buf)
64268 goto Enomem;
64269 }
64270@@ -234,7 +238,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
64271 m->op->stop(m, p);
64272 kfree(m->buf);
64273 m->count = 0;
64274- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
64275+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
64276 if (!m->buf)
64277 goto Enomem;
64278 m->version = 0;
64279@@ -584,7 +588,7 @@ static void single_stop(struct seq_file *p, void *v)
64280 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
64281 void *data)
64282 {
64283- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
64284+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
64285 int res = -ENOMEM;
64286
64287 if (op) {
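
The seq_read()/traverse() hunks keep seq_file's existing grow-and-retry buffer strategy (free the buffer, double m->size, retry) while tagging the allocations GFP_USERCOPY for the PaX usercopy checker. The retry loop itself, sketched standalone with snprintf as the record producer:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t size = 8;            /* stands in for PAGE_SIZE */
    char *buf;
    int n;

    for (;;) {
        buf = malloc(size);
        if (!buf)
            return 1;           /* -ENOMEM in the kernel path */
        n = snprintf(buf, size, "a fairly long record: %d\n", 123456);
        if (n < 0) {
            free(buf);
            return 1;
        }
        if ((size_t)n < size)
            break;              /* fit, including the terminator */
        free(buf);              /* overflow: double and retry */
        size <<= 1;
    }
    fputs(buf, stdout);
    free(buf);
    return 0;
}
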
64288diff --git a/fs/splice.c b/fs/splice.c
64289index 12028fa..a6f2619 100644
64290--- a/fs/splice.c
64291+++ b/fs/splice.c
64292@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
64293 pipe_lock(pipe);
64294
64295 for (;;) {
64296- if (!pipe->readers) {
64297+ if (!atomic_read(&pipe->readers)) {
64298 send_sig(SIGPIPE, current, 0);
64299 if (!ret)
64300 ret = -EPIPE;
64301@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
64302 page_nr++;
64303 ret += buf->len;
64304
64305- if (pipe->files)
64306+ if (atomic_read(&pipe->files))
64307 do_wakeup = 1;
64308
64309 if (!--spd->nr_pages)
64310@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
64311 do_wakeup = 0;
64312 }
64313
64314- pipe->waiting_writers++;
64315+ atomic_inc(&pipe->waiting_writers);
64316 pipe_wait(pipe);
64317- pipe->waiting_writers--;
64318+ atomic_dec(&pipe->waiting_writers);
64319 }
64320
64321 pipe_unlock(pipe);
64322@@ -583,7 +583,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
64323 old_fs = get_fs();
64324 set_fs(get_ds());
64325 /* The cast to a user pointer is valid due to the set_fs() */
64326- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
64327+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
64328 set_fs(old_fs);
64329
64330 return res;
64331@@ -598,7 +598,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
64332 old_fs = get_fs();
64333 set_fs(get_ds());
64334 /* The cast to a user pointer is valid due to the set_fs() */
64335- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
64336+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
64337 set_fs(old_fs);
64338
64339 return res;
64340@@ -651,7 +651,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
64341 goto err;
64342
64343 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
64344- vec[i].iov_base = (void __user *) page_address(page);
64345+ vec[i].iov_base = (void __force_user *) page_address(page);
64346 vec[i].iov_len = this_len;
64347 spd.pages[i] = page;
64348 spd.nr_pages++;
64349@@ -847,7 +847,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
64350 ops->release(pipe, buf);
64351 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
64352 pipe->nrbufs--;
64353- if (pipe->files)
64354+ if (atomic_read(&pipe->files))
64355 sd->need_wakeup = true;
64356 }
64357
64358@@ -872,10 +872,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
64359 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
64360 {
64361 while (!pipe->nrbufs) {
64362- if (!pipe->writers)
64363+ if (!atomic_read(&pipe->writers))
64364 return 0;
64365
64366- if (!pipe->waiting_writers && sd->num_spliced)
64367+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
64368 return 0;
64369
64370 if (sd->flags & SPLICE_F_NONBLOCK)
64371@@ -1197,7 +1197,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
64372 * out of the pipe right after the splice_to_pipe(). So set
64373 * PIPE_READERS appropriately.
64374 */
64375- pipe->readers = 1;
64376+ atomic_set(&pipe->readers, 1);
64377
64378 current->splice_pipe = pipe;
64379 }
64380@@ -1493,6 +1493,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
64381
64382 partial[buffers].offset = off;
64383 partial[buffers].len = plen;
64384+ partial[buffers].private = 0;
64385
64386 off = 0;
64387 len -= plen;
64388@@ -1795,9 +1796,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
64389 ret = -ERESTARTSYS;
64390 break;
64391 }
64392- if (!pipe->writers)
64393+ if (!atomic_read(&pipe->writers))
64394 break;
64395- if (!pipe->waiting_writers) {
64396+ if (!atomic_read(&pipe->waiting_writers)) {
64397 if (flags & SPLICE_F_NONBLOCK) {
64398 ret = -EAGAIN;
64399 break;
64400@@ -1829,7 +1830,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
64401 pipe_lock(pipe);
64402
64403 while (pipe->nrbufs >= pipe->buffers) {
64404- if (!pipe->readers) {
64405+ if (!atomic_read(&pipe->readers)) {
64406 send_sig(SIGPIPE, current, 0);
64407 ret = -EPIPE;
64408 break;
64409@@ -1842,9 +1843,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
64410 ret = -ERESTARTSYS;
64411 break;
64412 }
64413- pipe->waiting_writers++;
64414+ atomic_inc(&pipe->waiting_writers);
64415 pipe_wait(pipe);
64416- pipe->waiting_writers--;
64417+ atomic_dec(&pipe->waiting_writers);
64418 }
64419
64420 pipe_unlock(pipe);
64421@@ -1880,14 +1881,14 @@ retry:
64422 pipe_double_lock(ipipe, opipe);
64423
64424 do {
64425- if (!opipe->readers) {
64426+ if (!atomic_read(&opipe->readers)) {
64427 send_sig(SIGPIPE, current, 0);
64428 if (!ret)
64429 ret = -EPIPE;
64430 break;
64431 }
64432
64433- if (!ipipe->nrbufs && !ipipe->writers)
64434+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
64435 break;
64436
64437 /*
64438@@ -1984,7 +1985,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
64439 pipe_double_lock(ipipe, opipe);
64440
64441 do {
64442- if (!opipe->readers) {
64443+ if (!atomic_read(&opipe->readers)) {
64444 send_sig(SIGPIPE, current, 0);
64445 if (!ret)
64446 ret = -EPIPE;
64447@@ -2029,7 +2030,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
64448 * return EAGAIN if we have the potential of some data in the
64449 * future, otherwise just return 0
64450 */
64451- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
64452+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
64453 ret = -EAGAIN;
64454
64455 pipe_unlock(ipipe);
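
Throughout the splice hunks, plain integer pipe bookkeeping (readers, writers, waiting_writers, files) becomes atomic_t manipulated through atomic_read/atomic_inc/atomic_dec, closing counter races the hardened kernel treats as exploitable. The same pattern in portable C11, with stdatomic.h standing in for the kernel atomic API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int waiting_writers;

static void writer_wait(void)
{
    atomic_fetch_add(&waiting_writers, 1);   /* was: pipe->waiting_writers++ */
    /* ... would block in pipe_wait() here ... */
    atomic_fetch_sub(&waiting_writers, 1);   /* was: pipe->waiting_writers-- */
}

int main(void)
{
    writer_wait();
    printf("waiting_writers now %d\n", atomic_load(&waiting_writers));
    return 0;
}
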
64456diff --git a/fs/stat.c b/fs/stat.c
64457index ae0c3ce..9ee641c 100644
64458--- a/fs/stat.c
64459+++ b/fs/stat.c
64460@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
64461 stat->gid = inode->i_gid;
64462 stat->rdev = inode->i_rdev;
64463 stat->size = i_size_read(inode);
64464- stat->atime = inode->i_atime;
64465- stat->mtime = inode->i_mtime;
64466+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
64467+ stat->atime = inode->i_ctime;
64468+ stat->mtime = inode->i_ctime;
64469+ } else {
64470+ stat->atime = inode->i_atime;
64471+ stat->mtime = inode->i_mtime;
64472+ }
64473 stat->ctime = inode->i_ctime;
64474 stat->blksize = (1 << inode->i_blkbits);
64475 stat->blocks = inode->i_blocks;
64476@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
64477 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
64478 {
64479 struct inode *inode = path->dentry->d_inode;
64480+ int retval;
64481
64482- if (inode->i_op->getattr)
64483- return inode->i_op->getattr(path->mnt, path->dentry, stat);
64484+ if (inode->i_op->getattr) {
64485+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
64486+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
64487+ stat->atime = stat->ctime;
64488+ stat->mtime = stat->ctime;
64489+ }
64490+ return retval;
64491+ }
64492
64493 generic_fillattr(inode, stat);
64494 return 0;
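
Both generic_fillattr() and vfs_getattr_nosec() above substitute ctime for atime/mtime when the inode is a "sidechannel" device and the caller lacks CAP_MKNOD, so stat() cannot be used to watch when, say, a tty was last touched. A userspace sketch of the substitution; is_sidechannel() is a hypothetical stand-in for the inode test:

#include <stdio.h>
#include <time.h>

struct kstat_like { time_t atime, mtime, ctime; };

static int is_sidechannel(void)
{
    return 1;                   /* hypothetical stand-in for the inode test */
}

static void fill_times(struct kstat_like *st, time_t a, time_t m, time_t c)
{
    st->ctime = c;
    if (is_sidechannel()) {
        st->atime = c;          /* mask real access/modify times */
        st->mtime = c;
    } else {
        st->atime = a;
        st->mtime = m;
    }
}

int main(void)
{
    struct kstat_like st;

    fill_times(&st, 100, 200, 300);
    printf("atime=%ld mtime=%ld ctime=%ld\n",
           (long)st.atime, (long)st.mtime, (long)st.ctime);
    return 0;
}
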
64495diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
64496index 5e73d66..4f165fd 100644
64497--- a/fs/sysfs/dir.c
64498+++ b/fs/sysfs/dir.c
64499@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
64500 *
64501 * Returns 31 bit hash of ns + name (so it fits in an off_t )
64502 */
64503-static unsigned int sysfs_name_hash(const char *name, const void *ns)
64504+static unsigned int sysfs_name_hash(const unsigned char *name, const void *ns)
64505 {
64506 unsigned long hash = init_name_hash();
64507 unsigned int len = strlen(name);
64508@@ -676,6 +676,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
64509 struct sysfs_dirent *sd;
64510 int rc;
64511
64512+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
64513+ const char *parent_name = parent_sd->s_name;
64514+
64515+ mode = S_IFDIR | S_IRWXU;
64516+
64517+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
64518+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
64519+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
64520+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
64521+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
64522+#endif
64523+
64524 /* allocate */
64525 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
64526 if (!sd)
64527diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
64528index 35e7d08..4d6e676 100644
64529--- a/fs/sysfs/file.c
64530+++ b/fs/sysfs/file.c
64531@@ -42,7 +42,7 @@ static DEFINE_MUTEX(sysfs_open_file_mutex);
64532
64533 struct sysfs_open_dirent {
64534 atomic_t refcnt;
64535- atomic_t event;
64536+ atomic_unchecked_t event;
64537 wait_queue_head_t poll;
64538 struct list_head files; /* goes through sysfs_open_file.list */
64539 };
64540@@ -112,7 +112,7 @@ static int sysfs_seq_show(struct seq_file *sf, void *v)
64541 return -ENODEV;
64542 }
64543
64544- of->event = atomic_read(&of->sd->s_attr.open->event);
64545+ of->event = atomic_read_unchecked(&of->sd->s_attr.open->event);
64546
64547 /*
64548 * Lookup @ops and invoke show(). Control may reach here via seq
64549@@ -365,12 +365,12 @@ static int sysfs_bin_page_mkwrite(struct vm_area_struct *vma,
64550 return ret;
64551 }
64552
64553-static int sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
64554- void *buf, int len, int write)
64555+static ssize_t sysfs_bin_access(struct vm_area_struct *vma, unsigned long addr,
64556+ void *buf, size_t len, int write)
64557 {
64558 struct file *file = vma->vm_file;
64559 struct sysfs_open_file *of = sysfs_of(file);
64560- int ret;
64561+ ssize_t ret;
64562
64563 if (!of->vm_ops)
64564 return -EINVAL;
64565@@ -564,7 +564,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
64566 return -ENOMEM;
64567
64568 atomic_set(&new_od->refcnt, 0);
64569- atomic_set(&new_od->event, 1);
64570+ atomic_set_unchecked(&new_od->event, 1);
64571 init_waitqueue_head(&new_od->poll);
64572 INIT_LIST_HEAD(&new_od->files);
64573 goto retry;
64574@@ -768,7 +768,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
64575
64576 sysfs_put_active(attr_sd);
64577
64578- if (of->event != atomic_read(&od->event))
64579+ if (of->event != atomic_read_unchecked(&od->event))
64580 goto trigger;
64581
64582 return DEFAULT_POLLMASK;
64583@@ -787,7 +787,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
64584 if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
64585 od = sd->s_attr.open;
64586 if (od) {
64587- atomic_inc(&od->event);
64588+ atomic_inc_unchecked(&od->event);
64589 wake_up_interruptible(&od->poll);
64590 }
64591 }
64592diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
64593index 3ae3f1b..081a26c 100644
64594--- a/fs/sysfs/symlink.c
64595+++ b/fs/sysfs/symlink.c
64596@@ -314,7 +314,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64597 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd,
64598 void *cookie)
64599 {
64600- char *page = nd_get_link(nd);
64601+ const char *page = nd_get_link(nd);
64602 if (!IS_ERR(page))
64603 free_page((unsigned long)page);
64604 }
64605diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
64606index 69d4889..a810bd4 100644
64607--- a/fs/sysv/sysv.h
64608+++ b/fs/sysv/sysv.h
64609@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
64610 #endif
64611 }
64612
64613-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
64614+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
64615 {
64616 if (sbi->s_bytesex == BYTESEX_PDP)
64617 return PDP_swab((__force __u32)n);
64618diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
64619index e18b988..f1d4ad0f 100644
64620--- a/fs/ubifs/io.c
64621+++ b/fs/ubifs/io.c
64622@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
64623 return err;
64624 }
64625
64626-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
64627+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
64628 {
64629 int err;
64630
64631diff --git a/fs/udf/misc.c b/fs/udf/misc.c
64632index c175b4d..8f36a16 100644
64633--- a/fs/udf/misc.c
64634+++ b/fs/udf/misc.c
64635@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
64636
64637 u8 udf_tag_checksum(const struct tag *t)
64638 {
64639- u8 *data = (u8 *)t;
64640+ const u8 *data = (const u8 *)t;
64641 u8 checksum = 0;
64642 int i;
64643 for (i = 0; i < sizeof(struct tag); ++i)
64644diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
64645index 8d974c4..b82f6ec 100644
64646--- a/fs/ufs/swab.h
64647+++ b/fs/ufs/swab.h
64648@@ -22,7 +22,7 @@ enum {
64649 BYTESEX_BE
64650 };
64651
64652-static inline u64
64653+static inline u64 __intentional_overflow(-1)
64654 fs64_to_cpu(struct super_block *sbp, __fs64 n)
64655 {
64656 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
64657@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
64658 return (__force __fs64)cpu_to_be64(n);
64659 }
64660
64661-static inline u32
64662+static inline u32 __intentional_overflow(-1)
64663 fs32_to_cpu(struct super_block *sbp, __fs32 n)
64664 {
64665 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
64666diff --git a/fs/utimes.c b/fs/utimes.c
64667index aa138d6..5f3a811 100644
64668--- a/fs/utimes.c
64669+++ b/fs/utimes.c
64670@@ -1,6 +1,7 @@
64671 #include <linux/compiler.h>
64672 #include <linux/file.h>
64673 #include <linux/fs.h>
64674+#include <linux/security.h>
64675 #include <linux/linkage.h>
64676 #include <linux/mount.h>
64677 #include <linux/namei.h>
64678@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
64679 }
64680 }
64681 retry_deleg:
64682+
64683+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
64684+ error = -EACCES;
64685+ goto mnt_drop_write_and_out;
64686+ }
64687+
64688 mutex_lock(&inode->i_mutex);
64689 error = notify_change(path->dentry, &newattrs, &delegated_inode);
64690 mutex_unlock(&inode->i_mutex);
64691diff --git a/fs/xattr.c b/fs/xattr.c
64692index 3377dff..f394815 100644
64693--- a/fs/xattr.c
64694+++ b/fs/xattr.c
64695@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
64696 return rc;
64697 }
64698
64699+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
64700+ssize_t
64701+pax_getxattr(struct dentry *dentry, void *value, size_t size)
64702+{
64703+ struct inode *inode = dentry->d_inode;
64704+ ssize_t error;
64705+
64706+ error = inode_permission(inode, MAY_EXEC);
64707+ if (error)
64708+ return error;
64709+
64710+ if (inode->i_op->getxattr)
64711+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
64712+ else
64713+ error = -EOPNOTSUPP;
64714+
64715+ return error;
64716+}
64717+EXPORT_SYMBOL(pax_getxattr);
64718+#endif
64719+
64720 ssize_t
64721 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
64722 {
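
The pax_getxattr() helper added above serves per-binary PaX flags to the kernel's loader code via XATTR_NAME_PAX_FLAGS. From user space the same attribute is an ordinary extended attribute; a small reader sketch, assuming the conventional PaX xattr name "user.pax.flags" (the name itself is not shown in this hunk):

    #include <stdio.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
            char buf[32];
            ssize_t len;

            if (argc < 2) {
                    fprintf(stderr, "usage: %s <binary>\n", argv[0]);
                    return 1;
            }
            /* "user.pax.flags" is the assumed value of XATTR_NAME_PAX_FLAGS */
            len = getxattr(argv[1], "user.pax.flags", buf, sizeof(buf) - 1);
            if (len < 0) {
                    perror("getxattr");
                    return 1;
            }
            buf[len] = '\0';
            printf("PaX flags: %s\n", buf);
            return 0;
    }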
64723@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
64724 * Extended attribute SET operations
64725 */
64726 static long
64727-setxattr(struct dentry *d, const char __user *name, const void __user *value,
64728+setxattr(struct path *path, const char __user *name, const void __user *value,
64729 size_t size, int flags)
64730 {
64731 int error;
64732@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
64733 posix_acl_fix_xattr_from_user(kvalue, size);
64734 }
64735
64736- error = vfs_setxattr(d, kname, kvalue, size, flags);
64737+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
64738+ error = -EACCES;
64739+ goto out;
64740+ }
64741+
64742+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
64743 out:
64744 if (vvalue)
64745 vfree(vvalue);
64746@@ -377,7 +403,7 @@ retry:
64747 return error;
64748 error = mnt_want_write(path.mnt);
64749 if (!error) {
64750- error = setxattr(path.dentry, name, value, size, flags);
64751+ error = setxattr(&path, name, value, size, flags);
64752 mnt_drop_write(path.mnt);
64753 }
64754 path_put(&path);
64755@@ -401,7 +427,7 @@ retry:
64756 return error;
64757 error = mnt_want_write(path.mnt);
64758 if (!error) {
64759- error = setxattr(path.dentry, name, value, size, flags);
64760+ error = setxattr(&path, name, value, size, flags);
64761 mnt_drop_write(path.mnt);
64762 }
64763 path_put(&path);
64764@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
64765 const void __user *,value, size_t, size, int, flags)
64766 {
64767 struct fd f = fdget(fd);
64768- struct dentry *dentry;
64769 int error = -EBADF;
64770
64771 if (!f.file)
64772 return error;
64773- dentry = f.file->f_path.dentry;
64774- audit_inode(NULL, dentry, 0);
64775+ audit_inode(NULL, f.file->f_path.dentry, 0);
64776 error = mnt_want_write_file(f.file);
64777 if (!error) {
64778- error = setxattr(dentry, name, value, size, flags);
64779+ error = setxattr(&f.file->f_path, name, value, size, flags);
64780 mnt_drop_write_file(f.file);
64781 }
64782 fdput(f);
64783@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
64784 * Extended attribute REMOVE operations
64785 */
64786 static long
64787-removexattr(struct dentry *d, const char __user *name)
64788+removexattr(struct path *path, const char __user *name)
64789 {
64790 int error;
64791 char kname[XATTR_NAME_MAX + 1];
64792@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
64793 if (error < 0)
64794 return error;
64795
64796- return vfs_removexattr(d, kname);
64797+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
64798+ return -EACCES;
64799+
64800+ return vfs_removexattr(path->dentry, kname);
64801 }
64802
64803 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
64804@@ -652,7 +679,7 @@ retry:
64805 return error;
64806 error = mnt_want_write(path.mnt);
64807 if (!error) {
64808- error = removexattr(path.dentry, name);
64809+ error = removexattr(&path, name);
64810 mnt_drop_write(path.mnt);
64811 }
64812 path_put(&path);
64813@@ -675,7 +702,7 @@ retry:
64814 return error;
64815 error = mnt_want_write(path.mnt);
64816 if (!error) {
64817- error = removexattr(path.dentry, name);
64818+ error = removexattr(&path, name);
64819 mnt_drop_write(path.mnt);
64820 }
64821 path_put(&path);
64822@@ -689,16 +716,16 @@ retry:
64823 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
64824 {
64825 struct fd f = fdget(fd);
64826- struct dentry *dentry;
64827+ struct path *path;
64828 int error = -EBADF;
64829
64830 if (!f.file)
64831 return error;
64832- dentry = f.file->f_path.dentry;
64833- audit_inode(NULL, dentry, 0);
64834+ path = &f.file->f_path;
64835+ audit_inode(NULL, path->dentry, 0);
64836 error = mnt_want_write_file(f.file);
64837 if (!error) {
64838- error = removexattr(dentry, name);
64839+ error = removexattr(path, name);
64840 mnt_drop_write_file(f.file);
64841 }
64842 fdput(f);
64843diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
64844index 9fbea87..417b3c2 100644
64845--- a/fs/xattr_acl.c
64846+++ b/fs/xattr_acl.c
64847@@ -10,6 +10,7 @@
64848 #include <linux/posix_acl_xattr.h>
64849 #include <linux/gfp.h>
64850 #include <linux/user_namespace.h>
64851+#include <linux/grsecurity.h>
64852
64853 /*
64854 * Fix up the uids and gids in posix acl extended attributes in place.
64855@@ -76,11 +77,12 @@ struct posix_acl *
64856 posix_acl_from_xattr(struct user_namespace *user_ns,
64857 const void *value, size_t size)
64858 {
64859- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
64860- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
64861+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
64862+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
64863 int count;
64864 struct posix_acl *acl;
64865 struct posix_acl_entry *acl_e;
64866+ umode_t umask = gr_acl_umask();
64867
64868 if (!value)
64869 return NULL;
64870@@ -106,12 +108,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
64871
64872 switch(acl_e->e_tag) {
64873 case ACL_USER_OBJ:
64874+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
64875+ break;
64876 case ACL_GROUP_OBJ:
64877 case ACL_MASK:
64878+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
64879+ break;
64880 case ACL_OTHER:
64881+ acl_e->e_perm &= ~(umask & S_IRWXO);
64882 break;
64883
64884 case ACL_USER:
64885+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
64886 acl_e->e_uid =
64887 make_kuid(user_ns,
64888 le32_to_cpu(entry->e_id));
64889@@ -119,6 +127,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
64890 goto fail;
64891 break;
64892 case ACL_GROUP:
64893+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
64894 acl_e->e_gid =
64895 make_kgid(user_ns,
64896 le32_to_cpu(entry->e_id));
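
The masking added above applies a umask obtained from gr_acl_umask() to each ACL class as the xattr is parsed. An ACL entry's e_perm keeps rwx in its low three bits, while a umask packs the classes as S_IRWXU/S_IRWXG/S_IRWXO (0700/0070/0007), hence the per-class shift before clearing. A worked user-space example of the arithmetic, assuming a umask of 022:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            unsigned short e_perm;
            unsigned int umask_val = 0022;  /* what gr_acl_umask() might return */

            e_perm = 07;                            /* rwx on an ACL_USER entry */
            e_perm &= ~((umask_val & S_IRWXU) >> 6); /* 022 & 0700 = 0: unchanged */
            printf("user entry:  %o\n", (unsigned)e_perm);  /* prints 7 */

            e_perm = 07;                            /* rwx on an ACL_GROUP entry */
            e_perm &= ~((umask_val & S_IRWXG) >> 3); /* 022 & 0070 = 020 -> 02 */
            printf("group entry: %o\n", (unsigned)e_perm);  /* prints 5: write cleared */
            return 0;
    }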
64897diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
64898index 3b2c14b..de031fe 100644
64899--- a/fs/xfs/xfs_bmap.c
64900+++ b/fs/xfs/xfs_bmap.c
64901@@ -584,7 +584,7 @@ xfs_bmap_validate_ret(
64902
64903 #else
64904 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
64905-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
64906+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
64907 #endif /* DEBUG */
64908
64909 /*
64910diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
64911index c4e50c6..8ba93e3 100644
64912--- a/fs/xfs/xfs_dir2_readdir.c
64913+++ b/fs/xfs/xfs_dir2_readdir.c
64914@@ -160,7 +160,12 @@ xfs_dir2_sf_getdents(
64915 ino = dp->d_ops->sf_get_ino(sfp, sfep);
64916 filetype = dp->d_ops->sf_get_ftype(sfep);
64917 ctx->pos = off & 0x7fffffff;
64918- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
64919+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
64920+ char name[sfep->namelen];
64921+ memcpy(name, sfep->name, sfep->namelen);
64922+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(mp, filetype)))
64923+ return 0;
64924+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
64925 xfs_dir3_get_dtype(mp, filetype)))
64926 return 0;
64927 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
64928diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
64929index 33ad9a7..82c18ba 100644
64930--- a/fs/xfs/xfs_ioctl.c
64931+++ b/fs/xfs/xfs_ioctl.c
64932@@ -126,7 +126,7 @@ xfs_find_handle(
64933 }
64934
64935 error = -EFAULT;
64936- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
64937+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
64938 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
64939 goto out_put;
64940
64941diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
64942index 104455b..764c512 100644
64943--- a/fs/xfs/xfs_iops.c
64944+++ b/fs/xfs/xfs_iops.c
64945@@ -397,7 +397,7 @@ xfs_vn_put_link(
64946 struct nameidata *nd,
64947 void *p)
64948 {
64949- char *s = nd_get_link(nd);
64950+ const char *s = nd_get_link(nd);
64951
64952 if (!IS_ERR(s))
64953 kfree(s);
64954diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
64955new file mode 100644
64956index 0000000..031e895
64957--- /dev/null
64958+++ b/grsecurity/Kconfig
64959@@ -0,0 +1,1157 @@
64960+#
64961+# grsecurity configuration
64962+#
64963+menu "Memory Protections"
64964+depends on GRKERNSEC
64965+
64966+config GRKERNSEC_KMEM
64967+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
64968+ default y if GRKERNSEC_CONFIG_AUTO
64969+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
64970+ help
64971+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to be
64972+ written to or read from, preventing their use to modify or leak the
64973+ contents of the running kernel. /dev/port will also not be allowed to
64974+ be opened, and support for /dev/cpu/*/msr and kexec will be removed.
64975+ If you have module support disabled, enabling this will close up six
64976+ ways that are currently used to insert malicious code into the running kernel.
64977+
64978+ Even with this feature enabled, we still highly recommend that
64979+ you use the RBAC system, as it is still possible for an attacker to
64980+ modify the running kernel through other more obscure methods.
64981+
64982+ Enabling this feature will prevent the "cpupower" and "powertop" tools
64983+ from working.
64984+
64985+ It is highly recommended that you say Y here if you meet all the
64986+ conditions above.
64987+
64988+config GRKERNSEC_VM86
64989+ bool "Restrict VM86 mode"
64990+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
64991+ depends on X86_32
64992+
64993+ help
64994+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
64995+ make use of a special execution mode on 32bit x86 processors called
64996+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
64997+ video cards and will still work with this option enabled. The purpose
64998+ of the option is to prevent exploitation of emulation errors in
64999+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
65000+ Nearly all users should be able to enable this option.
65001+
65002+config GRKERNSEC_IO
65003+ bool "Disable privileged I/O"
65004+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
65005+ depends on X86
65006+ select RTC_CLASS
65007+ select RTC_INTF_DEV
65008+ select RTC_DRV_CMOS
65009+
65010+ help
65011+ If you say Y here, all ioperm and iopl calls will return an error.
65012+ Ioperm and iopl can be used to modify the running kernel.
65013+ Unfortunately, some programs need this access to operate properly,
65014+ the most notable of which are XFree86 and hwclock. hwclock can be
65015+ remedied by having RTC support in the kernel, so real-time
65016+ clock support is enabled if this option is enabled, to ensure
65017+ that hwclock operates correctly.
65018+
65019+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
65020+ you may not be able to boot into a graphical environment with this
65021+ option enabled. In this case, you should use the RBAC system instead.
65022+
65023+config GRKERNSEC_JIT_HARDEN
65024+ bool "Harden BPF JIT against spray attacks"
65025+ default y if GRKERNSEC_CONFIG_AUTO
65026+ depends on BPF_JIT && X86
65027+ help
65028+ If you say Y here, the native code generated by the kernel's Berkeley
65029+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
65030+ attacks that attempt to fit attacker-beneficial instructions in
65031+ 32bit immediate fields of JIT-generated native instructions. The
65032+ attacker will generally aim to cause an unintended instruction sequence
65033+ of JIT-generated native code to execute by jumping into the middle of
65034+ a generated instruction. This feature effectively randomizes the 32bit
65035+ immediate constants present in the generated code to thwart such attacks.
65036+
65037+ If you're using KERNEXEC, it's recommended that you enable this option
65038+ to supplement the hardening of the kernel.
65039+
65040+config GRKERNSEC_PERF_HARDEN
65041+ bool "Disable unprivileged PERF_EVENTS usage by default"
65042+ default y if GRKERNSEC_CONFIG_AUTO
65043+ depends on PERF_EVENTS
65044+ help
65045+ If you say Y here, the range of acceptable values for the
65046+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
65047+ default to a new value: 3. When the sysctl is set to this value, no
65048+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
65049+
65050+ Though PERF_EVENTS can be used legitimately for performance monitoring
65051+ and low-level application profiling, it is forced on regardless of
65052+ configuration, has been at fault for several vulnerabilities, and
65053+ creates new opportunities for side channels and other information leaks.
65054+
65055+ This feature puts PERF_EVENTS into a secure default state and permits
65056+ the administrator to change out of it temporarily if unprivileged
65057+ application profiling is needed.
65058+
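
A small user-space probe of the behavior described above; it reads only the standard /proc/sys/kernel/perf_event_paranoid file, where (with this feature) a value of 3 or higher means unprivileged perf_event_open() is refused outright:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
            int level;

            if (!f) {
                    perror("perf_event_paranoid");
                    return 1;
            }
            if (fscanf(f, "%d", &level) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            if (level >= 3)
                    puts("unprivileged PERF_EVENTS use is disabled");
            else
                    printf("paranoid level %d: some unprivileged use allowed\n", level);
            return 0;
    }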
65059+config GRKERNSEC_RAND_THREADSTACK
65060+ bool "Insert random gaps between thread stacks"
65061+ default y if GRKERNSEC_CONFIG_AUTO
65062+ depends on PAX_RANDMMAP && !PPC
65063+ help
65064+ If you say Y here, a random-sized gap will be enforced between allocated
65065+ thread stacks. Glibc's NPTL and other threading libraries that
65066+ pass MAP_STACK to the kernel for thread stack allocation are supported.
65067+ The implementation currently provides 8 bits of entropy for the gap.
65068+
65069+ Many distributions do not compile threaded remote services with the
65070+ -fstack-check argument to GCC, causing the variable-sized stack-based
65071+ allocator, alloca(), to not probe the stack on allocation. This
65072+ permits an unbounded alloca() to skip over any guard page and potentially
65073+ modify another thread's stack reliably. An enforced random gap
65074+ reduces the reliability of such an attack and increases the chance
65075+ that such a read/write to another thread's stack instead lands in
65076+ an unmapped area, causing a crash and triggering grsecurity's
65077+ anti-bruteforcing logic.
65078+
65079+config GRKERNSEC_PROC_MEMMAP
65080+ bool "Harden ASLR against information leaks and entropy reduction"
65081+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
65082+ depends on PAX_NOEXEC || PAX_ASLR
65083+ help
65084+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
65085+ give no information about the addresses of its mappings if
65086+ PaX features that rely on random addresses are enabled on the task.
65087+ In addition to sanitizing this information and disabling other
65088+ dangerous sources of information, this option causes reads of sensitive
65089+ /proc/<pid> entries to be denied when the file descriptor was opened in
65090+ a different task than the one performing the read. Such attempts are logged.
65091+ This option also limits argv/env strings for suid/sgid binaries
65092+ to 512KB to prevent a complete exhaustion of the stack entropy provided
65093+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
65094+ binaries to prevent alternative mmap layouts from being abused.
65095+
65096+ If you use PaX it is essential that you say Y here as it closes up
65097+ several holes that make full ASLR useless locally.
65098+
65099+config GRKERNSEC_BRUTE
65100+ bool "Deter exploit bruteforcing"
65101+ default y if GRKERNSEC_CONFIG_AUTO
65102+ help
65103+ If you say Y here, attempts to bruteforce exploits against forking
65104+ daemons such as apache or sshd, as well as against suid/sgid binaries
65105+ will be deterred. When a child of a forking daemon is killed by PaX
65106+ or crashes due to an illegal instruction or other suspicious signal,
65107+ the parent process will be delayed 30 seconds upon every subsequent
65108+ fork until the administrator is able to assess the situation and
65109+ restart the daemon.
65110+ In the suid/sgid case, the attempt is logged, the user has all their
65111+ existing instances of the suid/sgid binary terminated and will
65112+ be unable to execute any suid/sgid binaries for 15 minutes.
65113+
65114+ It is recommended that you also enable signal logging in the auditing
65115+ section so that logs are generated when a process triggers a suspicious
65116+ signal.
65117+ If the sysctl option is enabled, a sysctl option with name
65118+ "deter_bruteforce" is created.
65119+
65120+config GRKERNSEC_MODHARDEN
65121+ bool "Harden module auto-loading"
65122+ default y if GRKERNSEC_CONFIG_AUTO
65123+ depends on MODULES
65124+ help
65125+ If you say Y here, module auto-loading in response to use of some
65126+ feature implemented by an unloaded module will be restricted to
65127+ root users. Enabling this option helps defend against attacks
65128+ by unprivileged users who abuse the auto-loading behavior to
65129+ cause a vulnerable module to load that is then exploited.
65130+
65131+ If this option prevents a legitimate use of auto-loading for a
65132+ non-root user, the administrator can execute modprobe manually
65133+ with the exact name of the module mentioned in the alert log.
65134+ Alternatively, the administrator can add the module to the list
65135+ of modules loaded at boot by modifying init scripts.
65136+
65137+ Modification of init scripts will most likely be needed on
65138+ Ubuntu servers with encrypted home directory support enabled,
65139+ as the first non-root user logging in will cause the ecb(aes),
65140+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
65141+
65142+config GRKERNSEC_HIDESYM
65143+ bool "Hide kernel symbols"
65144+ default y if GRKERNSEC_CONFIG_AUTO
65145+ select PAX_USERCOPY_SLABS
65146+ help
65147+ If you say Y here, getting information on loaded modules, and
65148+ displaying all kernel symbols through a syscall will be restricted
65149+ to users with CAP_SYS_MODULE. For software compatibility reasons,
65150+ /proc/kallsyms will be restricted to the root user. The RBAC
65151+ system can hide that entry even from root.
65152+
65153+ This option also prevents leaking of kernel addresses through
65154+ several /proc entries.
65155+
65156+ Note that this option is only effective provided the following
65157+ conditions are met:
65158+ 1) The kernel using grsecurity is not precompiled by some distribution
65159+ 2) You have also enabled GRKERNSEC_DMESG
65160+ 3) You are using the RBAC system and hiding other files such as your
65161+ kernel image and System.map. Alternatively, enabling this option
65162+ causes the permissions on /boot, /lib/modules, and the kernel
65163+ source directory to change at compile time to prevent
65164+ reading by non-root users.
65165+ If the above conditions are met, this option will aid in providing a
65166+ useful protection against local kernel exploitation of overflows
65167+ and arbitrary read/write vulnerabilities.
65168+
65169+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
65170+ in addition to this feature.
65171+
65172+config GRKERNSEC_RANDSTRUCT
65173+ bool "Randomize layout of sensitive kernel structures"
65174+ default y if GRKERNSEC_CONFIG_AUTO
65175+ select GRKERNSEC_HIDESYM
65176+ select MODVERSIONS if MODULES
65177+ help
65178+ If you say Y here, the layouts of a number of sensitive kernel
65179+ structures (task, fs, cred, etc) and all structures composed entirely
65180+ of function pointers (aka "ops" structs) will be randomized at compile-time.
65181+ This can introduce the requirement of an additional infoleak
65182+ vulnerability for exploits targeting these structure types.
65183+
65184+ Enabling this feature will introduce some performance impact, slightly
65185+ increase memory usage, and prevent the use of forensic tools like
65186+ Volatility against the system (unless the kernel source tree, which
65187+ contains the seed, is left uncleaned after kernel installation).
65188+
65189+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
65190+ It remains after a make clean to allow for external modules to be compiled
65191+ with the existing seed and will be removed by a make mrproper or
65192+ make distclean.
65193+
65194+ Note that the implementation requires gcc 4.6.4 or newer. You may need
65195+ to install the supporting headers explicitly in addition to the normal
65196+ gcc package.
65197+
65198+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
65199+ bool "Use cacheline-aware structure randomization"
65200+ depends on GRKERNSEC_RANDSTRUCT
65201+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
65202+ help
65203+ If you say Y here, the RANDSTRUCT randomization will make a best effort
65204+ at restricting randomization to cacheline-sized groups of elements. It
65205+ will further not randomize bitfields in structures. This reduces the
65206+ performance hit of RANDSTRUCT at the cost of weakened randomization.
65207+
65208+config GRKERNSEC_KERN_LOCKOUT
65209+ bool "Active kernel exploit response"
65210+ default y if GRKERNSEC_CONFIG_AUTO
65211+ depends on X86 || ARM || PPC || SPARC
65212+ help
65213+ If you say Y here, when a PaX alert is triggered due to suspicious
65214+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
65215+ or an OOPS occurs due to bad memory accesses, instead of just
65216+ terminating the offending process (and potentially allowing
65217+ a subsequent exploit from the same user), we will take one of two
65218+ actions:
65219+ If the user was root, we will panic the system
65220+ If the user was non-root, we will log the attempt, terminate
65221+ all processes owned by the user, then prevent them from creating
65222+ any new processes until the system is restarted
65223+ This deters repeated kernel exploitation/bruteforcing attempts
65224+ and is useful for later forensics.
65225+
65226+config GRKERNSEC_OLD_ARM_USERLAND
65227+ bool "Old ARM userland compatibility"
65228+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
65229+ help
65230+ If you say Y here, stubs of executable code to perform such operations
65231+ as "compare-exchange" will be placed at fixed locations in the ARM vector
65232+ table. This is unfortunately needed for old ARM userland meant to run
65233+ across a wide range of processors. Without this option enabled,
65234+ the get_tls and data memory barrier stubs will be emulated by the kernel,
65235+ which is enough for Linaro userlands or other userlands designed for v6
65236+ and newer ARM CPUs. It's recommended that you try without this option enabled
65237+ first, and only enable it if your userland does not boot (it will likely fail
65238+ at init time).
65239+
65240+endmenu
65241+menu "Role Based Access Control Options"
65242+depends on GRKERNSEC
65243+
65244+config GRKERNSEC_RBAC_DEBUG
65245+ bool
65246+
65247+config GRKERNSEC_NO_RBAC
65248+ bool "Disable RBAC system"
65249+ help
65250+ If you say Y here, the /dev/grsec device will be removed from the kernel,
65251+ preventing the RBAC system from being enabled. You should only say Y
65252+ here if you have no intention of using the RBAC system, so as to prevent
65253+ an attacker with root access from misusing the RBAC system to hide files
65254+ and processes when loadable module support and /dev/[k]mem have been
65255+ locked down.
65256+
65257+config GRKERNSEC_ACL_HIDEKERN
65258+ bool "Hide kernel processes"
65259+ help
65260+ If you say Y here, all kernel threads will be hidden from all
65261+ processes but those whose subject has the "view hidden processes"
65262+ flag.
65263+
65264+config GRKERNSEC_ACL_MAXTRIES
65265+ int "Maximum tries before password lockout"
65266+ default 3
65267+ help
65268+ This option enforces the maximum number of times a user can attempt
65269+ to authorize themselves with the grsecurity RBAC system before being
65270+ denied the ability to attempt authorization again for a specified time.
65271+ The lower the number, the harder it will be to brute-force a password.
65272+
65273+config GRKERNSEC_ACL_TIMEOUT
65274+ int "Time to wait after max password tries, in seconds"
65275+ default 30
65276+ help
65277+ This option specifies the time the user must wait after attempting to
65278+ authorize to the RBAC system with the maximum number of invalid
65279+ passwords. The higher the number, the harder it will be to brute-force
65280+ a password.
65281+
65282+endmenu
65283+menu "Filesystem Protections"
65284+depends on GRKERNSEC
65285+
65286+config GRKERNSEC_PROC
65287+ bool "Proc restrictions"
65288+ default y if GRKERNSEC_CONFIG_AUTO
65289+ help
65290+ If you say Y here, the permissions of the /proc filesystem
65291+ will be altered to enhance system security and privacy. You MUST
65292+ choose either a user-only restriction or a user and group restriction.
65293+ Depending upon the option you choose, you can either restrict users to
65294+ see only the processes they themselves run, or select a group whose
65295+ members can view all processes and files normally restricted to
65296+ root. NOTE: If you're running identd or
65297+ ntpd as a non-root user, you will have to run it as the group you
65298+ specify here.
65299+
65300+config GRKERNSEC_PROC_USER
65301+ bool "Restrict /proc to user only"
65302+ depends on GRKERNSEC_PROC
65303+ help
65304+ If you say Y here, non-root users will only be able to view their own
65305+ processes, and will be restricted from viewing network-related
65306+ information and kernel symbol and module information.
65307+
65308+config GRKERNSEC_PROC_USERGROUP
65309+ bool "Allow special group"
65310+ default y if GRKERNSEC_CONFIG_AUTO
65311+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
65312+ help
65313+ If you say Y here, you will be able to select a group that will be
65314+ able to view all processes and network-related information. If you've
65315+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
65316+ remain hidden. This option is useful if you want to run identd as
65317+ a non-root user. The group you select may also be chosen at boot time
65318+ via "grsec_proc_gid=" on the kernel commandline.
65319+
65320+config GRKERNSEC_PROC_GID
65321+ int "GID for special group"
65322+ depends on GRKERNSEC_PROC_USERGROUP
65323+ default 1001
65324+
65325+config GRKERNSEC_PROC_ADD
65326+ bool "Additional restrictions"
65327+ default y if GRKERNSEC_CONFIG_AUTO
65328+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
65329+ help
65330+ If you say Y here, additional restrictions will be placed on
65331+ /proc that keep normal users from viewing device information and
65332+ slabinfo information that could be useful for exploits.
65333+
65334+config GRKERNSEC_LINK
65335+ bool "Linking restrictions"
65336+ default y if GRKERNSEC_CONFIG_AUTO
65337+ help
65338+ If you say Y here, /tmp race exploits will be prevented, since users
65339+ will no longer be able to follow symlinks owned by other users in
65340+ world-writable +t directories (e.g. /tmp), unless the owner of the
65341+ symlink is the owner of the directory. Users will also not be
65342+ able to hardlink to files they do not own. If the sysctl option is
65343+ enabled, a sysctl option with name "linking_restrictions" is created.
65344+
65345+config GRKERNSEC_SYMLINKOWN
65346+ bool "Kernel-enforced SymlinksIfOwnerMatch"
65347+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
65348+ help
65349+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
65350+ that prevents it from being used as a security feature. As Apache
65351+ verifies the symlink by performing a stat() against the target of
65352+ the symlink before it is followed, an attacker can set up a symlink
65353+ to point to a same-owned file, then replace the symlink with one
65354+ that targets another user's file just after Apache "validates" the
65355+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
65356+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
65357+ will be in place for the group you specify. If the sysctl option
65358+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
65359+ created.
65360+
65361+config GRKERNSEC_SYMLINKOWN_GID
65362+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
65363+ depends on GRKERNSEC_SYMLINKOWN
65364+ default 1006
65365+ help
65366+ Setting this GID determines what group kernel-enforced
65367+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
65368+ is enabled, a sysctl option with name "symlinkown_gid" is created.
65369+
65370+config GRKERNSEC_FIFO
65371+ bool "FIFO restrictions"
65372+ default y if GRKERNSEC_CONFIG_AUTO
65373+ help
65374+ If you say Y here, users will not be able to write to FIFOs they don't
65375+ own in world-writable +t directories (e.g. /tmp), unless the owner of
65376+ the FIFO is also the owner of the directory it resides in. If the sysctl
65377+ option is enabled, a sysctl option with name "fifo_restrictions" is
65378+ created.
65379+
65380+config GRKERNSEC_SYSFS_RESTRICT
65381+ bool "Sysfs/debugfs restriction"
65382+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
65383+ depends on SYSFS
65384+ help
65385+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
65386+ any filesystem normally mounted under it (e.g. debugfs) will be
65387+ mostly accessible only by root. These filesystems generally provide access
65388+ to hardware and debug information that isn't appropriate for unprivileged
65389+ users of the system. Sysfs and debugfs have also become a large source
65390+ of new vulnerabilities, ranging from infoleaks to local compromise.
65391+ There has been very little oversight with an eye toward security involved
65392+ in adding new exporters of information to these filesystems, so their
65393+ use is discouraged.
65394+ For reasons of compatibility, a few directories have been whitelisted
65395+ for access by non-root users:
65396+ /sys/fs/selinux
65397+ /sys/fs/fuse
65398+ /sys/devices/system/cpu
65399+
65400+config GRKERNSEC_ROFS
65401+ bool "Runtime read-only mount protection"
65402+ depends on SYSCTL
65403+ help
65404+ If you say Y here, a sysctl option with name "romount_protect" will
65405+ be created. By setting this option to 1 at runtime, filesystems
65406+ will be protected in the following ways:
65407+ * No new writable mounts will be allowed
65408+ * Existing read-only mounts won't be able to be remounted read/write
65409+ * Write operations will be denied on all block devices
65410+ This option acts independently of grsec_lock: once it is set to 1,
65411+ it cannot be turned off. Therefore, please be mindful of the resulting
65412+ behavior if this option is enabled in an init script on a read-only
65413+ filesystem.
65414+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
65415+ and GRKERNSEC_IO should be enabled and module loading disabled via
65416+ config or at runtime.
65417+ This feature is mainly intended for secure embedded systems.
65418+
65419+
65420+config GRKERNSEC_DEVICE_SIDECHANNEL
65421+ bool "Eliminate stat/notify-based device sidechannels"
65422+ default y if GRKERNSEC_CONFIG_AUTO
65423+ help
65424+ If you say Y here, timing analyses on block or character
65425+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
65426+ will be thwarted for unprivileged users. If a process without
65427+ CAP_MKNOD stats such a device, the last access and last modify times
65428+ will match the device's create time. No access or modify events
65429+ will be triggered through inotify/dnotify/fanotify for such devices.
65430+ This feature will prevent attacks that may at a minimum
65431+ allow an attacker to determine the administrator's password length.
65432+
65433+config GRKERNSEC_CHROOT
65434+ bool "Chroot jail restrictions"
65435+ default y if GRKERNSEC_CONFIG_AUTO
65436+ help
65437+ If you say Y here, you will be able to choose several options that will
65438+ make breaking out of a chrooted jail much more difficult. If you
65439+ encounter no software incompatibilities with the following options, it
65440+ is recommended that you enable each one.
65441+
65442+config GRKERNSEC_CHROOT_MOUNT
65443+ bool "Deny mounts"
65444+ default y if GRKERNSEC_CONFIG_AUTO
65445+ depends on GRKERNSEC_CHROOT
65446+ help
65447+ If you say Y here, processes inside a chroot will not be able to
65448+ mount or remount filesystems. If the sysctl option is enabled, a
65449+ sysctl option with name "chroot_deny_mount" is created.
65450+
65451+config GRKERNSEC_CHROOT_DOUBLE
65452+ bool "Deny double-chroots"
65453+ default y if GRKERNSEC_CONFIG_AUTO
65454+ depends on GRKERNSEC_CHROOT
65455+ help
65456+ If you say Y here, processes inside a chroot will not be able to chroot
65457+ again outside the chroot. This is a widely used method of breaking
65458+ out of a chroot jail and should not be allowed. If the sysctl
65459+ option is enabled, a sysctl option with name
65460+ "chroot_deny_chroot" is created.
65461+
65462+config GRKERNSEC_CHROOT_PIVOT
65463+ bool "Deny pivot_root in chroot"
65464+ default y if GRKERNSEC_CONFIG_AUTO
65465+ depends on GRKERNSEC_CHROOT
65466+ help
65467+ If you say Y here, processes inside a chroot will not be able to use
65468+ a function called pivot_root() that was introduced in Linux 2.3.41. It
65469+ works similarly to chroot in that it changes the root filesystem. This
65470+ function could be misused in a chrooted process to attempt to break out
65471+ of the chroot, and therefore should not be allowed. If the sysctl
65472+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
65473+ created.
65474+
65475+config GRKERNSEC_CHROOT_CHDIR
65476+ bool "Enforce chdir(\"/\") on all chroots"
65477+ default y if GRKERNSEC_CONFIG_AUTO
65478+ depends on GRKERNSEC_CHROOT
65479+ help
65480+ If you say Y here, the current working directory of all newly-chrooted
65481+ applications will be set to the root directory of the chroot.
65482+ The man page on chroot(2) states:
65483+ Note that this call does not change the current working
65484+ directory, so that `.' can be outside the tree rooted at
65485+ `/'. In particular, the super-user can escape from a
65486+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
65487+
65488+ It is recommended that you say Y here, since it's not known to break
65489+ any software. If the sysctl option is enabled, a sysctl option with
65490+ name "chroot_enforce_chdir" is created.
65491+
65492+config GRKERNSEC_CHROOT_CHMOD
65493+ bool "Deny (f)chmod +s"
65494+ default y if GRKERNSEC_CONFIG_AUTO
65495+ depends on GRKERNSEC_CHROOT
65496+ help
65497+ If you say Y here, processes inside a chroot will not be able to chmod
65498+ or fchmod files to make them have suid or sgid bits. This protects
65499+ against another published method of breaking a chroot. If the sysctl
65500+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
65501+ created.
65502+
65503+config GRKERNSEC_CHROOT_FCHDIR
65504+ bool "Deny fchdir out of chroot"
65505+ default y if GRKERNSEC_CONFIG_AUTO
65506+ depends on GRKERNSEC_CHROOT
65507+ help
65508+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
65509+ to a file descriptor of the chrooting process that points to a directory
65510+ outside the filesystem will be stopped. If the sysctl option
65511+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
65512+
65513+config GRKERNSEC_CHROOT_MKNOD
65514+ bool "Deny mknod"
65515+ default y if GRKERNSEC_CONFIG_AUTO
65516+ depends on GRKERNSEC_CHROOT
65517+ help
65518+ If you say Y here, processes inside a chroot will not be allowed to
65519+ mknod. The problem with using mknod inside a chroot is that it
65520+ would allow an attacker to create a device entry that is the same
65521+ as one on the physical root of your system, which could be anything
65522+ from the console device to a device for your hard drive (which
65523+ they could then use to wipe the drive or steal data). It is recommended
65524+ that you say Y here, unless you run into software incompatibilities.
65525+ If the sysctl option is enabled, a sysctl option with name
65526+ "chroot_deny_mknod" is created.
65527+
65528+config GRKERNSEC_CHROOT_SHMAT
65529+ bool "Deny shmat() out of chroot"
65530+ default y if GRKERNSEC_CONFIG_AUTO
65531+ depends on GRKERNSEC_CHROOT
65532+ help
65533+ If you say Y here, processes inside a chroot will not be able to attach
65534+ to shared memory segments that were created outside of the chroot jail.
65535+ It is recommended that you say Y here. If the sysctl option is enabled,
65536+ a sysctl option with name "chroot_deny_shmat" is created.
65537+
65538+config GRKERNSEC_CHROOT_UNIX
65539+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
65540+ default y if GRKERNSEC_CONFIG_AUTO
65541+ depends on GRKERNSEC_CHROOT
65542+ help
65543+ If you say Y here, processes inside a chroot will not be able to
65544+ connect to abstract (meaning not belonging to a filesystem) Unix
65545+ domain sockets that were bound outside of a chroot. It is recommended
65546+ that you say Y here. If the sysctl option is enabled, a sysctl option
65547+ with name "chroot_deny_unix" is created.
65548+
65549+config GRKERNSEC_CHROOT_FINDTASK
65550+ bool "Protect outside processes"
65551+ default y if GRKERNSEC_CONFIG_AUTO
65552+ depends on GRKERNSEC_CHROOT
65553+ help
65554+ If you say Y here, processes inside a chroot will not be able to
65555+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
65556+ getsid, or view any process outside of the chroot. If the sysctl
65557+ option is enabled, a sysctl option with name "chroot_findtask" is
65558+ created.
65559+
65560+config GRKERNSEC_CHROOT_NICE
65561+ bool "Restrict priority changes"
65562+ default y if GRKERNSEC_CONFIG_AUTO
65563+ depends on GRKERNSEC_CHROOT
65564+ help
65565+ If you say Y here, processes inside a chroot will not be able to raise
65566+ the priority of processes in the chroot, or alter the priority of
65567+ processes outside the chroot. This provides more security than simply
65568+ removing CAP_SYS_NICE from the process' capability set. If the
65569+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
65570+ is created.
65571+
65572+config GRKERNSEC_CHROOT_SYSCTL
65573+ bool "Deny sysctl writes"
65574+ default y if GRKERNSEC_CONFIG_AUTO
65575+ depends on GRKERNSEC_CHROOT
65576+ help
65577+ If you say Y here, an attacker in a chroot will not be able to
65578+ write to sysctl entries, either by sysctl(2) or through a /proc
65579+ interface. It is strongly recommended that you say Y here. If the
65580+ sysctl option is enabled, a sysctl option with name
65581+ "chroot_deny_sysctl" is created.
65582+
65583+config GRKERNSEC_CHROOT_CAPS
65584+ bool "Capability restrictions"
65585+ default y if GRKERNSEC_CONFIG_AUTO
65586+ depends on GRKERNSEC_CHROOT
65587+ help
65588+ If you say Y here, the capabilities on all processes within a
65589+ chroot jail will be lowered to stop module insertion, raw i/o,
65590+ system and net admin tasks, rebooting the system, modifying immutable
65591+ files, modifying IPC owned by another, and changing the system time.
65592+ This is left as an option because it can break some apps. Disable this
65593+ if your chrooted apps are having problems performing those kinds of
65594+ tasks. If the sysctl option is enabled, a sysctl option with
65595+ name "chroot_caps" is created.
65596+
65597+config GRKERNSEC_CHROOT_INITRD
65598+ bool "Exempt initrd tasks from restrictions"
65599+ default y if GRKERNSEC_CONFIG_AUTO
65600+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
65601+ help
65602+ If you say Y here, tasks started prior to init will be exempted from
65603+ grsecurity's chroot restrictions. This option is mainly meant to
65604+ resolve Plymouth's performing privileged operations unnecessarily
65605+ in a chroot.
65606+
65607+endmenu
65608+menu "Kernel Auditing"
65609+depends on GRKERNSEC
65610+
65611+config GRKERNSEC_AUDIT_GROUP
65612+ bool "Single group for auditing"
65613+ help
65614+ If you say Y here, the exec and chdir logging features will only operate
65615+ on a group you specify. This option is recommended if you only want to
65616+ watch certain users instead of having a large amount of logs from the
65617+ entire system. If the sysctl option is enabled, a sysctl option with
65618+ name "audit_group" is created.
65619+
65620+config GRKERNSEC_AUDIT_GID
65621+ int "GID for auditing"
65622+ depends on GRKERNSEC_AUDIT_GROUP
65623+ default 1007
65624+
65625+config GRKERNSEC_EXECLOG
65626+ bool "Exec logging"
65627+ help
65628+ If you say Y here, all execve() calls will be logged (since the
65629+ other exec*() calls are frontends to execve(), all execution
65630+ will be logged). Useful for shell-servers that like to keep track
65631+ of their users. If the sysctl option is enabled, a sysctl option with
65632+ name "exec_logging" is created.
65633+ WARNING: This option when enabled will produce a LOT of logs, especially
65634+ on an active system.
65635+
65636+config GRKERNSEC_RESLOG
65637+ bool "Resource logging"
65638+ default y if GRKERNSEC_CONFIG_AUTO
65639+ help
65640+ If you say Y here, all attempts to overstep resource limits will
65641+ be logged with the resource name, the requested size, and the current
65642+ limit. It is highly recommended that you say Y here. If the sysctl
65643+ option is enabled, a sysctl option with name "resource_logging" is
65644+ created. If the RBAC system is enabled, the sysctl value is ignored.
65645+
65646+config GRKERNSEC_CHROOT_EXECLOG
65647+ bool "Log execs within chroot"
65648+ help
65649+ If you say Y here, all executions inside a chroot jail will be logged
65650+ to syslog. This can cause a large amount of logs if certain
65651+ applications (e.g. djb's daemontools) are installed on the system, and
65652+ is therefore left as an option. If the sysctl option is enabled, a
65653+ sysctl option with name "chroot_execlog" is created.
65654+
65655+config GRKERNSEC_AUDIT_PTRACE
65656+ bool "Ptrace logging"
65657+ help
65658+ If you say Y here, all attempts to attach to a process via ptrace
65659+ will be logged. If the sysctl option is enabled, a sysctl option
65660+ with name "audit_ptrace" is created.
65661+
65662+config GRKERNSEC_AUDIT_CHDIR
65663+ bool "Chdir logging"
65664+ help
65665+ If you say Y here, all chdir() calls will be logged. If the sysctl
65666+ option is enabled, a sysctl option with name "audit_chdir" is created.
65667+
65668+config GRKERNSEC_AUDIT_MOUNT
65669+ bool "(Un)Mount logging"
65670+ help
65671+ If you say Y here, all mounts and unmounts will be logged. If the
65672+ sysctl option is enabled, a sysctl option with name "audit_mount" is
65673+ created.
65674+
65675+config GRKERNSEC_SIGNAL
65676+ bool "Signal logging"
65677+ default y if GRKERNSEC_CONFIG_AUTO
65678+ help
65679+ If you say Y here, certain important signals will be logged, such as
65680+ SIGSEGV, which will as a result inform you when an error in a program
65681+ occurred, which in some cases could mean a possible exploit attempt.
65682+ If the sysctl option is enabled, a sysctl option with name
65683+ "signal_logging" is created.
65684+
65685+config GRKERNSEC_FORKFAIL
65686+ bool "Fork failure logging"
65687+ help
65688+ If you say Y here, all failed fork() attempts will be logged.
65689+ This could suggest a fork bomb, or someone attempting to overstep
65690+ their process limit. If the sysctl option is enabled, a sysctl option
65691+ with name "forkfail_logging" is created.
65692+
65693+config GRKERNSEC_TIME
65694+ bool "Time change logging"
65695+ default y if GRKERNSEC_CONFIG_AUTO
65696+ help
65697+ If you say Y here, any changes of the system clock will be logged.
65698+ If the sysctl option is enabled, a sysctl option with name
65699+ "timechange_logging" is created.
65700+
65701+config GRKERNSEC_PROC_IPADDR
65702+ bool "/proc/<pid>/ipaddr support"
65703+ default y if GRKERNSEC_CONFIG_AUTO
65704+ help
65705+ If you say Y here, a new entry will be added to each /proc/<pid>
65706+ directory that contains the IP address of the person using the task.
65707+ The IP is carried across local TCP and AF_UNIX stream sockets.
65708+ This information can be useful for IDS/IPSes to perform remote response
65709+ to a local attack. The entry is readable by only the owner of the
65710+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
65711+ the RBAC system), and thus does not create privacy concerns.
65712+
65713+config GRKERNSEC_RWXMAP_LOG
65714+ bool "Denied RWX mmap/mprotect logging"
65715+ default y if GRKERNSEC_CONFIG_AUTO
65716+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
65717+ help
65718+ If you say Y here, calls to mmap() and mprotect() with explicit
65719+ usage of PROT_WRITE and PROT_EXEC together will be logged when
65720+ denied by the PAX_MPROTECT feature. This feature will also
65721+ log other problematic scenarios that can occur when PAX_MPROTECT
65722+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
65723+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
65724+ is created.
65725+
65726+endmenu
65727+
65728+menu "Executable Protections"
65729+depends on GRKERNSEC
65730+
65731+config GRKERNSEC_DMESG
65732+ bool "Dmesg(8) restriction"
65733+ default y if GRKERNSEC_CONFIG_AUTO
65734+ help
65735+ If you say Y here, non-root users will not be able to use dmesg(8)
65736+ to view the contents of the kernel's circular log buffer.
65737+ The kernel's log buffer often contains kernel addresses and other
65738+ identifying information useful to an attacker in fingerprinting a
65739+ system for a targeted exploit.
65740+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
65741+ created.
65742+
65743+config GRKERNSEC_HARDEN_PTRACE
65744+ bool "Deter ptrace-based process snooping"
65745+ default y if GRKERNSEC_CONFIG_AUTO
65746+ help
65747+ If you say Y here, TTY sniffers and other malicious monitoring
65748+ programs implemented through ptrace will be defeated. If you
65749+ have been using the RBAC system, this option has already been
65750+ enabled for several years for all users, with the ability to make
65751+ fine-grained exceptions.
65752+
65753+ This option only affects the ability of non-root users to ptrace
65754+ processes that are not a descendant of the ptracing process.
65755+ This means that strace ./binary and gdb ./binary will still work,
65756+ but attaching to arbitrary processes will not. If the sysctl
65757+ option is enabled, a sysctl option with name "harden_ptrace" is
65758+ created.
65759+
65760+config GRKERNSEC_PTRACE_READEXEC
65761+ bool "Require read access to ptrace sensitive binaries"
65762+ default y if GRKERNSEC_CONFIG_AUTO
65763+ help
65764+ If you say Y here, unprivileged users will not be able to ptrace unreadable
65765+ binaries. This option is useful in environments that
65766+ remove the read bits (e.g. file mode 4711) from suid binaries to
65767+ prevent infoleaking of their contents. This option adds
65768+ consistency to the use of that file mode, as the binary's contents
65769+ could otherwise be read out via ptrace when it is run without privileges.
65770+
65771+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
65772+ is created.
65773+
65774+config GRKERNSEC_SETXID
65775+ bool "Enforce consistent multithreaded privileges"
65776+ default y if GRKERNSEC_CONFIG_AUTO
65777+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
65778+ help
65779+ If you say Y here, a change from a root uid to a non-root uid
65780+ in a multithreaded application will cause the resulting uids,
65781+ gids, supplementary groups, and capabilities in that thread
65782+ to be propagated to the other threads of the process. In most
65783+ cases this is unnecessary, as glibc will emulate this behavior
65784+ on behalf of the application. Other libcs do not act in the
65785+ same way, allowing the other threads of the process to continue
65786+ running with root privileges. If the sysctl option is enabled,
65787+ a sysctl option with name "consistent_setxid" is created.
65788+
65789+config GRKERNSEC_HARDEN_IPC
65790+ bool "Disallow access to overly-permissive IPC objects"
65791+ default y if GRKERNSEC_CONFIG_AUTO
65792+ depends on SYSVIPC
65793+ help
65794+ If you say Y here, access to overly-permissive IPC objects (shared
65795+ memory, message queues, and semaphores) will be denied for processes
65796+ meeting either of the following criteria, beyond normal permission checks:
65797+ 1) If the IPC object is world-accessible and the euid doesn't match
65798+ that of the creator or current uid for the IPC object
65799+ 2) If the IPC object is group-accessible and the egid doesn't
65800+ match that of the creator or current gid for the IPC object
65801+ It's a common error to grant too much permission to these objects,
65802+ with impact ranging from denial of service and information leaking to
65803+ privilege escalation. This feature was developed in response to
65804+ research by Tim Brown:
65805+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
65806+ who found hundreds of such insecure usages. Processes with
65807+ CAP_IPC_OWNER are still permitted to access these IPC objects.
65808+ If the sysctl option is enabled, a sysctl option with name
65809+ "harden_ipc" is created.
65810+
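
A user-space illustration of the two criteria listed above, using the ipc_perm fields from <sys/ipc.h>. The real check runs inside the kernel's IPC permission path and also exempts CAP_IPC_OWNER; this sketch only restates the rule:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ipc.h>
    #include <unistd.h>

    static int gr_would_deny(const struct ipc_perm *p)
    {
            uid_t euid = geteuid();
            gid_t egid = getegid();

            /* 1) world-accessible object, euid matches neither creator nor owner */
            if ((p->mode & 0007) && euid != p->cuid && euid != p->uid)
                    return 1;
            /* 2) group-accessible object, egid matches neither creator nor owner */
            if ((p->mode & 0070) && egid != p->cgid && egid != p->gid)
                    return 1;
            return 0;
    }

    int main(void)
    {
            struct ipc_perm p;

            memset(&p, 0, sizeof(p));
            p.mode = 0666;                  /* overly permissive */
            p.uid = p.cuid = geteuid();
            p.gid = p.cgid = getegid();
            printf("own object:     deny=%d\n", gr_would_deny(&p));  /* 0 */
            p.uid = p.cuid = 0;             /* someone else's (root's) object */
            printf("foreign object: deny=%d\n", gr_would_deny(&p));  /* 1 for non-root */
            return 0;
    }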
65811+config GRKERNSEC_TPE
65812+ bool "Trusted Path Execution (TPE)"
65813+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
65814+ help
65815+ If you say Y here, you will be able to choose a GID to add to the
65816+ supplementary groups of users you want to mark as "untrusted."
65817+ These users will not be able to execute any files that are not in
65818+ root-owned directories writable only by root. If the sysctl option
65819+ is enabled, a sysctl option with name "tpe" is created.
65820+
65821+config GRKERNSEC_TPE_ALL
65822+ bool "Partially restrict all non-root users"
65823+ depends on GRKERNSEC_TPE
65824+ help
65825+ If you say Y here, all non-root users will be covered under
65826+ a weaker TPE restriction. This is separate from, and in addition to,
65827+ the main TPE options that you have selected elsewhere. Thus, if a
65828+ "trusted" GID is chosen, this restriction applies to even that GID.
65829+ Under this restriction, all non-root users will only be allowed to
65830+ execute files in directories they own that are not group or
65831+ world-writable, or in directories owned by root and writable only by
65832+ root. If the sysctl option is enabled, a sysctl option with name
65833+ "tpe_restrict_all" is created.
65834+
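
A sketch of the directory test the TPE options above describe: what must be trusted is the directory holding the binary, not the binary itself. The function restates the rules in user space for illustration; the kernel applies them to the dentry at exec time:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static int tpe_dir_is_trusted(const char *dir, uid_t uid)
    {
            struct stat st;

            if (stat(dir, &st) != 0 || !S_ISDIR(st.st_mode))
                    return 0;
            /* root-owned and writable only by root ... */
            if (st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH)))
                    return 1;
            /* ... or, under TPE_ALL's weaker rule, owned by the user and
             * neither group- nor world-writable */
            if (st.st_uid == uid && !(st.st_mode & (S_IWGRP | S_IWOTH)))
                    return 1;
            return 0;
    }

    int main(int argc, char **argv)
    {
            const char *dir = argc > 1 ? argv[1] : "/usr/bin";

            printf("%s is %strusted\n", dir,
                   tpe_dir_is_trusted(dir, getuid()) ? "" : "un");
            return 0;
    }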
65835+config GRKERNSEC_TPE_INVERT
65836+ bool "Invert GID option"
65837+ depends on GRKERNSEC_TPE
65838+ help
65839+ If you say Y here, the group you specify in the TPE configuration will
65840+ decide what group TPE restrictions will be *disabled* for. This
65841+ option is useful if you want TPE restrictions to be applied to most
65842+ users on the system. If the sysctl option is enabled, a sysctl option
65843+ with name "tpe_invert" is created. Unlike other sysctl options, this
65844+ entry will default to on for backward-compatibility.
65845+
65846+config GRKERNSEC_TPE_GID
65847+ int
65848+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
65849+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
65850+
65851+config GRKERNSEC_TPE_UNTRUSTED_GID
65852+ int "GID for TPE-untrusted users"
65853+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
65854+ default 1005
65855+ help
65856+ Setting this GID determines what group TPE restrictions will be
65857+ *enabled* for. If the sysctl option is enabled, a sysctl option
65858+ with name "tpe_gid" is created.
65859+
65860+config GRKERNSEC_TPE_TRUSTED_GID
65861+ int "GID for TPE-trusted users"
65862+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
65863+ default 1005
65864+ help
65865+ Setting this GID determines what group TPE restrictions will be
65866+ *disabled* for. If the sysctl option is enabled, a sysctl option
65867+ with name "tpe_gid" is created.
65868+
65869+endmenu
65870+menu "Network Protections"
65871+depends on GRKERNSEC
65872+
65873+config GRKERNSEC_RANDNET
65874+ bool "Larger entropy pools"
65875+ default y if GRKERNSEC_CONFIG_AUTO
65876+ help
65877+ If you say Y here, the entropy pools used for many features of Linux
65878+ and grsecurity will be doubled in size. Since several grsecurity
65879+ features use additional randomness, it is recommended that you say Y
65880+ here. Saying Y here has a similar effect to modifying
65881+ /proc/sys/kernel/random/poolsize.
65882+
65883+config GRKERNSEC_BLACKHOLE
65884+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
65885+ default y if GRKERNSEC_CONFIG_AUTO
65886+ depends on NET
65887+ help
65888+ If you say Y here, neither TCP resets nor ICMP
65889+ destination-unreachable packets will be sent in response to packets
65890+ sent to ports for which no associated listening process exists.
65891+ This feature supports both IPv4 and IPv6 and exempts the
65892+ loopback interface from blackholing. Enabling this feature
65893+ makes a host more resilient to DoS attacks and reduces network
65894+ visibility against scanners.
65895+
65896+ The blackhole feature as-implemented is equivalent to the FreeBSD
65897+ blackhole feature, as it prevents RST responses to all packets, not
65898+ just SYNs. Under most application behavior this causes no
65899+ problems, but applications (like haproxy) may not close certain
65900+ connections in a way that cleanly terminates them on the remote
65901+ end, leaving the remote host in LAST_ACK state. Because of this
65902+ side-effect and to prevent intentional LAST_ACK DoSes, this
65903+ feature also adds automatic mitigation against such attacks.
65904+ The mitigation drastically reduces the amount of time a socket
65905+ can spend in LAST_ACK state. If you're using haproxy and not
65906+ all servers it connects to have this option enabled, consider
65907+ disabling this feature on the haproxy host.
65908+
65909+ If the sysctl option is enabled, two sysctl options with names
65910+ "ip_blackhole" and "lastack_retries" will be created.
65911+ While "ip_blackhole" takes the standard zero/non-zero on/off
65912+ toggle, "lastack_retries" uses the same kinds of values as
65913+ "tcp_retries1" and "tcp_retries2". The default value of 4
65914+ prevents a socket from lasting more than 45 seconds in LAST_ACK
65915+ state.
65916+
65917+config GRKERNSEC_NO_SIMULT_CONNECT
65918+ bool "Disable TCP Simultaneous Connect"
65919+ default y if GRKERNSEC_CONFIG_AUTO
65920+ depends on NET
65921+ help
65922+ If you say Y here, a feature by Willy Tarreau will be enabled that
65923+ removes a weakness in Linux's strict implementation of TCP that
65924+ allows two clients to connect to each other without either entering
65925+ a listening state. The weakness allows an attacker to easily prevent
65926+ a client from connecting to a known server provided the source port
65927+ for the connection is guessed correctly.
65928+
65929+ As the weakness could be used to prevent an antivirus or IPS from
65930+ fetching updates, or prevent an SSL gateway from fetching a CRL,
65931+ it should be eliminated by enabling this option. Though Linux is
65932+ one of few operating systems supporting simultaneous connect, it
65933+ has no legitimate use in practice and is rarely supported by firewalls.
65934+
65935+config GRKERNSEC_SOCKET
65936+ bool "Socket restrictions"
65937+ depends on NET
65938+ help
65939+ If you say Y here, you will be able to choose from several options.
65940+ If you assign a GID on your system and add it to the supplementary
65941+ groups of users you want to restrict socket access to, this patch
65942+ will perform up to three things, based on the option(s) you choose.
65943+
65944+config GRKERNSEC_SOCKET_ALL
65945+ bool "Deny any sockets to group"
65946+ depends on GRKERNSEC_SOCKET
65947+ help
65948+ If you say Y here, you will be able to choose a GID whose users will
65949+ be unable to connect to other hosts from your machine or run server
65950+ applications from your machine. If the sysctl option is enabled, a
65951+ sysctl option with name "socket_all" is created.
65952+
65953+config GRKERNSEC_SOCKET_ALL_GID
65954+ int "GID to deny all sockets for"
65955+ depends on GRKERNSEC_SOCKET_ALL
65956+ default 1004
65957+ help
65958+ Here you can choose the GID to disable socket access for. Remember to
65959+ add the users you want socket access disabled for to the GID
65960+ specified here. If the sysctl option is enabled, a sysctl option
65961+ with name "socket_all_gid" is created.
65962+
65963+config GRKERNSEC_SOCKET_CLIENT
65964+ bool "Deny client sockets to group"
65965+ depends on GRKERNSEC_SOCKET
65966+ help
 65967+	  If you say Y here, you will be able to choose a GID whose users will
65968+ be unable to connect to other hosts from your machine, but will be
65969+ able to run servers. If this option is enabled, all users in the group
65970+ you specify will have to use passive mode when initiating ftp transfers
65971+ from the shell on your machine. If the sysctl option is enabled, a
65972+ sysctl option with name "socket_client" is created.
65973+
65974+config GRKERNSEC_SOCKET_CLIENT_GID
65975+ int "GID to deny client sockets for"
65976+ depends on GRKERNSEC_SOCKET_CLIENT
65977+ default 1003
65978+ help
65979+ Here you can choose the GID to disable client socket access for.
65980+ Remember to add the users you want client socket access disabled for to
65981+ the GID specified here. If the sysctl option is enabled, a sysctl
65982+ option with name "socket_client_gid" is created.
65983+
65984+config GRKERNSEC_SOCKET_SERVER
65985+ bool "Deny server sockets to group"
65986+ depends on GRKERNSEC_SOCKET
65987+ help
 65988+	  If you say Y here, you will be able to choose a GID whose users will
65989+ be unable to run server applications from your machine. If the sysctl
65990+ option is enabled, a sysctl option with name "socket_server" is created.
65991+
65992+config GRKERNSEC_SOCKET_SERVER_GID
65993+ int "GID to deny server sockets for"
65994+ depends on GRKERNSEC_SOCKET_SERVER
65995+ default 1002
65996+ help
65997+ Here you can choose the GID to disable server socket access for.
65998+ Remember to add the users you want server socket access disabled for to
65999+ the GID specified here. If the sysctl option is enabled, a sysctl
66000+ option with name "socket_server_gid" is created.
66001+
66002+endmenu
66003+
66004+menu "Physical Protections"
66005+depends on GRKERNSEC
66006+
66007+config GRKERNSEC_DENYUSB
66008+ bool "Deny new USB connections after toggle"
66009+ default y if GRKERNSEC_CONFIG_AUTO
66010+ depends on SYSCTL && USB_SUPPORT
66011+ help
66012+ If you say Y here, a new sysctl option with name "deny_new_usb"
66013+ will be created. Setting its value to 1 will prevent any new
66014+ USB devices from being recognized by the OS. Any attempted USB
66015+ device insertion will be logged. This option is intended to be
66016+ used against custom USB devices designed to exploit vulnerabilities
66017+ in various USB device drivers.
66018+
66019+ For greatest effectiveness, this sysctl should be set after any
66020+ relevant init scripts. This option is safe to enable in distros
66021+ as each user can choose whether or not to toggle the sysctl.
66022+
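 [For instance, writing 1 to /proc/sys/kernel/grsecurity/deny_new_usb from the
 last init script arms the lockout; the set-then-lock sketch under "Sysctl
 Support" below shows the same write pattern in full.]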
66023+config GRKERNSEC_DENYUSB_FORCE
66024+ bool "Reject all USB devices not connected at boot"
66025+ select USB
66026+ depends on GRKERNSEC_DENYUSB
66027+ help
66028+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
66029+ that doesn't involve a sysctl entry. This option should only be
66030+ enabled if you're sure you want to deny all new USB connections
66031+ at runtime and don't want to modify init scripts. This should not
66032+ be enabled by distros. It forces the core USB code to be built
66033+ into the kernel image so that all devices connected at boot time
66034+ can be recognized and new USB device connections can be prevented
66035+ prior to init running.
66036+
66037+endmenu
66038+
66039+menu "Sysctl Support"
66040+depends on GRKERNSEC && SYSCTL
66041+
66042+config GRKERNSEC_SYSCTL
66043+ bool "Sysctl support"
66044+ default y if GRKERNSEC_CONFIG_AUTO
66045+ help
66046+ If you say Y here, you will be able to change the options that
66047+ grsecurity runs with at bootup, without having to recompile your
66048+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
66049+ to enable (1) or disable (0) various features. All the sysctl entries
66050+ are mutable until the "grsec_lock" entry is set to a non-zero value.
66051+ All features enabled in the kernel configuration are disabled at boot
66052+ if you do not say Y to the "Turn on features by default" option.
66053+ All options should be set at startup, and the grsec_lock entry should
66054+ be set to a non-zero value after all the options are set.
66055+ *THIS IS EXTREMELY IMPORTANT*
66056+
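 [Illustration, not from the patch: a late boot script would set each desired
 entry and seal them last, since nothing is mutable once grsec_lock goes
 non-zero. A hedged sketch; the entry names are examples drawn from the help
 texts in this file:

	#include <stdio.h>

	static void set1(const char *name)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/proc/sys/kernel/grsecurity/%s", name);
		f = fopen(path, "w");
		if (f) {
			fputs("1", f);
			fclose(f);
		}
	}

	int main(void)
	{
		set1("deny_new_usb");
		set1("ip_blackhole");
		set1("grsec_lock");	/* must come last */
		return 0;
	}
 ]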
66057+config GRKERNSEC_SYSCTL_DISTRO
66058+ bool "Extra sysctl support for distro makers (READ HELP)"
66059+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
66060+ help
66061+ If you say Y here, additional sysctl options will be created
66062+ for features that affect processes running as root. Therefore,
66063+ it is critical when using this option that the grsec_lock entry be
 66064+	  enabled after boot. Only distros that ship prebuilt kernel packages
 66065+	  with this option enabled and that can ensure grsec_lock is set
 66066+	  after boot should use this option.
66067+ *Failure to set grsec_lock after boot makes all grsec features
66068+ this option covers useless*
66069+
66070+ Currently this option creates the following sysctl entries:
66071+ "Disable Privileged I/O": "disable_priv_io"
66072+
66073+config GRKERNSEC_SYSCTL_ON
66074+ bool "Turn on features by default"
66075+ default y if GRKERNSEC_CONFIG_AUTO
66076+ depends on GRKERNSEC_SYSCTL
66077+ help
 66078+	  If you say Y here, the features enabled in your kernel configuration
 66079+	  will start out enabled at boot time, rather than remaining disabled
 66080+	  until toggled via sysctl. It is recommended you say Y here unless
66081+ there is some reason you would want all sysctl-tunable features to
66082+ be disabled by default. As mentioned elsewhere, it is important
66083+ to enable the grsec_lock entry once you have finished modifying
66084+ the sysctl entries.
66085+
66086+endmenu
66087+menu "Logging Options"
66088+depends on GRKERNSEC
66089+
66090+config GRKERNSEC_FLOODTIME
66091+ int "Seconds in between log messages (minimum)"
66092+ default 10
66093+ help
 66094+	  This option allows you to enforce a minimum number of seconds between
 66095+	  grsecurity log messages. The default should be suitable for most
 66096+	  people; however, if you choose to change it, pick a value small enough
66097+ to allow informative logs to be produced, but large enough to
66098+ prevent flooding.
66099+
66100+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
66101+ any rate limiting on grsecurity log messages.
66102+
66103+config GRKERNSEC_FLOODBURST
66104+ int "Number of messages in a burst (maximum)"
66105+ default 6
66106+ help
66107+ This option allows you to choose the maximum number of messages allowed
66108+ within the flood time interval you chose in a separate option. The
 66109+	  default should be suitable for most people; however, if you find that
 66110+	  many of your logs are being interpreted as flooding, you may want to
 66111+	  raise this value. With the defaults, at most 6 messages are logged per 10s.
66112+
66113+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
66114+ any rate limiting on grsecurity log messages.
66115+
66116+endmenu
66117diff --git a/grsecurity/Makefile b/grsecurity/Makefile
66118new file mode 100644
66119index 0000000..5307c8a
66120--- /dev/null
66121+++ b/grsecurity/Makefile
66122@@ -0,0 +1,54 @@
 66123+# grsecurity - access control and security hardening for Linux
66124+# All code in this directory and various hooks located throughout the Linux kernel are
66125+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
66126+# http://www.grsecurity.net spender@grsecurity.net
66127+#
66128+# This program is free software; you can redistribute it and/or
66129+# modify it under the terms of the GNU General Public License version 2
66130+# as published by the Free Software Foundation.
66131+#
66132+# This program is distributed in the hope that it will be useful,
66133+# but WITHOUT ANY WARRANTY; without even the implied warranty of
66134+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
66135+# GNU General Public License for more details.
66136+#
66137+# You should have received a copy of the GNU General Public License
66138+# along with this program; if not, write to the Free Software
66139+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
66140+
66141+KBUILD_CFLAGS += -Werror
66142+
66143+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
66144+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
66145+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
66146+ grsec_usb.o grsec_ipc.o
66147+
66148+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
66149+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
66150+ gracl_learn.o grsec_log.o gracl_policy.o
66151+ifdef CONFIG_COMPAT
66152+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
66153+endif
66154+
66155+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
66156+
66157+ifdef CONFIG_NET
66158+obj-y += grsec_sock.o
66159+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
66160+endif
66161+
66162+ifndef CONFIG_GRKERNSEC
66163+obj-y += grsec_disabled.o
66164+endif
66165+
66166+ifdef CONFIG_GRKERNSEC_HIDESYM
66167+extra-y := grsec_hidesym.o
66168+$(obj)/grsec_hidesym.o:
66169+ @-chmod -f 500 /boot
66170+ @-chmod -f 500 /lib/modules
66171+ @-chmod -f 500 /lib64/modules
66172+ @-chmod -f 500 /lib32/modules
66173+ @-chmod -f 700 .
66174+ @-chmod -f 700 $(objtree)
66175+ @echo ' grsec: protected kernel image paths'
66176+endif
66177diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
66178new file mode 100644
66179index 0000000..e56396f
66180--- /dev/null
66181+++ b/grsecurity/gracl.c
66182@@ -0,0 +1,2679 @@
66183+#include <linux/kernel.h>
66184+#include <linux/module.h>
66185+#include <linux/sched.h>
66186+#include <linux/mm.h>
66187+#include <linux/file.h>
66188+#include <linux/fs.h>
66189+#include <linux/namei.h>
66190+#include <linux/mount.h>
66191+#include <linux/tty.h>
66192+#include <linux/proc_fs.h>
66193+#include <linux/lglock.h>
66194+#include <linux/slab.h>
66195+#include <linux/vmalloc.h>
66196+#include <linux/types.h>
66197+#include <linux/sysctl.h>
66198+#include <linux/netdevice.h>
66199+#include <linux/ptrace.h>
66200+#include <linux/gracl.h>
66201+#include <linux/gralloc.h>
66202+#include <linux/security.h>
66203+#include <linux/grinternal.h>
66204+#include <linux/pid_namespace.h>
66205+#include <linux/stop_machine.h>
66206+#include <linux/fdtable.h>
66207+#include <linux/percpu.h>
66208+#include <linux/lglock.h>
66209+#include <linux/hugetlb.h>
66210+#include <linux/posix-timers.h>
66211+#include <linux/prefetch.h>
66212+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
66213+#include <linux/magic.h>
66214+#include <linux/pagemap.h>
66215+#include "../fs/btrfs/async-thread.h"
66216+#include "../fs/btrfs/ctree.h"
66217+#include "../fs/btrfs/btrfs_inode.h"
66218+#endif
66219+#include "../fs/mount.h"
66220+
66221+#include <asm/uaccess.h>
66222+#include <asm/errno.h>
66223+#include <asm/mman.h>
66224+
66225+#define FOR_EACH_ROLE_START(role) \
66226+ role = running_polstate.role_list; \
66227+ while (role) {
66228+
66229+#define FOR_EACH_ROLE_END(role) \
66230+ role = role->prev; \
66231+ }
66232+
66233+extern struct path gr_real_root;
66234+
66235+static struct gr_policy_state running_polstate;
66236+struct gr_policy_state *polstate = &running_polstate;
66237+extern struct gr_alloc_state *current_alloc_state;
66238+
66239+extern char *gr_shared_page[4];
66240+DEFINE_RWLOCK(gr_inode_lock);
66241+
66242+static unsigned int gr_status __read_only = GR_STATUS_INIT;
66243+
66244+#ifdef CONFIG_NET
66245+extern struct vfsmount *sock_mnt;
66246+#endif
66247+
66248+extern struct vfsmount *pipe_mnt;
66249+extern struct vfsmount *shm_mnt;
66250+
66251+#ifdef CONFIG_HUGETLBFS
66252+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
66253+#endif
66254+
66255+extern u16 acl_sp_role_value;
66256+extern struct acl_object_label *fakefs_obj_rw;
66257+extern struct acl_object_label *fakefs_obj_rwx;
66258+
66259+int gr_acl_is_enabled(void)
66260+{
66261+ return (gr_status & GR_READY);
66262+}
66263+
66264+void gr_enable_rbac_system(void)
66265+{
66266+ pax_open_kernel();
66267+ gr_status |= GR_READY;
66268+ pax_close_kernel();
66269+}
66270+
66271+int gr_rbac_disable(void *unused)
66272+{
66273+ pax_open_kernel();
66274+ gr_status &= ~GR_READY;
66275+ pax_close_kernel();
66276+
66277+ return 0;
66278+}
66279+
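 [Note the pattern above: gr_status is marked __read_only, so its page is
 normally write-protected; gr_enable_rbac_system() and gr_rbac_disable()
 briefly lift that protection with pax_open_kernel()/pax_close_kernel()
 around the single flag update.]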
66280+static inline dev_t __get_dev(const struct dentry *dentry)
66281+{
66282+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
66283+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
66284+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
66285+ else
66286+#endif
66287+ return dentry->d_sb->s_dev;
66288+}
66289+
66290+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
66291+{
66292+ return __get_dev(dentry);
66293+}
66294+
66295+static char gr_task_roletype_to_char(struct task_struct *task)
66296+{
66297+ switch (task->role->roletype &
66298+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
66299+ GR_ROLE_SPECIAL)) {
66300+ case GR_ROLE_DEFAULT:
66301+ return 'D';
66302+ case GR_ROLE_USER:
66303+ return 'U';
66304+ case GR_ROLE_GROUP:
66305+ return 'G';
66306+ case GR_ROLE_SPECIAL:
66307+ return 'S';
66308+ }
66309+
66310+ return 'X';
66311+}
66312+
66313+char gr_roletype_to_char(void)
66314+{
66315+ return gr_task_roletype_to_char(current);
66316+}
66317+
66318+__inline__ int
66319+gr_acl_tpe_check(void)
66320+{
66321+ if (unlikely(!(gr_status & GR_READY)))
66322+ return 0;
66323+ if (current->role->roletype & GR_ROLE_TPE)
66324+ return 1;
66325+ else
66326+ return 0;
66327+}
66328+
66329+int
66330+gr_handle_rawio(const struct inode *inode)
66331+{
66332+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
66333+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
66334+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
66335+ !capable(CAP_SYS_RAWIO))
66336+ return 1;
66337+#endif
66338+ return 0;
66339+}
66340+
66341+int
66342+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
66343+{
66344+ if (likely(lena != lenb))
66345+ return 0;
66346+
66347+ return !memcmp(a, b, lena);
66348+}
66349+
66350+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
66351+{
66352+ *buflen -= namelen;
66353+ if (*buflen < 0)
66354+ return -ENAMETOOLONG;
66355+ *buffer -= namelen;
66356+ memcpy(*buffer, str, namelen);
66357+ return 0;
66358+}
66359+
66360+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
66361+{
66362+ return prepend(buffer, buflen, name->name, name->len);
66363+}
66364+
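 [These helpers build pathnames right to left: the buffer is filled from its
 end toward the front, so components can be added leaf-to-root without ever
 moving bytes already written. A standalone userspace rendition, illustrative
 only:

	#include <stdio.h>
	#include <string.h>

	static int prepend(char **buffer, int *buflen, const char *str, int namelen)
	{
		*buflen -= namelen;
		if (*buflen < 0)
			return -1;	/* -ENAMETOOLONG in the kernel */
		*buffer -= namelen;
		memcpy(*buffer, str, namelen);
		return 0;
	}

	int main(void)
	{
		char buf[32];
		char *res = buf + sizeof(buf);
		int buflen = sizeof(buf);

		prepend(&res, &buflen, "\0", 1);	/* terminator first */
		prepend(&res, &buflen, "leaf", 4);	/* then each component, */
		prepend(&res, &buflen, "/", 1);		/* leaf to root */
		prepend(&res, &buflen, "dir", 3);
		prepend(&res, &buflen, "/", 1);
		printf("%s\n", res);			/* prints "/dir/leaf" */
		return 0;
	}
 ]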
66365+static int prepend_path(const struct path *path, struct path *root,
66366+ char **buffer, int *buflen)
66367+{
66368+ struct dentry *dentry = path->dentry;
66369+ struct vfsmount *vfsmnt = path->mnt;
66370+ struct mount *mnt = real_mount(vfsmnt);
66371+ bool slash = false;
66372+ int error = 0;
66373+
66374+ while (dentry != root->dentry || vfsmnt != root->mnt) {
66375+ struct dentry * parent;
66376+
66377+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
66378+ /* Global root? */
66379+ if (!mnt_has_parent(mnt)) {
66380+ goto out;
66381+ }
66382+ dentry = mnt->mnt_mountpoint;
66383+ mnt = mnt->mnt_parent;
66384+ vfsmnt = &mnt->mnt;
66385+ continue;
66386+ }
66387+ parent = dentry->d_parent;
66388+ prefetch(parent);
66389+ spin_lock(&dentry->d_lock);
66390+ error = prepend_name(buffer, buflen, &dentry->d_name);
66391+ spin_unlock(&dentry->d_lock);
66392+ if (!error)
66393+ error = prepend(buffer, buflen, "/", 1);
66394+ if (error)
66395+ break;
66396+
66397+ slash = true;
66398+ dentry = parent;
66399+ }
66400+
66401+out:
66402+ if (!error && !slash)
66403+ error = prepend(buffer, buflen, "/", 1);
66404+
66405+ return error;
66406+}
66407+
66408+/* this must be called with mount_lock and rename_lock held */
66409+
66410+static char *__our_d_path(const struct path *path, struct path *root,
66411+ char *buf, int buflen)
66412+{
66413+ char *res = buf + buflen;
66414+ int error;
66415+
66416+ prepend(&res, &buflen, "\0", 1);
66417+ error = prepend_path(path, root, &res, &buflen);
66418+ if (error)
66419+ return ERR_PTR(error);
66420+
66421+ return res;
66422+}
66423+
66424+static char *
66425+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
66426+{
66427+ char *retval;
66428+
66429+ retval = __our_d_path(path, root, buf, buflen);
66430+ if (unlikely(IS_ERR(retval)))
66431+ retval = strcpy(buf, "<path too long>");
66432+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
66433+ retval[1] = '\0';
66434+
66435+ return retval;
66436+}
66437+
66438+static char *
66439+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
66440+ char *buf, int buflen)
66441+{
66442+ struct path path;
66443+ char *res;
66444+
66445+ path.dentry = (struct dentry *)dentry;
66446+ path.mnt = (struct vfsmount *)vfsmnt;
66447+
66448+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
66449+ by the RBAC system */
66450+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
66451+
66452+ return res;
66453+}
66454+
66455+static char *
66456+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
66457+ char *buf, int buflen)
66458+{
66459+ char *res;
66460+ struct path path;
66461+ struct path root;
66462+ struct task_struct *reaper = init_pid_ns.child_reaper;
66463+
66464+ path.dentry = (struct dentry *)dentry;
66465+ path.mnt = (struct vfsmount *)vfsmnt;
66466+
66467+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
66468+ get_fs_root(reaper->fs, &root);
66469+
66470+ read_seqlock_excl(&mount_lock);
66471+ write_seqlock(&rename_lock);
66472+ res = gen_full_path(&path, &root, buf, buflen);
66473+ write_sequnlock(&rename_lock);
66474+ read_sequnlock_excl(&mount_lock);
66475+
66476+ path_put(&root);
66477+ return res;
66478+}
66479+
66480+char *
66481+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
66482+{
66483+ char *ret;
66484+ read_seqlock_excl(&mount_lock);
66485+ write_seqlock(&rename_lock);
66486+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
66487+ PAGE_SIZE);
66488+ write_sequnlock(&rename_lock);
66489+ read_sequnlock_excl(&mount_lock);
66490+ return ret;
66491+}
66492+
66493+static char *
66494+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
66495+{
66496+ char *ret;
66497+ char *buf;
66498+ int buflen;
66499+
66500+ read_seqlock_excl(&mount_lock);
66501+ write_seqlock(&rename_lock);
66502+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
66503+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
66504+ buflen = (int)(ret - buf);
66505+ if (buflen >= 5)
66506+ prepend(&ret, &buflen, "/proc", 5);
66507+ else
66508+ ret = strcpy(buf, "<path too long>");
66509+ write_sequnlock(&rename_lock);
66510+ read_sequnlock_excl(&mount_lock);
66511+ return ret;
66512+}
66513+
66514+char *
66515+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
66516+{
66517+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
66518+ PAGE_SIZE);
66519+}
66520+
66521+char *
66522+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
66523+{
66524+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
66525+ PAGE_SIZE);
66526+}
66527+
66528+char *
66529+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
66530+{
66531+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
66532+ PAGE_SIZE);
66533+}
66534+
66535+char *
66536+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
66537+{
66538+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
66539+ PAGE_SIZE);
66540+}
66541+
66542+char *
66543+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
66544+{
66545+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
66546+ PAGE_SIZE);
66547+}
66548+
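 [All of the gr_to_filename*() variants above format into per-CPU pages
 (gr_shared_page[0..3]) instead of allocating; a caller must therefore keep
 preemption disabled while it uses the result, as gr_get_create_object()
 further below does with preempt_disable()/preempt_enable(), or another task
 scheduled onto the same CPU could reuse the page and clobber the string.]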
66549+__inline__ __u32
66550+to_gr_audit(const __u32 reqmode)
66551+{
66552+ /* masks off auditable permission flags, then shifts them to create
66553+ auditing flags, and adds the special case of append auditing if
66554+ we're requesting write */
66555+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
66556+}
66557+
66558+struct acl_role_label *
66559+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
66560+ const gid_t gid)
66561+{
66562+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
66563+ struct acl_role_label *match;
66564+ struct role_allowed_ip *ipp;
66565+ unsigned int x;
66566+ u32 curr_ip = task->signal->saved_ip;
66567+
66568+ match = state->acl_role_set.r_hash[index];
66569+
66570+ while (match) {
66571+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
66572+ for (x = 0; x < match->domain_child_num; x++) {
66573+ if (match->domain_children[x] == uid)
66574+ goto found;
66575+ }
66576+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
66577+ break;
66578+ match = match->next;
66579+ }
66580+found:
66581+ if (match == NULL) {
66582+ try_group:
66583+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
66584+ match = state->acl_role_set.r_hash[index];
66585+
66586+ while (match) {
66587+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
66588+ for (x = 0; x < match->domain_child_num; x++) {
66589+ if (match->domain_children[x] == gid)
66590+ goto found2;
66591+ }
66592+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
66593+ break;
66594+ match = match->next;
66595+ }
66596+found2:
66597+ if (match == NULL)
66598+ match = state->default_role;
66599+ if (match->allowed_ips == NULL)
66600+ return match;
66601+ else {
66602+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
66603+ if (likely
66604+ ((ntohl(curr_ip) & ipp->netmask) ==
66605+ (ntohl(ipp->addr) & ipp->netmask)))
66606+ return match;
66607+ }
66608+ match = state->default_role;
66609+ }
66610+ } else if (match->allowed_ips == NULL) {
66611+ return match;
66612+ } else {
66613+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
66614+ if (likely
66615+ ((ntohl(curr_ip) & ipp->netmask) ==
66616+ (ntohl(ipp->addr) & ipp->netmask)))
66617+ return match;
66618+ }
66619+ goto try_group;
66620+ }
66621+
66622+ return match;
66623+}
66624+
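 [In short, the lookup above tries a user role first (or a user domain whose
 domain_children contain the UID); if the candidate role carries an
 allowed_ips list, the source IP recorded in task->signal->saved_ip must fall
 inside one of its netmasks, otherwise the search falls through (try_group)
 to a group role under the same IP rules, and finally settles on the default
 role.]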
66625+static struct acl_role_label *
66626+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
66627+ const gid_t gid)
66628+{
66629+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
66630+}
66631+
66632+struct acl_subject_label *
66633+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
66634+ const struct acl_role_label *role)
66635+{
66636+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
66637+ struct acl_subject_label *match;
66638+
66639+ match = role->subj_hash[index];
66640+
66641+ while (match && (match->inode != ino || match->device != dev ||
66642+ (match->mode & GR_DELETED))) {
66643+ match = match->next;
66644+ }
66645+
66646+ if (match && !(match->mode & GR_DELETED))
66647+ return match;
66648+ else
66649+ return NULL;
66650+}
66651+
66652+struct acl_subject_label *
66653+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
66654+ const struct acl_role_label *role)
66655+{
66656+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
66657+ struct acl_subject_label *match;
66658+
66659+ match = role->subj_hash[index];
66660+
66661+ while (match && (match->inode != ino || match->device != dev ||
66662+ !(match->mode & GR_DELETED))) {
66663+ match = match->next;
66664+ }
66665+
66666+ if (match && (match->mode & GR_DELETED))
66667+ return match;
66668+ else
66669+ return NULL;
66670+}
66671+
66672+static struct acl_object_label *
66673+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
66674+ const struct acl_subject_label *subj)
66675+{
66676+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
66677+ struct acl_object_label *match;
66678+
66679+ match = subj->obj_hash[index];
66680+
66681+ while (match && (match->inode != ino || match->device != dev ||
66682+ (match->mode & GR_DELETED))) {
66683+ match = match->next;
66684+ }
66685+
66686+ if (match && !(match->mode & GR_DELETED))
66687+ return match;
66688+ else
66689+ return NULL;
66690+}
66691+
66692+static struct acl_object_label *
66693+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
66694+ const struct acl_subject_label *subj)
66695+{
66696+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
66697+ struct acl_object_label *match;
66698+
66699+ match = subj->obj_hash[index];
66700+
66701+ while (match && (match->inode != ino || match->device != dev ||
66702+ !(match->mode & GR_DELETED))) {
66703+ match = match->next;
66704+ }
66705+
66706+ if (match && (match->mode & GR_DELETED))
66707+ return match;
66708+
66709+ match = subj->obj_hash[index];
66710+
66711+ while (match && (match->inode != ino || match->device != dev ||
66712+ (match->mode & GR_DELETED))) {
66713+ match = match->next;
66714+ }
66715+
66716+ if (match && !(match->mode & GR_DELETED))
66717+ return match;
66718+ else
66719+ return NULL;
66720+}
66721+
66722+struct name_entry *
66723+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
66724+{
66725+ unsigned int len = strlen(name);
66726+ unsigned int key = full_name_hash(name, len);
66727+ unsigned int index = key % state->name_set.n_size;
66728+ struct name_entry *match;
66729+
66730+ match = state->name_set.n_hash[index];
66731+
66732+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
66733+ match = match->next;
66734+
66735+ return match;
66736+}
66737+
66738+static struct name_entry *
66739+lookup_name_entry(const char *name)
66740+{
66741+ return __lookup_name_entry(&running_polstate, name);
66742+}
66743+
66744+static struct name_entry *
66745+lookup_name_entry_create(const char *name)
66746+{
66747+ unsigned int len = strlen(name);
66748+ unsigned int key = full_name_hash(name, len);
66749+ unsigned int index = key % running_polstate.name_set.n_size;
66750+ struct name_entry *match;
66751+
66752+ match = running_polstate.name_set.n_hash[index];
66753+
66754+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
66755+ !match->deleted))
66756+ match = match->next;
66757+
66758+ if (match && match->deleted)
66759+ return match;
66760+
66761+ match = running_polstate.name_set.n_hash[index];
66762+
66763+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
66764+ match->deleted))
66765+ match = match->next;
66766+
66767+ if (match && !match->deleted)
66768+ return match;
66769+ else
66770+ return NULL;
66771+}
66772+
66773+static struct inodev_entry *
66774+lookup_inodev_entry(const ino_t ino, const dev_t dev)
66775+{
66776+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
66777+ struct inodev_entry *match;
66778+
66779+ match = running_polstate.inodev_set.i_hash[index];
66780+
66781+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
66782+ match = match->next;
66783+
66784+ return match;
66785+}
66786+
66787+void
66788+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
66789+{
66790+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
66791+ state->inodev_set.i_size);
66792+ struct inodev_entry **curr;
66793+
66794+ entry->prev = NULL;
66795+
66796+ curr = &state->inodev_set.i_hash[index];
66797+ if (*curr != NULL)
66798+ (*curr)->prev = entry;
66799+
66800+ entry->next = *curr;
66801+ *curr = entry;
66802+
66803+ return;
66804+}
66805+
66806+static void
66807+insert_inodev_entry(struct inodev_entry *entry)
66808+{
66809+ __insert_inodev_entry(&running_polstate, entry);
66810+}
66811+
66812+void
66813+insert_acl_obj_label(struct acl_object_label *obj,
66814+ struct acl_subject_label *subj)
66815+{
66816+ unsigned int index =
66817+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
66818+ struct acl_object_label **curr;
66819+
66820+ obj->prev = NULL;
66821+
66822+ curr = &subj->obj_hash[index];
66823+ if (*curr != NULL)
66824+ (*curr)->prev = obj;
66825+
66826+ obj->next = *curr;
66827+ *curr = obj;
66828+
66829+ return;
66830+}
66831+
66832+void
66833+insert_acl_subj_label(struct acl_subject_label *obj,
66834+ struct acl_role_label *role)
66835+{
66836+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
66837+ struct acl_subject_label **curr;
66838+
66839+ obj->prev = NULL;
66840+
66841+ curr = &role->subj_hash[index];
66842+ if (*curr != NULL)
66843+ (*curr)->prev = obj;
66844+
66845+ obj->next = *curr;
66846+ *curr = obj;
66847+
66848+ return;
66849+}
66850+
 66851+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
66852+
66853+static int
66854+glob_match(const char *p, const char *n)
66855+{
66856+ char c;
66857+
66858+ while ((c = *p++) != '\0') {
66859+ switch (c) {
66860+ case '?':
66861+ if (*n == '\0')
66862+ return 1;
66863+ else if (*n == '/')
66864+ return 1;
66865+ break;
66866+ case '\\':
66867+ if (*n != c)
66868+ return 1;
66869+ break;
66870+ case '*':
66871+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
66872+ if (*n == '/')
66873+ return 1;
66874+ else if (c == '?') {
66875+ if (*n == '\0')
66876+ return 1;
66877+ else
66878+ ++n;
66879+ }
66880+ }
66881+ if (c == '\0') {
66882+ return 0;
66883+ } else {
66884+ const char *endp;
66885+
66886+ if ((endp = strchr(n, '/')) == NULL)
66887+ endp = n + strlen(n);
66888+
66889+ if (c == '[') {
66890+ for (--p; n < endp; ++n)
66891+ if (!glob_match(p, n))
66892+ return 0;
66893+ } else if (c == '/') {
66894+ while (*n != '\0' && *n != '/')
66895+ ++n;
66896+ if (*n == '/' && !glob_match(p, n + 1))
66897+ return 0;
66898+ } else {
66899+ for (--p; n < endp; ++n)
66900+ if (*n == c && !glob_match(p, n))
66901+ return 0;
66902+ }
66903+
66904+ return 1;
66905+ }
66906+ case '[':
66907+ {
66908+ int not;
66909+ char cold;
66910+
66911+ if (*n == '\0' || *n == '/')
66912+ return 1;
66913+
66914+ not = (*p == '!' || *p == '^');
66915+ if (not)
66916+ ++p;
66917+
66918+ c = *p++;
66919+ for (;;) {
66920+ unsigned char fn = (unsigned char)*n;
66921+
66922+ if (c == '\0')
66923+ return 1;
66924+ else {
66925+ if (c == fn)
66926+ goto matched;
66927+ cold = c;
66928+ c = *p++;
66929+
66930+ if (c == '-' && *p != ']') {
66931+ unsigned char cend = *p++;
66932+
66933+ if (cend == '\0')
66934+ return 1;
66935+
66936+ if (cold <= fn && fn <= cend)
66937+ goto matched;
66938+
66939+ c = *p++;
66940+ }
66941+ }
66942+
66943+ if (c == ']')
66944+ break;
66945+ }
66946+ if (!not)
66947+ return 1;
66948+ break;
66949+ matched:
66950+ while (c != ']') {
66951+ if (c == '\0')
66952+ return 1;
66953+
66954+ c = *p++;
66955+ }
66956+ if (not)
66957+ return 1;
66958+ }
66959+ break;
66960+ default:
66961+ if (c != *n)
66962+ return 1;
66963+ }
66964+
66965+ ++n;
66966+ }
66967+
66968+ if (*n == '\0')
66969+ return 0;
66970+
66971+ if (*n == '/')
66972+ return 0;
66973+
66974+ return 1;
66975+}
66976+
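 [Per the comment above, this follows the fnmatch() convention: 0 means
 match, and '?'/'*' refuse to cross a '/', so wildcards stay within one path
 component. A hypothetical caller-side check, mirroring how chk_glob_label()
 uses it just below:

	/* 0 == match: "*" matches "alice" but would not match
	 * "alice/evil" across the slash */
	if (!glob_match("/home/*/.ssh", "/home/alice/.ssh"))
		apply_rule();	/* hypothetical action */
 ]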
66977+static struct acl_object_label *
66978+chk_glob_label(struct acl_object_label *globbed,
66979+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
66980+{
66981+ struct acl_object_label *tmp;
66982+
66983+ if (*path == NULL)
66984+ *path = gr_to_filename_nolock(dentry, mnt);
66985+
66986+ tmp = globbed;
66987+
66988+ while (tmp) {
66989+ if (!glob_match(tmp->filename, *path))
66990+ return tmp;
66991+ tmp = tmp->next;
66992+ }
66993+
66994+ return NULL;
66995+}
66996+
66997+static struct acl_object_label *
66998+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
66999+ const ino_t curr_ino, const dev_t curr_dev,
67000+ const struct acl_subject_label *subj, char **path, const int checkglob)
67001+{
67002+ struct acl_subject_label *tmpsubj;
67003+ struct acl_object_label *retval;
67004+ struct acl_object_label *retval2;
67005+
67006+ tmpsubj = (struct acl_subject_label *) subj;
67007+ read_lock(&gr_inode_lock);
67008+ do {
67009+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
67010+ if (retval) {
67011+ if (checkglob && retval->globbed) {
67012+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
67013+ if (retval2)
67014+ retval = retval2;
67015+ }
67016+ break;
67017+ }
67018+ } while ((tmpsubj = tmpsubj->parent_subject));
67019+ read_unlock(&gr_inode_lock);
67020+
67021+ return retval;
67022+}
67023+
67024+static __inline__ struct acl_object_label *
67025+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
67026+ struct dentry *curr_dentry,
67027+ const struct acl_subject_label *subj, char **path, const int checkglob)
67028+{
67029+ int newglob = checkglob;
67030+ ino_t inode;
67031+ dev_t device;
67032+
 67033+	/* If we aren't checking a subdirectory of the original path yet, don't do glob
 67034+	   checking, as we don't want a "/ *" rule to match instead of the "/" object.
 67035+	   Don't do this for create lookups that call this function though, since they're
 67036+	   looking up on the parent and thus need globbing checks on all paths.
 67037+	*/
67038+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
67039+ newglob = GR_NO_GLOB;
67040+
67041+ spin_lock(&curr_dentry->d_lock);
67042+ inode = curr_dentry->d_inode->i_ino;
67043+ device = __get_dev(curr_dentry);
67044+ spin_unlock(&curr_dentry->d_lock);
67045+
67046+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
67047+}
67048+
67049+#ifdef CONFIG_HUGETLBFS
67050+static inline bool
67051+is_hugetlbfs_mnt(const struct vfsmount *mnt)
67052+{
67053+ int i;
67054+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
67055+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
67056+ return true;
67057+ }
67058+
67059+ return false;
67060+}
67061+#endif
67062+
67063+static struct acl_object_label *
67064+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67065+ const struct acl_subject_label *subj, char *path, const int checkglob)
67066+{
67067+ struct dentry *dentry = (struct dentry *) l_dentry;
67068+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
67069+ struct mount *real_mnt = real_mount(mnt);
67070+ struct acl_object_label *retval;
67071+ struct dentry *parent;
67072+
67073+ read_seqlock_excl(&mount_lock);
67074+ write_seqlock(&rename_lock);
67075+
67076+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
67077+#ifdef CONFIG_NET
67078+ mnt == sock_mnt ||
67079+#endif
67080+#ifdef CONFIG_HUGETLBFS
67081+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
67082+#endif
67083+ /* ignore Eric Biederman */
67084+ IS_PRIVATE(l_dentry->d_inode))) {
67085+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
67086+ goto out;
67087+ }
67088+
67089+ for (;;) {
67090+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
67091+ break;
67092+
67093+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
67094+ if (!mnt_has_parent(real_mnt))
67095+ break;
67096+
67097+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
67098+ if (retval != NULL)
67099+ goto out;
67100+
67101+ dentry = real_mnt->mnt_mountpoint;
67102+ real_mnt = real_mnt->mnt_parent;
67103+ mnt = &real_mnt->mnt;
67104+ continue;
67105+ }
67106+
67107+ parent = dentry->d_parent;
67108+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
67109+ if (retval != NULL)
67110+ goto out;
67111+
67112+ dentry = parent;
67113+ }
67114+
67115+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
67116+
67117+ /* gr_real_root is pinned so we don't have to hold a reference */
67118+ if (retval == NULL)
67119+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
67120+out:
67121+ write_sequnlock(&rename_lock);
67122+ read_sequnlock_excl(&mount_lock);
67123+
67124+ BUG_ON(retval == NULL);
67125+
67126+ return retval;
67127+}
67128+
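 [The walk above mirrors prepend_path(): the subject's object hash is
 consulted for each component from the target upward, crossing mount points
 via mnt_mountpoint, with a final fallback to the label on gr_real_root. The
 closing BUG_ON() encodes the invariant that a loaded policy always supplies
 an object for the real root, so a NULL result indicates corrupted policy
 state rather than a deniable access.]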
67129+static __inline__ struct acl_object_label *
67130+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67131+ const struct acl_subject_label *subj)
67132+{
67133+ char *path = NULL;
67134+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
67135+}
67136+
67137+static __inline__ struct acl_object_label *
67138+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67139+ const struct acl_subject_label *subj)
67140+{
67141+ char *path = NULL;
67142+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
67143+}
67144+
67145+static __inline__ struct acl_object_label *
67146+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67147+ const struct acl_subject_label *subj, char *path)
67148+{
67149+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
67150+}
67151+
67152+struct acl_subject_label *
67153+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
67154+ const struct acl_role_label *role)
67155+{
67156+ struct dentry *dentry = (struct dentry *) l_dentry;
67157+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
67158+ struct mount *real_mnt = real_mount(mnt);
67159+ struct acl_subject_label *retval;
67160+ struct dentry *parent;
67161+
67162+ read_seqlock_excl(&mount_lock);
67163+ write_seqlock(&rename_lock);
67164+
67165+ for (;;) {
67166+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
67167+ break;
67168+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
67169+ if (!mnt_has_parent(real_mnt))
67170+ break;
67171+
67172+ spin_lock(&dentry->d_lock);
67173+ read_lock(&gr_inode_lock);
67174+ retval =
67175+ lookup_acl_subj_label(dentry->d_inode->i_ino,
67176+ __get_dev(dentry), role);
67177+ read_unlock(&gr_inode_lock);
67178+ spin_unlock(&dentry->d_lock);
67179+ if (retval != NULL)
67180+ goto out;
67181+
67182+ dentry = real_mnt->mnt_mountpoint;
67183+ real_mnt = real_mnt->mnt_parent;
67184+ mnt = &real_mnt->mnt;
67185+ continue;
67186+ }
67187+
67188+ spin_lock(&dentry->d_lock);
67189+ read_lock(&gr_inode_lock);
67190+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
67191+ __get_dev(dentry), role);
67192+ read_unlock(&gr_inode_lock);
67193+ parent = dentry->d_parent;
67194+ spin_unlock(&dentry->d_lock);
67195+
67196+ if (retval != NULL)
67197+ goto out;
67198+
67199+ dentry = parent;
67200+ }
67201+
67202+ spin_lock(&dentry->d_lock);
67203+ read_lock(&gr_inode_lock);
67204+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
67205+ __get_dev(dentry), role);
67206+ read_unlock(&gr_inode_lock);
67207+ spin_unlock(&dentry->d_lock);
67208+
67209+ if (unlikely(retval == NULL)) {
67210+ /* gr_real_root is pinned, we don't need to hold a reference */
67211+ read_lock(&gr_inode_lock);
67212+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
67213+ __get_dev(gr_real_root.dentry), role);
67214+ read_unlock(&gr_inode_lock);
67215+ }
67216+out:
67217+ write_sequnlock(&rename_lock);
67218+ read_sequnlock_excl(&mount_lock);
67219+
67220+ BUG_ON(retval == NULL);
67221+
67222+ return retval;
67223+}
67224+
67225+void
67226+assign_special_role(const char *rolename)
67227+{
67228+ struct acl_object_label *obj;
67229+ struct acl_role_label *r;
67230+ struct acl_role_label *assigned = NULL;
67231+ struct task_struct *tsk;
67232+ struct file *filp;
67233+
67234+ FOR_EACH_ROLE_START(r)
67235+ if (!strcmp(rolename, r->rolename) &&
67236+ (r->roletype & GR_ROLE_SPECIAL)) {
67237+ assigned = r;
67238+ break;
67239+ }
67240+ FOR_EACH_ROLE_END(r)
67241+
67242+ if (!assigned)
67243+ return;
67244+
67245+ read_lock(&tasklist_lock);
67246+ read_lock(&grsec_exec_file_lock);
67247+
67248+ tsk = current->real_parent;
67249+ if (tsk == NULL)
67250+ goto out_unlock;
67251+
67252+ filp = tsk->exec_file;
67253+ if (filp == NULL)
67254+ goto out_unlock;
67255+
67256+ tsk->is_writable = 0;
67257+ tsk->inherited = 0;
67258+
67259+ tsk->acl_sp_role = 1;
67260+ tsk->acl_role_id = ++acl_sp_role_value;
67261+ tsk->role = assigned;
67262+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
67263+
67264+ /* ignore additional mmap checks for processes that are writable
67265+ by the default ACL */
67266+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
67267+ if (unlikely(obj->mode & GR_WRITE))
67268+ tsk->is_writable = 1;
67269+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
67270+ if (unlikely(obj->mode & GR_WRITE))
67271+ tsk->is_writable = 1;
67272+
67273+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
67274+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
67275+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
67276+#endif
67277+
67278+out_unlock:
67279+ read_unlock(&grsec_exec_file_lock);
67280+ read_unlock(&tasklist_lock);
67281+ return;
67282+}
67283+
67284+
67285+static void
67286+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
67287+{
67288+ struct task_struct *task = current;
67289+ const struct cred *cred = current_cred();
67290+
67291+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
67292+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
67293+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
67294+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
67295+
67296+ return;
67297+}
67298+
67299+static void
67300+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
67301+{
67302+ struct task_struct *task = current;
67303+ const struct cred *cred = current_cred();
67304+
67305+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
67306+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
67307+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
67308+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
67309+
67310+ return;
67311+}
67312+
67313+static void
67314+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
67315+{
67316+ struct task_struct *task = current;
67317+ const struct cred *cred = current_cred();
67318+
67319+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
67320+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
67321+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
67322+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
67323+
67324+ return;
67325+}
67326+
67327+static void
67328+gr_set_proc_res(struct task_struct *task)
67329+{
67330+ struct acl_subject_label *proc;
67331+ unsigned short i;
67332+
67333+ proc = task->acl;
67334+
67335+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
67336+ return;
67337+
67338+ for (i = 0; i < RLIM_NLIMITS; i++) {
67339+ if (!(proc->resmask & (1U << i)))
67340+ continue;
67341+
67342+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
67343+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
67344+
67345+ if (i == RLIMIT_CPU)
67346+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
67347+ }
67348+
67349+ return;
67350+}
67351+
67352+/* both of the below must be called with
67353+ rcu_read_lock();
67354+ read_lock(&tasklist_lock);
67355+ read_lock(&grsec_exec_file_lock);
67356+*/
67357+
67358+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
67359+{
67360+ char *tmpname;
67361+ struct acl_subject_label *tmpsubj;
67362+ struct file *filp;
67363+ struct name_entry *nmatch;
67364+
67365+ filp = task->exec_file;
67366+ if (filp == NULL)
67367+ return NULL;
67368+
67369+ /* the following is to apply the correct subject
67370+ on binaries running when the RBAC system
67371+ is enabled, when the binaries have been
67372+ replaced or deleted since their execution
67373+ -----
67374+ when the RBAC system starts, the inode/dev
67375+ from exec_file will be one the RBAC system
67376+ is unaware of. It only knows the inode/dev
67377+ of the present file on disk, or the absence
67378+ of it.
67379+ */
67380+
67381+ if (filename)
67382+ nmatch = __lookup_name_entry(state, filename);
67383+ else {
67384+ preempt_disable();
67385+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
67386+
67387+ nmatch = __lookup_name_entry(state, tmpname);
67388+ preempt_enable();
67389+ }
67390+ tmpsubj = NULL;
67391+ if (nmatch) {
67392+ if (nmatch->deleted)
67393+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
67394+ else
67395+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
67396+ }
67397+ /* this also works for the reload case -- if we don't match a potentially inherited subject
67398+ then we fall back to a normal lookup based on the binary's ino/dev
67399+ */
67400+ if (tmpsubj == NULL)
67401+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
67402+
67403+ return tmpsubj;
67404+}
67405+
67406+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
67407+{
67408+ return __gr_get_subject_for_task(&running_polstate, task, filename);
67409+}
67410+
67411+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
67412+{
67413+ struct acl_object_label *obj;
67414+ struct file *filp;
67415+
67416+ filp = task->exec_file;
67417+
67418+ task->acl = subj;
67419+ task->is_writable = 0;
67420+ /* ignore additional mmap checks for processes that are writable
67421+ by the default ACL */
67422+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
67423+ if (unlikely(obj->mode & GR_WRITE))
67424+ task->is_writable = 1;
67425+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
67426+ if (unlikely(obj->mode & GR_WRITE))
67427+ task->is_writable = 1;
67428+
67429+ gr_set_proc_res(task);
67430+
67431+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
67432+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
67433+#endif
67434+}
67435+
67436+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
67437+{
67438+ __gr_apply_subject_to_task(&running_polstate, task, subj);
67439+}
67440+
67441+__u32
67442+gr_search_file(const struct dentry * dentry, const __u32 mode,
67443+ const struct vfsmount * mnt)
67444+{
67445+ __u32 retval = mode;
67446+ struct acl_subject_label *curracl;
67447+ struct acl_object_label *currobj;
67448+
67449+ if (unlikely(!(gr_status & GR_READY)))
67450+ return (mode & ~GR_AUDITS);
67451+
67452+ curracl = current->acl;
67453+
67454+ currobj = chk_obj_label(dentry, mnt, curracl);
67455+ retval = currobj->mode & mode;
67456+
67457+ /* if we're opening a specified transfer file for writing
67458+ (e.g. /dev/initctl), then transfer our role to init
67459+ */
67460+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
67461+ current->role->roletype & GR_ROLE_PERSIST)) {
67462+ struct task_struct *task = init_pid_ns.child_reaper;
67463+
67464+ if (task->role != current->role) {
67465+ struct acl_subject_label *subj;
67466+
67467+ task->acl_sp_role = 0;
67468+ task->acl_role_id = current->acl_role_id;
67469+ task->role = current->role;
67470+ rcu_read_lock();
67471+ read_lock(&grsec_exec_file_lock);
67472+ subj = gr_get_subject_for_task(task, NULL);
67473+ gr_apply_subject_to_task(task, subj);
67474+ read_unlock(&grsec_exec_file_lock);
67475+ rcu_read_unlock();
67476+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
67477+ }
67478+ }
67479+
67480+ if (unlikely
67481+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
67482+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
67483+ __u32 new_mode = mode;
67484+
67485+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
67486+
67487+ retval = new_mode;
67488+
67489+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
67490+ new_mode |= GR_INHERIT;
67491+
67492+ if (!(mode & GR_NOLEARN))
67493+ gr_log_learn(dentry, mnt, new_mode);
67494+ }
67495+
67496+ return retval;
67497+}
67498+
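 [Note the learn-mode path at the end of gr_search_file(): when the current
 subject carries GR_LEARN or GR_INHERITLEARN and the object would not grant
 everything requested, the reply is widened to the full requested mode minus
 the audit/suppress bits and, unless GR_NOLEARN was passed, the access is
 recorded via gr_log_learn() so that learning can extend the policy instead
 of breaking the workload.]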
67499+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
67500+ const struct dentry *parent,
67501+ const struct vfsmount *mnt)
67502+{
67503+ struct name_entry *match;
67504+ struct acl_object_label *matchpo;
67505+ struct acl_subject_label *curracl;
67506+ char *path;
67507+
67508+ if (unlikely(!(gr_status & GR_READY)))
67509+ return NULL;
67510+
67511+ preempt_disable();
67512+ path = gr_to_filename_rbac(new_dentry, mnt);
67513+ match = lookup_name_entry_create(path);
67514+
67515+ curracl = current->acl;
67516+
67517+ if (match) {
67518+ read_lock(&gr_inode_lock);
67519+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
67520+ read_unlock(&gr_inode_lock);
67521+
67522+ if (matchpo) {
67523+ preempt_enable();
67524+ return matchpo;
67525+ }
67526+ }
67527+
67528+ // lookup parent
67529+
67530+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
67531+
67532+ preempt_enable();
67533+ return matchpo;
67534+}
67535+
67536+__u32
67537+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
67538+ const struct vfsmount * mnt, const __u32 mode)
67539+{
67540+ struct acl_object_label *matchpo;
67541+ __u32 retval;
67542+
67543+ if (unlikely(!(gr_status & GR_READY)))
67544+ return (mode & ~GR_AUDITS);
67545+
67546+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
67547+
67548+ retval = matchpo->mode & mode;
67549+
67550+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
67551+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
67552+ __u32 new_mode = mode;
67553+
67554+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
67555+
67556+ gr_log_learn(new_dentry, mnt, new_mode);
67557+ return new_mode;
67558+ }
67559+
67560+ return retval;
67561+}
67562+
67563+__u32
67564+gr_check_link(const struct dentry * new_dentry,
67565+ const struct dentry * parent_dentry,
67566+ const struct vfsmount * parent_mnt,
67567+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
67568+{
67569+ struct acl_object_label *obj;
67570+ __u32 oldmode, newmode;
67571+ __u32 needmode;
67572+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
67573+ GR_DELETE | GR_INHERIT;
67574+
67575+ if (unlikely(!(gr_status & GR_READY)))
67576+ return (GR_CREATE | GR_LINK);
67577+
67578+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
67579+ oldmode = obj->mode;
67580+
67581+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
67582+ newmode = obj->mode;
67583+
67584+ needmode = newmode & checkmodes;
67585+
67586+ // old name for hardlink must have at least the permissions of the new name
67587+ if ((oldmode & needmode) != needmode)
67588+ goto bad;
67589+
67590+ // if old name had restrictions/auditing, make sure the new name does as well
67591+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
67592+
67593+ // don't allow hardlinking of suid/sgid/fcapped files without permission
67594+ if (is_privileged_binary(old_dentry))
67595+ needmode |= GR_SETID;
67596+
67597+ if ((newmode & needmode) != needmode)
67598+ goto bad;
67599+
67600+ // enforce minimum permissions
67601+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
67602+ return newmode;
67603+bad:
67604+ needmode = oldmode;
67605+ if (is_privileged_binary(old_dentry))
67606+ needmode |= GR_SETID;
67607+
67608+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
67609+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
67610+ return (GR_CREATE | GR_LINK);
67611+ } else if (newmode & GR_SUPPRESS)
67612+ return GR_SUPPRESS;
67613+ else
67614+ return 0;
67615+}
67616+
67617+int
67618+gr_check_hidden_task(const struct task_struct *task)
67619+{
67620+ if (unlikely(!(gr_status & GR_READY)))
67621+ return 0;
67622+
67623+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
67624+ return 1;
67625+
67626+ return 0;
67627+}
67628+
67629+int
67630+gr_check_protected_task(const struct task_struct *task)
67631+{
67632+ if (unlikely(!(gr_status & GR_READY) || !task))
67633+ return 0;
67634+
67635+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
67636+ task->acl != current->acl)
67637+ return 1;
67638+
67639+ return 0;
67640+}
67641+
67642+int
67643+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
67644+{
67645+ struct task_struct *p;
67646+ int ret = 0;
67647+
67648+ if (unlikely(!(gr_status & GR_READY) || !pid))
67649+ return ret;
67650+
67651+ read_lock(&tasklist_lock);
67652+ do_each_pid_task(pid, type, p) {
67653+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
67654+ p->acl != current->acl) {
67655+ ret = 1;
67656+ goto out;
67657+ }
67658+ } while_each_pid_task(pid, type, p);
67659+out:
67660+ read_unlock(&tasklist_lock);
67661+
67662+ return ret;
67663+}
67664+
67665+void
67666+gr_copy_label(struct task_struct *tsk)
67667+{
67668+ struct task_struct *p = current;
67669+
67670+ tsk->inherited = p->inherited;
67671+ tsk->acl_sp_role = 0;
67672+ tsk->acl_role_id = p->acl_role_id;
67673+ tsk->acl = p->acl;
67674+ tsk->role = p->role;
67675+ tsk->signal->used_accept = 0;
67676+ tsk->signal->curr_ip = p->signal->curr_ip;
67677+ tsk->signal->saved_ip = p->signal->saved_ip;
67678+ if (p->exec_file)
67679+ get_file(p->exec_file);
67680+ tsk->exec_file = p->exec_file;
67681+ tsk->is_writable = p->is_writable;
67682+ if (unlikely(p->signal->used_accept)) {
67683+ p->signal->curr_ip = 0;
67684+ p->signal->saved_ip = 0;
67685+ }
67686+
67687+ return;
67688+}
67689+
67690+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
67691+
67692+int
67693+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
67694+{
67695+ unsigned int i;
67696+ __u16 num;
67697+ uid_t *uidlist;
67698+ uid_t curuid;
67699+ int realok = 0;
67700+ int effectiveok = 0;
67701+ int fsok = 0;
67702+ uid_t globalreal, globaleffective, globalfs;
67703+
67704+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
67705+ struct user_struct *user;
67706+
67707+ if (!uid_valid(real))
67708+ goto skipit;
67709+
67710+ /* find user based on global namespace */
67711+
67712+ globalreal = GR_GLOBAL_UID(real);
67713+
67714+ user = find_user(make_kuid(&init_user_ns, globalreal));
67715+ if (user == NULL)
67716+ goto skipit;
67717+
67718+ if (gr_process_kernel_setuid_ban(user)) {
67719+ /* for find_user */
67720+ free_uid(user);
67721+ return 1;
67722+ }
67723+
67724+ /* for find_user */
67725+ free_uid(user);
67726+
67727+skipit:
67728+#endif
67729+
67730+ if (unlikely(!(gr_status & GR_READY)))
67731+ return 0;
67732+
67733+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
67734+ gr_log_learn_uid_change(real, effective, fs);
67735+
67736+ num = current->acl->user_trans_num;
67737+ uidlist = current->acl->user_transitions;
67738+
67739+ if (uidlist == NULL)
67740+ return 0;
67741+
67742+ if (!uid_valid(real)) {
67743+ realok = 1;
67744+ globalreal = (uid_t)-1;
67745+ } else {
67746+ globalreal = GR_GLOBAL_UID(real);
67747+ }
67748+ if (!uid_valid(effective)) {
67749+ effectiveok = 1;
67750+ globaleffective = (uid_t)-1;
67751+ } else {
67752+ globaleffective = GR_GLOBAL_UID(effective);
67753+ }
67754+ if (!uid_valid(fs)) {
67755+ fsok = 1;
67756+ globalfs = (uid_t)-1;
67757+ } else {
67758+ globalfs = GR_GLOBAL_UID(fs);
67759+ }
67760+
67761+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
67762+ for (i = 0; i < num; i++) {
67763+ curuid = uidlist[i];
67764+ if (globalreal == curuid)
67765+ realok = 1;
67766+ if (globaleffective == curuid)
67767+ effectiveok = 1;
67768+ if (globalfs == curuid)
67769+ fsok = 1;
67770+ }
67771+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
67772+ for (i = 0; i < num; i++) {
67773+ curuid = uidlist[i];
67774+ if (globalreal == curuid)
67775+ break;
67776+ if (globaleffective == curuid)
67777+ break;
67778+ if (globalfs == curuid)
67779+ break;
67780+ }
67781+ /* not in deny list */
67782+ if (i == num) {
67783+ realok = 1;
67784+ effectiveok = 1;
67785+ fsok = 1;
67786+ }
67787+ }
67788+
67789+ if (realok && effectiveok && fsok)
67790+ return 0;
67791+ else {
67792+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
67793+ return 1;
67794+ }
67795+}
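For readers tracing the loops above: the subject's transition table acts either as an allow-list (each of the real, effective and fs ids must appear in it) or as a deny-list (one hit anywhere fails the whole change). The following standalone sketch models that decision for a single id; the names and the userspace framing are mine, not the patch's.

/* Standalone sketch (not from the patch): allow/deny transition check,
   simplified to a single target id. */
#include <stdio.h>

#define MODE_ALLOW 1
#define MODE_DENY  2

/* returns 1 if the transition to 'target' is denied, 0 if permitted */
static int check_transition(int mode, const unsigned *list, unsigned n, unsigned target)
{
    unsigned i;

    if (mode == MODE_ALLOW) {
        /* allow-list: only listed ids are reachable */
        for (i = 0; i < n; i++)
            if (list[i] == target)
                return 0;
        return 1;
    }
    if (mode == MODE_DENY) {
        /* deny-list: listed ids are unreachable, everything else passes */
        for (i = 0; i < n; i++)
            if (list[i] == target)
                return 1;
        return 0;
    }
    return 0; /* no transition list configured */
}

int main(void)
{
    const unsigned allowed[] = { 33, 48 };

    printf("setuid 33 -> %s\n", check_transition(MODE_ALLOW, allowed, 2, 33) ? "denied" : "ok");
    printf("setuid 0  -> %s\n", check_transition(MODE_ALLOW, allowed, 2, 0) ? "denied" : "ok");
    return 0;
}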
67796+
67797+int
67798+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
67799+{
67800+ unsigned int i;
67801+ __u16 num;
67802+ gid_t *gidlist;
67803+ gid_t curgid;
67804+ int realok = 0;
67805+ int effectiveok = 0;
67806+ int fsok = 0;
67807+ gid_t globalreal, globaleffective, globalfs;
67808+
67809+ if (unlikely(!(gr_status & GR_READY)))
67810+ return 0;
67811+
67812+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
67813+ gr_log_learn_gid_change(real, effective, fs);
67814+
67815+ num = current->acl->group_trans_num;
67816+ gidlist = current->acl->group_transitions;
67817+
67818+ if (gidlist == NULL)
67819+ return 0;
67820+
67821+ if (!gid_valid(real)) {
67822+ realok = 1;
67823+ globalreal = (gid_t)-1;
67824+ } else {
67825+ globalreal = GR_GLOBAL_GID(real);
67826+ }
67827+ if (!gid_valid(effective)) {
67828+ effectiveok = 1;
67829+ globaleffective = (gid_t)-1;
67830+ } else {
67831+ globaleffective = GR_GLOBAL_GID(effective);
67832+ }
67833+ if (!gid_valid(fs)) {
67834+ fsok = 1;
67835+ globalfs = (gid_t)-1;
67836+ } else {
67837+ globalfs = GR_GLOBAL_GID(fs);
67838+ }
67839+
67840+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
67841+ for (i = 0; i < num; i++) {
67842+ curgid = gidlist[i];
67843+ if (globalreal == curgid)
67844+ realok = 1;
67845+ if (globaleffective == curgid)
67846+ effectiveok = 1;
67847+ if (globalfs == curgid)
67848+ fsok = 1;
67849+ }
67850+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
67851+ for (i = 0; i < num; i++) {
67852+ curgid = gidlist[i];
67853+ if (globalreal == curgid)
67854+ break;
67855+ if (globaleffective == curgid)
67856+ break;
67857+ if (globalfs == curgid)
67858+ break;
67859+ }
67860+ /* not in deny list */
67861+ if (i == num) {
67862+ realok = 1;
67863+ effectiveok = 1;
67864+ fsok = 1;
67865+ }
67866+ }
67867+
67868+ if (realok && effectiveok && fsok)
67869+ return 0;
67870+ else {
67871+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
67872+ return 1;
67873+ }
67874+}
67875+
67876+extern int gr_acl_is_capable(const int cap);
67877+
67878+void
67879+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
67880+{
67881+ struct acl_role_label *role = task->role;
67882+ struct acl_subject_label *subj = NULL;
67883+ struct acl_object_label *obj;
67884+ struct file *filp;
67885+ uid_t uid;
67886+ gid_t gid;
67887+
67888+ if (unlikely(!(gr_status & GR_READY)))
67889+ return;
67890+
67891+ uid = GR_GLOBAL_UID(kuid);
67892+ gid = GR_GLOBAL_GID(kgid);
67893+
67894+ filp = task->exec_file;
67895+
67896+ /* kernel process, we'll give them the kernel role */
67897+ if (unlikely(!filp)) {
67898+ task->role = running_polstate.kernel_role;
67899+ task->acl = running_polstate.kernel_role->root_label;
67900+ return;
67901+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
67902+ /* save the current ip at time of role lookup so that the proper
67903+ IP will be learned for role_allowed_ip */
67904+ task->signal->saved_ip = task->signal->curr_ip;
67905+ role = lookup_acl_role_label(task, uid, gid);
67906+ }
67907+
67908+ /* don't change the role if we're not a privileged process */
67909+ if (role && task->role != role &&
67910+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
67911+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
67912+ return;
67913+
67914+	/* perform the subject lookup in the possibly new role;
67915+	   we can reuse this result below in the case where role == task->role
67916+	*/
67917+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
67918+
67919+	/* if we changed uid/gid but ended up with the same role
67920+	   and are using inheritance, don't lose the inherited subject:
67921+	   if the current subject differs from what a normal lookup
67922+	   would return, we arrived at it via inheritance, so keep
67923+	   that subject
67924+	*/
67925+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
67926+ (subj == task->acl)))
67927+ task->acl = subj;
67928+
67929+ /* leave task->inherited unaffected */
67930+
67931+ task->role = role;
67932+
67933+ task->is_writable = 0;
67934+
67935+ /* ignore additional mmap checks for processes that are writable
67936+ by the default ACL */
67937+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
67938+ if (unlikely(obj->mode & GR_WRITE))
67939+ task->is_writable = 1;
67940+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
67941+ if (unlikely(obj->mode & GR_WRITE))
67942+ task->is_writable = 1;
67943+
67944+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
67945+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
67946+#endif
67947+
67948+ gr_set_proc_res(task);
67949+
67950+ return;
67951+}
67952+
67953+int
67954+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
67955+ const int unsafe_flags)
67956+{
67957+ struct task_struct *task = current;
67958+ struct acl_subject_label *newacl;
67959+ struct acl_object_label *obj;
67960+ __u32 retmode;
67961+
67962+ if (unlikely(!(gr_status & GR_READY)))
67963+ return 0;
67964+
67965+ newacl = chk_subj_label(dentry, mnt, task->role);
67966+
67967+	/* special handling for the case where we did an strace -f -p <pid> from an
67968+	   admin role, and the traced pid then performed an exec
67969+	*/
67970+ rcu_read_lock();
67971+ read_lock(&tasklist_lock);
67972+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
67973+ (task->parent->acl->mode & GR_POVERRIDE))) {
67974+ read_unlock(&tasklist_lock);
67975+ rcu_read_unlock();
67976+ goto skip_check;
67977+ }
67978+ read_unlock(&tasklist_lock);
67979+ rcu_read_unlock();
67980+
67981+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
67982+ !(task->role->roletype & GR_ROLE_GOD) &&
67983+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
67984+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
67985+ if (unsafe_flags & LSM_UNSAFE_SHARE)
67986+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
67987+ else
67988+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
67989+ return -EACCES;
67990+ }
67991+
67992+skip_check:
67993+
67994+ obj = chk_obj_label(dentry, mnt, task->acl);
67995+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
67996+
67997+ if (!(task->acl->mode & GR_INHERITLEARN) &&
67998+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
67999+ if (obj->nested)
68000+ task->acl = obj->nested;
68001+ else
68002+ task->acl = newacl;
68003+ task->inherited = 0;
68004+ } else {
68005+ task->inherited = 1;
68006+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
68007+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
68008+ }
68009+
68010+ task->is_writable = 0;
68011+
68012+ /* ignore additional mmap checks for processes that are writable
68013+ by the default ACL */
68014+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
68015+ if (unlikely(obj->mode & GR_WRITE))
68016+ task->is_writable = 1;
68017+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
68018+ if (unlikely(obj->mode & GR_WRITE))
68019+ task->is_writable = 1;
68020+
68021+ gr_set_proc_res(task);
68022+
68023+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68024+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
68025+#endif
68026+ return 0;
68027+}
68028+
68029+/* always called with valid inodev ptr */
68030+static void
68031+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
68032+{
68033+ struct acl_object_label *matchpo;
68034+ struct acl_subject_label *matchps;
68035+ struct acl_subject_label *subj;
68036+ struct acl_role_label *role;
68037+ unsigned int x;
68038+
68039+ FOR_EACH_ROLE_START(role)
68040+ FOR_EACH_SUBJECT_START(role, subj, x)
68041+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
68042+ matchpo->mode |= GR_DELETED;
68043+ FOR_EACH_SUBJECT_END(subj,x)
68044+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
68045+ /* nested subjects aren't in the role's subj_hash table */
68046+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
68047+ matchpo->mode |= GR_DELETED;
68048+ FOR_EACH_NESTED_SUBJECT_END(subj)
68049+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
68050+ matchps->mode |= GR_DELETED;
68051+ FOR_EACH_ROLE_END(role)
68052+
68053+ inodev->nentry->deleted = 1;
68054+
68055+ return;
68056+}
68057+
68058+void
68059+gr_handle_delete(const ino_t ino, const dev_t dev)
68060+{
68061+ struct inodev_entry *inodev;
68062+
68063+ if (unlikely(!(gr_status & GR_READY)))
68064+ return;
68065+
68066+ write_lock(&gr_inode_lock);
68067+ inodev = lookup_inodev_entry(ino, dev);
68068+ if (inodev != NULL)
68069+ do_handle_delete(inodev, ino, dev);
68070+ write_unlock(&gr_inode_lock);
68071+
68072+ return;
68073+}
68074+
68075+static void
68076+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
68077+ const ino_t newinode, const dev_t newdevice,
68078+ struct acl_subject_label *subj)
68079+{
68080+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
68081+ struct acl_object_label *match;
68082+
68083+ match = subj->obj_hash[index];
68084+
68085+ while (match && (match->inode != oldinode ||
68086+ match->device != olddevice ||
68087+ !(match->mode & GR_DELETED)))
68088+ match = match->next;
68089+
68090+ if (match && (match->inode == oldinode)
68091+ && (match->device == olddevice)
68092+ && (match->mode & GR_DELETED)) {
68093+ if (match->prev == NULL) {
68094+ subj->obj_hash[index] = match->next;
68095+ if (match->next != NULL)
68096+ match->next->prev = NULL;
68097+ } else {
68098+ match->prev->next = match->next;
68099+ if (match->next != NULL)
68100+ match->next->prev = match->prev;
68101+ }
68102+ match->prev = NULL;
68103+ match->next = NULL;
68104+ match->inode = newinode;
68105+ match->device = newdevice;
68106+ match->mode &= ~GR_DELETED;
68107+
68108+ insert_acl_obj_label(match, subj);
68109+ }
68110+
68111+ return;
68112+}
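The three update_* helpers in this file share one mechanic: find the deleted entry under its stale (inode, device) key, unlink it from the doubly-linked bucket chain, rewrite its identity, and reinsert it under the new bucket. A standalone model of that unlink-rekey-reinsert step, with hypothetical names:

/* Standalone sketch (not from the patch): rekeying a node in a chained
   hash table with doubly-linked buckets. */
#include <stdio.h>

#define NBUCKETS 8

struct node {
    unsigned long key;
    struct node *prev, *next;
};

static struct node *buckets[NBUCKETS];

static unsigned bucket_of(unsigned long key) { return key % NBUCKETS; }

static void insert(struct node *n)
{
    unsigned b = bucket_of(n->key);

    n->prev = NULL;
    n->next = buckets[b];
    if (buckets[b])
        buckets[b]->prev = n;
    buckets[b] = n;
}

static void rekey(struct node *n, unsigned long newkey)
{
    unsigned b = bucket_of(n->key);

    /* unlink from the old chain, fixing up the bucket head if needed */
    if (n->prev == NULL) {
        buckets[b] = n->next;
        if (n->next)
            n->next->prev = NULL;
    } else {
        n->prev->next = n->next;
        if (n->next)
            n->next->prev = n->prev;
    }
    n->prev = n->next = NULL;
    n->key = newkey;
    insert(n);
}

int main(void)
{
    struct node a = { .key = 3 };

    insert(&a);
    rekey(&a, 11);
    printf("node now in bucket %u\n", bucket_of(a.key));
    return 0;
}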
68113+
68114+static void
68115+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
68116+ const ino_t newinode, const dev_t newdevice,
68117+ struct acl_role_label *role)
68118+{
68119+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
68120+ struct acl_subject_label *match;
68121+
68122+ match = role->subj_hash[index];
68123+
68124+ while (match && (match->inode != oldinode ||
68125+ match->device != olddevice ||
68126+ !(match->mode & GR_DELETED)))
68127+ match = match->next;
68128+
68129+ if (match && (match->inode == oldinode)
68130+ && (match->device == olddevice)
68131+ && (match->mode & GR_DELETED)) {
68132+ if (match->prev == NULL) {
68133+ role->subj_hash[index] = match->next;
68134+ if (match->next != NULL)
68135+ match->next->prev = NULL;
68136+ } else {
68137+ match->prev->next = match->next;
68138+ if (match->next != NULL)
68139+ match->next->prev = match->prev;
68140+ }
68141+ match->prev = NULL;
68142+ match->next = NULL;
68143+ match->inode = newinode;
68144+ match->device = newdevice;
68145+ match->mode &= ~GR_DELETED;
68146+
68147+ insert_acl_subj_label(match, role);
68148+ }
68149+
68150+ return;
68151+}
68152+
68153+static void
68154+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
68155+ const ino_t newinode, const dev_t newdevice)
68156+{
68157+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
68158+ struct inodev_entry *match;
68159+
68160+ match = running_polstate.inodev_set.i_hash[index];
68161+
68162+ while (match && (match->nentry->inode != oldinode ||
68163+ match->nentry->device != olddevice || !match->nentry->deleted))
68164+ match = match->next;
68165+
68166+ if (match && (match->nentry->inode == oldinode)
68167+ && (match->nentry->device == olddevice) &&
68168+ match->nentry->deleted) {
68169+ if (match->prev == NULL) {
68170+ running_polstate.inodev_set.i_hash[index] = match->next;
68171+ if (match->next != NULL)
68172+ match->next->prev = NULL;
68173+ } else {
68174+ match->prev->next = match->next;
68175+ if (match->next != NULL)
68176+ match->next->prev = match->prev;
68177+ }
68178+ match->prev = NULL;
68179+ match->next = NULL;
68180+ match->nentry->inode = newinode;
68181+ match->nentry->device = newdevice;
68182+ match->nentry->deleted = 0;
68183+
68184+ insert_inodev_entry(match);
68185+ }
68186+
68187+ return;
68188+}
68189+
68190+static void
68191+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
68192+{
68193+ struct acl_subject_label *subj;
68194+ struct acl_role_label *role;
68195+ unsigned int x;
68196+
68197+ FOR_EACH_ROLE_START(role)
68198+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
68199+
68200+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
68201+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
68202+ subj->inode = ino;
68203+ subj->device = dev;
68204+ }
68205+ /* nested subjects aren't in the role's subj_hash table */
68206+ update_acl_obj_label(matchn->inode, matchn->device,
68207+ ino, dev, subj);
68208+ FOR_EACH_NESTED_SUBJECT_END(subj)
68209+ FOR_EACH_SUBJECT_START(role, subj, x)
68210+ update_acl_obj_label(matchn->inode, matchn->device,
68211+ ino, dev, subj);
68212+ FOR_EACH_SUBJECT_END(subj,x)
68213+ FOR_EACH_ROLE_END(role)
68214+
68215+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
68216+
68217+ return;
68218+}
68219+
68220+static void
68221+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
68222+ const struct vfsmount *mnt)
68223+{
68224+ ino_t ino = dentry->d_inode->i_ino;
68225+ dev_t dev = __get_dev(dentry);
68226+
68227+ __do_handle_create(matchn, ino, dev);
68228+
68229+ return;
68230+}
68231+
68232+void
68233+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
68234+{
68235+ struct name_entry *matchn;
68236+
68237+ if (unlikely(!(gr_status & GR_READY)))
68238+ return;
68239+
68240+ preempt_disable();
68241+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
68242+
68243+ if (unlikely((unsigned long)matchn)) {
68244+ write_lock(&gr_inode_lock);
68245+ do_handle_create(matchn, dentry, mnt);
68246+ write_unlock(&gr_inode_lock);
68247+ }
68248+ preempt_enable();
68249+
68250+ return;
68251+}
68252+
68253+void
68254+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
68255+{
68256+ struct name_entry *matchn;
68257+
68258+ if (unlikely(!(gr_status & GR_READY)))
68259+ return;
68260+
68261+ preempt_disable();
68262+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
68263+
68264+ if (unlikely((unsigned long)matchn)) {
68265+ write_lock(&gr_inode_lock);
68266+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
68267+ write_unlock(&gr_inode_lock);
68268+ }
68269+ preempt_enable();
68270+
68271+ return;
68272+}
68273+
68274+void
68275+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68276+ struct dentry *old_dentry,
68277+ struct dentry *new_dentry,
68278+ struct vfsmount *mnt, const __u8 replace)
68279+{
68280+ struct name_entry *matchn;
68281+ struct inodev_entry *inodev;
68282+ struct inode *inode = new_dentry->d_inode;
68283+ ino_t old_ino = old_dentry->d_inode->i_ino;
68284+ dev_t old_dev = __get_dev(old_dentry);
68285+
68286+	/* vfs_rename swaps the name and parent link for old_dentry and
68287+	   new_dentry.
68288+	   At this point, old_dentry has the new name, parent link, and inode
68289+	   for the renamed file.
68290+	   If a file is being replaced by the rename, new_dentry has the inode
68291+	   and name for the replaced file.
68292+	*/
68293+
68294+ if (unlikely(!(gr_status & GR_READY)))
68295+ return;
68296+
68297+ preempt_disable();
68298+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
68299+
68300+ /* we wouldn't have to check d_inode if it weren't for
68301+ NFS silly-renaming
68302+ */
68303+
68304+ write_lock(&gr_inode_lock);
68305+ if (unlikely(replace && inode)) {
68306+ ino_t new_ino = inode->i_ino;
68307+ dev_t new_dev = __get_dev(new_dentry);
68308+
68309+ inodev = lookup_inodev_entry(new_ino, new_dev);
68310+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
68311+ do_handle_delete(inodev, new_ino, new_dev);
68312+ }
68313+
68314+ inodev = lookup_inodev_entry(old_ino, old_dev);
68315+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
68316+ do_handle_delete(inodev, old_ino, old_dev);
68317+
68318+ if (unlikely((unsigned long)matchn))
68319+ do_handle_create(matchn, old_dentry, mnt);
68320+
68321+ write_unlock(&gr_inode_lock);
68322+ preempt_enable();
68323+
68324+ return;
68325+}
68326+
68327+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
68328+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
68329+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
68330+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
68331+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
68332+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
68333+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
68334+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
68335+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
68336+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
68337+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
68338+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
68339+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
68340+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
68341+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
68342+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
68343+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
68344+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
68345+};
68346+
68347+void
68348+gr_learn_resource(const struct task_struct *task,
68349+ const int res, const unsigned long wanted, const int gt)
68350+{
68351+ struct acl_subject_label *acl;
68352+ const struct cred *cred;
68353+
68354+ if (unlikely((gr_status & GR_READY) &&
68355+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
68356+ goto skip_reslog;
68357+
68358+ gr_log_resource(task, res, wanted, gt);
68359+skip_reslog:
68360+
68361+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
68362+ return;
68363+
68364+ acl = task->acl;
68365+
68366+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
68367+ !(acl->resmask & (1U << (unsigned short) res))))
68368+ return;
68369+
68370+ if (wanted >= acl->res[res].rlim_cur) {
68371+ unsigned long res_add;
68372+
68373+ res_add = wanted + res_learn_bumps[res];
68374+
68375+ acl->res[res].rlim_cur = res_add;
68376+
68377+ if (wanted > acl->res[res].rlim_max)
68378+ acl->res[res].rlim_max = res_add;
68379+
68380+ /* only log the subject filename, since resource logging is supported for
68381+ single-subject learning only */
68382+ rcu_read_lock();
68383+ cred = __task_cred(task);
68384+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
68385+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
68386+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
68387+ "", (unsigned long) res, &task->signal->saved_ip);
68388+ rcu_read_unlock();
68389+ }
68390+
68391+ return;
68392+}
68393+EXPORT_SYMBOL_GPL(gr_learn_resource);
68394+#endif
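In the learning branch above, the soft limit becomes the observed request plus a per-resource bump from res_learn_bumps, and the hard limit is raised the same way once the request exceeds it. A reduced standalone model (illustrative values, hypothetical names):

/* Standalone sketch (not from the patch): how the learn bumps grow
   a subject's rlimits. */
#include <stdio.h>

struct limits { unsigned long cur, max; };

static void learn(struct limits *l, unsigned long wanted, unsigned long bump)
{
    if (wanted >= l->cur) {
        l->cur = wanted + bump;     /* soft limit: observed usage plus headroom */
        if (wanted > l->max)
            l->max = wanted + bump; /* hard limit trails the soft limit */
    }
}

int main(void)
{
    struct limits nofile = { 64, 64 };

    learn(&nofile, 100, 8);  /* process wanted 100 fds */
    printf("cur=%lu max=%lu\n", nofile.cur, nofile.max); /* 108 108 */
    return 0;
}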
68395+
68396+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
68397+void
68398+pax_set_initial_flags(struct linux_binprm *bprm)
68399+{
68400+ struct task_struct *task = current;
68401+ struct acl_subject_label *proc;
68402+ unsigned long flags;
68403+
68404+ if (unlikely(!(gr_status & GR_READY)))
68405+ return;
68406+
68407+ flags = pax_get_flags(task);
68408+
68409+ proc = task->acl;
68410+
68411+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
68412+ flags &= ~MF_PAX_PAGEEXEC;
68413+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
68414+ flags &= ~MF_PAX_SEGMEXEC;
68415+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
68416+ flags &= ~MF_PAX_RANDMMAP;
68417+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
68418+ flags &= ~MF_PAX_EMUTRAMP;
68419+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
68420+ flags &= ~MF_PAX_MPROTECT;
68421+
68422+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
68423+ flags |= MF_PAX_PAGEEXEC;
68424+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
68425+ flags |= MF_PAX_SEGMEXEC;
68426+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
68427+ flags |= MF_PAX_RANDMMAP;
68428+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
68429+ flags |= MF_PAX_EMUTRAMP;
68430+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
68431+ flags |= MF_PAX_MPROTECT;
68432+
68433+ pax_set_flags(task, flags);
68434+
68435+ return;
68436+}
68437+#endif
68438+
68439+int
68440+gr_handle_proc_ptrace(struct task_struct *task)
68441+{
68442+ struct file *filp;
68443+ struct task_struct *tmp = task;
68444+ struct task_struct *curtemp = current;
68445+ __u32 retmode;
68446+
68447+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
68448+ if (unlikely(!(gr_status & GR_READY)))
68449+ return 0;
68450+#endif
68451+
68452+ read_lock(&tasklist_lock);
68453+ read_lock(&grsec_exec_file_lock);
68454+ filp = task->exec_file;
68455+
68456+ while (task_pid_nr(tmp) > 0) {
68457+ if (tmp == curtemp)
68458+ break;
68459+ tmp = tmp->real_parent;
68460+ }
68461+
68462+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
68463+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
68464+ read_unlock(&grsec_exec_file_lock);
68465+ read_unlock(&tasklist_lock);
68466+ return 1;
68467+ }
68468+
68469+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
68470+ if (!(gr_status & GR_READY)) {
68471+ read_unlock(&grsec_exec_file_lock);
68472+ read_unlock(&tasklist_lock);
68473+ return 0;
68474+ }
68475+#endif
68476+
68477+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
68478+ read_unlock(&grsec_exec_file_lock);
68479+ read_unlock(&tasklist_lock);
68480+
68481+ if (retmode & GR_NOPTRACE)
68482+ return 1;
68483+
68484+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
68485+ && (current->acl != task->acl || (current->acl != current->role->root_label
68486+ && task_pid_nr(current) != task_pid_nr(task))))
68487+ return 1;
68488+
68489+ return 0;
68490+}
68491+
68492+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
68493+{
68494+ if (unlikely(!(gr_status & GR_READY)))
68495+ return;
68496+
68497+ if (!(current->role->roletype & GR_ROLE_GOD))
68498+ return;
68499+
68500+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
68501+ p->role->rolename, gr_task_roletype_to_char(p),
68502+ p->acl->filename);
68503+}
68504+
68505+int
68506+gr_handle_ptrace(struct task_struct *task, const long request)
68507+{
68508+ struct task_struct *tmp = task;
68509+ struct task_struct *curtemp = current;
68510+ __u32 retmode;
68511+
68512+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
68513+ if (unlikely(!(gr_status & GR_READY)))
68514+ return 0;
68515+#endif
68516+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68517+ read_lock(&tasklist_lock);
68518+ while (task_pid_nr(tmp) > 0) {
68519+ if (tmp == curtemp)
68520+ break;
68521+ tmp = tmp->real_parent;
68522+ }
68523+
68524+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
68525+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
68526+ read_unlock(&tasklist_lock);
68527+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
68528+ return 1;
68529+ }
68530+ read_unlock(&tasklist_lock);
68531+ }
68532+
68533+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
68534+ if (!(gr_status & GR_READY))
68535+ return 0;
68536+#endif
68537+
68538+ read_lock(&grsec_exec_file_lock);
68539+ if (unlikely(!task->exec_file)) {
68540+ read_unlock(&grsec_exec_file_lock);
68541+ return 0;
68542+ }
68543+
68544+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
68545+ read_unlock(&grsec_exec_file_lock);
68546+
68547+ if (retmode & GR_NOPTRACE) {
68548+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
68549+ return 1;
68550+ }
68551+
68552+ if (retmode & GR_PTRACERD) {
68553+ switch (request) {
68554+ case PTRACE_SEIZE:
68555+ case PTRACE_POKETEXT:
68556+ case PTRACE_POKEDATA:
68557+ case PTRACE_POKEUSR:
68558+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
68559+ case PTRACE_SETREGS:
68560+ case PTRACE_SETFPREGS:
68561+#endif
68562+#ifdef CONFIG_X86
68563+ case PTRACE_SETFPXREGS:
68564+#endif
68565+#ifdef CONFIG_ALTIVEC
68566+ case PTRACE_SETVRREGS:
68567+#endif
68568+ return 1;
68569+ default:
68570+ return 0;
68571+ }
68572+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
68573+ !(current->role->roletype & GR_ROLE_GOD) &&
68574+ (current->acl != task->acl)) {
68575+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
68576+ return 1;
68577+ }
68578+
68579+ return 0;
68580+}
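The GR_PTRACERD branch above amounts to read-only ptrace: requests that would modify the tracee are refused while inspection requests pass. A simplified userspace classifier over the same idea (x86-flavoured subset of the request codes; the helper name is mine):

/* Standalone sketch (not from the patch): classifying ptrace requests
   as tracee-modifying or not. */
#include <stdio.h>
#include <sys/ptrace.h>

/* returns 1 if the request modifies the tracee, 0 otherwise */
static int is_write_request(long request)
{
    switch (request) {
    case PTRACE_POKETEXT:
    case PTRACE_POKEDATA:
    case PTRACE_POKEUSER:
    case PTRACE_SETREGS:
    case PTRACE_SETFPREGS:
        return 1;
    default:
        return 0;
    }
}

int main(void)
{
    printf("PEEKDATA write? %d\n", is_write_request(PTRACE_PEEKDATA)); /* 0 */
    printf("POKEDATA write? %d\n", is_write_request(PTRACE_POKEDATA)); /* 1 */
    return 0;
}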
68581+
68582+static int is_writable_mmap(const struct file *filp)
68583+{
68584+ struct task_struct *task = current;
68585+ struct acl_object_label *obj, *obj2;
68586+
68587+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
68588+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
68589+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
68590+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
68591+ task->role->root_label);
68592+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
68593+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
68594+ return 1;
68595+ }
68596+ }
68597+ return 0;
68598+}
68599+
68600+int
68601+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
68602+{
68603+ __u32 mode;
68604+
68605+ if (unlikely(!file || !(prot & PROT_EXEC)))
68606+ return 1;
68607+
68608+ if (is_writable_mmap(file))
68609+ return 0;
68610+
68611+ mode =
68612+ gr_search_file(file->f_path.dentry,
68613+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
68614+ file->f_path.mnt);
68615+
68616+ if (!gr_tpe_allow(file))
68617+ return 0;
68618+
68619+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
68620+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
68621+ return 0;
68622+ } else if (unlikely(!(mode & GR_EXEC))) {
68623+ return 0;
68624+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
68625+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
68626+ return 1;
68627+ }
68628+
68629+ return 1;
68630+}
68631+
68632+int
68633+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
68634+{
68635+ __u32 mode;
68636+
68637+ if (unlikely(!file || !(prot & PROT_EXEC)))
68638+ return 1;
68639+
68640+ if (is_writable_mmap(file))
68641+ return 0;
68642+
68643+ mode =
68644+ gr_search_file(file->f_path.dentry,
68645+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
68646+ file->f_path.mnt);
68647+
68648+ if (!gr_tpe_allow(file))
68649+ return 0;
68650+
68651+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
68652+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
68653+ return 0;
68654+ } else if (unlikely(!(mode & GR_EXEC))) {
68655+ return 0;
68656+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
68657+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
68658+ return 1;
68659+ }
68660+
68661+ return 1;
68662+}
68663+
68664+void
68665+gr_acl_handle_psacct(struct task_struct *task, const long code)
68666+{
68667+ unsigned long runtime, cputime;
68668+ cputime_t utime, stime;
68669+ unsigned int wday, cday;
68670+ __u8 whr, chr;
68671+ __u8 wmin, cmin;
68672+ __u8 wsec, csec;
68673+ struct timespec timeval;
68674+
68675+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
68676+ !(task->acl->mode & GR_PROCACCT)))
68677+ return;
68678+
68679+ do_posix_clock_monotonic_gettime(&timeval);
68680+ runtime = timeval.tv_sec - task->start_time.tv_sec;
68681+ wday = runtime / (60 * 60 * 24);
68682+ runtime -= wday * (60 * 60 * 24);
68683+ whr = runtime / (60 * 60);
68684+ runtime -= whr * (60 * 60);
68685+ wmin = runtime / 60;
68686+ runtime -= wmin * 60;
68687+ wsec = runtime;
68688+
68689+ task_cputime(task, &utime, &stime);
68690+ cputime = cputime_to_secs(utime + stime);
68691+ cday = cputime / (60 * 60 * 24);
68692+ cputime -= cday * (60 * 60 * 24);
68693+ chr = cputime / (60 * 60);
68694+ cputime -= chr * (60 * 60);
68695+ cmin = cputime / 60;
68696+ cputime -= cmin * 60;
68697+ csec = cputime;
68698+
68699+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
68700+
68701+ return;
68702+}
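The arithmetic above just peels days, hours, minutes and seconds off two raw second counts (wallclock and CPU time). The same decomposition, standalone:

/* Standalone sketch (not from the patch): day/hour/minute/second breakdown
   of a raw second count, as done for the process-accounting log line. */
#include <stdio.h>

int main(void)
{
    unsigned long t = 93784; /* 1 day, 2 h, 3 min, 4 s */
    unsigned long days, hrs, mins, secs;

    days = t / (60 * 60 * 24);
    t   -= days * (60 * 60 * 24);
    hrs  = t / (60 * 60);
    t   -= hrs * (60 * 60);
    mins = t / 60;
    t   -= mins * 60;
    secs = t;

    printf("%lu day(s) %lu:%02lu:%02lu\n", days, hrs, mins, secs);
    return 0;
}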
68703+
68704+#ifdef CONFIG_TASKSTATS
68705+int gr_is_taskstats_denied(int pid)
68706+{
68707+ struct task_struct *task;
68708+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68709+ const struct cred *cred;
68710+#endif
68711+ int ret = 0;
68712+
68713+ /* restrict taskstats viewing to un-chrooted root users
68714+ who have the 'view' subject flag if the RBAC system is enabled
68715+ */
68716+
68717+ rcu_read_lock();
68718+ read_lock(&tasklist_lock);
68719+ task = find_task_by_vpid(pid);
68720+ if (task) {
68721+#ifdef CONFIG_GRKERNSEC_CHROOT
68722+ if (proc_is_chrooted(task))
68723+ ret = -EACCES;
68724+#endif
68725+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68726+ cred = __task_cred(task);
68727+#ifdef CONFIG_GRKERNSEC_PROC_USER
68728+ if (gr_is_global_nonroot(cred->uid))
68729+ ret = -EACCES;
68730+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68731+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
68732+ ret = -EACCES;
68733+#endif
68734+#endif
68735+ if (gr_status & GR_READY) {
68736+ if (!(task->acl->mode & GR_VIEW))
68737+ ret = -EACCES;
68738+ }
68739+ } else
68740+ ret = -ENOENT;
68741+
68742+ read_unlock(&tasklist_lock);
68743+ rcu_read_unlock();
68744+
68745+ return ret;
68746+}
68747+#endif
68748+
68749+/* AUXV entries are filled via a descendant of search_binary_handler
68750+ after we've already applied the subject for the target
68751+*/
68752+int gr_acl_enable_at_secure(void)
68753+{
68754+ if (unlikely(!(gr_status & GR_READY)))
68755+ return 0;
68756+
68757+ if (current->acl->mode & GR_ATSECURE)
68758+ return 1;
68759+
68760+ return 0;
68761+}
68762+
68763+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
68764+{
68765+ struct task_struct *task = current;
68766+ struct dentry *dentry = file->f_path.dentry;
68767+ struct vfsmount *mnt = file->f_path.mnt;
68768+ struct acl_object_label *obj, *tmp;
68769+ struct acl_subject_label *subj;
68770+ unsigned int bufsize;
68771+ int is_not_root;
68772+ char *path;
68773+ dev_t dev = __get_dev(dentry);
68774+
68775+ if (unlikely(!(gr_status & GR_READY)))
68776+ return 1;
68777+
68778+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
68779+ return 1;
68780+
68781+ /* ignore Eric Biederman */
68782+ if (IS_PRIVATE(dentry->d_inode))
68783+ return 1;
68784+
68785+ subj = task->acl;
68786+ read_lock(&gr_inode_lock);
68787+ do {
68788+ obj = lookup_acl_obj_label(ino, dev, subj);
68789+ if (obj != NULL) {
68790+ read_unlock(&gr_inode_lock);
68791+ return (obj->mode & GR_FIND) ? 1 : 0;
68792+ }
68793+ } while ((subj = subj->parent_subject));
68794+ read_unlock(&gr_inode_lock);
68795+
68796+	/* this is purely an optimization: we're looking up an object
68797+	   for the directory we're doing a readdir on.
68798+	   If it's possible for any globbed object to match the entry we're
68799+	   filling into the directory, then the object we find here will be
68800+	   an anchor point with attached globbed objects.
68801+	*/
68802+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
68803+ if (obj->globbed == NULL)
68804+ return (obj->mode & GR_FIND) ? 1 : 0;
68805+
68806+ is_not_root = ((obj->filename[0] == '/') &&
68807+ (obj->filename[1] == '\0')) ? 0 : 1;
68808+ bufsize = PAGE_SIZE - namelen - is_not_root;
68809+
68810+	/* one unsigned compare rejects both bufsize == 0 (wraps around) and bufsize > PAGE_SIZE */
68811+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
68812+ return 1;
68813+
68814+ preempt_disable();
68815+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
68816+ bufsize);
68817+
68818+ bufsize = strlen(path);
68819+
68820+ /* if base is "/", don't append an additional slash */
68821+ if (is_not_root)
68822+ *(path + bufsize) = '/';
68823+ memcpy(path + bufsize + is_not_root, name, namelen);
68824+ *(path + bufsize + namelen + is_not_root) = '\0';
68825+
68826+ tmp = obj->globbed;
68827+ while (tmp) {
68828+ if (!glob_match(tmp->filename, path)) {
68829+ preempt_enable();
68830+ return (tmp->mode & GR_FIND) ? 1 : 0;
68831+ }
68832+ tmp = tmp->next;
68833+ }
68834+ preempt_enable();
68835+ return (obj->mode & GR_FIND) ? 1 : 0;
68836+}
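Path assembly above writes the directory path into a per-cpu page, appends a slash unless the base is "/", then appends the entry name before glob-matching. A standalone sketch of that composition (hypothetical helper, plain buffer in place of the per-cpu page):

/* Standalone sketch (not from the patch): composing "<dir>/<name>" with
   the root special-case. */
#include <stdio.h>
#include <string.h>

static void compose(char *buf, size_t bufsz, const char *base, const char *name)
{
    size_t len = strlen(base);
    int not_root = !(base[0] == '/' && base[1] == '\0');

    memcpy(buf, base, len);
    if (not_root)
        buf[len] = '/';
    memcpy(buf + len + not_root, name, strlen(name) + 1);
    (void)bufsz; /* a real implementation would bound-check, as the kernel code does */
}

int main(void)
{
    char buf[256];

    compose(buf, sizeof(buf), "/", "etc");       printf("%s\n", buf); /* /etc */
    compose(buf, sizeof(buf), "/etc", "shadow"); printf("%s\n", buf); /* /etc/shadow */
    return 0;
}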
68837+
68838+void gr_put_exec_file(struct task_struct *task)
68839+{
68840+ struct file *filp;
68841+
68842+ write_lock(&grsec_exec_file_lock);
68843+ filp = task->exec_file;
68844+ task->exec_file = NULL;
68845+ write_unlock(&grsec_exec_file_lock);
68846+
68847+ if (filp)
68848+ fput(filp);
68849+
68850+ return;
68851+}
68852+
68853+
68854+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
68855+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
68856+#endif
68857+#ifdef CONFIG_SECURITY
68858+EXPORT_SYMBOL_GPL(gr_check_user_change);
68859+EXPORT_SYMBOL_GPL(gr_check_group_change);
68860+#endif
68861+
68862diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
68863new file mode 100644
68864index 0000000..18ffbbd
68865--- /dev/null
68866+++ b/grsecurity/gracl_alloc.c
68867@@ -0,0 +1,105 @@
68868+#include <linux/kernel.h>
68869+#include <linux/mm.h>
68870+#include <linux/slab.h>
68871+#include <linux/vmalloc.h>
68872+#include <linux/gracl.h>
68873+#include <linux/grsecurity.h>
68874+
68875+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
68876+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
68877+
68878+static __inline__ int
68879+alloc_pop(void)
68880+{
68881+ if (current_alloc_state->alloc_stack_next == 1)
68882+ return 0;
68883+
68884+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
68885+
68886+ current_alloc_state->alloc_stack_next--;
68887+
68888+ return 1;
68889+}
68890+
68891+static __inline__ int
68892+alloc_push(void *buf)
68893+{
68894+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
68895+ return 1;
68896+
68897+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
68898+
68899+ current_alloc_state->alloc_stack_next++;
68900+
68901+ return 0;
68902+}
68903+
68904+void *
68905+acl_alloc(unsigned long len)
68906+{
68907+ void *ret = NULL;
68908+
68909+ if (!len || len > PAGE_SIZE)
68910+ goto out;
68911+
68912+ ret = kmalloc(len, GFP_KERNEL);
68913+
68914+ if (ret) {
68915+ if (alloc_push(ret)) {
68916+ kfree(ret);
68917+ ret = NULL;
68918+ }
68919+ }
68920+
68921+out:
68922+ return ret;
68923+}
68924+
68925+void *
68926+acl_alloc_num(unsigned long num, unsigned long len)
68927+{
68928+ if (!len || (num > (PAGE_SIZE / len)))
68929+ return NULL;
68930+
68931+ return acl_alloc(num * len);
68932+}
68933+
68934+void
68935+acl_free_all(void)
68936+{
68937+ if (!current_alloc_state->alloc_stack)
68938+ return;
68939+
68940+ while (alloc_pop()) ;
68941+
68942+ if (current_alloc_state->alloc_stack) {
68943+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
68944+ kfree(current_alloc_state->alloc_stack);
68945+ else
68946+ vfree(current_alloc_state->alloc_stack);
68947+ }
68948+
68949+ current_alloc_state->alloc_stack = NULL;
68950+ current_alloc_state->alloc_stack_size = 1;
68951+ current_alloc_state->alloc_stack_next = 1;
68952+
68953+ return;
68954+}
68955+
68956+int
68957+acl_alloc_stack_init(unsigned long size)
68958+{
68959+ if ((size * sizeof (void *)) <= PAGE_SIZE)
68960+ current_alloc_state->alloc_stack =
68961+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
68962+ else
68963+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
68964+
68965+ current_alloc_state->alloc_stack_size = size;
68966+ current_alloc_state->alloc_stack_next = 1;
68967+
68968+ if (!current_alloc_state->alloc_stack)
68969+ return 0;
68970+ else
68971+ return 1;
68972+}
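gracl_alloc.c is a region-style allocator: every acl_alloc() result is pushed onto a pointer stack so a single acl_free_all() call can unwind everything allocated while a policy was being assembled. A userspace model of the same push/unwind pattern (the names are mine, not the patch's):

/* Standalone sketch (not from the patch): push-everything, free-all-at-once
   allocation tracking. */
#include <stdio.h>
#include <stdlib.h>

static void **stack;
static unsigned long stack_size, stack_next;

static int stack_init(unsigned long size)
{
    stack = calloc(size, sizeof(void *));
    stack_size = size;
    stack_next = 1;                 /* index 1 marks "empty", as in the kernel code */
    return stack != NULL;
}

static void *tracked_alloc(size_t len)
{
    void *p;

    if (stack_next >= stack_size)   /* stack full: refuse, caller unwinds */
        return NULL;
    p = malloc(len);
    if (p)
        stack[stack_next++ - 1] = p;
    return p;
}

static void free_all(void)
{
    while (stack_next > 1)
        free(stack[--stack_next - 1]);
    free(stack);
    stack = NULL;
}

int main(void)
{
    if (!stack_init(16))
        return 1;
    tracked_alloc(32);
    tracked_alloc(64);
    free_all();                     /* releases both allocations in one call */
    return 0;
}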
68973diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
68974new file mode 100644
68975index 0000000..bdd51ea
68976--- /dev/null
68977+++ b/grsecurity/gracl_cap.c
68978@@ -0,0 +1,110 @@
68979+#include <linux/kernel.h>
68980+#include <linux/module.h>
68981+#include <linux/sched.h>
68982+#include <linux/gracl.h>
68983+#include <linux/grsecurity.h>
68984+#include <linux/grinternal.h>
68985+
68986+extern const char *captab_log[];
68987+extern int captab_log_entries;
68988+
68989+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
68990+{
68991+ struct acl_subject_label *curracl;
68992+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
68993+ kernel_cap_t cap_audit = __cap_empty_set;
68994+
68995+ if (!gr_acl_is_enabled())
68996+ return 1;
68997+
68998+ curracl = task->acl;
68999+
69000+ cap_drop = curracl->cap_lower;
69001+ cap_mask = curracl->cap_mask;
69002+ cap_audit = curracl->cap_invert_audit;
69003+
69004+ while ((curracl = curracl->parent_subject)) {
69005+		/* if the cap isn't specified in the current computed mask but is
69006+		   specified in the current level subject, fold the current level
69007+		   subject's setting into the computed mask; if that subject also
69008+		   lowers the cap, add it to the set of dropped capabilities
69009+		*/
69010+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
69011+ cap_raise(cap_mask, cap);
69012+ if (cap_raised(curracl->cap_lower, cap))
69013+ cap_raise(cap_drop, cap);
69014+ if (cap_raised(curracl->cap_invert_audit, cap))
69015+ cap_raise(cap_audit, cap);
69016+ }
69017+ }
69018+
69019+ if (!cap_raised(cap_drop, cap)) {
69020+ if (cap_raised(cap_audit, cap))
69021+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
69022+ return 1;
69023+ }
69024+
69025+ curracl = task->acl;
69026+
69027+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
69028+ && cap_raised(cred->cap_effective, cap)) {
69029+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
69030+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
69031+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
69032+ gr_to_filename(task->exec_file->f_path.dentry,
69033+ task->exec_file->f_path.mnt) : curracl->filename,
69034+ curracl->filename, 0UL,
69035+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
69036+ return 1;
69037+ }
69038+
69039+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
69040+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
69041+
69042+ return 0;
69043+}
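The walk above composes capability policy nearest-subject-first: the closest subject that mentions a capability decides its fate, and ancestors only fill in capabilities the child left unspecified. A bitmask model of that composition (hypothetical structure, simplified relative to the kernel_cap_t plumbing):

/* Standalone sketch (not from the patch): nearest-subject-wins composition
   of capability mask/lower sets across a parent chain. */
#include <stdio.h>

struct subject {
    unsigned mask;   /* caps this subject says anything about */
    unsigned lower;  /* caps this subject drops */
    const struct subject *parent;
};

/* returns 1 if 'cap' (a bit) survives, 0 if some governing subject drops it */
static int cap_allowed(const struct subject *s, unsigned cap)
{
    unsigned mask = s->mask, drop = s->lower & s->mask;

    for (s = s->parent; s; s = s->parent) {
        unsigned new_bits = s->mask & ~mask; /* only caps nobody closer mentioned */

        drop |= s->lower & new_bits;
        mask |= new_bits;
    }
    return !(drop & cap);
}

int main(void)
{
    struct subject root  = { .mask = 0x3, .lower = 0x3, .parent = NULL };
    struct subject child = { .mask = 0x1, .lower = 0x0, .parent = &root };

    printf("cap 0x1: %s\n", cap_allowed(&child, 0x1) ? "kept" : "dropped"); /* kept */
    printf("cap 0x2: %s\n", cap_allowed(&child, 0x2) ? "kept" : "dropped"); /* dropped */
    return 0;
}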
69044+
69045+int
69046+gr_acl_is_capable(const int cap)
69047+{
69048+ return gr_task_acl_is_capable(current, current_cred(), cap);
69049+}
69050+
69051+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
69052+{
69053+ struct acl_subject_label *curracl;
69054+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
69055+
69056+ if (!gr_acl_is_enabled())
69057+ return 1;
69058+
69059+ curracl = task->acl;
69060+
69061+ cap_drop = curracl->cap_lower;
69062+ cap_mask = curracl->cap_mask;
69063+
69064+ while ((curracl = curracl->parent_subject)) {
69065+		/* if the cap isn't specified in the current computed mask but is
69066+		   specified in the current level subject, fold the current level
69067+		   subject's setting into the computed mask; if that subject also
69068+		   lowers the cap, add it to the set of dropped capabilities
69069+		*/
69070+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
69071+ cap_raise(cap_mask, cap);
69072+ if (cap_raised(curracl->cap_lower, cap))
69073+ cap_raise(cap_drop, cap);
69074+ }
69075+ }
69076+
69077+ if (!cap_raised(cap_drop, cap))
69078+ return 1;
69079+
69080+ return 0;
69081+}
69082+
69083+int
69084+gr_acl_is_capable_nolog(const int cap)
69085+{
69086+ return gr_task_acl_is_capable_nolog(current, cap);
69087+}
69088+
69089diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
69090new file mode 100644
69091index 0000000..ca25605
69092--- /dev/null
69093+++ b/grsecurity/gracl_compat.c
69094@@ -0,0 +1,270 @@
69095+#include <linux/kernel.h>
69096+#include <linux/gracl.h>
69097+#include <linux/compat.h>
69098+#include <linux/gracl_compat.h>
69099+
69100+#include <asm/uaccess.h>
69101+
69102+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
69103+{
69104+ struct gr_arg_wrapper_compat uwrapcompat;
69105+
69106+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
69107+ return -EFAULT;
69108+
69109+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
69110+ (uwrapcompat.version != 0x2901)) ||
69111+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
69112+ return -EINVAL;
69113+
69114+ uwrap->arg = compat_ptr(uwrapcompat.arg);
69115+ uwrap->version = uwrapcompat.version;
69116+ uwrap->size = sizeof(struct gr_arg);
69117+
69118+ return 0;
69119+}
69120+
69121+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
69122+{
69123+ struct gr_arg_compat argcompat;
69124+
69125+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
69126+ return -EFAULT;
69127+
69128+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
69129+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
69130+ arg->role_db.num_roles = argcompat.role_db.num_roles;
69131+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
69132+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
69133+ arg->role_db.num_objects = argcompat.role_db.num_objects;
69134+
69135+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
69136+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
69137+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
69138+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
69139+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
69140+ arg->segv_device = argcompat.segv_device;
69141+ arg->segv_inode = argcompat.segv_inode;
69142+ arg->segv_uid = argcompat.segv_uid;
69143+ arg->num_sprole_pws = argcompat.num_sprole_pws;
69144+ arg->mode = argcompat.mode;
69145+
69146+ return 0;
69147+}
69148+
69149+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
69150+{
69151+ struct acl_object_label_compat objcompat;
69152+
69153+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
69154+ return -EFAULT;
69155+
69156+ obj->filename = compat_ptr(objcompat.filename);
69157+ obj->inode = objcompat.inode;
69158+ obj->device = objcompat.device;
69159+ obj->mode = objcompat.mode;
69160+
69161+ obj->nested = compat_ptr(objcompat.nested);
69162+ obj->globbed = compat_ptr(objcompat.globbed);
69163+
69164+ obj->prev = compat_ptr(objcompat.prev);
69165+ obj->next = compat_ptr(objcompat.next);
69166+
69167+ return 0;
69168+}
69169+
69170+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
69171+{
69172+ unsigned int i;
69173+ struct acl_subject_label_compat subjcompat;
69174+
69175+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
69176+ return -EFAULT;
69177+
69178+ subj->filename = compat_ptr(subjcompat.filename);
69179+ subj->inode = subjcompat.inode;
69180+ subj->device = subjcompat.device;
69181+ subj->mode = subjcompat.mode;
69182+ subj->cap_mask = subjcompat.cap_mask;
69183+ subj->cap_lower = subjcompat.cap_lower;
69184+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
69185+
69186+ for (i = 0; i < GR_NLIMITS; i++) {
69187+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
69188+ subj->res[i].rlim_cur = RLIM_INFINITY;
69189+ else
69190+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
69191+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
69192+ subj->res[i].rlim_max = RLIM_INFINITY;
69193+ else
69194+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
69195+ }
69196+ subj->resmask = subjcompat.resmask;
69197+
69198+ subj->user_trans_type = subjcompat.user_trans_type;
69199+ subj->group_trans_type = subjcompat.group_trans_type;
69200+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
69201+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
69202+ subj->user_trans_num = subjcompat.user_trans_num;
69203+ subj->group_trans_num = subjcompat.group_trans_num;
69204+
69205+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
69206+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
69207+ subj->ip_type = subjcompat.ip_type;
69208+ subj->ips = compat_ptr(subjcompat.ips);
69209+ subj->ip_num = subjcompat.ip_num;
69210+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
69211+
69212+ subj->crashes = subjcompat.crashes;
69213+ subj->expires = subjcompat.expires;
69214+
69215+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
69216+ subj->hash = compat_ptr(subjcompat.hash);
69217+ subj->prev = compat_ptr(subjcompat.prev);
69218+ subj->next = compat_ptr(subjcompat.next);
69219+
69220+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
69221+ subj->obj_hash_size = subjcompat.obj_hash_size;
69222+ subj->pax_flags = subjcompat.pax_flags;
69223+
69224+ return 0;
69225+}
69226+
69227+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
69228+{
69229+ struct acl_role_label_compat rolecompat;
69230+
69231+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
69232+ return -EFAULT;
69233+
69234+ role->rolename = compat_ptr(rolecompat.rolename);
69235+ role->uidgid = rolecompat.uidgid;
69236+ role->roletype = rolecompat.roletype;
69237+
69238+ role->auth_attempts = rolecompat.auth_attempts;
69239+ role->expires = rolecompat.expires;
69240+
69241+ role->root_label = compat_ptr(rolecompat.root_label);
69242+ role->hash = compat_ptr(rolecompat.hash);
69243+
69244+ role->prev = compat_ptr(rolecompat.prev);
69245+ role->next = compat_ptr(rolecompat.next);
69246+
69247+ role->transitions = compat_ptr(rolecompat.transitions);
69248+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
69249+ role->domain_children = compat_ptr(rolecompat.domain_children);
69250+ role->domain_child_num = rolecompat.domain_child_num;
69251+
69252+ role->umask = rolecompat.umask;
69253+
69254+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
69255+ role->subj_hash_size = rolecompat.subj_hash_size;
69256+
69257+ return 0;
69258+}
69259+
69260+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
69261+{
69262+ struct role_allowed_ip_compat roleip_compat;
69263+
69264+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
69265+ return -EFAULT;
69266+
69267+ roleip->addr = roleip_compat.addr;
69268+ roleip->netmask = roleip_compat.netmask;
69269+
69270+ roleip->prev = compat_ptr(roleip_compat.prev);
69271+ roleip->next = compat_ptr(roleip_compat.next);
69272+
69273+ return 0;
69274+}
69275+
69276+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
69277+{
69278+ struct role_transition_compat trans_compat;
69279+
69280+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
69281+ return -EFAULT;
69282+
69283+ trans->rolename = compat_ptr(trans_compat.rolename);
69284+
69285+ trans->prev = compat_ptr(trans_compat.prev);
69286+ trans->next = compat_ptr(trans_compat.next);
69287+
69288+ return 0;
69289+
69290+}
69291+
69292+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
69293+{
69294+ struct gr_hash_struct_compat hash_compat;
69295+
69296+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
69297+ return -EFAULT;
69298+
69299+ hash->table = compat_ptr(hash_compat.table);
69300+ hash->nametable = compat_ptr(hash_compat.nametable);
69301+ hash->first = compat_ptr(hash_compat.first);
69302+
69303+ hash->table_size = hash_compat.table_size;
69304+ hash->used_size = hash_compat.used_size;
69305+
69306+ hash->type = hash_compat.type;
69307+
69308+ return 0;
69309+}
69310+
69311+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
69312+{
69313+ compat_uptr_t ptrcompat;
69314+
69315+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
69316+ return -EFAULT;
69317+
69318+ *(void **)ptr = compat_ptr(ptrcompat);
69319+
69320+ return 0;
69321+}
69322+
69323+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
69324+{
69325+ struct acl_ip_label_compat ip_compat;
69326+
69327+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
69328+ return -EFAULT;
69329+
69330+ ip->iface = compat_ptr(ip_compat.iface);
69331+ ip->addr = ip_compat.addr;
69332+ ip->netmask = ip_compat.netmask;
69333+ ip->low = ip_compat.low;
69334+ ip->high = ip_compat.high;
69335+ ip->mode = ip_compat.mode;
69336+ ip->type = ip_compat.type;
69337+
69338+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
69339+
69340+ ip->prev = compat_ptr(ip_compat.prev);
69341+ ip->next = compat_ptr(ip_compat.next);
69342+
69343+ return 0;
69344+}
69345+
69346+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
69347+{
69348+ struct sprole_pw_compat pw_compat;
69349+
69350+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
69351+ return -EFAULT;
69352+
69353+ pw->rolename = compat_ptr(pw_compat.rolename);
69354+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
69355+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
69356+
69357+ return 0;
69358+}
69359+
69360+size_t get_gr_arg_wrapper_size_compat(void)
69361+{
69362+ return sizeof(struct gr_arg_wrapper_compat);
69363+}
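Every copy_*_compat helper above follows the same recipe: copy_from_user() the packed 32-bit layout, then widen each compat_uptr_t through compat_ptr() and assign the scalar fields across. A reduced userspace illustration of the widening step, with stand-in types (nothing here is the kernel's actual compat API):

/* Standalone sketch (not from the patch): 32-bit -> native widening of a
   compat structure, with stand-in types. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr;  /* stand-in for the kernel's compat_uptr_t */

struct entry_compat {          /* layout as a 32-bit process sees it */
    compat_uptr name;          /* 32-bit user pointer */
    uint32_t    mode;
};

struct entry {                 /* native layout */
    void *name;
    unsigned long mode;
};

/* stand-in for compat_ptr(): widen a 32-bit user address to a pointer */
static void *compat_ptr_(compat_uptr p) { return (void *)(uintptr_t)p; }

int main(void)
{
    struct entry_compat ec = { 0x1000u, 0600 }; /* synthetic 32-bit user address */
    struct entry e;

    /* in the kernel, ec would have been filled by copy_from_user() */
    e.name = compat_ptr_(ec.name);  /* widened, never dereferenced here */
    e.mode = ec.mode;
    printf("name=%p mode=%lo\n", e.name, e.mode);
    return 0;
}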
69364+
69365diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
69366new file mode 100644
69367index 0000000..a89b1f4
69368--- /dev/null
69369+++ b/grsecurity/gracl_fs.c
69370@@ -0,0 +1,437 @@
69371+#include <linux/kernel.h>
69372+#include <linux/sched.h>
69373+#include <linux/types.h>
69374+#include <linux/fs.h>
69375+#include <linux/file.h>
69376+#include <linux/stat.h>
69377+#include <linux/grsecurity.h>
69378+#include <linux/grinternal.h>
69379+#include <linux/gracl.h>
69380+
69381+umode_t
69382+gr_acl_umask(void)
69383+{
69384+ if (unlikely(!gr_acl_is_enabled()))
69385+ return 0;
69386+
69387+ return current->role->umask;
69388+}
69389+
69390+__u32
69391+gr_acl_handle_hidden_file(const struct dentry * dentry,
69392+ const struct vfsmount * mnt)
69393+{
69394+ __u32 mode;
69395+
69396+ if (unlikely(d_is_negative(dentry)))
69397+ return GR_FIND;
69398+
69399+ mode =
69400+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
69401+
69402+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
69403+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
69404+ return mode;
69405+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
69406+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
69407+ return 0;
69408+ } else if (unlikely(!(mode & GR_FIND)))
69409+ return 0;
69410+
69411+ return GR_FIND;
69412+}
69413+
69414+__u32
69415+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
69416+ int acc_mode)
69417+{
69418+ __u32 reqmode = GR_FIND;
69419+ __u32 mode;
69420+
69421+ if (unlikely(d_is_negative(dentry)))
69422+ return reqmode;
69423+
69424+ if (acc_mode & MAY_APPEND)
69425+ reqmode |= GR_APPEND;
69426+ else if (acc_mode & MAY_WRITE)
69427+ reqmode |= GR_WRITE;
69428+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
69429+ reqmode |= GR_READ;
69430+
69431+ mode =
69432+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
69433+ mnt);
69434+
69435+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
69436+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
69437+ reqmode & GR_READ ? " reading" : "",
69438+ reqmode & GR_WRITE ? " writing" : reqmode &
69439+ GR_APPEND ? " appending" : "");
69440+ return reqmode;
69441+ } else
69442+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
69443+ {
69444+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
69445+ reqmode & GR_READ ? " reading" : "",
69446+ reqmode & GR_WRITE ? " writing" : reqmode &
69447+ GR_APPEND ? " appending" : "");
69448+ return 0;
69449+ } else if (unlikely((mode & reqmode) != reqmode))
69450+ return 0;
69451+
69452+ return reqmode;
69453+}
69454+
69455+__u32
69456+gr_acl_handle_creat(const struct dentry * dentry,
69457+ const struct dentry * p_dentry,
69458+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
69459+ const int imode)
69460+{
69461+ __u32 reqmode = GR_WRITE | GR_CREATE;
69462+ __u32 mode;
69463+
69464+ if (acc_mode & MAY_APPEND)
69465+ reqmode |= GR_APPEND;
69466+ // if a directory was required or the directory already exists, then
69467+ // don't count this open as a read
69468+ if ((acc_mode & MAY_READ) &&
69469+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
69470+ reqmode |= GR_READ;
69471+ if ((open_flags & O_CREAT) &&
69472+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
69473+ reqmode |= GR_SETID;
69474+
69475+ mode =
69476+ gr_check_create(dentry, p_dentry, p_mnt,
69477+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
69478+
69479+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
69480+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
69481+ reqmode & GR_READ ? " reading" : "",
69482+ reqmode & GR_WRITE ? " writing" : reqmode &
69483+ GR_APPEND ? " appending" : "");
69484+ return reqmode;
69485+ } else
69486+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
69487+ {
69488+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
69489+ reqmode & GR_READ ? " reading" : "",
69490+ reqmode & GR_WRITE ? " writing" : reqmode &
69491+ GR_APPEND ? " appending" : "");
69492+ return 0;
69493+ } else if (unlikely((mode & reqmode) != reqmode))
69494+ return 0;
69495+
69496+ return reqmode;
69497+}
69498+
69499+__u32
69500+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
69501+ const int fmode)
69502+{
69503+ __u32 mode, reqmode = GR_FIND;
69504+
69505+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
69506+ reqmode |= GR_EXEC;
69507+ if (fmode & S_IWOTH)
69508+ reqmode |= GR_WRITE;
69509+ if (fmode & S_IROTH)
69510+ reqmode |= GR_READ;
69511+
69512+ mode =
69513+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
69514+ mnt);
69515+
69516+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
69517+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
69518+ reqmode & GR_READ ? " reading" : "",
69519+ reqmode & GR_WRITE ? " writing" : "",
69520+ reqmode & GR_EXEC ? " executing" : "");
69521+ return reqmode;
69522+ } else
69523+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
69524+ {
69525+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
69526+ reqmode & GR_READ ? " reading" : "",
69527+ reqmode & GR_WRITE ? " writing" : "",
69528+ reqmode & GR_EXEC ? " executing" : "");
69529+ return 0;
69530+ } else if (unlikely((mode & reqmode) != reqmode))
69531+ return 0;
69532+
69533+ return reqmode;
69534+}
69535+
69536+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
69537+{
69538+ __u32 mode;
69539+
69540+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
69541+
69542+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
69543+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
69544+ return mode;
69545+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
69546+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
69547+ return 0;
69548+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
69549+ return 0;
69550+
69551+ return (reqmode);
69552+}
69553+
69554+__u32
69555+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
69556+{
69557+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
69558+}
69559+
69560+__u32
69561+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
69562+{
69563+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
69564+}
69565+
69566+__u32
69567+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
69568+{
69569+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
69570+}
69571+
69572+__u32
69573+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
69574+{
69575+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
69576+}
69577+
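+/* a chmod is treated as a plain write unless it would set the setuid bit,
+   or the setgid bit together with group-exec, on a non-directory; those
+   cases additionally require GR_SETID on the object (sockets are always
+   allowed) */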
69578+__u32
69579+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
69580+ umode_t *modeptr)
69581+{
69582+ umode_t mode;
69583+
69584+ *modeptr &= ~gr_acl_umask();
69585+ mode = *modeptr;
69586+
69587+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
69588+ return 1;
69589+
69590+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
69591+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
69592+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
69593+ GR_CHMOD_ACL_MSG);
69594+ } else {
69595+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
69596+ }
69597+}
69598+
69599+__u32
69600+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
69601+{
69602+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
69603+}
69604+
69605+__u32
69606+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
69607+{
69608+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
69609+}
69610+
69611+__u32
69612+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
69613+{
69614+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
69615+}
69616+
69617+__u32
69618+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
69619+{
69620+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
69621+}
69622+
69623+__u32
69624+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
69625+{
69626+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
69627+ GR_UNIXCONNECT_ACL_MSG);
69628+}
69629+
69630+/* hardlinks require at minimum create and link permission;
69631+   any additional privilege required is based on the
69632+   privileges of the file being linked to
69633+*/
69634+__u32
69635+gr_acl_handle_link(const struct dentry * new_dentry,
69636+ const struct dentry * parent_dentry,
69637+ const struct vfsmount * parent_mnt,
69638+ const struct dentry * old_dentry,
69639+ const struct vfsmount * old_mnt, const struct filename *to)
69640+{
69641+ __u32 mode;
69642+ __u32 needmode = GR_CREATE | GR_LINK;
69643+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
69644+
69645+ mode =
69646+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
69647+ old_mnt);
69648+
69649+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
69650+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
69651+ return mode;
69652+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
69653+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
69654+ return 0;
69655+ } else if (unlikely((mode & needmode) != needmode))
69656+ return 0;
69657+
69658+ return 1;
69659+}
69660+
69661+__u32
69662+gr_acl_handle_symlink(const struct dentry * new_dentry,
69663+ const struct dentry * parent_dentry,
69664+ const struct vfsmount * parent_mnt, const struct filename *from)
69665+{
69666+ __u32 needmode = GR_WRITE | GR_CREATE;
69667+ __u32 mode;
69668+
69669+ mode =
69670+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
69671+ GR_CREATE | GR_AUDIT_CREATE |
69672+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
69673+
69674+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
69675+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
69676+ return mode;
69677+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
69678+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
69679+ return 0;
69680+ } else if (unlikely((mode & needmode) != needmode))
69681+ return 0;
69682+
69683+ return (GR_WRITE | GR_CREATE);
69684+}
69685+
69686+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
69687+{
69688+ __u32 mode;
69689+
69690+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
69691+
69692+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
69693+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
69694+ return mode;
69695+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
69696+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
69697+ return 0;
69698+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
69699+ return 0;
69700+
69701+ return (reqmode);
69702+}
69703+
69704+__u32
69705+gr_acl_handle_mknod(const struct dentry * new_dentry,
69706+ const struct dentry * parent_dentry,
69707+ const struct vfsmount * parent_mnt,
69708+ const int mode)
69709+{
69710+ __u32 reqmode = GR_WRITE | GR_CREATE;
69711+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
69712+ reqmode |= GR_SETID;
69713+
69714+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
69715+ reqmode, GR_MKNOD_ACL_MSG);
69716+}
69717+
69718+__u32
69719+gr_acl_handle_mkdir(const struct dentry *new_dentry,
69720+ const struct dentry *parent_dentry,
69721+ const struct vfsmount *parent_mnt)
69722+{
69723+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
69724+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
69725+}
69726+
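+/* a rename is successful only when both the lookup against the old path and
+   the lookup against the new path granted read and write */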
69727+#define RENAME_CHECK_SUCCESS(old, new) \
69728+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
69729+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
69730+
69731+int
69732+gr_acl_handle_rename(struct dentry *new_dentry,
69733+ struct dentry *parent_dentry,
69734+ const struct vfsmount *parent_mnt,
69735+ struct dentry *old_dentry,
69736+ struct inode *old_parent_inode,
69737+ struct vfsmount *old_mnt, const struct filename *newname)
69738+{
69739+ __u32 comp1, comp2;
69740+ int error = 0;
69741+
69742+ if (unlikely(!gr_acl_is_enabled()))
69743+ return 0;
69744+
69745+ if (d_is_negative(new_dentry)) {
69746+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
69747+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
69748+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
69749+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
69750+ GR_DELETE | GR_AUDIT_DELETE |
69751+ GR_AUDIT_READ | GR_AUDIT_WRITE |
69752+ GR_SUPPRESS, old_mnt);
69753+ } else {
69754+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
69755+ GR_CREATE | GR_DELETE |
69756+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
69757+ GR_AUDIT_READ | GR_AUDIT_WRITE |
69758+ GR_SUPPRESS, parent_mnt);
69759+ comp2 =
69760+ gr_search_file(old_dentry,
69761+ GR_READ | GR_WRITE | GR_AUDIT_READ |
69762+ GR_DELETE | GR_AUDIT_DELETE |
69763+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
69764+ }
69765+
69766+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
69767+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
69768+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
69769+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
69770+ && !(comp2 & GR_SUPPRESS)) {
69771+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
69772+ error = -EACCES;
69773+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
69774+ error = -EACCES;
69775+
69776+ return error;
69777+}
69778+
69779+void
69780+gr_acl_handle_exit(void)
69781+{
69782+ u16 id;
69783+ char *rolename;
69784+
69785+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
69786+ !(current->role->roletype & GR_ROLE_PERSIST))) {
69787+ id = current->acl_role_id;
69788+ rolename = current->role->rolename;
69789+ gr_set_acls(1);
69790+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
69791+ }
69792+
69793+ gr_put_exec_file(current);
69794+ return;
69795+}
69796+
69797+int
69798+gr_acl_handle_procpidmem(const struct task_struct *task)
69799+{
69800+ if (unlikely(!gr_acl_is_enabled()))
69801+ return 0;
69802+
69803+ if (task != current && task->acl->mode & GR_PROTPROCFD)
69804+ return -EACCES;
69805+
69806+ return 0;
69807+}
69808diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
69809new file mode 100644
69810index 0000000..f056b81
69811--- /dev/null
69812+++ b/grsecurity/gracl_ip.c
69813@@ -0,0 +1,386 @@
69814+#include <linux/kernel.h>
69815+#include <asm/uaccess.h>
69816+#include <asm/errno.h>
69817+#include <net/sock.h>
69818+#include <linux/file.h>
69819+#include <linux/fs.h>
69820+#include <linux/net.h>
69821+#include <linux/in.h>
69822+#include <linux/skbuff.h>
69823+#include <linux/ip.h>
69824+#include <linux/udp.h>
69825+#include <linux/types.h>
69826+#include <linux/sched.h>
69827+#include <linux/netdevice.h>
69828+#include <linux/inetdevice.h>
69829+#include <linux/gracl.h>
69830+#include <linux/grsecurity.h>
69831+#include <linux/grinternal.h>
69832+
69833+#define GR_BIND 0x01
69834+#define GR_CONNECT 0x02
69835+#define GR_INVERT 0x04
69836+#define GR_BINDOVERRIDE 0x08
69837+#define GR_CONNECTOVERRIDE 0x10
69838+#define GR_SOCK_FAMILY 0x20
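+/* mode bits for the IP ACL checks below: rule flags matched in
+   check_ip_policy() and markers used when emitting learn log entries */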
69839+
69840+static const char * gr_protocols[IPPROTO_MAX] = {
69841+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
69842+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
69843+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
69844+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
69845+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
69846+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
69847+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
69848+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
69849+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
69850+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
69851+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
69852+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
69853+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
69854+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
69855+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
69856+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
69857+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
69858+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
69859+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
69860+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
69861+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
69862+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
69863+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
69864+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
69865+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
69866+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
69867+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
69868+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
69869+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
69870+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
69871+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
69872+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
69873+ };
69874+
69875+static const char * gr_socktypes[SOCK_MAX] = {
69876+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
69877+ "unknown:7", "unknown:8", "unknown:9", "packet"
69878+ };
69879+
69880+static const char * gr_sockfamilies[AF_MAX+1] = {
69881+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
69882+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
69883+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
69884+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
69885+ };
69886+
69887+const char *
69888+gr_proto_to_name(unsigned char proto)
69889+{
69890+ return gr_protocols[proto];
69891+}
69892+
69893+const char *
69894+gr_socktype_to_name(unsigned char type)
69895+{
69896+ return gr_socktypes[type];
69897+}
69898+
69899+const char *
69900+gr_sockfamily_to_name(unsigned char family)
69901+{
69902+ return gr_sockfamilies[family];
69903+}
69904+
69905+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
69906+
69907+int
69908+gr_search_socket(const int domain, const int type, const int protocol)
69909+{
69910+ struct acl_subject_label *curr;
69911+ const struct cred *cred = current_cred();
69912+
69913+ if (unlikely(!gr_acl_is_enabled()))
69914+ goto exit;
69915+
69916+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
69917+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
69918+ goto exit; // let the kernel handle it
69919+
69920+ curr = current->acl;
69921+
69922+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
69923+		/* the family is allowed; if this is PF_INET, allow it only if
69924+		   the extra sock type/protocol checks pass */
69925+ if (domain == PF_INET)
69926+ goto inet_check;
69927+ goto exit;
69928+ } else {
69929+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
69930+ __u32 fakeip = 0;
69931+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69932+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69933+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69934+ gr_to_filename(current->exec_file->f_path.dentry,
69935+ current->exec_file->f_path.mnt) :
69936+ curr->filename, curr->filename,
69937+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
69938+ &current->signal->saved_ip);
69939+ goto exit;
69940+ }
69941+ goto exit_fail;
69942+ }
69943+
69944+inet_check:
69945+ /* the rest of this checking is for IPv4 only */
69946+ if (!curr->ips)
69947+ goto exit;
69948+
69949+ if ((curr->ip_type & (1U << type)) &&
69950+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
69951+ goto exit;
69952+
69953+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
69954+		/* we don't place acls on raw sockets, and sometimes
69955+ dgram/ip sockets are opened for ioctl and not
69956+ bind/connect, so we'll fake a bind learn log */
69957+ if (type == SOCK_RAW || type == SOCK_PACKET) {
69958+ __u32 fakeip = 0;
69959+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69960+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69961+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69962+ gr_to_filename(current->exec_file->f_path.dentry,
69963+ current->exec_file->f_path.mnt) :
69964+ curr->filename, curr->filename,
69965+ &fakeip, 0, type,
69966+ protocol, GR_CONNECT, &current->signal->saved_ip);
69967+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
69968+ __u32 fakeip = 0;
69969+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
69970+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
69971+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
69972+ gr_to_filename(current->exec_file->f_path.dentry,
69973+ current->exec_file->f_path.mnt) :
69974+ curr->filename, curr->filename,
69975+ &fakeip, 0, type,
69976+ protocol, GR_BIND, &current->signal->saved_ip);
69977+ }
69978+ /* we'll log when they use connect or bind */
69979+ goto exit;
69980+ }
69981+
69982+exit_fail:
69983+ if (domain == PF_INET)
69984+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
69985+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
69986+ else if (rcu_access_pointer(net_families[domain]) != NULL)
69987+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
69988+ gr_socktype_to_name(type), protocol);
69989+
69990+ return 0;
69991+exit:
69992+ return 1;
69993+}
69994+
69995+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
69996+{
69997+ if ((ip->mode & mode) &&
69998+ (ip_port >= ip->low) &&
69999+ (ip_port <= ip->high) &&
70000+ ((ntohl(ip_addr) & our_netmask) ==
70001+ (ntohl(our_addr) & our_netmask))
70002+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
70003+ && (ip->type & (1U << type))) {
70004+ if (ip->mode & GR_INVERT)
70005+ return 2; // specifically denied
70006+ else
70007+ return 1; // allowed
70008+ }
70009+
70010+ return 0; // not specifically allowed, may continue parsing
70011+}
70012+
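+/* shared worker for the bind/connect/listen/accept/sendmsg hooks below:
+   apply the subject's IP ACL rules to an IPv4 address/port, honoring the
+   INADDR_ANY override and learning mode; returns 0 to allow, -EACCES (or a
+   bind error) to deny */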
70013+static int
70014+gr_search_connectbind(const int full_mode, struct sock *sk,
70015+ struct sockaddr_in *addr, const int type)
70016+{
70017+ char iface[IFNAMSIZ] = {0};
70018+ struct acl_subject_label *curr;
70019+ struct acl_ip_label *ip;
70020+ struct inet_sock *isk;
70021+ struct net_device *dev;
70022+ struct in_device *idev;
70023+ unsigned long i;
70024+ int ret;
70025+ int mode = full_mode & (GR_BIND | GR_CONNECT);
70026+ __u32 ip_addr = 0;
70027+ __u32 our_addr;
70028+ __u32 our_netmask;
70029+ char *p;
70030+ __u16 ip_port = 0;
70031+ const struct cred *cred = current_cred();
70032+
70033+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
70034+ return 0;
70035+
70036+ curr = current->acl;
70037+ isk = inet_sk(sk);
70038+
70039+	/* INADDR_ANY override for binds; inaddr_any_override is already in network order */
70040+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
70041+ addr->sin_addr.s_addr = curr->inaddr_any_override;
70042+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
70043+ struct sockaddr_in saddr;
70044+ int err;
70045+
70046+ saddr.sin_family = AF_INET;
70047+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
70048+ saddr.sin_port = isk->inet_sport;
70049+
70050+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
70051+ if (err)
70052+ return err;
70053+
70054+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
70055+ if (err)
70056+ return err;
70057+ }
70058+
70059+ if (!curr->ips)
70060+ return 0;
70061+
70062+ ip_addr = addr->sin_addr.s_addr;
70063+ ip_port = ntohs(addr->sin_port);
70064+
70065+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
70066+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
70067+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
70068+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
70069+ gr_to_filename(current->exec_file->f_path.dentry,
70070+ current->exec_file->f_path.mnt) :
70071+ curr->filename, curr->filename,
70072+ &ip_addr, ip_port, type,
70073+ sk->sk_protocol, mode, &current->signal->saved_ip);
70074+ return 0;
70075+ }
70076+
70077+ for (i = 0; i < curr->ip_num; i++) {
70078+ ip = *(curr->ips + i);
70079+ if (ip->iface != NULL) {
70080+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
70081+ p = strchr(iface, ':');
70082+ if (p != NULL)
70083+ *p = '\0';
70084+ dev = dev_get_by_name(sock_net(sk), iface);
70085+ if (dev == NULL)
70086+ continue;
70087+ idev = in_dev_get(dev);
70088+ if (idev == NULL) {
70089+ dev_put(dev);
70090+ continue;
70091+ }
70092+ rcu_read_lock();
70093+ for_ifa(idev) {
70094+ if (!strcmp(ip->iface, ifa->ifa_label)) {
70095+ our_addr = ifa->ifa_address;
70096+ our_netmask = 0xffffffff;
70097+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
70098+ if (ret == 1) {
70099+ rcu_read_unlock();
70100+ in_dev_put(idev);
70101+ dev_put(dev);
70102+ return 0;
70103+ } else if (ret == 2) {
70104+ rcu_read_unlock();
70105+ in_dev_put(idev);
70106+ dev_put(dev);
70107+ goto denied;
70108+ }
70109+ }
70110+ } endfor_ifa(idev);
70111+ rcu_read_unlock();
70112+ in_dev_put(idev);
70113+ dev_put(dev);
70114+ } else {
70115+ our_addr = ip->addr;
70116+ our_netmask = ip->netmask;
70117+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
70118+ if (ret == 1)
70119+ return 0;
70120+ else if (ret == 2)
70121+ goto denied;
70122+ }
70123+ }
70124+
70125+denied:
70126+ if (mode == GR_BIND)
70127+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
70128+ else if (mode == GR_CONNECT)
70129+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
70130+
70131+ return -EACCES;
70132+}
70133+
70134+int
70135+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
70136+{
70137+ /* always allow disconnection of dgram sockets with connect */
70138+ if (addr->sin_family == AF_UNSPEC)
70139+ return 0;
70140+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
70141+}
70142+
70143+int
70144+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
70145+{
70146+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
70147+}
70148+
70149+int gr_search_listen(struct socket *sock)
70150+{
70151+ struct sock *sk = sock->sk;
70152+ struct sockaddr_in addr;
70153+
70154+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
70155+ addr.sin_port = inet_sk(sk)->inet_sport;
70156+
70157+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
70158+}
70159+
70160+int gr_search_accept(struct socket *sock)
70161+{
70162+ struct sock *sk = sock->sk;
70163+ struct sockaddr_in addr;
70164+
70165+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
70166+ addr.sin_port = inet_sk(sk)->inet_sport;
70167+
70168+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
70169+}
70170+
70171+int
70172+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
70173+{
70174+ if (addr)
70175+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
70176+ else {
70177+ struct sockaddr_in sin;
70178+ const struct inet_sock *inet = inet_sk(sk);
70179+
70180+ sin.sin_addr.s_addr = inet->inet_daddr;
70181+ sin.sin_port = inet->inet_dport;
70182+
70183+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
70184+ }
70185+}
70186+
70187+int
70188+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
70189+{
70190+ struct sockaddr_in sin;
70191+
70192+ if (unlikely(skb->len < sizeof (struct udphdr)))
70193+ return 0; // skip this packet
70194+
70195+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
70196+ sin.sin_port = udp_hdr(skb)->source;
70197+
70198+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
70199+}
70200diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
70201new file mode 100644
70202index 0000000..25f54ef
70203--- /dev/null
70204+++ b/grsecurity/gracl_learn.c
70205@@ -0,0 +1,207 @@
70206+#include <linux/kernel.h>
70207+#include <linux/mm.h>
70208+#include <linux/sched.h>
70209+#include <linux/poll.h>
70210+#include <linux/string.h>
70211+#include <linux/file.h>
70212+#include <linux/types.h>
70213+#include <linux/vmalloc.h>
70214+#include <linux/grinternal.h>
70215+
70216+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
70217+ size_t count, loff_t *ppos);
70218+extern int gr_acl_is_enabled(void);
70219+
70220+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
70221+static int gr_learn_attached;
70222+
70223+/* use a 512k buffer */
70224+#define LEARN_BUFFER_SIZE (512 * 1024)
70225+
70226+static DEFINE_SPINLOCK(gr_learn_lock);
70227+static DEFINE_MUTEX(gr_learn_user_mutex);
70228+
70229+/* we need to maintain two buffers, so that the grlearn reader context can
70230+   take a mutex around the copy to userspace, while the other kernel contexts,
70231+   which cannot sleep, use a spinlock when copying into the buffer
70232+*/
70233+static char *learn_buffer;
70234+static char *learn_buffer_user;
70235+static int learn_buffer_len;
70236+static int learn_buffer_user_len;
70237+
70238+static ssize_t
70239+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
70240+{
70241+ DECLARE_WAITQUEUE(wait, current);
70242+ ssize_t retval = 0;
70243+
70244+ add_wait_queue(&learn_wait, &wait);
70245+ set_current_state(TASK_INTERRUPTIBLE);
70246+ do {
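+		/* on break, both gr_learn_user_mutex and gr_learn_lock are
+		   still held; they are dropped below once the buffer has been
+		   drained */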
70247+ mutex_lock(&gr_learn_user_mutex);
70248+ spin_lock(&gr_learn_lock);
70249+ if (learn_buffer_len)
70250+ break;
70251+ spin_unlock(&gr_learn_lock);
70252+ mutex_unlock(&gr_learn_user_mutex);
70253+ if (file->f_flags & O_NONBLOCK) {
70254+ retval = -EAGAIN;
70255+ goto out;
70256+ }
70257+ if (signal_pending(current)) {
70258+ retval = -ERESTARTSYS;
70259+ goto out;
70260+ }
70261+
70262+ schedule();
70263+ } while (1);
70264+
70265+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
70266+ learn_buffer_user_len = learn_buffer_len;
70267+ retval = learn_buffer_len;
70268+ learn_buffer_len = 0;
70269+
70270+ spin_unlock(&gr_learn_lock);
70271+
70272+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
70273+ retval = -EFAULT;
70274+
70275+ mutex_unlock(&gr_learn_user_mutex);
70276+out:
70277+ set_current_state(TASK_RUNNING);
70278+ remove_wait_queue(&learn_wait, &wait);
70279+ return retval;
70280+}
70281+
70282+static unsigned int
70283+poll_learn(struct file * file, poll_table * wait)
70284+{
70285+ poll_wait(file, &learn_wait, wait);
70286+
70287+ if (learn_buffer_len)
70288+ return (POLLIN | POLLRDNORM);
70289+
70290+ return 0;
70291+}
70292+
70293+void
70294+gr_clear_learn_entries(void)
70295+{
70296+ char *tmp;
70297+
70298+ mutex_lock(&gr_learn_user_mutex);
70299+ spin_lock(&gr_learn_lock);
70300+ tmp = learn_buffer;
70301+ learn_buffer = NULL;
70302+ spin_unlock(&gr_learn_lock);
70303+ if (tmp)
70304+ vfree(tmp);
70305+ if (learn_buffer_user != NULL) {
70306+ vfree(learn_buffer_user);
70307+ learn_buffer_user = NULL;
70308+ }
70309+ learn_buffer_len = 0;
70310+ mutex_unlock(&gr_learn_user_mutex);
70311+
70312+ return;
70313+}
70314+
70315+void
70316+gr_add_learn_entry(const char *fmt, ...)
70317+{
70318+ va_list args;
70319+ unsigned int len;
70320+
70321+ if (!gr_learn_attached)
70322+ return;
70323+
70324+ spin_lock(&gr_learn_lock);
70325+
70326+ /* leave a gap at the end so we know when it's "full" but don't have to
70327+ compute the exact length of the string we're trying to append
70328+ */
70329+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
70330+ spin_unlock(&gr_learn_lock);
70331+ wake_up_interruptible(&learn_wait);
70332+ return;
70333+ }
70334+ if (learn_buffer == NULL) {
70335+ spin_unlock(&gr_learn_lock);
70336+ return;
70337+ }
70338+
70339+ va_start(args, fmt);
70340+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
70341+ va_end(args);
70342+
70343+ learn_buffer_len += len + 1;
70344+
70345+ spin_unlock(&gr_learn_lock);
70346+ wake_up_interruptible(&learn_wait);
70347+
70348+ return;
70349+}
70350+
70351+static int
70352+open_learn(struct inode *inode, struct file *file)
70353+{
70354+ if (file->f_mode & FMODE_READ && gr_learn_attached)
70355+ return -EBUSY;
70356+ if (file->f_mode & FMODE_READ) {
70357+ int retval = 0;
70358+ mutex_lock(&gr_learn_user_mutex);
70359+ if (learn_buffer == NULL)
70360+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
70361+ if (learn_buffer_user == NULL)
70362+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
70363+ if (learn_buffer == NULL) {
70364+ retval = -ENOMEM;
70365+ goto out_error;
70366+ }
70367+ if (learn_buffer_user == NULL) {
70368+ retval = -ENOMEM;
70369+ goto out_error;
70370+ }
70371+ learn_buffer_len = 0;
70372+ learn_buffer_user_len = 0;
70373+ gr_learn_attached = 1;
70374+out_error:
70375+ mutex_unlock(&gr_learn_user_mutex);
70376+ return retval;
70377+ }
70378+ return 0;
70379+}
70380+
70381+static int
70382+close_learn(struct inode *inode, struct file *file)
70383+{
70384+ if (file->f_mode & FMODE_READ) {
70385+ char *tmp = NULL;
70386+ mutex_lock(&gr_learn_user_mutex);
70387+ spin_lock(&gr_learn_lock);
70388+ tmp = learn_buffer;
70389+ learn_buffer = NULL;
70390+ spin_unlock(&gr_learn_lock);
70391+ if (tmp)
70392+ vfree(tmp);
70393+ if (learn_buffer_user != NULL) {
70394+ vfree(learn_buffer_user);
70395+ learn_buffer_user = NULL;
70396+ }
70397+ learn_buffer_len = 0;
70398+ learn_buffer_user_len = 0;
70399+ gr_learn_attached = 0;
70400+ mutex_unlock(&gr_learn_user_mutex);
70401+ }
70402+
70403+ return 0;
70404+}
70405+
70406+const struct file_operations grsec_fops = {
70407+ .read = read_learn,
70408+ .write = write_grsec_handler,
70409+ .open = open_learn,
70410+ .release = close_learn,
70411+ .poll = poll_learn,
70412+};
70413diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
70414new file mode 100644
70415index 0000000..361a099
70416--- /dev/null
70417+++ b/grsecurity/gracl_policy.c
70418@@ -0,0 +1,1782 @@
70419+#include <linux/kernel.h>
70420+#include <linux/module.h>
70421+#include <linux/sched.h>
70422+#include <linux/mm.h>
70423+#include <linux/file.h>
70424+#include <linux/fs.h>
70425+#include <linux/namei.h>
70426+#include <linux/mount.h>
70427+#include <linux/tty.h>
70428+#include <linux/proc_fs.h>
70429+#include <linux/lglock.h>
70430+#include <linux/slab.h>
70431+#include <linux/vmalloc.h>
70432+#include <linux/types.h>
70433+#include <linux/sysctl.h>
70434+#include <linux/netdevice.h>
70435+#include <linux/ptrace.h>
70436+#include <linux/gracl.h>
70437+#include <linux/gralloc.h>
70438+#include <linux/security.h>
70439+#include <linux/grinternal.h>
70440+#include <linux/pid_namespace.h>
70441+#include <linux/stop_machine.h>
70442+#include <linux/fdtable.h>
70443+#include <linux/percpu.h>
70444+#include <linux/lglock.h>
70445+#include <linux/hugetlb.h>
70446+#include <linux/posix-timers.h>
70447+#include "../fs/mount.h"
70448+
70449+#include <asm/uaccess.h>
70450+#include <asm/errno.h>
70451+#include <asm/mman.h>
70452+
70453+extern struct gr_policy_state *polstate;
70454+
70455+#define FOR_EACH_ROLE_START(role) \
70456+ role = polstate->role_list; \
70457+ while (role) {
70458+
70459+#define FOR_EACH_ROLE_END(role) \
70460+ role = role->prev; \
70461+ }
70462+
70463+struct path gr_real_root;
70464+
70465+extern struct gr_alloc_state *current_alloc_state;
70466+
70467+u16 acl_sp_role_value;
70468+
70469+static DEFINE_MUTEX(gr_dev_mutex);
70470+
70471+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
70472+extern void gr_clear_learn_entries(void);
70473+
70474+static struct gr_arg gr_usermode;
70475+static unsigned char gr_system_salt[GR_SALT_LEN];
70476+static unsigned char gr_system_sum[GR_SHA_LEN];
70477+
70478+static unsigned int gr_auth_attempts = 0;
70479+static unsigned long gr_auth_expires = 0UL;
70480+
70481+struct acl_object_label *fakefs_obj_rw;
70482+struct acl_object_label *fakefs_obj_rwx;
70483+
70484+extern int gr_init_uidset(void);
70485+extern void gr_free_uidset(void);
70486+extern void gr_remove_uid(uid_t uid);
70487+extern int gr_find_uid(uid_t uid);
70488+
70489+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
70490+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
70491+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
70492+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
70493+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
70494+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
70495+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
70496+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
70497+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
70498+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
70499+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
70500+extern void assign_special_role(const char *rolename);
70501+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
70502+extern int gr_rbac_disable(void *unused);
70503+extern void gr_enable_rbac_system(void);
70504+
70505+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
70506+{
70507+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
70508+ return -EFAULT;
70509+
70510+ return 0;
70511+}
70512+
70513+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
70514+{
70515+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
70516+ return -EFAULT;
70517+
70518+ return 0;
70519+}
70520+
70521+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
70522+{
70523+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
70524+ return -EFAULT;
70525+
70526+ return 0;
70527+}
70528+
70529+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
70530+{
70531+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
70532+ return -EFAULT;
70533+
70534+ return 0;
70535+}
70536+
70537+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
70538+{
70539+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
70540+ return -EFAULT;
70541+
70542+ return 0;
70543+}
70544+
70545+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
70546+{
70547+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
70548+ return -EFAULT;
70549+
70550+ return 0;
70551+}
70552+
70553+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
70554+{
70555+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
70556+ return -EFAULT;
70557+
70558+ return 0;
70559+}
70560+
70561+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
70562+{
70563+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
70564+ return -EFAULT;
70565+
70566+ return 0;
70567+}
70568+
70569+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
70570+{
70571+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
70572+ return -EFAULT;
70573+
70574+ return 0;
70575+}
70576+
70577+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
70578+{
70579+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
70580+ return -EFAULT;
70581+
70582+ if (((uwrap->version != GRSECURITY_VERSION) &&
70583+ (uwrap->version != 0x2901)) ||
70584+ (uwrap->size != sizeof(struct gr_arg)))
70585+ return -EINVAL;
70586+
70587+ return 0;
70588+}
70589+
70590+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
70591+{
70592+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
70593+ return -EFAULT;
70594+
70595+ return 0;
70596+}
70597+
70598+static size_t get_gr_arg_wrapper_size_normal(void)
70599+{
70600+ return sizeof(struct gr_arg_wrapper);
70601+}
70602+
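+/* with CONFIG_COMPAT the copy routines are function pointers, presumably
+   switched between the native and compat variants according to the bitness
+   of the userspace policy loader (the assignments live elsewhere in this
+   patch); without it the native variants are used directly */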
70603+#ifdef CONFIG_COMPAT
70604+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
70605+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
70606+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
70607+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
70608+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
70609+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
70610+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
70611+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
70612+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
70613+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
70614+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
70615+extern size_t get_gr_arg_wrapper_size_compat(void);
70616+
70617+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
70618+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
70619+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
70620+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
70621+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
70622+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
70623+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
70624+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
70625+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
70626+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
70627+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
70628+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
70629+
70630+#else
70631+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
70632+#define copy_gr_arg copy_gr_arg_normal
70633+#define copy_gr_hash_struct copy_gr_hash_struct_normal
70634+#define copy_acl_object_label copy_acl_object_label_normal
70635+#define copy_acl_subject_label copy_acl_subject_label_normal
70636+#define copy_acl_role_label copy_acl_role_label_normal
70637+#define copy_acl_ip_label copy_acl_ip_label_normal
70638+#define copy_pointer_from_array copy_pointer_from_array_normal
70639+#define copy_sprole_pw copy_sprole_pw_normal
70640+#define copy_role_transition copy_role_transition_normal
70641+#define copy_role_allowed_ip copy_role_allowed_ip_normal
70642+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
70643+#endif
70644+
70645+static struct acl_subject_label *
70646+lookup_subject_map(const struct acl_subject_label *userp)
70647+{
70648+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
70649+ struct subject_map *match;
70650+
70651+ match = polstate->subj_map_set.s_hash[index];
70652+
70653+ while (match && match->user != userp)
70654+ match = match->next;
70655+
70656+ if (match != NULL)
70657+ return match->kernel;
70658+ else
70659+ return NULL;
70660+}
70661+
70662+static void
70663+insert_subj_map_entry(struct subject_map *subjmap)
70664+{
70665+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
70666+ struct subject_map **curr;
70667+
70668+ subjmap->prev = NULL;
70669+
70670+ curr = &polstate->subj_map_set.s_hash[index];
70671+ if (*curr != NULL)
70672+ (*curr)->prev = subjmap;
70673+
70674+ subjmap->next = *curr;
70675+ *curr = subjmap;
70676+
70677+ return;
70678+}
70679+
70680+static void
70681+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
70682+{
70683+ unsigned int index =
70684+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
70685+ struct acl_role_label **curr;
70686+ struct acl_role_label *tmp, *tmp2;
70687+
70688+ curr = &polstate->acl_role_set.r_hash[index];
70689+
70690+ /* simple case, slot is empty, just set it to our role */
70691+ if (*curr == NULL) {
70692+ *curr = role;
70693+ } else {
70694+ /* example:
70695+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
70696+ 2 -> 3
70697+ */
70698+ /* first check to see if we can already be reached via this slot */
70699+ tmp = *curr;
70700+ while (tmp && tmp != role)
70701+ tmp = tmp->next;
70702+ if (tmp == role) {
70703+ /* we don't need to add ourselves to this slot's chain */
70704+ return;
70705+ }
70706+ /* we need to add ourselves to this chain, two cases */
70707+ if (role->next == NULL) {
70708+ /* simple case, append the current chain to our role */
70709+ role->next = *curr;
70710+ *curr = role;
70711+ } else {
70712+ /* 1 -> 2 -> 3 -> 4
70713+ 2 -> 3 -> 4
70714+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
70715+ */
70716+ /* trickier case: walk our role's chain until we find
70717+ the role for the start of the current slot's chain */
70718+ tmp = role;
70719+ tmp2 = *curr;
70720+ while (tmp->next && tmp->next != tmp2)
70721+ tmp = tmp->next;
70722+ if (tmp->next == tmp2) {
70723+ /* from example above, we found 3, so just
70724+ replace this slot's chain with ours */
70725+ *curr = role;
70726+ } else {
70727+ /* we didn't find a subset of our role's chain
70728+ in the current slot's chain, so append their
70729+ chain to ours, and set us as the first role in
70730+ the slot's chain
70731+
70732+ we could fold this case with the case above,
70733+ but making it explicit for clarity
70734+ */
70735+ tmp->next = tmp2;
70736+ *curr = role;
70737+ }
70738+ }
70739+ }
70740+
70741+ return;
70742+}
70743+
70744+static void
70745+insert_acl_role_label(struct acl_role_label *role)
70746+{
70747+ int i;
70748+
70749+ if (polstate->role_list == NULL) {
70750+ polstate->role_list = role;
70751+ role->prev = NULL;
70752+ } else {
70753+ role->prev = polstate->role_list;
70754+ polstate->role_list = role;
70755+ }
70756+
70757+ /* used for hash chains */
70758+ role->next = NULL;
70759+
70760+ if (role->roletype & GR_ROLE_DOMAIN) {
70761+ for (i = 0; i < role->domain_child_num; i++)
70762+ __insert_acl_role_label(role, role->domain_children[i]);
70763+ } else
70764+ __insert_acl_role_label(role, role->uidgid);
70765+}
70766+
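+/* add a filename to the name hash (keyed by full_name_hash()) and mirror it
+   into the inode/device table; returns 1 on success or if the name already
+   exists, 0 on allocation failure */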
70767+static int
70768+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
70769+{
70770+ struct name_entry **curr, *nentry;
70771+ struct inodev_entry *ientry;
70772+ unsigned int len = strlen(name);
70773+ unsigned int key = full_name_hash(name, len);
70774+ unsigned int index = key % polstate->name_set.n_size;
70775+
70776+ curr = &polstate->name_set.n_hash[index];
70777+
70778+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
70779+ curr = &((*curr)->next);
70780+
70781+ if (*curr != NULL)
70782+ return 1;
70783+
70784+ nentry = acl_alloc(sizeof (struct name_entry));
70785+ if (nentry == NULL)
70786+ return 0;
70787+ ientry = acl_alloc(sizeof (struct inodev_entry));
70788+ if (ientry == NULL)
70789+ return 0;
70790+ ientry->nentry = nentry;
70791+
70792+ nentry->key = key;
70793+ nentry->name = name;
70794+ nentry->inode = inode;
70795+ nentry->device = device;
70796+ nentry->len = len;
70797+ nentry->deleted = deleted;
70798+
70799+ nentry->prev = NULL;
70800+ curr = &polstate->name_set.n_hash[index];
70801+ if (*curr != NULL)
70802+ (*curr)->prev = nentry;
70803+ nentry->next = *curr;
70804+ *curr = nentry;
70805+
70806+ /* insert us into the table searchable by inode/dev */
70807+ __insert_inodev_entry(polstate, ientry);
70808+
70809+ return 1;
70810+}
70811+
70812+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
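+/* e.g. a request for 1000 entries walks the prime table past 509 and
+   settles on 1021 buckets, for a load factor just under 1 */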
70813+
70814+static void *
70815+create_table(__u32 * len, int elementsize)
70816+{
70817+ unsigned int table_sizes[] = {
70818+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
70819+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
70820+ 4194301, 8388593, 16777213, 33554393, 67108859
70821+ };
70822+ void *newtable = NULL;
70823+ unsigned int pwr = 0;
70824+
70825+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
70826+ table_sizes[pwr] <= *len)
70827+ pwr++;
70828+
70829+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
70830+ return newtable;
70831+
70832+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
70833+ newtable =
70834+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
70835+ else
70836+ newtable = vmalloc(table_sizes[pwr] * elementsize);
70837+
70838+ *len = table_sizes[pwr];
70839+
70840+ return newtable;
70841+}
70842+
70843+static int
70844+init_variables(const struct gr_arg *arg, bool reload)
70845+{
70846+ struct task_struct *reaper = init_pid_ns.child_reaper;
70847+ unsigned int stacksize;
70848+
70849+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
70850+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
70851+ polstate->name_set.n_size = arg->role_db.num_objects;
70852+ polstate->inodev_set.i_size = arg->role_db.num_objects;
70853+
70854+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
70855+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
70856+ return 1;
70857+
70858+ if (!reload) {
70859+ if (!gr_init_uidset())
70860+ return 1;
70861+ }
70862+
70863+ /* set up the stack that holds allocation info */
70864+
70865+ stacksize = arg->role_db.num_pointers + 5;
70866+
70867+ if (!acl_alloc_stack_init(stacksize))
70868+ return 1;
70869+
70870+ if (!reload) {
70871+ /* grab reference for the real root dentry and vfsmount */
70872+ get_fs_root(reaper->fs, &gr_real_root);
70873+
70874+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70875+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
70876+#endif
70877+
70878+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
70879+ if (fakefs_obj_rw == NULL)
70880+ return 1;
70881+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
70882+
70883+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
70884+ if (fakefs_obj_rwx == NULL)
70885+ return 1;
70886+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
70887+ }
70888+
70889+ polstate->subj_map_set.s_hash =
70890+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
70891+ polstate->acl_role_set.r_hash =
70892+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
70893+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
70894+ polstate->inodev_set.i_hash =
70895+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
70896+
70897+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
70898+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
70899+ return 1;
70900+
70901+ memset(polstate->subj_map_set.s_hash, 0,
70902+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
70903+ memset(polstate->acl_role_set.r_hash, 0,
70904+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
70905+ memset(polstate->name_set.n_hash, 0,
70906+ sizeof (struct name_entry *) * polstate->name_set.n_size);
70907+ memset(polstate->inodev_set.i_hash, 0,
70908+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
70909+
70910+ return 0;
70911+}
70912+
70913+/* free information not needed after startup;
70914+   currently this contains the user->kernel pointer mappings for subjects
70915+*/
70916+
70917+static void
70918+free_init_variables(void)
70919+{
70920+ __u32 i;
70921+
70922+ if (polstate->subj_map_set.s_hash) {
70923+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
70924+ if (polstate->subj_map_set.s_hash[i]) {
70925+ kfree(polstate->subj_map_set.s_hash[i]);
70926+ polstate->subj_map_set.s_hash[i] = NULL;
70927+ }
70928+ }
70929+
70930+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
70931+ PAGE_SIZE)
70932+ kfree(polstate->subj_map_set.s_hash);
70933+ else
70934+ vfree(polstate->subj_map_set.s_hash);
70935+ }
70936+
70937+ return;
70938+}
70939+
70940+static void
70941+free_variables(bool reload)
70942+{
70943+ struct acl_subject_label *s;
70944+ struct acl_role_label *r;
70945+ struct task_struct *task, *task2;
70946+ unsigned int x;
70947+
70948+ if (!reload) {
70949+ gr_clear_learn_entries();
70950+
70951+ read_lock(&tasklist_lock);
70952+ do_each_thread(task2, task) {
70953+ task->acl_sp_role = 0;
70954+ task->acl_role_id = 0;
70955+ task->inherited = 0;
70956+ task->acl = NULL;
70957+ task->role = NULL;
70958+ } while_each_thread(task2, task);
70959+ read_unlock(&tasklist_lock);
70960+
70961+ kfree(fakefs_obj_rw);
70962+ fakefs_obj_rw = NULL;
70963+ kfree(fakefs_obj_rwx);
70964+ fakefs_obj_rwx = NULL;
70965+
70966+ /* release the reference to the real root dentry and vfsmount */
70967+ path_put(&gr_real_root);
70968+ memset(&gr_real_root, 0, sizeof(gr_real_root));
70969+ }
70970+
70971+ /* free all object hash tables */
70972+
70973+ FOR_EACH_ROLE_START(r)
70974+ if (r->subj_hash == NULL)
70975+ goto next_role;
70976+ FOR_EACH_SUBJECT_START(r, s, x)
70977+ if (s->obj_hash == NULL)
70978+ break;
70979+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
70980+ kfree(s->obj_hash);
70981+ else
70982+ vfree(s->obj_hash);
70983+ FOR_EACH_SUBJECT_END(s, x)
70984+ FOR_EACH_NESTED_SUBJECT_START(r, s)
70985+ if (s->obj_hash == NULL)
70986+ break;
70987+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
70988+ kfree(s->obj_hash);
70989+ else
70990+ vfree(s->obj_hash);
70991+ FOR_EACH_NESTED_SUBJECT_END(s)
70992+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
70993+ kfree(r->subj_hash);
70994+ else
70995+ vfree(r->subj_hash);
70996+ r->subj_hash = NULL;
70997+next_role:
70998+ FOR_EACH_ROLE_END(r)
70999+
71000+ acl_free_all();
71001+
71002+ if (polstate->acl_role_set.r_hash) {
71003+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
71004+ PAGE_SIZE)
71005+ kfree(polstate->acl_role_set.r_hash);
71006+ else
71007+ vfree(polstate->acl_role_set.r_hash);
71008+ }
71009+ if (polstate->name_set.n_hash) {
71010+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
71011+ PAGE_SIZE)
71012+ kfree(polstate->name_set.n_hash);
71013+ else
71014+ vfree(polstate->name_set.n_hash);
71015+ }
71016+
71017+ if (polstate->inodev_set.i_hash) {
71018+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
71019+ PAGE_SIZE)
71020+ kfree(polstate->inodev_set.i_hash);
71021+ else
71022+ vfree(polstate->inodev_set.i_hash);
71023+ }
71024+
71025+ if (!reload)
71026+ gr_free_uidset();
71027+
71028+ memset(&polstate->name_set, 0, sizeof (struct name_db));
71029+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
71030+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
71031+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
71032+
71033+ polstate->default_role = NULL;
71034+ polstate->kernel_role = NULL;
71035+ polstate->role_list = NULL;
71036+
71037+ return;
71038+}
71039+
71040+static struct acl_subject_label *
71041+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
71042+
71043+static int alloc_and_copy_string(char **name, unsigned int maxlen)
71044+{
71045+ unsigned int len = strnlen_user(*name, maxlen);
71046+ char *tmp;
71047+
71048+ if (!len || len >= maxlen)
71049+ return -EINVAL;
71050+
71051+ if ((tmp = (char *) acl_alloc(len)) == NULL)
71052+ return -ENOMEM;
71053+
71054+ if (copy_from_user(tmp, *name, len))
71055+ return -EFAULT;
71056+
71057+ tmp[len-1] = '\0';
71058+ *name = tmp;
71059+
71060+ return 0;
71061+}
71062+
71063+static int
71064+copy_user_glob(struct acl_object_label *obj)
71065+{
71066+ struct acl_object_label *g_tmp, **guser;
71067+ int error;
71068+
71069+ if (obj->globbed == NULL)
71070+ return 0;
71071+
71072+ guser = &obj->globbed;
71073+ while (*guser) {
71074+ g_tmp = (struct acl_object_label *)
71075+ acl_alloc(sizeof (struct acl_object_label));
71076+ if (g_tmp == NULL)
71077+ return -ENOMEM;
71078+
71079+ if (copy_acl_object_label(g_tmp, *guser))
71080+ return -EFAULT;
71081+
71082+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
71083+ if (error)
71084+ return error;
71085+
71086+ *guser = g_tmp;
71087+ guser = &(g_tmp->next);
71088+ }
71089+
71090+ return 0;
71091+}
71092+
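+/* copy a subject's object list from userspace; the user list is chained via
+   ->prev, and each object is inserted into the subject's object hash and the
+   global name table, with nested subjects hooked into the role's list */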
71093+static int
71094+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
71095+ struct acl_role_label *role)
71096+{
71097+ struct acl_object_label *o_tmp;
71098+ int ret;
71099+
71100+ while (userp) {
71101+ if ((o_tmp = (struct acl_object_label *)
71102+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
71103+ return -ENOMEM;
71104+
71105+ if (copy_acl_object_label(o_tmp, userp))
71106+ return -EFAULT;
71107+
71108+ userp = o_tmp->prev;
71109+
71110+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
71111+ if (ret)
71112+ return ret;
71113+
71114+ insert_acl_obj_label(o_tmp, subj);
71115+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
71116+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
71117+ return -ENOMEM;
71118+
71119+ ret = copy_user_glob(o_tmp);
71120+ if (ret)
71121+ return ret;
71122+
71123+ if (o_tmp->nested) {
71124+ int already_copied;
71125+
71126+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
71127+ if (IS_ERR(o_tmp->nested))
71128+ return PTR_ERR(o_tmp->nested);
71129+
71130+ /* insert into nested subject list if we haven't copied this one yet
71131+ to prevent duplicate entries */
71132+ if (!already_copied) {
71133+ o_tmp->nested->next = role->hash->first;
71134+ role->hash->first = o_tmp->nested;
71135+ }
71136+ }
71137+ }
71138+
71139+ return 0;
71140+}
71141+
71142+static __u32
71143+count_user_subjs(struct acl_subject_label *userp)
71144+{
71145+ struct acl_subject_label s_tmp;
71146+ __u32 num = 0;
71147+
71148+ while (userp) {
71149+ if (copy_acl_subject_label(&s_tmp, userp))
71150+ break;
71151+
71152+		userp = s_tmp.prev;
+		num++;
71153+ }
71154+
71155+ return num;
71156+}
71157+
71158+static int
71159+copy_user_allowedips(struct acl_role_label *rolep)
71160+{
71161+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
71162+
71163+ ruserip = rolep->allowed_ips;
71164+
71165+ while (ruserip) {
71166+ rlast = rtmp;
71167+
71168+ if ((rtmp = (struct role_allowed_ip *)
71169+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
71170+ return -ENOMEM;
71171+
71172+ if (copy_role_allowed_ip(rtmp, ruserip))
71173+ return -EFAULT;
71174+
71175+ ruserip = rtmp->prev;
71176+
71177+ if (!rlast) {
71178+ rtmp->prev = NULL;
71179+ rolep->allowed_ips = rtmp;
71180+ } else {
71181+ rlast->next = rtmp;
71182+ rtmp->prev = rlast;
71183+ }
71184+
71185+ if (!ruserip)
71186+ rtmp->next = NULL;
71187+ }
71188+
71189+ return 0;
71190+}
71191+
71192+static int
71193+copy_user_transitions(struct acl_role_label *rolep)
71194+{
71195+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
71196+ int error;
71197+
71198+ rusertp = rolep->transitions;
71199+
71200+ while (rusertp) {
71201+ rlast = rtmp;
71202+
71203+ if ((rtmp = (struct role_transition *)
71204+ acl_alloc(sizeof (struct role_transition))) == NULL)
71205+ return -ENOMEM;
71206+
71207+ if (copy_role_transition(rtmp, rusertp))
71208+ return -EFAULT;
71209+
71210+ rusertp = rtmp->prev;
71211+
71212+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
71213+ if (error)
71214+ return error;
71215+
71216+ if (!rlast) {
71217+ rtmp->prev = NULL;
71218+ rolep->transitions = rtmp;
71219+ } else {
71220+ rlast->next = rtmp;
71221+ rtmp->prev = rlast;
71222+ }
71223+
71224+ if (!rusertp)
71225+ rtmp->next = NULL;
71226+ }
71227+
71228+ return 0;
71229+}
71230+
71231+static __u32 count_user_objs(const struct acl_object_label __user *userp)
71232+{
71233+ struct acl_object_label o_tmp;
71234+ __u32 num = 0;
71235+
71236+ while (userp) {
71237+ if (copy_acl_object_label(&o_tmp, userp))
71238+ break;
71239+
71240+ userp = o_tmp.prev;
71241+ num++;
71242+ }
71243+
71244+ return num;
71245+}
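/* A sketch of how count_user_objs()/count_user_subjs() walk a list that
 * lives in userspace: each node is copied into a stack temporary and only
 * the copied ->prev pointer is followed; the user pointers themselves are
 * never dereferenced directly.  Plain-C analogue with memcpy() standing in
 * for the copy_* helpers -- illustration only. */
#include <stdio.h>
#include <string.h>

struct node { struct node *prev; int payload; };

static unsigned int count_nodes(const struct node *userp)
{
	struct node tmp;
	unsigned int num = 0;

	while (userp) {
		memcpy(&tmp, userp, sizeof(tmp));  /* copy_from_user() analogue */
		userp = tmp.prev;                  /* follow the *copied* link */
		num++;
	}
	return num;
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { &a, 2 }, c = { &b, 3 };

	printf("%u\n", count_nodes(&c));	/* prints 3 */
	return 0;
}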
71246+
71247+static struct acl_subject_label *
71248+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
71249+{
71250+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
71251+ __u32 num_objs;
71252+ struct acl_ip_label **i_tmp, *i_utmp2;
71253+ struct gr_hash_struct ghash;
71254+ struct subject_map *subjmap;
71255+ unsigned int i_num;
71256+ int err;
71257+
71258+ if (already_copied != NULL)
71259+ *already_copied = 0;
71260+
71261+ s_tmp = lookup_subject_map(userp);
71262+
71263+ /* we've already copied this subject into the kernel, just return
71264+ the reference to it, and don't copy it over again
71265+ */
71266+ if (s_tmp) {
71267+ if (already_copied != NULL)
71268+ *already_copied = 1;
71269+		return s_tmp;
71270+ }
71271+
71272+ if ((s_tmp = (struct acl_subject_label *)
71273+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
71274+ return ERR_PTR(-ENOMEM);
71275+
71276+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
71277+ if (subjmap == NULL)
71278+ return ERR_PTR(-ENOMEM);
71279+
71280+ subjmap->user = userp;
71281+ subjmap->kernel = s_tmp;
71282+ insert_subj_map_entry(subjmap);
71283+
71284+ if (copy_acl_subject_label(s_tmp, userp))
71285+ return ERR_PTR(-EFAULT);
71286+
71287+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
71288+ if (err)
71289+ return ERR_PTR(err);
71290+
71291+ if (!strcmp(s_tmp->filename, "/"))
71292+ role->root_label = s_tmp;
71293+
71294+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
71295+ return ERR_PTR(-EFAULT);
71296+
71297+ /* copy user and group transition tables */
71298+
71299+ if (s_tmp->user_trans_num) {
71300+ uid_t *uidlist;
71301+
71302+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
71303+ if (uidlist == NULL)
71304+ return ERR_PTR(-ENOMEM);
71305+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
71306+ return ERR_PTR(-EFAULT);
71307+
71308+ s_tmp->user_transitions = uidlist;
71309+ }
71310+
71311+ if (s_tmp->group_trans_num) {
71312+ gid_t *gidlist;
71313+
71314+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
71315+ if (gidlist == NULL)
71316+ return ERR_PTR(-ENOMEM);
71317+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
71318+ return ERR_PTR(-EFAULT);
71319+
71320+ s_tmp->group_transitions = gidlist;
71321+ }
71322+
71323+ /* set up object hash table */
71324+ num_objs = count_user_objs(ghash.first);
71325+
71326+ s_tmp->obj_hash_size = num_objs;
71327+ s_tmp->obj_hash =
71328+ (struct acl_object_label **)
71329+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
71330+
71331+ if (!s_tmp->obj_hash)
71332+ return ERR_PTR(-ENOMEM);
71333+
71334+ memset(s_tmp->obj_hash, 0,
71335+ s_tmp->obj_hash_size *
71336+ sizeof (struct acl_object_label *));
71337+
71338+ /* add in objects */
71339+ err = copy_user_objs(ghash.first, s_tmp, role);
71340+
71341+ if (err)
71342+ return ERR_PTR(err);
71343+
71344+ /* set pointer for parent subject */
71345+ if (s_tmp->parent_subject) {
71346+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
71347+
71348+ if (IS_ERR(s_tmp2))
71349+ return s_tmp2;
71350+
71351+ s_tmp->parent_subject = s_tmp2;
71352+ }
71353+
71354+ /* add in ip acls */
71355+
71356+ if (!s_tmp->ip_num) {
71357+ s_tmp->ips = NULL;
71358+ goto insert;
71359+ }
71360+
71361+ i_tmp =
71362+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
71363+ sizeof (struct acl_ip_label *));
71364+
71365+ if (!i_tmp)
71366+ return ERR_PTR(-ENOMEM);
71367+
71368+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
71369+ *(i_tmp + i_num) =
71370+ (struct acl_ip_label *)
71371+ acl_alloc(sizeof (struct acl_ip_label));
71372+ if (!*(i_tmp + i_num))
71373+ return ERR_PTR(-ENOMEM);
71374+
71375+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
71376+ return ERR_PTR(-EFAULT);
71377+
71378+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
71379+ return ERR_PTR(-EFAULT);
71380+
71381+ if ((*(i_tmp + i_num))->iface == NULL)
71382+ continue;
71383+
71384+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
71385+ if (err)
71386+ return ERR_PTR(err);
71387+ }
71388+
71389+ s_tmp->ips = i_tmp;
71390+
71391+insert:
71392+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
71393+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
71394+ return ERR_PTR(-ENOMEM);
71395+
71396+ return s_tmp;
71397+}
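/* do_copy_user_subj() memoizes on the *userspace* pointer: a subject that
 * was already copied is found via a user->kernel map and reused, so shared,
 * nested and parent subjects are copied exactly once, and the mapping is
 * registered before any recursion so reference cycles terminate through the
 * lookup.  Minimal sketch of that idea with a linear list (the kernel uses
 * a hash table); the names here (copy_subj, map_ent, ...) are illustrative
 * only. */
#include <stdlib.h>

struct subj { int dummy; };

struct map_ent { const void *user; struct subj *kernel; struct map_ent *next; };
static struct map_ent *map_head;

static struct subj *lookup_map(const void *user)
{
	struct map_ent *e;

	for (e = map_head; e; e = e->next)
		if (e->user == user)
			return e->kernel;
	return NULL;
}

static struct subj *copy_subj(const void *userp)
{
	struct subj *s = lookup_map(userp);
	struct map_ent *e;

	if (s)			/* already copied: reuse the kernel object */
		return s;

	s = calloc(1, sizeof(*s));
	e = calloc(1, sizeof(*e));
	if (!s || !e) {
		free(s);
		free(e);
		return NULL;
	}
	/* register the mapping *before* recursing into nested/parent
	 * subjects, so a cycle resolves via the lookup above */
	e->user = userp; e->kernel = s; e->next = map_head; map_head = e;
	/* ... copy fields, recurse into parent/nested subjects ... */
	return s;
}

int main(void)
{
	struct subj fake_user_subj;	/* pretend userspace object */
	struct subj *a = copy_subj(&fake_user_subj);
	struct subj *b = copy_subj(&fake_user_subj);

	return (a && a == b) ? 0 : 1;	/* second call reuses the first copy */
}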
71398+
71399+static int
71400+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
71401+{
71402+ struct acl_subject_label s_pre;
71403+ struct acl_subject_label * ret;
71404+ int err;
71405+
71406+ while (userp) {
71407+ if (copy_acl_subject_label(&s_pre, userp))
71408+ return -EFAULT;
71409+
71410+ ret = do_copy_user_subj(userp, role, NULL);
71411+
71412+ err = PTR_ERR(ret);
71413+ if (IS_ERR(ret))
71414+ return err;
71415+
71416+ insert_acl_subj_label(ret, role);
71417+
71418+ userp = s_pre.prev;
71419+ }
71420+
71421+ return 0;
71422+}
71423+
71424+static int
71425+copy_user_acl(struct gr_arg *arg)
71426+{
71427+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
71428+ struct acl_subject_label *subj_list;
71429+ struct sprole_pw *sptmp;
71430+ struct gr_hash_struct *ghash;
71431+ uid_t *domainlist;
71432+ unsigned int r_num;
71433+ int err = 0;
71434+ __u16 i;
71435+ __u32 num_subjs;
71436+
71437+ /* we need a default and kernel role */
71438+ if (arg->role_db.num_roles < 2)
71439+ return -EINVAL;
71440+
71441+ /* copy special role authentication info from userspace */
71442+
71443+ polstate->num_sprole_pws = arg->num_sprole_pws;
71444+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
71445+
71446+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
71447+ return -ENOMEM;
71448+
71449+ for (i = 0; i < polstate->num_sprole_pws; i++) {
71450+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
71451+ if (!sptmp)
71452+ return -ENOMEM;
71453+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
71454+ return -EFAULT;
71455+
71456+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
71457+ if (err)
71458+ return err;
71459+
71460+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71461+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
71462+#endif
71463+
71464+ polstate->acl_special_roles[i] = sptmp;
71465+ }
71466+
71467+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
71468+
71469+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
71470+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
71471+
71472+ if (!r_tmp)
71473+ return -ENOMEM;
71474+
71475+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
71476+ return -EFAULT;
71477+
71478+ if (copy_acl_role_label(r_tmp, r_utmp2))
71479+ return -EFAULT;
71480+
71481+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
71482+ if (err)
71483+ return err;
71484+
71485+ if (!strcmp(r_tmp->rolename, "default")
71486+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
71487+ polstate->default_role = r_tmp;
71488+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
71489+ polstate->kernel_role = r_tmp;
71490+ }
71491+
71492+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
71493+ return -ENOMEM;
71494+
71495+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
71496+ return -EFAULT;
71497+
71498+ r_tmp->hash = ghash;
71499+
71500+ num_subjs = count_user_subjs(r_tmp->hash->first);
71501+
71502+ r_tmp->subj_hash_size = num_subjs;
71503+ r_tmp->subj_hash =
71504+ (struct acl_subject_label **)
71505+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
71506+
71507+ if (!r_tmp->subj_hash)
71508+ return -ENOMEM;
71509+
71510+ err = copy_user_allowedips(r_tmp);
71511+ if (err)
71512+ return err;
71513+
71514+ /* copy domain info */
71515+ if (r_tmp->domain_children != NULL) {
71516+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
71517+ if (domainlist == NULL)
71518+ return -ENOMEM;
71519+
71520+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
71521+ return -EFAULT;
71522+
71523+ r_tmp->domain_children = domainlist;
71524+ }
71525+
71526+ err = copy_user_transitions(r_tmp);
71527+ if (err)
71528+ return err;
71529+
71530+ memset(r_tmp->subj_hash, 0,
71531+ r_tmp->subj_hash_size *
71532+ sizeof (struct acl_subject_label *));
71533+
71534+ /* acquire the list of subjects, then NULL out
71535+ the list prior to parsing the subjects for this role,
71536+ as during this parsing the list is replaced with a list
71537+ of *nested* subjects for the role
71538+ */
71539+ subj_list = r_tmp->hash->first;
71540+
71541+ /* set nested subject list to null */
71542+ r_tmp->hash->first = NULL;
71543+
71544+ err = copy_user_subjs(subj_list, r_tmp);
71545+
71546+ if (err)
71547+ return err;
71548+
71549+ insert_acl_role_label(r_tmp);
71550+ }
71551+
71552+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
71553+ return -EINVAL;
71554+
71555+ return err;
71556+}
71557+
71558+static int gracl_reload_apply_policies(void *reload)
71559+{
71560+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
71561+ struct task_struct *task, *task2;
71562+ struct acl_role_label *role, *rtmp;
71563+ struct acl_subject_label *subj;
71564+ const struct cred *cred;
71565+ int role_applied;
71566+ int ret = 0;
71567+
71568+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
71569+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
71570+
71571+ /* first make sure we'll be able to apply the new policy cleanly */
71572+ do_each_thread(task2, task) {
71573+ if (task->exec_file == NULL)
71574+ continue;
71575+ role_applied = 0;
71576+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
71577+ /* preserve special roles */
71578+ FOR_EACH_ROLE_START(role)
71579+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
71580+ rtmp = task->role;
71581+ task->role = role;
71582+ role_applied = 1;
71583+ break;
71584+ }
71585+ FOR_EACH_ROLE_END(role)
71586+ }
71587+ if (!role_applied) {
71588+ cred = __task_cred(task);
71589+ rtmp = task->role;
71590+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
71591+ }
71592+		/* this handles non-nested inherited subjects; nested subjects will still
71593+ be dropped currently */
71594+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
71595+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
71596+ /* change the role back so that we've made no modifications to the policy */
71597+ task->role = rtmp;
71598+
71599+ if (subj == NULL || task->tmpacl == NULL) {
71600+ ret = -EINVAL;
71601+ goto out;
71602+ }
71603+ } while_each_thread(task2, task);
71604+
71605+ /* now actually apply the policy */
71606+
71607+ do_each_thread(task2, task) {
71608+ if (task->exec_file) {
71609+ role_applied = 0;
71610+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
71611+ /* preserve special roles */
71612+ FOR_EACH_ROLE_START(role)
71613+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
71614+ task->role = role;
71615+ role_applied = 1;
71616+ break;
71617+ }
71618+ FOR_EACH_ROLE_END(role)
71619+ }
71620+ if (!role_applied) {
71621+ cred = __task_cred(task);
71622+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
71623+ }
71624+			/* this handles non-nested inherited subjects; nested subjects will still
71625+ be dropped currently */
71626+ if (!reload_state->oldmode && task->inherited)
71627+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
71628+ else {
71629+ /* looked up and tagged to the task previously */
71630+ subj = task->tmpacl;
71631+ }
71632+ /* subj will be non-null */
71633+ __gr_apply_subject_to_task(polstate, task, subj);
71634+ if (reload_state->oldmode) {
71635+ task->acl_role_id = 0;
71636+ task->acl_sp_role = 0;
71637+ task->inherited = 0;
71638+ }
71639+ } else {
71640+ // it's a kernel process
71641+ task->role = polstate->kernel_role;
71642+ task->acl = polstate->kernel_role->root_label;
71643+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
71644+ task->acl->mode &= ~GR_PROCFIND;
71645+#endif
71646+ }
71647+ } while_each_thread(task2, task);
71648+
71649+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
71650+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
71651+
71652+out:
71653+
71654+ return ret;
71655+}
71656+
71657+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
71658+{
71659+ struct gr_reload_state new_reload_state = { };
71660+ int err;
71661+
71662+ new_reload_state.oldpolicy_ptr = polstate;
71663+ new_reload_state.oldalloc_ptr = current_alloc_state;
71664+ new_reload_state.oldmode = oldmode;
71665+
71666+ current_alloc_state = &new_reload_state.newalloc;
71667+ polstate = &new_reload_state.newpolicy;
71668+
71669+ /* everything relevant is now saved off, copy in the new policy */
71670+ if (init_variables(args, true)) {
71671+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
71672+ err = -ENOMEM;
71673+ goto error;
71674+ }
71675+
71676+ err = copy_user_acl(args);
71677+ free_init_variables();
71678+ if (err)
71679+ goto error;
71680+	/* the new policy is copied in, with the old policy available via saved_state.
71681+	   First go through applying roles, making sure to preserve special roles,
71682+	   then apply new subjects, making sure to preserve inherited and nested subjects,
71683+	   though currently only inherited subjects will be preserved
71684+	*/
71685+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
71686+ if (err)
71687+ goto error;
71688+
71689+ /* we've now applied the new policy, so restore the old policy state to free it */
71690+ polstate = &new_reload_state.oldpolicy;
71691+ current_alloc_state = &new_reload_state.oldalloc;
71692+ free_variables(true);
71693+
71694+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
71695+ to running_polstate/current_alloc_state inside stop_machine
71696+ */
71697+ err = 0;
71698+ goto out;
71699+error:
71700+ /* on error of loading the new policy, we'll just keep the previous
71701+ policy set around
71702+ */
71703+ free_variables(true);
71704+
71705+ /* doesn't affect runtime, but maintains consistent state */
71706+out:
71707+ polstate = new_reload_state.oldpolicy_ptr;
71708+ current_alloc_state = new_reload_state.oldalloc_ptr;
71709+
71710+ return err;
71711+}
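/* A sketch of the reload pattern gracl_reload() implements: save the live
 * state pointers, point the globals at fresh state, build the new policy,
 * then either commit (retire the old state) or roll back (free the new
 * state and restore the old pointers, leaving the running policy untouched).
 * Names here (build_state, reload, ...) are hypothetical, and the real code
 * additionally publishes the swap under stop_machine(); this is an
 * illustration of the control flow only. */
#include <stdlib.h>

struct state { int data; };

static struct state *live;	/* analogue of polstate/current_alloc_state */

static int build_state(struct state *s)
{
	s->data = 42;		/* stand-in for init_variables()+copy_user_acl() */
	return 0;		/* could instead fail with -ENOMEM/-EFAULT */
}

static int reload(void)
{
	struct state *old = live;	/* save the running state */
	struct state *fresh = calloc(1, sizeof(*fresh));
	int err;

	if (!fresh)
		return -1;

	live = fresh;			/* globals now target the new state */
	err = build_state(fresh);
	if (err) {
		live = old;		/* roll back: old policy stays active */
		free(fresh);
		return err;
	}
	free(old);			/* commit: retire the old state */
	return 0;
}

int main(void)
{
	live = calloc(1, sizeof(*live));
	return live ? reload() : 1;
}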
71712+
71713+static int
71714+gracl_init(struct gr_arg *args)
71715+{
71716+ int error = 0;
71717+
71718+ memcpy(&gr_system_salt, args->salt, sizeof(gr_system_salt));
71719+ memcpy(&gr_system_sum, args->sum, sizeof(gr_system_sum));
71720+
71721+ if (init_variables(args, false)) {
71722+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
71723+ error = -ENOMEM;
71724+ goto out;
71725+ }
71726+
71727+ error = copy_user_acl(args);
71728+ free_init_variables();
71729+ if (error)
71730+ goto out;
71731+
71732+ error = gr_set_acls(0);
71733+ if (error)
71734+ goto out;
71735+
71736+ gr_enable_rbac_system();
71737+
71738+ return 0;
71739+
71740+out:
71741+ free_variables(false);
71742+ return error;
71743+}
71744+
71745+static int
71746+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
71747+ unsigned char **sum)
71748+{
71749+ struct acl_role_label *r;
71750+ struct role_allowed_ip *ipp;
71751+ struct role_transition *trans;
71752+ unsigned int i;
71753+ int found = 0;
71754+ u32 curr_ip = current->signal->curr_ip;
71755+
71756+ current->signal->saved_ip = curr_ip;
71757+
71758+ /* check transition table */
71759+
71760+ for (trans = current->role->transitions; trans; trans = trans->next) {
71761+ if (!strcmp(rolename, trans->rolename)) {
71762+ found = 1;
71763+ break;
71764+ }
71765+ }
71766+
71767+ if (!found)
71768+ return 0;
71769+
71770+ /* handle special roles that do not require authentication
71771+ and check ip */
71772+
71773+ FOR_EACH_ROLE_START(r)
71774+ if (!strcmp(rolename, r->rolename) &&
71775+ (r->roletype & GR_ROLE_SPECIAL)) {
71776+ found = 0;
71777+ if (r->allowed_ips != NULL) {
71778+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
71779+ if ((ntohl(curr_ip) & ipp->netmask) ==
71780+ (ntohl(ipp->addr) & ipp->netmask))
71781+ found = 1;
71782+ }
71783+ } else
71784+ found = 2;
71785+ if (!found)
71786+ return 0;
71787+
71788+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
71789+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
71790+ *salt = NULL;
71791+ *sum = NULL;
71792+ return 1;
71793+ }
71794+ }
71795+ FOR_EACH_ROLE_END(r)
71796+
71797+ for (i = 0; i < polstate->num_sprole_pws; i++) {
71798+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
71799+ *salt = polstate->acl_special_roles[i]->salt;
71800+ *sum = polstate->acl_special_roles[i]->sum;
71801+ return 1;
71802+ }
71803+ }
71804+
71805+ return 0;
71806+}
71807+
71808+int gr_check_secure_terminal(struct task_struct *task)
71809+{
71810+ struct task_struct *p, *p2, *p3;
71811+ struct files_struct *files;
71812+ struct fdtable *fdt;
71813+ struct file *our_file = NULL, *file;
71814+ int i;
71815+
71816+ if (task->signal->tty == NULL)
71817+ return 1;
71818+
71819+ files = get_files_struct(task);
71820+ if (files != NULL) {
71821+ rcu_read_lock();
71822+ fdt = files_fdtable(files);
71823+ for (i=0; i < fdt->max_fds; i++) {
71824+ file = fcheck_files(files, i);
71825+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
71826+ get_file(file);
71827+ our_file = file;
71828+ }
71829+ }
71830+ rcu_read_unlock();
71831+ put_files_struct(files);
71832+ }
71833+
71834+ if (our_file == NULL)
71835+ return 1;
71836+
71837+ read_lock(&tasklist_lock);
71838+ do_each_thread(p2, p) {
71839+ files = get_files_struct(p);
71840+ if (files == NULL ||
71841+ (p->signal && p->signal->tty == task->signal->tty)) {
71842+ if (files != NULL)
71843+ put_files_struct(files);
71844+ continue;
71845+ }
71846+ rcu_read_lock();
71847+ fdt = files_fdtable(files);
71848+ for (i=0; i < fdt->max_fds; i++) {
71849+ file = fcheck_files(files, i);
71850+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
71851+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
71852+ p3 = task;
71853+ while (task_pid_nr(p3) > 0) {
71854+ if (p3 == p)
71855+ break;
71856+ p3 = p3->real_parent;
71857+ }
71858+ if (p3 == p)
71859+ break;
71860+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
71861+ gr_handle_alertkill(p);
71862+ rcu_read_unlock();
71863+ put_files_struct(files);
71864+ read_unlock(&tasklist_lock);
71865+ fput(our_file);
71866+ return 0;
71867+ }
71868+ }
71869+ rcu_read_unlock();
71870+ put_files_struct(files);
71871+ } while_each_thread(p2, p);
71872+ read_unlock(&tasklist_lock);
71873+
71874+ fput(our_file);
71875+ return 1;
71876+}
71877+
71878+ssize_t
71879+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
71880+{
71881+ struct gr_arg_wrapper uwrap;
71882+ unsigned char *sprole_salt = NULL;
71883+ unsigned char *sprole_sum = NULL;
71884+ int error = 0;
71885+ int error2 = 0;
71886+ size_t req_count = 0;
71887+ unsigned char oldmode = 0;
71888+
71889+ mutex_lock(&gr_dev_mutex);
71890+
71891+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
71892+ error = -EPERM;
71893+ goto out;
71894+ }
71895+
71896+#ifdef CONFIG_COMPAT
71897+ pax_open_kernel();
71898+ if (is_compat_task()) {
71899+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
71900+ copy_gr_arg = &copy_gr_arg_compat;
71901+ copy_acl_object_label = &copy_acl_object_label_compat;
71902+ copy_acl_subject_label = &copy_acl_subject_label_compat;
71903+ copy_acl_role_label = &copy_acl_role_label_compat;
71904+ copy_acl_ip_label = &copy_acl_ip_label_compat;
71905+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
71906+ copy_role_transition = &copy_role_transition_compat;
71907+ copy_sprole_pw = &copy_sprole_pw_compat;
71908+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
71909+ copy_pointer_from_array = &copy_pointer_from_array_compat;
71910+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
71911+ } else {
71912+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
71913+ copy_gr_arg = &copy_gr_arg_normal;
71914+ copy_acl_object_label = &copy_acl_object_label_normal;
71915+ copy_acl_subject_label = &copy_acl_subject_label_normal;
71916+ copy_acl_role_label = &copy_acl_role_label_normal;
71917+ copy_acl_ip_label = &copy_acl_ip_label_normal;
71918+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
71919+ copy_role_transition = &copy_role_transition_normal;
71920+ copy_sprole_pw = &copy_sprole_pw_normal;
71921+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
71922+ copy_pointer_from_array = &copy_pointer_from_array_normal;
71923+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
71924+ }
71925+ pax_close_kernel();
71926+#endif
71927+
71928+ req_count = get_gr_arg_wrapper_size();
71929+
71930+ if (count != req_count) {
71931+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
71932+ error = -EINVAL;
71933+ goto out;
71934+ }
71935+
71937+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
71938+ gr_auth_expires = 0;
71939+ gr_auth_attempts = 0;
71940+ }
71941+
71942+ error = copy_gr_arg_wrapper(buf, &uwrap);
71943+ if (error)
71944+ goto out;
71945+
71946+ error = copy_gr_arg(uwrap.arg, &gr_usermode);
71947+ if (error)
71948+ goto out;
71949+
71950+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_SPROLEPAM &&
71951+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
71952+ time_after(gr_auth_expires, get_seconds())) {
71953+ error = -EBUSY;
71954+ goto out;
71955+ }
71956+
71957+	/* if a non-root user is trying to do anything other than use a
71958+	   special role, do not attempt authentication and do not count the
71959+	   attempt towards authentication lockout
71960+	 */
71961+
71962+ if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_STATUS &&
71963+ gr_usermode.mode != GR_UNSPROLE && gr_usermode.mode != GR_SPROLEPAM &&
71964+ gr_is_global_nonroot(current_uid())) {
71965+ error = -EPERM;
71966+ goto out;
71967+ }
71968+
71969+ /* ensure pw and special role name are null terminated */
71970+
71971+ gr_usermode.pw[GR_PW_LEN - 1] = '\0';
71972+ gr_usermode.sp_role[GR_SPROLE_LEN - 1] = '\0';
71973+
71974+	/* Okay.
71975+	 * We have enough of the argument structure (we have yet
71976+	 * to copy_from_user the tables themselves). Copy the tables
71977+	 * only if we need them, i.e. for loading operations. */
71978+
71979+ switch (gr_usermode.mode) {
71980+ case GR_STATUS:
71981+ if (gr_acl_is_enabled()) {
71982+ error = 1;
71983+ if (!gr_check_secure_terminal(current))
71984+ error = 3;
71985+ } else
71986+ error = 2;
71987+ goto out;
71988+ case GR_SHUTDOWN:
71989+ if (gr_acl_is_enabled() && !(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
71990+ stop_machine(gr_rbac_disable, NULL, NULL);
71991+ free_variables(false);
71992+ memset(&gr_usermode, 0, sizeof(gr_usermode));
71993+ memset(&gr_system_salt, 0, sizeof(gr_system_salt));
71994+ memset(&gr_system_sum, 0, sizeof(gr_system_sum));
71995+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
71996+ } else if (gr_acl_is_enabled()) {
71997+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
71998+ error = -EPERM;
71999+ } else {
72000+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
72001+ error = -EAGAIN;
72002+ }
72003+ break;
72004+ case GR_ENABLE:
72005+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(&gr_usermode)))
72006+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
72007+ else {
72008+ if (gr_acl_is_enabled())
72009+ error = -EAGAIN;
72010+ else
72011+ error = error2;
72012+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
72013+ }
72014+ break;
72015+ case GR_OLDRELOAD:
72016+		oldmode = 1;	/* fall through to GR_RELOAD */
72017+ case GR_RELOAD:
72018+ if (!gr_acl_is_enabled()) {
72019+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
72020+ error = -EAGAIN;
72021+ } else if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
72022+ error2 = gracl_reload(&gr_usermode, oldmode);
72023+ if (!error2)
72024+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
72025+ else {
72026+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
72027+ error = error2;
72028+ }
72029+ } else {
72030+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
72031+ error = -EPERM;
72032+ }
72033+ break;
72034+ case GR_SEGVMOD:
72035+ if (unlikely(!gr_acl_is_enabled())) {
72036+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
72037+ error = -EAGAIN;
72038+ break;
72039+ }
72040+
72041+ if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
72042+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
72043+ if (gr_usermode.segv_device && gr_usermode.segv_inode) {
72044+ struct acl_subject_label *segvacl;
72045+ segvacl =
72046+ lookup_acl_subj_label(gr_usermode.segv_inode,
72047+ gr_usermode.segv_device,
72048+ current->role);
72049+ if (segvacl) {
72050+ segvacl->crashes = 0;
72051+ segvacl->expires = 0;
72052+ }
72053+ } else if (gr_find_uid(gr_usermode.segv_uid) >= 0) {
72054+ gr_remove_uid(gr_usermode.segv_uid);
72055+ }
72056+ } else {
72057+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
72058+ error = -EPERM;
72059+ }
72060+ break;
72061+ case GR_SPROLE:
72062+ case GR_SPROLEPAM:
72063+ if (unlikely(!gr_acl_is_enabled())) {
72064+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
72065+ error = -EAGAIN;
72066+ break;
72067+ }
72068+
72069+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
72070+ current->role->expires = 0;
72071+ current->role->auth_attempts = 0;
72072+ }
72073+
72074+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
72075+ time_after(current->role->expires, get_seconds())) {
72076+ error = -EBUSY;
72077+ goto out;
72078+ }
72079+
72080+ if (lookup_special_role_auth
72081+ (gr_usermode.mode, gr_usermode.sp_role, &sprole_salt, &sprole_sum)
72082+ && ((!sprole_salt && !sprole_sum)
72083+ || !(chkpw(&gr_usermode, sprole_salt, sprole_sum)))) {
72084+ char *p = "";
72085+ assign_special_role(gr_usermode.sp_role);
72086+ read_lock(&tasklist_lock);
72087+ if (current->real_parent)
72088+ p = current->real_parent->role->rolename;
72089+ read_unlock(&tasklist_lock);
72090+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
72091+ p, acl_sp_role_value);
72092+ } else {
72093+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode.sp_role);
72094+ error = -EPERM;
72095+			if (!(current->role->auth_attempts++))
72096+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
72097+
72098+ goto out;
72099+ }
72100+ break;
72101+ case GR_UNSPROLE:
72102+ if (unlikely(!gr_acl_is_enabled())) {
72103+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
72104+ error = -EAGAIN;
72105+ break;
72106+ }
72107+
72108+ if (current->role->roletype & GR_ROLE_SPECIAL) {
72109+ char *p = "";
72110+ int i = 0;
72111+
72112+ read_lock(&tasklist_lock);
72113+ if (current->real_parent) {
72114+ p = current->real_parent->role->rolename;
72115+ i = current->real_parent->acl_role_id;
72116+ }
72117+ read_unlock(&tasklist_lock);
72118+
72119+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
72120+ gr_set_acls(1);
72121+ } else {
72122+ error = -EPERM;
72123+ goto out;
72124+ }
72125+ break;
72126+ default:
72127+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode.mode);
72128+ error = -EINVAL;
72129+ break;
72130+ }
72131+
72132+ if (error != -EPERM)
72133+ goto out;
72134+
72135+	if (!(gr_auth_attempts++))
72136+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
72137+
72138+ out:
72139+ mutex_unlock(&gr_dev_mutex);
72140+
72141+ if (!error)
72142+ error = req_count;
72143+
72144+ return error;
72145+}
72146+
72147+int
72148+gr_set_acls(const int type)
72149+{
72150+ struct task_struct *task, *task2;
72151+ struct acl_role_label *role = current->role;
72152+ struct acl_subject_label *subj;
72153+ __u16 acl_role_id = current->acl_role_id;
72154+ const struct cred *cred;
72155+ int ret;
72156+
72157+ rcu_read_lock();
72158+ read_lock(&tasklist_lock);
72159+ read_lock(&grsec_exec_file_lock);
72160+ do_each_thread(task2, task) {
72161+ /* check to see if we're called from the exit handler,
72162+ if so, only replace ACLs that have inherited the admin
72163+ ACL */
72164+
72165+ if (type && (task->role != role ||
72166+ task->acl_role_id != acl_role_id))
72167+ continue;
72168+
72169+ task->acl_role_id = 0;
72170+ task->acl_sp_role = 0;
72171+ task->inherited = 0;
72172+
72173+ if (task->exec_file) {
72174+ cred = __task_cred(task);
72175+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
72176+ subj = __gr_get_subject_for_task(polstate, task, NULL);
72177+ if (subj == NULL) {
72178+ ret = -EINVAL;
72179+ read_unlock(&grsec_exec_file_lock);
72180+ read_unlock(&tasklist_lock);
72181+ rcu_read_unlock();
72182+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
72183+ return ret;
72184+ }
72185+ __gr_apply_subject_to_task(polstate, task, subj);
72186+ } else {
72187+ // it's a kernel process
72188+ task->role = polstate->kernel_role;
72189+ task->acl = polstate->kernel_role->root_label;
72190+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
72191+ task->acl->mode &= ~GR_PROCFIND;
72192+#endif
72193+ }
72194+ } while_each_thread(task2, task);
72195+ read_unlock(&grsec_exec_file_lock);
72196+ read_unlock(&tasklist_lock);
72197+ rcu_read_unlock();
72198+
72199+ return 0;
72200+}
72201diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
72202new file mode 100644
72203index 0000000..39645c9
72204--- /dev/null
72205+++ b/grsecurity/gracl_res.c
72206@@ -0,0 +1,68 @@
72207+#include <linux/kernel.h>
72208+#include <linux/sched.h>
72209+#include <linux/gracl.h>
72210+#include <linux/grinternal.h>
72211+
72212+static const char *restab_log[] = {
72213+ [RLIMIT_CPU] = "RLIMIT_CPU",
72214+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
72215+ [RLIMIT_DATA] = "RLIMIT_DATA",
72216+ [RLIMIT_STACK] = "RLIMIT_STACK",
72217+ [RLIMIT_CORE] = "RLIMIT_CORE",
72218+ [RLIMIT_RSS] = "RLIMIT_RSS",
72219+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
72220+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
72221+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
72222+ [RLIMIT_AS] = "RLIMIT_AS",
72223+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
72224+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
72225+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
72226+ [RLIMIT_NICE] = "RLIMIT_NICE",
72227+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
72228+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
72229+ [GR_CRASH_RES] = "RLIMIT_CRASH"
72230+};
72231+
72232+void
72233+gr_log_resource(const struct task_struct *task,
72234+ const int res, const unsigned long wanted, const int gt)
72235+{
72236+ const struct cred *cred;
72237+ unsigned long rlim;
72238+
72239+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
72240+ return;
72241+
72242+ // not yet supported resource
72243+ if (unlikely(!restab_log[res]))
72244+ return;
72245+
72246+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
72247+ rlim = task_rlimit_max(task, res);
72248+ else
72249+ rlim = task_rlimit(task, res);
72250+
72251+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
72252+ return;
72253+
72254+ rcu_read_lock();
72255+ cred = __task_cred(task);
72256+
72257+ if (res == RLIMIT_NPROC &&
72258+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
72259+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
72260+ goto out_rcu_unlock;
72261+ else if (res == RLIMIT_MEMLOCK &&
72262+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
72263+ goto out_rcu_unlock;
72264+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
72265+ goto out_rcu_unlock;
72266+ rcu_read_unlock();
72267+
72268+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
72269+
72270+ return;
72271+out_rcu_unlock:
72272+ rcu_read_unlock();
72273+ return;
72274+}
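/* The "should we log" test in gr_log_resource() above reduces to: skip when
 * the limit is infinite, or when `wanted' does not exceed it, with `gt'
 * selecting a strict (>) versus inclusive (>=) comparison.  Tiny
 * truth-table demo with a local RLIM_INFINITY stand-in -- a sketch, not
 * kernel code. */
#include <stdio.h>

#define RLIM_INFINITY (~0UL)	/* local stand-in for the kernel constant */

static int over_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	if (rlim == RLIM_INFINITY)
		return 0;
	return gt ? wanted > rlim : wanted >= rlim;
}

int main(void)
{
	printf("%d %d %d\n",
	       over_limit(5, 5, 0),		/* 1: wanted == limit trips >= */
	       over_limit(5, 5, 1),		/* 0: > requires strictly greater */
	       over_limit(1, RLIM_INFINITY, 0));/* 0: infinite limit never logs */
	return 0;
}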
72275diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
72276new file mode 100644
72277index 0000000..2040e61
72278--- /dev/null
72279+++ b/grsecurity/gracl_segv.c
72280@@ -0,0 +1,313 @@
72281+#include <linux/kernel.h>
72282+#include <linux/mm.h>
72283+#include <asm/uaccess.h>
72284+#include <asm/errno.h>
72285+#include <asm/mman.h>
72286+#include <net/sock.h>
72287+#include <linux/file.h>
72288+#include <linux/fs.h>
72289+#include <linux/net.h>
72290+#include <linux/in.h>
72291+#include <linux/slab.h>
72292+#include <linux/types.h>
72293+#include <linux/sched.h>
72294+#include <linux/timer.h>
72295+#include <linux/gracl.h>
72296+#include <linux/grsecurity.h>
72297+#include <linux/grinternal.h>
72298+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
72299+#include <linux/magic.h>
72300+#include <linux/pagemap.h>
72301+#include "../fs/btrfs/async-thread.h"
72302+#include "../fs/btrfs/ctree.h"
72303+#include "../fs/btrfs/btrfs_inode.h"
72304+#endif
72305+
72306+static struct crash_uid *uid_set;
72307+static unsigned short uid_used;
72308+static DEFINE_SPINLOCK(gr_uid_lock);
72309+extern rwlock_t gr_inode_lock;
72310+extern struct acl_subject_label *
72311+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
72312+ struct acl_role_label *role);
72313+
72314+static inline dev_t __get_dev(const struct dentry *dentry)
72315+{
72316+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
72317+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
72318+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
72319+ else
72320+#endif
72321+ return dentry->d_sb->s_dev;
72322+}
72323+
72324+int
72325+gr_init_uidset(void)
72326+{
72327+ uid_set =
72328+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
72329+ uid_used = 0;
72330+
72331+ return uid_set ? 1 : 0;
72332+}
72333+
72334+void
72335+gr_free_uidset(void)
72336+{
72337+ if (uid_set) {
72338+ struct crash_uid *tmpset;
72339+ spin_lock(&gr_uid_lock);
72340+ tmpset = uid_set;
72341+ uid_set = NULL;
72342+ uid_used = 0;
72343+ spin_unlock(&gr_uid_lock);
72344+		kfree(tmpset);
72346+ }
72347+
72348+ return;
72349+}
72350+
72351+int
72352+gr_find_uid(const uid_t uid)
72353+{
72354+ struct crash_uid *tmp = uid_set;
72355+ uid_t buid;
72356+ int low = 0, high = uid_used - 1, mid;
72357+
72358+ while (high >= low) {
72359+ mid = (low + high) >> 1;
72360+ buid = tmp[mid].uid;
72361+ if (buid == uid)
72362+ return mid;
72363+ if (buid > uid)
72364+ high = mid - 1;
72365+ if (buid < uid)
72366+ low = mid + 1;
72367+ }
72368+
72369+ return -1;
72370+}
72371+
72372+static __inline__ void
72373+gr_insertsort(void)
72374+{
72375+ unsigned short i, j;
72376+ struct crash_uid index;
72377+
72378+ for (i = 1; i < uid_used; i++) {
72379+ index = uid_set[i];
72380+ j = i;
72381+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
72382+ uid_set[j] = uid_set[j - 1];
72383+ j--;
72384+ }
72385+ uid_set[j] = index;
72386+ }
72387+
72388+ return;
72389+}
72390+
72391+static __inline__ void
72392+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
72393+{
72394+ int loc;
72395+ uid_t uid = GR_GLOBAL_UID(kuid);
72396+
72397+ if (uid_used == GR_UIDTABLE_MAX)
72398+ return;
72399+
72400+ loc = gr_find_uid(uid);
72401+
72402+ if (loc >= 0) {
72403+ uid_set[loc].expires = expires;
72404+ return;
72405+ }
72406+
72407+ uid_set[uid_used].uid = uid;
72408+ uid_set[uid_used].expires = expires;
72409+ uid_used++;
72410+
72411+ gr_insertsort();
72412+
72413+ return;
72414+}
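/* The uid_set bookkeeping above keeps a small array sorted by uid so that
 * gr_find_uid() can binary-search it, and gr_insert_uid() appends then
 * restores order with an insertion-sort pass.  Userspace analogue below;
 * the insert here bubbles only the new tail entry into place, a
 * simplification that is equivalent when the rest of the array is already
 * sorted.  Sketch only. */
#include <stdio.h>

struct crash_entry { unsigned int uid; unsigned long expires; };

static struct crash_entry set[16];
static unsigned short used;

static int find_uid(unsigned int uid)
{
	int low = 0, high = (int)used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;

		if (set[mid].uid == uid)
			return mid;
		if (set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, unsigned long expires)
{
	int j;

	set[used].uid = uid;
	set[used].expires = expires;
	used++;
	/* one insertion-sort pass: bubble the new tail entry into place */
	for (j = used - 1; j > 0 && set[j - 1].uid > set[j].uid; j--) {
		struct crash_entry tmp = set[j];

		set[j] = set[j - 1];
		set[j - 1] = tmp;
	}
}

int main(void)
{
	insert_uid(1000, 60);
	insert_uid(33, 60);
	insert_uid(500, 60);
	printf("uid 500 at index %d\n", find_uid(500));	/* 1: array stays sorted */
	return 0;
}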
72415+
72416+void
72417+gr_remove_uid(const unsigned short loc)
72418+{
72419+ unsigned short i;
72420+
72421+ for (i = loc + 1; i < uid_used; i++)
72422+ uid_set[i - 1] = uid_set[i];
72423+
72424+ uid_used--;
72425+
72426+ return;
72427+}
72428+
72429+int
72430+gr_check_crash_uid(const kuid_t kuid)
72431+{
72432+ int loc;
72433+ int ret = 0;
72434+ uid_t uid;
72435+
72436+ if (unlikely(!gr_acl_is_enabled()))
72437+ return 0;
72438+
72439+ uid = GR_GLOBAL_UID(kuid);
72440+
72441+ spin_lock(&gr_uid_lock);
72442+ loc = gr_find_uid(uid);
72443+
72444+ if (loc < 0)
72445+ goto out_unlock;
72446+
72447+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
72448+ gr_remove_uid(loc);
72449+ else
72450+ ret = 1;
72451+
72452+out_unlock:
72453+ spin_unlock(&gr_uid_lock);
72454+ return ret;
72455+}
72456+
72457+static __inline__ int
72458+proc_is_setxid(const struct cred *cred)
72459+{
72460+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
72461+ !uid_eq(cred->uid, cred->fsuid))
72462+ return 1;
72463+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
72464+ !gid_eq(cred->gid, cred->fsgid))
72465+ return 1;
72466+
72467+ return 0;
72468+}
72469+
72470+extern int gr_fake_force_sig(int sig, struct task_struct *t);
72471+
72472+void
72473+gr_handle_crash(struct task_struct *task, const int sig)
72474+{
72475+ struct acl_subject_label *curr;
72476+ struct task_struct *tsk, *tsk2;
72477+ const struct cred *cred;
72478+ const struct cred *cred2;
72479+
72480+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
72481+ return;
72482+
72483+ if (unlikely(!gr_acl_is_enabled()))
72484+ return;
72485+
72486+ curr = task->acl;
72487+
72488+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
72489+ return;
72490+
72491+ if (time_before_eq(curr->expires, get_seconds())) {
72492+ curr->expires = 0;
72493+ curr->crashes = 0;
72494+ }
72495+
72496+ curr->crashes++;
72497+
72498+ if (!curr->expires)
72499+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
72500+
72501+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
72502+ time_after(curr->expires, get_seconds())) {
72503+ rcu_read_lock();
72504+ cred = __task_cred(task);
72505+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
72506+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
72507+ spin_lock(&gr_uid_lock);
72508+ gr_insert_uid(cred->uid, curr->expires);
72509+ spin_unlock(&gr_uid_lock);
72510+ curr->expires = 0;
72511+ curr->crashes = 0;
72512+ read_lock(&tasklist_lock);
72513+ do_each_thread(tsk2, tsk) {
72514+ cred2 = __task_cred(tsk);
72515+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
72516+ gr_fake_force_sig(SIGKILL, tsk);
72517+ } while_each_thread(tsk2, tsk);
72518+ read_unlock(&tasklist_lock);
72519+ } else {
72520+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
72521+ read_lock(&tasklist_lock);
72522+ read_lock(&grsec_exec_file_lock);
72523+ do_each_thread(tsk2, tsk) {
72524+ if (likely(tsk != task)) {
72525+ // if this thread has the same subject as the one that triggered
72526+ // RES_CRASH and it's the same binary, kill it
72527+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
72528+ gr_fake_force_sig(SIGKILL, tsk);
72529+ }
72530+ } while_each_thread(tsk2, tsk);
72531+ read_unlock(&grsec_exec_file_lock);
72532+ read_unlock(&tasklist_lock);
72533+ }
72534+ rcu_read_unlock();
72535+ }
72536+
72537+ return;
72538+}
72539+
72540+int
72541+gr_check_crash_exec(const struct file *filp)
72542+{
72543+ struct acl_subject_label *curr;
72544+
72545+ if (unlikely(!gr_acl_is_enabled()))
72546+ return 0;
72547+
72548+ read_lock(&gr_inode_lock);
72549+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
72550+ __get_dev(filp->f_path.dentry),
72551+ current->role);
72552+ read_unlock(&gr_inode_lock);
72553+
72554+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
72555+ (!curr->crashes && !curr->expires))
72556+ return 0;
72557+
72558+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
72559+ time_after(curr->expires, get_seconds()))
72560+ return 1;
72561+ else if (time_before_eq(curr->expires, get_seconds())) {
72562+ curr->crashes = 0;
72563+ curr->expires = 0;
72564+ }
72565+
72566+ return 0;
72567+}
72568+
72569+void
72570+gr_handle_alertkill(struct task_struct *task)
72571+{
72572+ struct acl_subject_label *curracl;
72573+ __u32 curr_ip;
72574+ struct task_struct *p, *p2;
72575+
72576+ if (unlikely(!gr_acl_is_enabled()))
72577+ return;
72578+
72579+ curracl = task->acl;
72580+ curr_ip = task->signal->curr_ip;
72581+
72582+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
72583+ read_lock(&tasklist_lock);
72584+ do_each_thread(p2, p) {
72585+ if (p->signal->curr_ip == curr_ip)
72586+ gr_fake_force_sig(SIGKILL, p);
72587+ } while_each_thread(p2, p);
72588+ read_unlock(&tasklist_lock);
72589+ } else if (curracl->mode & GR_KILLPROC)
72590+ gr_fake_force_sig(SIGKILL, task);
72591+
72592+ return;
72593+}
72594diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
72595new file mode 100644
72596index 0000000..98011b0
72597--- /dev/null
72598+++ b/grsecurity/gracl_shm.c
72599@@ -0,0 +1,40 @@
72600+#include <linux/kernel.h>
72601+#include <linux/mm.h>
72602+#include <linux/sched.h>
72603+#include <linux/file.h>
72604+#include <linux/ipc.h>
72605+#include <linux/gracl.h>
72606+#include <linux/grsecurity.h>
72607+#include <linux/grinternal.h>
72608+
72609+int
72610+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72611+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
72612+{
72613+ struct task_struct *task;
72614+
72615+ if (!gr_acl_is_enabled())
72616+ return 1;
72617+
72618+ rcu_read_lock();
72619+ read_lock(&tasklist_lock);
72620+
72621+ task = find_task_by_vpid(shm_cprid);
72622+
72623+ if (unlikely(!task))
72624+ task = find_task_by_vpid(shm_lapid);
72625+
72626+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
72627+ (task_pid_nr(task) == shm_lapid)) &&
72628+ (task->acl->mode & GR_PROTSHM) &&
72629+ (task->acl != current->acl))) {
72630+ read_unlock(&tasklist_lock);
72631+ rcu_read_unlock();
72632+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
72633+ return 0;
72634+ }
72635+ read_unlock(&tasklist_lock);
72636+ rcu_read_unlock();
72637+
72638+ return 1;
72639+}
72640diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
72641new file mode 100644
72642index 0000000..bc0be01
72643--- /dev/null
72644+++ b/grsecurity/grsec_chdir.c
72645@@ -0,0 +1,19 @@
72646+#include <linux/kernel.h>
72647+#include <linux/sched.h>
72648+#include <linux/fs.h>
72649+#include <linux/file.h>
72650+#include <linux/grsecurity.h>
72651+#include <linux/grinternal.h>
72652+
72653+void
72654+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
72655+{
72656+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
72657+ if ((grsec_enable_chdir && grsec_enable_group &&
72658+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
72659+ !grsec_enable_group)) {
72660+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
72661+ }
72662+#endif
72663+ return;
72664+}
72665diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
72666new file mode 100644
72667index 0000000..651d6c2
72668--- /dev/null
72669+++ b/grsecurity/grsec_chroot.c
72670@@ -0,0 +1,370 @@
72671+#include <linux/kernel.h>
72672+#include <linux/module.h>
72673+#include <linux/sched.h>
72674+#include <linux/file.h>
72675+#include <linux/fs.h>
72676+#include <linux/mount.h>
72677+#include <linux/types.h>
72678+#include "../fs/mount.h"
72679+#include <linux/grsecurity.h>
72680+#include <linux/grinternal.h>
72681+
72682+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
72683+int gr_init_ran;
72684+#endif
72685+
72686+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
72687+{
72688+#ifdef CONFIG_GRKERNSEC
72689+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
72690+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
72691+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
72692+ && gr_init_ran
72693+#endif
72694+ )
72695+ task->gr_is_chrooted = 1;
72696+ else {
72697+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
72698+ if (task_pid_nr(task) == 1 && !gr_init_ran)
72699+ gr_init_ran = 1;
72700+#endif
72701+ task->gr_is_chrooted = 0;
72702+ }
72703+
72704+ task->gr_chroot_dentry = path->dentry;
72705+#endif
72706+ return;
72707+}
72708+
72709+void gr_clear_chroot_entries(struct task_struct *task)
72710+{
72711+#ifdef CONFIG_GRKERNSEC
72712+ task->gr_is_chrooted = 0;
72713+ task->gr_chroot_dentry = NULL;
72714+#endif
72715+ return;
72716+}
72717+
72718+int
72719+gr_handle_chroot_unix(const pid_t pid)
72720+{
72721+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
72722+ struct task_struct *p;
72723+
72724+ if (unlikely(!grsec_enable_chroot_unix))
72725+ return 1;
72726+
72727+ if (likely(!proc_is_chrooted(current)))
72728+ return 1;
72729+
72730+ rcu_read_lock();
72731+ read_lock(&tasklist_lock);
72732+ p = find_task_by_vpid_unrestricted(pid);
72733+ if (unlikely(p && !have_same_root(current, p))) {
72734+ read_unlock(&tasklist_lock);
72735+ rcu_read_unlock();
72736+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
72737+ return 0;
72738+ }
72739+ read_unlock(&tasklist_lock);
72740+ rcu_read_unlock();
72741+#endif
72742+ return 1;
72743+}
72744+
72745+int
72746+gr_handle_chroot_nice(void)
72747+{
72748+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72749+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
72750+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
72751+ return -EPERM;
72752+ }
72753+#endif
72754+ return 0;
72755+}
72756+
72757+int
72758+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
72759+{
72760+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72761+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
72762+ && proc_is_chrooted(current)) {
72763+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
72764+ return -EACCES;
72765+ }
72766+#endif
72767+ return 0;
72768+}
72769+
72770+int
72771+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
72772+{
72773+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72774+ struct task_struct *p;
72775+ int ret = 0;
72776+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
72777+ return ret;
72778+
72779+ read_lock(&tasklist_lock);
72780+ do_each_pid_task(pid, type, p) {
72781+ if (!have_same_root(current, p)) {
72782+ ret = 1;
72783+ goto out;
72784+ }
72785+ } while_each_pid_task(pid, type, p);
72786+out:
72787+ read_unlock(&tasklist_lock);
72788+ return ret;
72789+#endif
72790+ return 0;
72791+}
72792+
72793+int
72794+gr_pid_is_chrooted(struct task_struct *p)
72795+{
72796+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
72797+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
72798+ return 0;
72799+
72800+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
72801+ !have_same_root(current, p)) {
72802+ return 1;
72803+ }
72804+#endif
72805+ return 0;
72806+}
72807+
72808+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
72809+
72810+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
72811+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
72812+{
72813+ struct path path, currentroot;
72814+ int ret = 0;
72815+
72816+ path.dentry = (struct dentry *)u_dentry;
72817+ path.mnt = (struct vfsmount *)u_mnt;
72818+ get_fs_root(current->fs, &currentroot);
72819+ if (path_is_under(&path, &currentroot))
72820+ ret = 1;
72821+ path_put(&currentroot);
72822+
72823+ return ret;
72824+}
72825+#endif
72826+
72827+int
72828+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
72829+{
72830+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
72831+ if (!grsec_enable_chroot_fchdir)
72832+ return 1;
72833+
72834+ if (!proc_is_chrooted(current))
72835+ return 1;
72836+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
72837+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
72838+ return 0;
72839+ }
72840+#endif
72841+ return 1;
72842+}
72843+
72844+int
72845+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72846+ const time_t shm_createtime)
72847+{
72848+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
72849+ struct task_struct *p;
72850+ time_t starttime;
72851+
72852+ if (unlikely(!grsec_enable_chroot_shmat))
72853+ return 1;
72854+
72855+ if (likely(!proc_is_chrooted(current)))
72856+ return 1;
72857+
72858+ rcu_read_lock();
72859+ read_lock(&tasklist_lock);
72860+
72861+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
72862+ starttime = p->start_time.tv_sec;
72863+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
72864+ if (have_same_root(current, p)) {
72865+ goto allow;
72866+ } else {
72867+ read_unlock(&tasklist_lock);
72868+ rcu_read_unlock();
72869+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
72870+ return 0;
72871+ }
72872+ }
72873+ /* creator exited, pid reuse, fall through to next check */
72874+ }
72875+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
72876+ if (unlikely(!have_same_root(current, p))) {
72877+ read_unlock(&tasklist_lock);
72878+ rcu_read_unlock();
72879+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
72880+ return 0;
72881+ }
72882+ }
72883+
72884+allow:
72885+ read_unlock(&tasklist_lock);
72886+ rcu_read_unlock();
72887+#endif
72888+ return 1;
72889+}
72890+
72891+void
72892+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
72893+{
72894+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
72895+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
72896+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
72897+#endif
72898+ return;
72899+}
72900+
72901+int
72902+gr_handle_chroot_mknod(const struct dentry *dentry,
72903+ const struct vfsmount *mnt, const int mode)
72904+{
72905+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
72906+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
72907+ proc_is_chrooted(current)) {
72908+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
72909+ return -EPERM;
72910+ }
72911+#endif
72912+ return 0;
72913+}
72914+
72915+int
72916+gr_handle_chroot_mount(const struct dentry *dentry,
72917+ const struct vfsmount *mnt, const char *dev_name)
72918+{
72919+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
72920+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
72921+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
72922+ return -EPERM;
72923+ }
72924+#endif
72925+ return 0;
72926+}
72927+
72928+int
72929+gr_handle_chroot_pivot(void)
72930+{
72931+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
72932+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
72933+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
72934+ return -EPERM;
72935+ }
72936+#endif
72937+ return 0;
72938+}
72939+
72940+int
72941+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
72942+{
72943+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
72944+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
72945+ !gr_is_outside_chroot(dentry, mnt)) {
72946+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
72947+ return -EPERM;
72948+ }
72949+#endif
72950+ return 0;
72951+}
72952+
72953+extern const char *captab_log[];
72954+extern int captab_log_entries;
72955+
72956+int
72957+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72958+{
72959+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72960+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
72961+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
72962+ if (cap_raised(chroot_caps, cap)) {
72963+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
72964+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
72965+ }
72966+ return 0;
72967+ }
72968+ }
72969+#endif
72970+ return 1;
72971+}
72972+
72973+int
72974+gr_chroot_is_capable(const int cap)
72975+{
72976+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72977+ return gr_task_chroot_is_capable(current, current_cred(), cap);
72978+#endif
72979+ return 1;
72980+}
72981+
72982+int
72983+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
72984+{
72985+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72986+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
72987+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
72988+ if (cap_raised(chroot_caps, cap)) {
72989+ return 0;
72990+ }
72991+ }
72992+#endif
72993+ return 1;
72994+}
72995+
72996+int
72997+gr_chroot_is_capable_nolog(const int cap)
72998+{
72999+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
73000+ return gr_task_chroot_is_capable_nolog(current, cap);
73001+#endif
73002+ return 1;
73003+}
73004+
73005+int
73006+gr_handle_chroot_sysctl(const int op)
73007+{
73008+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
73009+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
73010+ proc_is_chrooted(current))
73011+ return -EACCES;
73012+#endif
73013+ return 0;
73014+}
73015+
73016+void
73017+gr_handle_chroot_chdir(const struct path *path)
73018+{
73019+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
73020+ if (grsec_enable_chroot_chdir)
73021+ set_fs_pwd(current->fs, path);
73022+#endif
73023+ return;
73024+}
73025+
73026+int
73027+gr_handle_chroot_chmod(const struct dentry *dentry,
73028+ const struct vfsmount *mnt, const int mode)
73029+{
73030+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
73031+ /* allow chmod +s on directories, but not files */
73032+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
73033+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
73034+ proc_is_chrooted(current)) {
73035+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
73036+ return -EPERM;
73037+ }
73038+#endif
73039+ return 0;
73040+}
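/* The chmod test in gr_handle_chroot_chmod() above blocks setuid always,
 * but setgid only when group-execute is also being set: on Linux, setgid
 * without group-exec on a file conventionally marks mandatory locking, not
 * a privilege bit.  Quick demo of the predicate -- a userspace sketch with
 * an illustrative helper name. */
#include <stdio.h>
#include <sys/stat.h>

static int is_priv_mode(int mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d %d %d\n",
	       is_priv_mode(04755),	/* 1: setuid */
	       is_priv_mode(02755),	/* 1: setgid with group exec */
	       is_priv_mode(02600));	/* 0: setgid without group exec */
	return 0;
}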
73041diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
73042new file mode 100644
73043index 0000000..4d6fce8
73044--- /dev/null
73045+++ b/grsecurity/grsec_disabled.c
73046@@ -0,0 +1,433 @@
73047+#include <linux/kernel.h>
73048+#include <linux/module.h>
73049+#include <linux/sched.h>
73050+#include <linux/file.h>
73051+#include <linux/fs.h>
73052+#include <linux/kdev_t.h>
73053+#include <linux/net.h>
73054+#include <linux/in.h>
73055+#include <linux/ip.h>
73056+#include <linux/skbuff.h>
73057+#include <linux/sysctl.h>
73058+
73059+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
73060+void
73061+pax_set_initial_flags(struct linux_binprm *bprm)
73062+{
73063+ return;
73064+}
73065+#endif
73066+
73067+#ifdef CONFIG_SYSCTL
73068+__u32
73069+gr_handle_sysctl(const struct ctl_table * table, const int op)
73070+{
73071+ return 0;
73072+}
73073+#endif
73074+
73075+#ifdef CONFIG_TASKSTATS
73076+int gr_is_taskstats_denied(int pid)
73077+{
73078+ return 0;
73079+}
73080+#endif
73081+
73082+int
73083+gr_acl_is_enabled(void)
73084+{
73085+ return 0;
73086+}
73087+
73088+void
73089+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
73090+{
73091+ return;
73092+}
73093+
73094+int
73095+gr_handle_rawio(const struct inode *inode)
73096+{
73097+ return 0;
73098+}
73099+
73100+void
73101+gr_acl_handle_psacct(struct task_struct *task, const long code)
73102+{
73103+ return;
73104+}
73105+
73106+int
73107+gr_handle_ptrace(struct task_struct *task, const long request)
73108+{
73109+ return 0;
73110+}
73111+
73112+int
73113+gr_handle_proc_ptrace(struct task_struct *task)
73114+{
73115+ return 0;
73116+}
73117+
73118+int
73119+gr_set_acls(const int type)
73120+{
73121+ return 0;
73122+}
73123+
73124+int
73125+gr_check_hidden_task(const struct task_struct *tsk)
73126+{
73127+ return 0;
73128+}
73129+
73130+int
73131+gr_check_protected_task(const struct task_struct *task)
73132+{
73133+ return 0;
73134+}
73135+
73136+int
73137+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
73138+{
73139+ return 0;
73140+}
73141+
73142+void
73143+gr_copy_label(struct task_struct *tsk)
73144+{
73145+ return;
73146+}
73147+
73148+void
73149+gr_set_pax_flags(struct task_struct *task)
73150+{
73151+ return;
73152+}
73153+
73154+int
73155+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
73156+ const int unsafe_share)
73157+{
73158+ return 0;
73159+}
73160+
73161+void
73162+gr_handle_delete(const ino_t ino, const dev_t dev)
73163+{
73164+ return;
73165+}
73166+
73167+void
73168+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
73169+{
73170+ return;
73171+}
73172+
73173+void
73174+gr_handle_crash(struct task_struct *task, const int sig)
73175+{
73176+ return;
73177+}
73178+
73179+int
73180+gr_check_crash_exec(const struct file *filp)
73181+{
73182+ return 0;
73183+}
73184+
73185+int
73186+gr_check_crash_uid(const kuid_t uid)
73187+{
73188+ return 0;
73189+}
73190+
73191+void
73192+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
73193+ struct dentry *old_dentry,
73194+ struct dentry *new_dentry,
73195+ struct vfsmount *mnt, const __u8 replace)
73196+{
73197+ return;
73198+}
73199+
73200+int
73201+gr_search_socket(const int family, const int type, const int protocol)
73202+{
73203+ return 1;
73204+}
73205+
73206+int
73207+gr_search_connectbind(const int mode, const struct socket *sock,
73208+ const struct sockaddr_in *addr)
73209+{
73210+ return 0;
73211+}
73212+
73213+void
73214+gr_handle_alertkill(struct task_struct *task)
73215+{
73216+ return;
73217+}
73218+
73219+__u32
73220+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
73221+{
73222+ return 1;
73223+}
73224+
73225+__u32
73226+gr_acl_handle_hidden_file(const struct dentry * dentry,
73227+ const struct vfsmount * mnt)
73228+{
73229+ return 1;
73230+}
73231+
73232+__u32
73233+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
73234+ int acc_mode)
73235+{
73236+ return 1;
73237+}
73238+
73239+__u32
73240+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
73241+{
73242+ return 1;
73243+}
73244+
73245+__u32
73246+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
73247+{
73248+ return 1;
73249+}
73250+
73251+int
73252+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
73253+ unsigned int *vm_flags)
73254+{
73255+ return 1;
73256+}
73257+
73258+__u32
73259+gr_acl_handle_truncate(const struct dentry * dentry,
73260+ const struct vfsmount * mnt)
73261+{
73262+ return 1;
73263+}
73264+
73265+__u32
73266+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
73267+{
73268+ return 1;
73269+}
73270+
73271+__u32
73272+gr_acl_handle_access(const struct dentry * dentry,
73273+ const struct vfsmount * mnt, const int fmode)
73274+{
73275+ return 1;
73276+}
73277+
73278+__u32
73279+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
73280+ umode_t *mode)
73281+{
73282+ return 1;
73283+}
73284+
73285+__u32
73286+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
73287+{
73288+ return 1;
73289+}
73290+
73291+__u32
73292+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
73293+{
73294+ return 1;
73295+}
73296+
73297+__u32
73298+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
73299+{
73300+ return 1;
73301+}
73302+
73303+void
73304+grsecurity_init(void)
73305+{
73306+ return;
73307+}
73308+
73309+umode_t gr_acl_umask(void)
73310+{
73311+ return 0;
73312+}
73313+
73314+__u32
73315+gr_acl_handle_mknod(const struct dentry * new_dentry,
73316+ const struct dentry * parent_dentry,
73317+ const struct vfsmount * parent_mnt,
73318+ const int mode)
73319+{
73320+ return 1;
73321+}
73322+
73323+__u32
73324+gr_acl_handle_mkdir(const struct dentry * new_dentry,
73325+ const struct dentry * parent_dentry,
73326+ const struct vfsmount * parent_mnt)
73327+{
73328+ return 1;
73329+}
73330+
73331+__u32
73332+gr_acl_handle_symlink(const struct dentry * new_dentry,
73333+ const struct dentry * parent_dentry,
73334+ const struct vfsmount * parent_mnt, const struct filename *from)
73335+{
73336+ return 1;
73337+}
73338+
73339+__u32
73340+gr_acl_handle_link(const struct dentry * new_dentry,
73341+ const struct dentry * parent_dentry,
73342+ const struct vfsmount * parent_mnt,
73343+ const struct dentry * old_dentry,
73344+ const struct vfsmount * old_mnt, const struct filename *to)
73345+{
73346+ return 1;
73347+}
73348+
73349+int
73350+gr_acl_handle_rename(const struct dentry *new_dentry,
73351+ const struct dentry *parent_dentry,
73352+ const struct vfsmount *parent_mnt,
73353+ const struct dentry *old_dentry,
73354+ const struct inode *old_parent_inode,
73355+ const struct vfsmount *old_mnt, const struct filename *newname)
73356+{
73357+ return 0;
73358+}
73359+
73360+int
73361+gr_acl_handle_filldir(const struct file *file, const char *name,
73362+ const int namelen, const ino_t ino)
73363+{
73364+ return 1;
73365+}
73366+
73367+int
73368+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
73369+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
73370+{
73371+ return 1;
73372+}
73373+
73374+int
73375+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
73376+{
73377+ return 0;
73378+}
73379+
73380+int
73381+gr_search_accept(const struct socket *sock)
73382+{
73383+ return 0;
73384+}
73385+
73386+int
73387+gr_search_listen(const struct socket *sock)
73388+{
73389+ return 0;
73390+}
73391+
73392+int
73393+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
73394+{
73395+ return 0;
73396+}
73397+
73398+__u32
73399+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
73400+{
73401+ return 1;
73402+}
73403+
73404+__u32
73405+gr_acl_handle_creat(const struct dentry * dentry,
73406+ const struct dentry * p_dentry,
73407+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
73408+ const int imode)
73409+{
73410+ return 1;
73411+}
73412+
73413+void
73414+gr_acl_handle_exit(void)
73415+{
73416+ return;
73417+}
73418+
73419+int
73420+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
73421+{
73422+ return 1;
73423+}
73424+
73425+void
73426+gr_set_role_label(const kuid_t uid, const kgid_t gid)
73427+{
73428+ return;
73429+}
73430+
73431+int
73432+gr_acl_handle_procpidmem(const struct task_struct *task)
73433+{
73434+ return 0;
73435+}
73436+
73437+int
73438+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
73439+{
73440+ return 0;
73441+}
73442+
73443+int
73444+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
73445+{
73446+ return 0;
73447+}
73448+
73449+int
73450+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
73451+{
73452+ return 0;
73453+}
73454+
73455+int
73456+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
73457+{
73458+ return 0;
73459+}
73460+
73461+int gr_acl_enable_at_secure(void)
73462+{
73463+ return 0;
73464+}
73465+
73466+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
73467+{
73468+ return dentry->d_sb->s_dev;
73469+}
73470+
73471+void gr_put_exec_file(struct task_struct *task)
73472+{
73473+ return;
73474+}
73475+
73476+#ifdef CONFIG_SECURITY
73477+EXPORT_SYMBOL_GPL(gr_check_user_change);
73478+EXPORT_SYMBOL_GPL(gr_check_group_change);
73479+#endif
73480diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
73481new file mode 100644
73482index 0000000..f35f454
73483--- /dev/null
73484+++ b/grsecurity/grsec_exec.c
73485@@ -0,0 +1,187 @@
73486+#include <linux/kernel.h>
73487+#include <linux/sched.h>
73488+#include <linux/file.h>
73489+#include <linux/binfmts.h>
73490+#include <linux/fs.h>
73491+#include <linux/types.h>
73492+#include <linux/grdefs.h>
73493+#include <linux/grsecurity.h>
73494+#include <linux/grinternal.h>
73495+#include <linux/capability.h>
73496+#include <linux/module.h>
73497+#include <linux/compat.h>
73498+
73499+#include <asm/uaccess.h>
73500+
73501+#ifdef CONFIG_GRKERNSEC_EXECLOG
73502+static char gr_exec_arg_buf[132];
73503+static DEFINE_MUTEX(gr_exec_arg_mutex);
73504+#endif
73505+
73506+struct user_arg_ptr {
73507+#ifdef CONFIG_COMPAT
73508+ bool is_compat;
73509+#endif
73510+ union {
73511+ const char __user *const __user *native;
73512+#ifdef CONFIG_COMPAT
73513+ const compat_uptr_t __user *compat;
73514+#endif
73515+ } ptr;
73516+};
73517+
73518+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
73519+
73520+void
73521+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
73522+{
73523+#ifdef CONFIG_GRKERNSEC_EXECLOG
73524+ char *grarg = gr_exec_arg_buf;
73525+ unsigned int i, x, execlen = 0;
73526+ char c;
73527+
73528+ if (!((grsec_enable_execlog && grsec_enable_group &&
73529+ in_group_p(grsec_audit_gid))
73530+ || (grsec_enable_execlog && !grsec_enable_group)))
73531+ return;
73532+
73533+ mutex_lock(&gr_exec_arg_mutex);
73534+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
73535+
73536+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
73537+ const char __user *p;
73538+ unsigned int len;
73539+
73540+ p = get_user_arg_ptr(argv, i);
73541+ if (IS_ERR(p))
73542+ goto log;
73543+
73544+ len = strnlen_user(p, 128 - execlen);
73545+ if (len > 128 - execlen)
73546+ len = 128 - execlen;
73547+ else if (len > 0)
73548+ len--;
73549+ if (copy_from_user(grarg + execlen, p, len))
73550+ goto log;
73551+
73552+ /* rewrite unprintable characters */
73553+ for (x = 0; x < len; x++) {
73554+ c = *(grarg + execlen + x);
73555+ if (c < 32 || c > 126)
73556+ *(grarg + execlen + x) = ' ';
73557+ }
73558+
73559+ execlen += len;
73560+ *(grarg + execlen) = ' ';
73561+ *(grarg + execlen + 1) = '\0';
73562+ execlen++;
73563+ }
73564+
73565+ log:
73566+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
73567+ bprm->file->f_path.mnt, grarg);
73568+ mutex_unlock(&gr_exec_arg_mutex);
73569+#endif
73570+ return;
73571+}
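
gr_handle_exec_args() flattens the exec arguments into a fixed 132-byte buffer (up to 128 bytes of payload plus separators) and rewrites unprintable bytes to spaces before logging. A self-contained userspace sketch of the same flattening, using its own argv instead of user-space pointers:

    #include <stdio.h>
    #include <string.h>

    int main(int argc, char *argv[])
    {
        char buf[132] = { 0 };
        size_t len = 0, x, n;
        int i;

        for (i = 0; i < argc && len < 128; i++) {
            n = strlen(argv[i]);
            if (n > 128 - len)
                n = 128 - len;
            memcpy(buf + len, argv[i], n);
            for (x = 0; x < n; x++)          /* rewrite unprintable bytes */
                if (buf[len + x] < 32 || buf[len + x] > 126)
                    buf[len + x] = ' ';
            len += n;
            buf[len++] = ' ';                /* argument separator */
        }
        printf("exec log: %s\n", buf);
        return 0;
    }
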
73572+
73573+#ifdef CONFIG_GRKERNSEC
73574+extern int gr_acl_is_capable(const int cap);
73575+extern int gr_acl_is_capable_nolog(const int cap);
73576+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
73577+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
73578+extern int gr_chroot_is_capable(const int cap);
73579+extern int gr_chroot_is_capable_nolog(const int cap);
73580+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
73581+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
73582+#endif
73583+
73584+const char *captab_log[] = {
73585+ "CAP_CHOWN",
73586+ "CAP_DAC_OVERRIDE",
73587+ "CAP_DAC_READ_SEARCH",
73588+ "CAP_FOWNER",
73589+ "CAP_FSETID",
73590+ "CAP_KILL",
73591+ "CAP_SETGID",
73592+ "CAP_SETUID",
73593+ "CAP_SETPCAP",
73594+ "CAP_LINUX_IMMUTABLE",
73595+ "CAP_NET_BIND_SERVICE",
73596+ "CAP_NET_BROADCAST",
73597+ "CAP_NET_ADMIN",
73598+ "CAP_NET_RAW",
73599+ "CAP_IPC_LOCK",
73600+ "CAP_IPC_OWNER",
73601+ "CAP_SYS_MODULE",
73602+ "CAP_SYS_RAWIO",
73603+ "CAP_SYS_CHROOT",
73604+ "CAP_SYS_PTRACE",
73605+ "CAP_SYS_PACCT",
73606+ "CAP_SYS_ADMIN",
73607+ "CAP_SYS_BOOT",
73608+ "CAP_SYS_NICE",
73609+ "CAP_SYS_RESOURCE",
73610+ "CAP_SYS_TIME",
73611+ "CAP_SYS_TTY_CONFIG",
73612+ "CAP_MKNOD",
73613+ "CAP_LEASE",
73614+ "CAP_AUDIT_WRITE",
73615+ "CAP_AUDIT_CONTROL",
73616+ "CAP_SETFCAP",
73617+ "CAP_MAC_OVERRIDE",
73618+ "CAP_MAC_ADMIN",
73619+ "CAP_SYSLOG",
73620+ "CAP_WAKE_ALARM"
73621+};
73622+
73623+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
73624+
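captab_log_entries exists so callers such as gr_task_chroot_is_capable() can bound-check a capability number before indexing the name table, since a kernel can define capabilities newer than the table knows about. A minimal sketch of that bounded lookup (the two-entry table here is illustrative):

    #include <stdio.h>

    static const char *captab_log[] = { "CAP_CHOWN", "CAP_DAC_OVERRIDE" /* ... */ };
    static const int captab_log_entries =
        sizeof(captab_log) / sizeof(captab_log[0]);

    static const char *cap_name(int cap)
    {
        return (cap >= 0 && cap < captab_log_entries) ? captab_log[cap] : "CAP_?";
    }

    int main(void)
    {
        printf("%s %s\n", cap_name(1), cap_name(99)); /* CAP_DAC_OVERRIDE CAP_? */
        return 0;
    }
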
73625+int gr_is_capable(const int cap)
73626+{
73627+#ifdef CONFIG_GRKERNSEC
73628+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
73629+ return 1;
73630+ return 0;
73631+#else
73632+ return 1;
73633+#endif
73634+}
73635+
73636+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
73637+{
73638+#ifdef CONFIG_GRKERNSEC
73639+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
73640+ return 1;
73641+ return 0;
73642+#else
73643+ return 1;
73644+#endif
73645+}
73646+
73647+int gr_is_capable_nolog(const int cap)
73648+{
73649+#ifdef CONFIG_GRKERNSEC
73650+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
73651+ return 1;
73652+ return 0;
73653+#else
73654+ return 1;
73655+#endif
73656+}
73657+
73658+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
73659+{
73660+#ifdef CONFIG_GRKERNSEC
73661+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
73662+ return 1;
73663+ return 0;
73664+#else
73665+ return 1;
73666+#endif
73667+}
73668+
73669+EXPORT_SYMBOL_GPL(gr_is_capable);
73670+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
73671+EXPORT_SYMBOL_GPL(gr_task_is_capable);
73672+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
73673diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
73674new file mode 100644
73675index 0000000..06cc6ea
73676--- /dev/null
73677+++ b/grsecurity/grsec_fifo.c
73678@@ -0,0 +1,24 @@
73679+#include <linux/kernel.h>
73680+#include <linux/sched.h>
73681+#include <linux/fs.h>
73682+#include <linux/file.h>
73683+#include <linux/grinternal.h>
73684+
73685+int
73686+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
73687+ const struct dentry *dir, const int flag, const int acc_mode)
73688+{
73689+#ifdef CONFIG_GRKERNSEC_FIFO
73690+ const struct cred *cred = current_cred();
73691+
73692+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
73693+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
73694+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
73695+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
73696+ if (!inode_permission(dentry->d_inode, acc_mode))
73697+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
73698+ return -EACCES;
73699+ }
73700+#endif
73701+ return 0;
73702+}
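
gr_handle_fifo() refuses to open a FIFO sitting in a sticky directory (absent O_EXCL) when the FIFO belongs neither to the directory owner nor to the opener, defeating the classic /tmp FIFO data-capture trick. A userspace sketch of the predicate:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    static int fifo_open_denied(mode_t fifo_mode, int flags, mode_t dir_mode,
                                uid_t dir_uid, uid_t fifo_uid, uid_t fsuid)
    {
        return S_ISFIFO(fifo_mode) && !(flags & O_EXCL) &&
               (dir_mode & S_ISVTX) &&
               fifo_uid != dir_uid && fsuid != fifo_uid;
    }

    int main(void)
    {
        /* FIFO planted by uid 1001 in a root-owned sticky dir; we are uid 1000 */
        printf("deny: %d\n", fifo_open_denied(S_IFIFO | 0666, O_RDONLY,
                                              S_IFDIR | 01777, 0, 1001, 1000));
        return 0;
    }
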
73703diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
73704new file mode 100644
73705index 0000000..8ca18bf
73706--- /dev/null
73707+++ b/grsecurity/grsec_fork.c
73708@@ -0,0 +1,23 @@
73709+#include <linux/kernel.h>
73710+#include <linux/sched.h>
73711+#include <linux/grsecurity.h>
73712+#include <linux/grinternal.h>
73713+#include <linux/errno.h>
73714+
73715+void
73716+gr_log_forkfail(const int retval)
73717+{
73718+#ifdef CONFIG_GRKERNSEC_FORKFAIL
73719+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
73720+ switch (retval) {
73721+ case -EAGAIN:
73722+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
73723+ break;
73724+ case -ENOMEM:
73725+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
73726+ break;
73727+ }
73728+ }
73729+#endif
73730+ return;
73731+}
73732diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
73733new file mode 100644
73734index 0000000..ae6c028
73735--- /dev/null
73736+++ b/grsecurity/grsec_init.c
73737@@ -0,0 +1,272 @@
73738+#include <linux/kernel.h>
73739+#include <linux/sched.h>
73740+#include <linux/mm.h>
73741+#include <linux/gracl.h>
73742+#include <linux/slab.h>
73743+#include <linux/vmalloc.h>
73744+#include <linux/percpu.h>
73745+#include <linux/module.h>
73746+
73747+int grsec_enable_ptrace_readexec;
73748+int grsec_enable_setxid;
73749+int grsec_enable_symlinkown;
73750+kgid_t grsec_symlinkown_gid;
73751+int grsec_enable_brute;
73752+int grsec_enable_link;
73753+int grsec_enable_dmesg;
73754+int grsec_enable_harden_ptrace;
73755+int grsec_enable_harden_ipc;
73756+int grsec_enable_fifo;
73757+int grsec_enable_execlog;
73758+int grsec_enable_signal;
73759+int grsec_enable_forkfail;
73760+int grsec_enable_audit_ptrace;
73761+int grsec_enable_time;
73762+int grsec_enable_group;
73763+kgid_t grsec_audit_gid;
73764+int grsec_enable_chdir;
73765+int grsec_enable_mount;
73766+int grsec_enable_rofs;
73767+int grsec_deny_new_usb;
73768+int grsec_enable_chroot_findtask;
73769+int grsec_enable_chroot_mount;
73770+int grsec_enable_chroot_shmat;
73771+int grsec_enable_chroot_fchdir;
73772+int grsec_enable_chroot_double;
73773+int grsec_enable_chroot_pivot;
73774+int grsec_enable_chroot_chdir;
73775+int grsec_enable_chroot_chmod;
73776+int grsec_enable_chroot_mknod;
73777+int grsec_enable_chroot_nice;
73778+int grsec_enable_chroot_execlog;
73779+int grsec_enable_chroot_caps;
73780+int grsec_enable_chroot_sysctl;
73781+int grsec_enable_chroot_unix;
73782+int grsec_enable_tpe;
73783+kgid_t grsec_tpe_gid;
73784+int grsec_enable_blackhole;
73785+#ifdef CONFIG_IPV6_MODULE
73786+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
73787+#endif
73788+int grsec_lastack_retries;
73789+int grsec_enable_tpe_all;
73790+int grsec_enable_tpe_invert;
73791+int grsec_enable_socket_all;
73792+kgid_t grsec_socket_all_gid;
73793+int grsec_enable_socket_client;
73794+kgid_t grsec_socket_client_gid;
73795+int grsec_enable_socket_server;
73796+kgid_t grsec_socket_server_gid;
73797+int grsec_resource_logging;
73798+int grsec_disable_privio;
73799+int grsec_enable_log_rwxmaps;
73800+int grsec_lock;
73801+
73802+DEFINE_SPINLOCK(grsec_alert_lock);
73803+unsigned long grsec_alert_wtime = 0;
73804+unsigned long grsec_alert_fyet = 0;
73805+
73806+DEFINE_SPINLOCK(grsec_audit_lock);
73807+
73808+DEFINE_RWLOCK(grsec_exec_file_lock);
73809+
73810+char *gr_shared_page[4];
73811+
73812+char *gr_alert_log_fmt;
73813+char *gr_audit_log_fmt;
73814+char *gr_alert_log_buf;
73815+char *gr_audit_log_buf;
73816+
73817+void __init
73818+grsecurity_init(void)
73819+{
73820+ int j;
73821+ /* create the per-cpu shared pages */
73822+
73823+#ifdef CONFIG_X86
73824+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
73825+#endif
73826+
73827+ for (j = 0; j < 4; j++) {
73828+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
73829+ if (gr_shared_page[j] == NULL) {
73830+ panic("Unable to allocate grsecurity shared page");
73831+ return;
73832+ }
73833+ }
73834+
73835+ /* allocate log buffers */
73836+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
73837+ if (!gr_alert_log_fmt) {
73838+ panic("Unable to allocate grsecurity alert log format buffer");
73839+ return;
73840+ }
73841+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
73842+ if (!gr_audit_log_fmt) {
73843+ panic("Unable to allocate grsecurity audit log format buffer");
73844+ return;
73845+ }
73846+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
73847+ if (!gr_alert_log_buf) {
73848+ panic("Unable to allocate grsecurity alert log buffer");
73849+ return;
73850+ }
73851+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
73852+ if (!gr_audit_log_buf) {
73853+ panic("Unable to allocate grsecurity audit log buffer");
73854+ return;
73855+ }
73856+
73857+#ifdef CONFIG_GRKERNSEC_IO
73858+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
73859+ grsec_disable_privio = 1;
73860+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
73861+ grsec_disable_privio = 1;
73862+#else
73863+ grsec_disable_privio = 0;
73864+#endif
73865+#endif
73866+
73867+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
73868+ /* for backward compatibility, tpe_invert always defaults to on if
73869+ enabled in the kernel
73870+ */
73871+ grsec_enable_tpe_invert = 1;
73872+#endif
73873+
73874+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
73875+#ifndef CONFIG_GRKERNSEC_SYSCTL
73876+ grsec_lock = 1;
73877+#endif
73878+
73879+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
73880+ grsec_enable_log_rwxmaps = 1;
73881+#endif
73882+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
73883+ grsec_enable_group = 1;
73884+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
73885+#endif
73886+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
73887+ grsec_enable_ptrace_readexec = 1;
73888+#endif
73889+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
73890+ grsec_enable_chdir = 1;
73891+#endif
73892+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73893+ grsec_enable_harden_ptrace = 1;
73894+#endif
73895+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
73896+ grsec_enable_harden_ipc = 1;
73897+#endif
73898+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73899+ grsec_enable_mount = 1;
73900+#endif
73901+#ifdef CONFIG_GRKERNSEC_LINK
73902+ grsec_enable_link = 1;
73903+#endif
73904+#ifdef CONFIG_GRKERNSEC_BRUTE
73905+ grsec_enable_brute = 1;
73906+#endif
73907+#ifdef CONFIG_GRKERNSEC_DMESG
73908+ grsec_enable_dmesg = 1;
73909+#endif
73910+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73911+ grsec_enable_blackhole = 1;
73912+ grsec_lastack_retries = 4;
73913+#endif
73914+#ifdef CONFIG_GRKERNSEC_FIFO
73915+ grsec_enable_fifo = 1;
73916+#endif
73917+#ifdef CONFIG_GRKERNSEC_EXECLOG
73918+ grsec_enable_execlog = 1;
73919+#endif
73920+#ifdef CONFIG_GRKERNSEC_SETXID
73921+ grsec_enable_setxid = 1;
73922+#endif
73923+#ifdef CONFIG_GRKERNSEC_SIGNAL
73924+ grsec_enable_signal = 1;
73925+#endif
73926+#ifdef CONFIG_GRKERNSEC_FORKFAIL
73927+ grsec_enable_forkfail = 1;
73928+#endif
73929+#ifdef CONFIG_GRKERNSEC_TIME
73930+ grsec_enable_time = 1;
73931+#endif
73932+#ifdef CONFIG_GRKERNSEC_RESLOG
73933+ grsec_resource_logging = 1;
73934+#endif
73935+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73936+ grsec_enable_chroot_findtask = 1;
73937+#endif
73938+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
73939+ grsec_enable_chroot_unix = 1;
73940+#endif
73941+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
73942+ grsec_enable_chroot_mount = 1;
73943+#endif
73944+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
73945+ grsec_enable_chroot_fchdir = 1;
73946+#endif
73947+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
73948+ grsec_enable_chroot_shmat = 1;
73949+#endif
73950+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
73951+ grsec_enable_audit_ptrace = 1;
73952+#endif
73953+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
73954+ grsec_enable_chroot_double = 1;
73955+#endif
73956+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
73957+ grsec_enable_chroot_pivot = 1;
73958+#endif
73959+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
73960+ grsec_enable_chroot_chdir = 1;
73961+#endif
73962+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
73963+ grsec_enable_chroot_chmod = 1;
73964+#endif
73965+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
73966+ grsec_enable_chroot_mknod = 1;
73967+#endif
73968+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
73969+ grsec_enable_chroot_nice = 1;
73970+#endif
73971+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
73972+ grsec_enable_chroot_execlog = 1;
73973+#endif
73974+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
73975+ grsec_enable_chroot_caps = 1;
73976+#endif
73977+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
73978+ grsec_enable_chroot_sysctl = 1;
73979+#endif
73980+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
73981+ grsec_enable_symlinkown = 1;
73982+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
73983+#endif
73984+#ifdef CONFIG_GRKERNSEC_TPE
73985+ grsec_enable_tpe = 1;
73986+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
73987+#ifdef CONFIG_GRKERNSEC_TPE_ALL
73988+ grsec_enable_tpe_all = 1;
73989+#endif
73990+#endif
73991+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
73992+ grsec_enable_socket_all = 1;
73993+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
73994+#endif
73995+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
73996+ grsec_enable_socket_client = 1;
73997+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
73998+#endif
73999+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
74000+ grsec_enable_socket_server = 1;
74001+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
74002+#endif
74003+#endif
74004+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
74005+ grsec_deny_new_usb = 1;
74006+#endif
74007+
74008+ return;
74009+}
74010diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
74011new file mode 100644
74012index 0000000..1773300
74013--- /dev/null
74014+++ b/grsecurity/grsec_ipc.c
74015@@ -0,0 +1,48 @@
74016+#include <linux/kernel.h>
74017+#include <linux/mm.h>
74018+#include <linux/sched.h>
74019+#include <linux/file.h>
74020+#include <linux/ipc.h>
74021+#include <linux/ipc_namespace.h>
74022+#include <linux/grsecurity.h>
74023+#include <linux/grinternal.h>
74024+
74025+int
74026+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
74027+{
74028+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
74029+ int write;
74030+ int orig_granted_mode;
74031+ kuid_t euid;
74032+ kgid_t egid;
74033+
74034+ if (!grsec_enable_harden_ipc)
74035+ return 1;
74036+
74037+ euid = current_euid();
74038+ egid = current_egid();
74039+
74040+ write = requested_mode & 00002;
74041+ orig_granted_mode = ipcp->mode;
74042+
74043+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
74044+ orig_granted_mode >>= 6;
74045+ else {
74046+ /* any world permission bits suggest a misconfigured mode: grant nothing via group/other */
74047+ if (orig_granted_mode & 0007)
74048+ orig_granted_mode = 0;
74049+ /* otherwise do an egid-only check */
74050+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
74051+ orig_granted_mode >>= 3;
74052+ /* otherwise, no access */
74053+ else
74054+ orig_granted_mode = 0;
74055+ }
74056+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
74057+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
74058+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
74059+ return 0;
74060+ }
74061+#endif
74062+ return 1;
74063+}
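
gr_ipc_permitted() recomputes which permission triplet of the original IPC mode applies to the caller, shifting the 9-bit mode exactly as classic UNIX checks do, and then denies an access that the plain mode check granted only through bits the recomputed view would not allow. A sketch of the triplet selection:

    #include <stdio.h>

    static int granted_bits(int mode, int is_owner, int is_group)
    {
        if (is_owner)
            return (mode >> 6) & 7;   /* owner triplet */
        if (is_group)
            return (mode >> 3) & 7;   /* group triplet */
        return mode & 7;              /* other triplet */
    }

    int main(void)
    {
        printf("owner of 0640 gets %o\n", granted_bits(0640, 1, 0)); /* 6 */
        printf("other of 0640 gets %o\n", granted_bits(0640, 0, 0)); /* 0 */
        return 0;
    }
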
74064diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
74065new file mode 100644
74066index 0000000..5e05e20
74067--- /dev/null
74068+++ b/grsecurity/grsec_link.c
74069@@ -0,0 +1,58 @@
74070+#include <linux/kernel.h>
74071+#include <linux/sched.h>
74072+#include <linux/fs.h>
74073+#include <linux/file.h>
74074+#include <linux/grinternal.h>
74075+
74076+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
74077+{
74078+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
74079+ const struct inode *link_inode = link->dentry->d_inode;
74080+
74081+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
74082+ /* ignore root-owned links, e.g. /proc/self */
74083+ gr_is_global_nonroot(link_inode->i_uid) && target &&
74084+ !uid_eq(link_inode->i_uid, target->i_uid)) {
74085+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
74086+ return 1;
74087+ }
74088+#endif
74089+ return 0;
74090+}
74091+
74092+int
74093+gr_handle_follow_link(const struct inode *parent,
74094+ const struct inode *inode,
74095+ const struct dentry *dentry, const struct vfsmount *mnt)
74096+{
74097+#ifdef CONFIG_GRKERNSEC_LINK
74098+ const struct cred *cred = current_cred();
74099+
74100+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
74101+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
74102+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
74103+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
74104+ return -EACCES;
74105+ }
74106+#endif
74107+ return 0;
74108+}
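
gr_handle_follow_link() applies the sticky-symlink rule that mainline later adopted as the fs.protected_symlinks sysctl: do not follow a symlink found in a world-writable sticky directory unless it is owned by the directory owner or by the process itself. A userspace sketch of the predicate:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    static int follow_denied(mode_t dir_mode, uid_t dir_uid,
                             uid_t link_uid, uid_t fsuid)
    {
        return (dir_mode & S_ISVTX) && (dir_mode & S_IWOTH) &&
               dir_uid != link_uid && fsuid != link_uid;
    }

    int main(void)
    {
        /* /tmp-style dir (root, 01777); link planted by uid 1001; we are 1000 */
        printf("deny: %d\n", follow_denied(S_IFDIR | 01777, 0, 1001, 1000));
        return 0;
    }
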
74109+
74110+int
74111+gr_handle_hardlink(const struct dentry *dentry,
74112+ const struct vfsmount *mnt,
74113+ struct inode *inode, const int mode, const struct filename *to)
74114+{
74115+#ifdef CONFIG_GRKERNSEC_LINK
74116+ const struct cred *cred = current_cred();
74117+
74118+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
74119+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
74120+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
74121+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
74122+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
74123+ return -EPERM;
74124+ }
74125+#endif
74126+ return 0;
74127+}
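
gr_handle_hardlink() is likewise the ancestor of mainline's fs.protected_hardlinks: an unprivileged user (no CAP_FOWNER) may hardlink only regular files they own or could open read-write, and never setuid/setgid-executable binaries. A reduced userspace sketch, with is_privileged_binary() approximated by the setuid/setgid-executable bits:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    /* reduced: the real is_privileged_binary() also considers file capabilities */
    static int is_privileged_binary(mode_t mode)
    {
        return (mode & S_ISUID) ||
               ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
    }

    static int hardlink_denied(mode_t mode, uid_t inode_uid, uid_t fsuid,
                               int can_read_write)
    {
        if (fsuid == inode_uid)
            return 0;                 /* linking one's own file is fine */
        return !S_ISREG(mode) || is_privileged_binary(mode) || !can_read_write;
    }

    int main(void)
    {
        printf("deny suid binary: %d\n",
               hardlink_denied(S_IFREG | 04755, 0, 1000, 1)); /* 1 */
        return 0;
    }
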
74128diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
74129new file mode 100644
74130index 0000000..dbe0a6b
74131--- /dev/null
74132+++ b/grsecurity/grsec_log.c
74133@@ -0,0 +1,341 @@
74134+#include <linux/kernel.h>
74135+#include <linux/sched.h>
74136+#include <linux/file.h>
74137+#include <linux/tty.h>
74138+#include <linux/fs.h>
74139+#include <linux/mm.h>
74140+#include <linux/grinternal.h>
74141+
74142+#ifdef CONFIG_TREE_PREEMPT_RCU
74143+#define DISABLE_PREEMPT() preempt_disable()
74144+#define ENABLE_PREEMPT() preempt_enable()
74145+#else
74146+#define DISABLE_PREEMPT()
74147+#define ENABLE_PREEMPT()
74148+#endif
74149+
74150+#define BEGIN_LOCKS(x) \
74151+ DISABLE_PREEMPT(); \
74152+ rcu_read_lock(); \
74153+ read_lock(&tasklist_lock); \
74154+ read_lock(&grsec_exec_file_lock); \
74155+ if (x != GR_DO_AUDIT) \
74156+ spin_lock(&grsec_alert_lock); \
74157+ else \
74158+ spin_lock(&grsec_audit_lock)
74159+
74160+#define END_LOCKS(x) \
74161+ if (x != GR_DO_AUDIT) \
74162+ spin_unlock(&grsec_alert_lock); \
74163+ else \
74164+ spin_unlock(&grsec_audit_lock); \
74165+ read_unlock(&grsec_exec_file_lock); \
74166+ read_unlock(&tasklist_lock); \
74167+ rcu_read_unlock(); \
74168+ ENABLE_PREEMPT(); \
74169+ if (x == GR_DONT_AUDIT) \
74170+ gr_handle_alertkill(current)
74171+
74172+enum {
74173+ FLOODING,
74174+ NO_FLOODING
74175+};
74176+
74177+extern char *gr_alert_log_fmt;
74178+extern char *gr_audit_log_fmt;
74179+extern char *gr_alert_log_buf;
74180+extern char *gr_audit_log_buf;
74181+
74182+static int gr_log_start(int audit)
74183+{
74184+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
74185+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
74186+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
74187+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
74188+ unsigned long curr_secs = get_seconds();
74189+
74190+ if (audit == GR_DO_AUDIT)
74191+ goto set_fmt;
74192+
74193+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
74194+ grsec_alert_wtime = curr_secs;
74195+ grsec_alert_fyet = 0;
74196+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
74197+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
74198+ grsec_alert_fyet++;
74199+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
74200+ grsec_alert_wtime = curr_secs;
74201+ grsec_alert_fyet++;
74202+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
74203+ return FLOODING;
74204+ }
74205+ else return FLOODING;
74206+
74207+set_fmt:
74208+#endif
74209+ memset(buf, 0, PAGE_SIZE);
74210+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
74211+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
74212+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
74213+ } else if (current->signal->curr_ip) {
74214+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
74215+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
74216+ } else if (gr_acl_is_enabled()) {
74217+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
74218+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
74219+ } else {
74220+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
74221+ strcpy(buf, fmt);
74222+ }
74223+
74224+ return NO_FLOODING;
74225+}
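
gr_log_start() rate-limits alerts with a windowed burst counter: up to CONFIG_GRKERNSEC_FLOODBURST alerts per CONFIG_GRKERNSEC_FLOODTIME seconds, then one suppression notice and silence until the window expires. A userspace sketch of the same state machine (WINDOW and BURST stand in for the two config values):

    #include <stdio.h>
    #include <time.h>

    #define WINDOW 10
    #define BURST  6

    static time_t wtime;
    static unsigned long fyet;

    static int flooding(void)
    {
        time_t now = time(NULL);

        if (!wtime || now > wtime + WINDOW) {  /* window expired: reset */
            wtime = now;
            fyet = 0;
            return 0;
        }
        if (fyet < BURST) {                    /* still within burst budget */
            fyet++;
            return 0;
        }
        if (fyet == BURST) {                   /* announce suppression once */
            wtime = now;
            fyet++;
            printf("more alerts, logging disabled for %d seconds\n", WINDOW);
        }
        return 1;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 10; i++)
            printf("alert %d: %s\n", i, flooding() ? "suppressed" : "logged");
        return 0;
    }
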
74226+
74227+static void gr_log_middle(int audit, const char *msg, va_list ap)
74228+ __attribute__ ((format (printf, 2, 0)));
74229+
74230+static void gr_log_middle(int audit, const char *msg, va_list ap)
74231+{
74232+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
74233+ unsigned int len = strlen(buf);
74234+
74235+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
74236+
74237+ return;
74238+}
74239+
74240+static void gr_log_middle_varargs(int audit, const char *msg, ...)
74241+ __attribute__ ((format (printf, 2, 3)));
74242+
74243+static void gr_log_middle_varargs(int audit, const char *msg, ...)
74244+{
74245+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
74246+ unsigned int len = strlen(buf);
74247+ va_list ap;
74248+
74249+ va_start(ap, msg);
74250+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
74251+ va_end(ap);
74252+
74253+ return;
74254+}
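
gr_log_start(), the gr_log_middle*() helpers and gr_log_end() cooperate by appending at strlen(buf) into one shared page-sized buffer, each leaving room for the terminating NUL. A userspace sketch of that append pattern (BUF_SIZE stands in for PAGE_SIZE):

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 4096   /* stands in for PAGE_SIZE */

    static char buf[BUF_SIZE];

    static void log_append(const char *fmt, ...)
    {
        size_t len = strlen(buf);   /* append where the last helper stopped */
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(buf + len, BUF_SIZE - len - 1, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        log_append("grsec: ");
        log_append("denied %s by pid %d", "ptrace", 1234);
        printf("%s\n", buf);
        return 0;
    }
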
74255+
74256+static void gr_log_end(int audit, int append_default)
74257+{
74258+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
74259+ if (append_default) {
74260+ struct task_struct *task = current;
74261+ struct task_struct *parent = task->real_parent;
74262+ const struct cred *cred = __task_cred(task);
74263+ const struct cred *pcred = __task_cred(parent);
74264+ unsigned int len = strlen(buf);
74265+
74266+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74267+ }
74268+
74269+ printk("%s\n", buf);
74270+
74271+ return;
74272+}
74273+
74274+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
74275+{
74276+ int logtype;
74277+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
74278+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
74279+ void *voidptr = NULL;
74280+ int num1 = 0, num2 = 0;
74281+ unsigned long ulong1 = 0, ulong2 = 0;
74282+ struct dentry *dentry = NULL;
74283+ struct vfsmount *mnt = NULL;
74284+ struct file *file = NULL;
74285+ struct task_struct *task = NULL;
74286+ struct vm_area_struct *vma = NULL;
74287+ const struct cred *cred, *pcred;
74288+ va_list ap;
74289+
74290+ BEGIN_LOCKS(audit);
74291+ logtype = gr_log_start(audit);
74292+ if (logtype == FLOODING) {
74293+ END_LOCKS(audit);
74294+ return;
74295+ }
74296+ va_start(ap, argtypes);
74297+ switch (argtypes) {
74298+ case GR_TTYSNIFF:
74299+ task = va_arg(ap, struct task_struct *);
74300+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
74301+ break;
74302+ case GR_SYSCTL_HIDDEN:
74303+ str1 = va_arg(ap, char *);
74304+ gr_log_middle_varargs(audit, msg, result, str1);
74305+ break;
74306+ case GR_RBAC:
74307+ dentry = va_arg(ap, struct dentry *);
74308+ mnt = va_arg(ap, struct vfsmount *);
74309+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
74310+ break;
74311+ case GR_RBAC_STR:
74312+ dentry = va_arg(ap, struct dentry *);
74313+ mnt = va_arg(ap, struct vfsmount *);
74314+ str1 = va_arg(ap, char *);
74315+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
74316+ break;
74317+ case GR_STR_RBAC:
74318+ str1 = va_arg(ap, char *);
74319+ dentry = va_arg(ap, struct dentry *);
74320+ mnt = va_arg(ap, struct vfsmount *);
74321+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
74322+ break;
74323+ case GR_RBAC_MODE2:
74324+ dentry = va_arg(ap, struct dentry *);
74325+ mnt = va_arg(ap, struct vfsmount *);
74326+ str1 = va_arg(ap, char *);
74327+ str2 = va_arg(ap, char *);
74328+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
74329+ break;
74330+ case GR_RBAC_MODE3:
74331+ dentry = va_arg(ap, struct dentry *);
74332+ mnt = va_arg(ap, struct vfsmount *);
74333+ str1 = va_arg(ap, char *);
74334+ str2 = va_arg(ap, char *);
74335+ str3 = va_arg(ap, char *);
74336+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
74337+ break;
74338+ case GR_FILENAME:
74339+ dentry = va_arg(ap, struct dentry *);
74340+ mnt = va_arg(ap, struct vfsmount *);
74341+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
74342+ break;
74343+ case GR_STR_FILENAME:
74344+ str1 = va_arg(ap, char *);
74345+ dentry = va_arg(ap, struct dentry *);
74346+ mnt = va_arg(ap, struct vfsmount *);
74347+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
74348+ break;
74349+ case GR_FILENAME_STR:
74350+ dentry = va_arg(ap, struct dentry *);
74351+ mnt = va_arg(ap, struct vfsmount *);
74352+ str1 = va_arg(ap, char *);
74353+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
74354+ break;
74355+ case GR_FILENAME_TWO_INT:
74356+ dentry = va_arg(ap, struct dentry *);
74357+ mnt = va_arg(ap, struct vfsmount *);
74358+ num1 = va_arg(ap, int);
74359+ num2 = va_arg(ap, int);
74360+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
74361+ break;
74362+ case GR_FILENAME_TWO_INT_STR:
74363+ dentry = va_arg(ap, struct dentry *);
74364+ mnt = va_arg(ap, struct vfsmount *);
74365+ num1 = va_arg(ap, int);
74366+ num2 = va_arg(ap, int);
74367+ str1 = va_arg(ap, char *);
74368+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
74369+ break;
74370+ case GR_TEXTREL:
74371+ file = va_arg(ap, struct file *);
74372+ ulong1 = va_arg(ap, unsigned long);
74373+ ulong2 = va_arg(ap, unsigned long);
74374+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
74375+ break;
74376+ case GR_PTRACE:
74377+ task = va_arg(ap, struct task_struct *);
74378+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
74379+ break;
74380+ case GR_RESOURCE:
74381+ task = va_arg(ap, struct task_struct *);
74382+ cred = __task_cred(task);
74383+ pcred = __task_cred(task->real_parent);
74384+ ulong1 = va_arg(ap, unsigned long);
74385+ str1 = va_arg(ap, char *);
74386+ ulong2 = va_arg(ap, unsigned long);
74387+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74388+ break;
74389+ case GR_CAP:
74390+ task = va_arg(ap, struct task_struct *);
74391+ cred = __task_cred(task);
74392+ pcred = __task_cred(task->real_parent);
74393+ str1 = va_arg(ap, char *);
74394+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74395+ break;
74396+ case GR_SIG:
74397+ str1 = va_arg(ap, char *);
74398+ voidptr = va_arg(ap, void *);
74399+ gr_log_middle_varargs(audit, msg, str1, voidptr);
74400+ break;
74401+ case GR_SIG2:
74402+ task = va_arg(ap, struct task_struct *);
74403+ cred = __task_cred(task);
74404+ pcred = __task_cred(task->real_parent);
74405+ num1 = va_arg(ap, int);
74406+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74407+ break;
74408+ case GR_CRASH1:
74409+ task = va_arg(ap, struct task_struct *);
74410+ cred = __task_cred(task);
74411+ pcred = __task_cred(task->real_parent);
74412+ ulong1 = va_arg(ap, unsigned long);
74413+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
74414+ break;
74415+ case GR_CRASH2:
74416+ task = va_arg(ap, struct task_struct *);
74417+ cred = __task_cred(task);
74418+ pcred = __task_cred(task->real_parent);
74419+ ulong1 = va_arg(ap, unsigned long);
74420+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
74421+ break;
74422+ case GR_RWXMAP:
74423+ file = va_arg(ap, struct file *);
74424+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
74425+ break;
74426+ case GR_RWXMAPVMA:
74427+ vma = va_arg(ap, struct vm_area_struct *);
74428+ if (vma->vm_file)
74429+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
74430+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
74431+ str1 = "<stack>";
74432+ else if (vma->vm_start <= current->mm->brk &&
74433+ vma->vm_end >= current->mm->start_brk)
74434+ str1 = "<heap>";
74435+ else
74436+ str1 = "<anonymous mapping>";
74437+ gr_log_middle_varargs(audit, msg, str1);
74438+ break;
74439+ case GR_PSACCT:
74440+ {
74441+ unsigned int wday, cday;
74442+ __u8 whr, chr;
74443+ __u8 wmin, cmin;
74444+ __u8 wsec, csec;
74445+ char cur_tty[64] = { 0 };
74446+ char parent_tty[64] = { 0 };
74447+
74448+ task = va_arg(ap, struct task_struct *);
74449+ wday = va_arg(ap, unsigned int);
74450+ cday = va_arg(ap, unsigned int);
74451+ whr = va_arg(ap, int);
74452+ chr = va_arg(ap, int);
74453+ wmin = va_arg(ap, int);
74454+ cmin = va_arg(ap, int);
74455+ wsec = va_arg(ap, int);
74456+ csec = va_arg(ap, int);
74457+ ulong1 = va_arg(ap, unsigned long);
74458+ cred = __task_cred(task);
74459+ pcred = __task_cred(task->real_parent);
74460+
74461+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
74462+ }
74463+ break;
74464+ default:
74465+ gr_log_middle(audit, msg, ap);
74466+ }
74467+ va_end(ap);
74468+ // these don't need DEFAULTSECARGS printed on the end
74469+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
74470+ gr_log_end(audit, 0);
74471+ else
74472+ gr_log_end(audit, 1);
74473+ END_LOCKS(audit);
74474+}
74475diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
74476new file mode 100644
74477index 0000000..0e39d8c
74478--- /dev/null
74479+++ b/grsecurity/grsec_mem.c
74480@@ -0,0 +1,48 @@
74481+#include <linux/kernel.h>
74482+#include <linux/sched.h>
74483+#include <linux/mm.h>
74484+#include <linux/mman.h>
74485+#include <linux/module.h>
74486+#include <linux/grinternal.h>
74487+
74488+void gr_handle_msr_write(void)
74489+{
74490+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
74491+ return;
74492+}
74493+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
74494+
74495+void
74496+gr_handle_ioperm(void)
74497+{
74498+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
74499+ return;
74500+}
74501+
74502+void
74503+gr_handle_iopl(void)
74504+{
74505+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
74506+ return;
74507+}
74508+
74509+void
74510+gr_handle_mem_readwrite(u64 from, u64 to)
74511+{
74512+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
74513+ return;
74514+}
74515+
74516+void
74517+gr_handle_vm86(void)
74518+{
74519+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
74520+ return;
74521+}
74522+
74523+void
74524+gr_log_badprocpid(const char *entry)
74525+{
74526+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
74527+ return;
74528+}
74529diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
74530new file mode 100644
74531index 0000000..cd9e124
74532--- /dev/null
74533+++ b/grsecurity/grsec_mount.c
74534@@ -0,0 +1,65 @@
74535+#include <linux/kernel.h>
74536+#include <linux/sched.h>
74537+#include <linux/mount.h>
74538+#include <linux/major.h>
74539+#include <linux/grsecurity.h>
74540+#include <linux/grinternal.h>
74541+
74542+void
74543+gr_log_remount(const char *devname, const int retval)
74544+{
74545+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74546+ if (grsec_enable_mount && (retval >= 0))
74547+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
74548+#endif
74549+ return;
74550+}
74551+
74552+void
74553+gr_log_unmount(const char *devname, const int retval)
74554+{
74555+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74556+ if (grsec_enable_mount && (retval >= 0))
74557+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
74558+#endif
74559+ return;
74560+}
74561+
74562+void
74563+gr_log_mount(const char *from, const char *to, const int retval)
74564+{
74565+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
74566+ if (grsec_enable_mount && (retval >= 0))
74567+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
74568+#endif
74569+ return;
74570+}
74571+
74572+int
74573+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
74574+{
74575+#ifdef CONFIG_GRKERNSEC_ROFS
74576+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
74577+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
74578+ return -EPERM;
74579+ } else
74580+ return 0;
74581+#endif
74582+ return 0;
74583+}
74584+
74585+int
74586+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
74587+{
74588+#ifdef CONFIG_GRKERNSEC_ROFS
74589+ struct inode *inode = dentry->d_inode;
74590+
74591+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
74592+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
74593+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
74594+ return -EPERM;
74595+ } else
74596+ return 0;
74597+#endif
74598+ return 0;
74599+}
74600diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
74601new file mode 100644
74602index 0000000..6ee9d50
74603--- /dev/null
74604+++ b/grsecurity/grsec_pax.c
74605@@ -0,0 +1,45 @@
74606+#include <linux/kernel.h>
74607+#include <linux/sched.h>
74608+#include <linux/mm.h>
74609+#include <linux/file.h>
74610+#include <linux/grinternal.h>
74611+#include <linux/grsecurity.h>
74612+
74613+void
74614+gr_log_textrel(struct vm_area_struct * vma)
74615+{
74616+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74617+ if (grsec_enable_log_rwxmaps)
74618+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
74619+#endif
74620+ return;
74621+}
74622+
74623+void gr_log_ptgnustack(struct file *file)
74624+{
74625+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74626+ if (grsec_enable_log_rwxmaps)
74627+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
74628+#endif
74629+ return;
74630+}
74631+
74632+void
74633+gr_log_rwxmmap(struct file *file)
74634+{
74635+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74636+ if (grsec_enable_log_rwxmaps)
74637+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
74638+#endif
74639+ return;
74640+}
74641+
74642+void
74643+gr_log_rwxmprotect(struct vm_area_struct *vma)
74644+{
74645+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
74646+ if (grsec_enable_log_rwxmaps)
74647+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
74648+#endif
74649+ return;
74650+}
74651diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
74652new file mode 100644
74653index 0000000..f7f29aa
74654--- /dev/null
74655+++ b/grsecurity/grsec_ptrace.c
74656@@ -0,0 +1,30 @@
74657+#include <linux/kernel.h>
74658+#include <linux/sched.h>
74659+#include <linux/grinternal.h>
74660+#include <linux/security.h>
74661+
74662+void
74663+gr_audit_ptrace(struct task_struct *task)
74664+{
74665+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
74666+ if (grsec_enable_audit_ptrace)
74667+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
74668+#endif
74669+ return;
74670+}
74671+
74672+int
74673+gr_ptrace_readexec(struct file *file, int unsafe_flags)
74674+{
74675+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
74676+ const struct dentry *dentry = file->f_path.dentry;
74677+ const struct vfsmount *mnt = file->f_path.mnt;
74678+
74679+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
74680+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
74681+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
74682+ return -EACCES;
74683+ }
74684+#endif
74685+ return 0;
74686+}
74687diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
74688new file mode 100644
74689index 0000000..3860c7e
74690--- /dev/null
74691+++ b/grsecurity/grsec_sig.c
74692@@ -0,0 +1,236 @@
74693+#include <linux/kernel.h>
74694+#include <linux/sched.h>
74695+#include <linux/fs.h>
74696+#include <linux/delay.h>
74697+#include <linux/grsecurity.h>
74698+#include <linux/grinternal.h>
74699+#include <linux/hardirq.h>
74700+
74701+char *signames[] = {
74702+ [SIGSEGV] = "Segmentation fault",
74703+ [SIGILL] = "Illegal instruction",
74704+ [SIGABRT] = "Abort",
74705+ [SIGBUS] = "Invalid alignment/Bus error"
74706+};
74707+
74708+void
74709+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
74710+{
74711+#ifdef CONFIG_GRKERNSEC_SIGNAL
74712+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
74713+ (sig == SIGABRT) || (sig == SIGBUS))) {
74714+ if (task_pid_nr(t) == task_pid_nr(current)) {
74715+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
74716+ } else {
74717+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
74718+ }
74719+ }
74720+#endif
74721+ return;
74722+}
74723+
74724+int
74725+gr_handle_signal(const struct task_struct *p, const int sig)
74726+{
74727+#ifdef CONFIG_GRKERNSEC
74728+ /* ignore the 0 signal for protected task checks */
74729+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
74730+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
74731+ return -EPERM;
74732+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
74733+ return -EPERM;
74734+ }
74735+#endif
74736+ return 0;
74737+}
74738+
74739+#ifdef CONFIG_GRKERNSEC
74740+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
74741+
74742+int gr_fake_force_sig(int sig, struct task_struct *t)
74743+{
74744+ unsigned long int flags;
74745+ int ret, blocked, ignored;
74746+ struct k_sigaction *action;
74747+
74748+ spin_lock_irqsave(&t->sighand->siglock, flags);
74749+ action = &t->sighand->action[sig-1];
74750+ ignored = action->sa.sa_handler == SIG_IGN;
74751+ blocked = sigismember(&t->blocked, sig);
74752+ if (blocked || ignored) {
74753+ action->sa.sa_handler = SIG_DFL;
74754+ if (blocked) {
74755+ sigdelset(&t->blocked, sig);
74756+ recalc_sigpending_and_wake(t);
74757+ }
74758+ }
74759+ if (action->sa.sa_handler == SIG_DFL)
74760+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
74761+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
74762+
74763+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
74764+
74765+ return ret;
74766+}
74767+#endif
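
gr_fake_force_sig() guarantees delivery by resetting an ignored handler to SIG_DFL, unblocking the signal, and stripping SIGNAL_UNKILLABLE before sending. A rough userspace analog using the POSIX signal API against the current process:

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        sigset_t set;

        signal(SIGTERM, SIG_IGN);            /* target "ignores" the signal */
        sigemptyset(&set);
        sigaddset(&set, SIGTERM);
        sigprocmask(SIG_BLOCK, &set, NULL);  /* ...and blocks it */

        signal(SIGTERM, SIG_DFL);            /* force the default action back */
        sigprocmask(SIG_UNBLOCK, &set, NULL);
        puts("raising SIGTERM; default action terminates the process");
        raise(SIGTERM);
        return 0;                            /* not reached */
    }
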
74768+
74769+#define GR_USER_BAN_TIME (15 * 60)
74770+#define GR_DAEMON_BRUTE_TIME (30 * 60)
74771+
74772+void gr_handle_brute_attach(int dumpable)
74773+{
74774+#ifdef CONFIG_GRKERNSEC_BRUTE
74775+ struct task_struct *p = current;
74776+ kuid_t uid = GLOBAL_ROOT_UID;
74777+ int daemon = 0;
74778+
74779+ if (!grsec_enable_brute)
74780+ return;
74781+
74782+ rcu_read_lock();
74783+ read_lock(&tasklist_lock);
74784+ read_lock(&grsec_exec_file_lock);
74785+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
74786+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
74787+ p->real_parent->brute = 1;
74788+ daemon = 1;
74789+ } else {
74790+ const struct cred *cred = __task_cred(p), *cred2;
74791+ struct task_struct *tsk, *tsk2;
74792+
74793+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
74794+ struct user_struct *user;
74795+
74796+ uid = cred->uid;
74797+
74798+ /* the reference taken here is put (via free_uid) at the next exec once the ban expires */
74799+ user = find_user(uid);
74800+ if (user == NULL)
74801+ goto unlock;
74802+ user->suid_banned = 1;
74803+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
74804+ if (user->suid_ban_expires == ~0UL)
74805+ user->suid_ban_expires--;
74806+
74807+ /* only kill other threads of the same binary, from the same user */
74808+ do_each_thread(tsk2, tsk) {
74809+ cred2 = __task_cred(tsk);
74810+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
74811+ gr_fake_force_sig(SIGKILL, tsk);
74812+ } while_each_thread(tsk2, tsk);
74813+ }
74814+ }
74815+unlock:
74816+ read_unlock(&grsec_exec_file_lock);
74817+ read_unlock(&tasklist_lock);
74818+ rcu_read_unlock();
74819+
74820+ if (gr_is_global_nonroot(uid))
74821+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
74822+ else if (daemon)
74823+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
74824+
74825+#endif
74826+ return;
74827+}
74828+
74829+void gr_handle_brute_check(void)
74830+{
74831+#ifdef CONFIG_GRKERNSEC_BRUTE
74832+ struct task_struct *p = current;
74833+
74834+ if (unlikely(p->brute)) {
74835+ if (!grsec_enable_brute)
74836+ p->brute = 0;
74837+ else if (time_before(get_seconds(), p->brute_expires))
74838+ msleep(30 * 1000);
74839+ }
74840+#endif
74841+ return;
74842+}
74843+
74844+void gr_handle_kernel_exploit(void)
74845+{
74846+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74847+ const struct cred *cred;
74848+ struct task_struct *tsk, *tsk2;
74849+ struct user_struct *user;
74850+ kuid_t uid;
74851+
74852+ if (in_irq() || in_serving_softirq() || in_nmi())
74853+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
74854+
74855+ uid = current_uid();
74856+
74857+ if (gr_is_global_root(uid))
74858+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
74859+ else {
74860+ /* kill all the processes of this user, hold a reference
74861+ to their creds struct, and prevent them from creating
74862+ another process until system reset
74863+ */
74864+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
74865+ GR_GLOBAL_UID(uid));
74866+ /* we intentionally leak this ref */
74867+ user = get_uid(current->cred->user);
74868+ if (user)
74869+ user->kernel_banned = 1;
74870+
74871+ /* kill all processes of this user */
74872+ read_lock(&tasklist_lock);
74873+ do_each_thread(tsk2, tsk) {
74874+ cred = __task_cred(tsk);
74875+ if (uid_eq(cred->uid, uid))
74876+ gr_fake_force_sig(SIGKILL, tsk);
74877+ } while_each_thread(tsk2, tsk);
74878+ read_unlock(&tasklist_lock);
74879+ }
74880+#endif
74881+}
74882+
74883+#ifdef CONFIG_GRKERNSEC_BRUTE
74884+static bool suid_ban_expired(struct user_struct *user)
74885+{
74886+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
74887+ user->suid_banned = 0;
74888+ user->suid_ban_expires = 0;
74889+ free_uid(user);
74890+ return true;
74891+ }
74892+
74893+ return false;
74894+}
74895+#endif
74896+
74897+int gr_process_kernel_exec_ban(void)
74898+{
74899+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74900+ if (unlikely(current->cred->user->kernel_banned))
74901+ return -EPERM;
74902+#endif
74903+ return 0;
74904+}
74905+
74906+int gr_process_kernel_setuid_ban(struct user_struct *user)
74907+{
74908+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
74909+ if (unlikely(user->kernel_banned))
74910+ gr_fake_force_sig(SIGKILL, current);
74911+#endif
74912+ return 0;
74913+}
74914+
74915+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
74916+{
74917+#ifdef CONFIG_GRKERNSEC_BRUTE
74918+ struct user_struct *user = current->cred->user;
74919+ if (unlikely(user->suid_banned)) {
74920+ if (suid_ban_expired(user))
74921+ return 0;
74922+ /* disallow execution of suid binaries only */
74923+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
74924+ return -EPERM;
74925+ }
74926+#endif
74927+ return 0;
74928+}
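A minimal user-space sketch of the suid-ban arithmetic above (names hypothetical; 15-minute ban assumed). ~0UL is reserved as a "never expires" sentinel, which is why gr_handle_brute_attach() decrements an expiry that lands exactly on ~0UL:

#include <stdio.h>
#include <time.h>

#define BAN_SECONDS (15 * 60)

struct fake_user {
	int banned;
	unsigned long ban_expires;	/* ~0UL would mean "permanent" */
};

static void ban(struct fake_user *u, unsigned long now)
{
	u->banned = 1;
	u->ban_expires = now + BAN_SECONDS;
	if (u->ban_expires == ~0UL)	/* keep ~0UL free as the sentinel */
		u->ban_expires--;
}

static int ban_expired(const struct fake_user *u, unsigned long now)
{
	return u->ban_expires != ~0UL && now >= u->ban_expires;
}

int main(void)
{
	struct fake_user u = { 0, 0 };
	unsigned long now = (unsigned long)time(NULL);

	ban(&u, now);
	printf("expired now? %d  in 16 min? %d\n",
	       ban_expired(&u, now), ban_expired(&u, now + 16 * 60));
	return 0;
}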
74929diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
74930new file mode 100644
74931index 0000000..c0aef3a
74932--- /dev/null
74933+++ b/grsecurity/grsec_sock.c
74934@@ -0,0 +1,244 @@
74935+#include <linux/kernel.h>
74936+#include <linux/module.h>
74937+#include <linux/sched.h>
74938+#include <linux/file.h>
74939+#include <linux/net.h>
74940+#include <linux/in.h>
74941+#include <linux/ip.h>
74942+#include <net/sock.h>
74943+#include <net/inet_sock.h>
74944+#include <linux/grsecurity.h>
74945+#include <linux/grinternal.h>
74946+#include <linux/gracl.h>
74947+
74948+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
74949+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
74950+
74951+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
74952+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
74953+
74954+#ifdef CONFIG_UNIX_MODULE
74955+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
74956+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
74957+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
74958+EXPORT_SYMBOL_GPL(gr_handle_create);
74959+#endif
74960+
74961+#ifdef CONFIG_GRKERNSEC
74962+#define gr_conn_table_size 32749
74963+struct conn_table_entry {
74964+ struct conn_table_entry *next;
74965+ struct signal_struct *sig;
74966+};
74967+
74968+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
74969+DEFINE_SPINLOCK(gr_conn_table_lock);
74970+
74971+extern const char * gr_socktype_to_name(unsigned char type);
74972+extern const char * gr_proto_to_name(unsigned char proto);
74973+extern const char * gr_sockfamily_to_name(unsigned char family);
74974+
74975+static __inline__ int
74976+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
74977+{
74978+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
74979+}
74980+
74981+static __inline__ int
74982+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
74983+ __u16 sport, __u16 dport)
74984+{
74985+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
74986+ sig->gr_sport == sport && sig->gr_dport == dport))
74987+ return 1;
74988+ else
74989+ return 0;
74990+}
74991+
74992+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
74993+{
74994+ struct conn_table_entry **match;
74995+ unsigned int index;
74996+
74997+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
74998+ sig->gr_sport, sig->gr_dport,
74999+ gr_conn_table_size);
75000+
75001+ newent->sig = sig;
75002+
75003+ match = &gr_conn_table[index];
75004+ newent->next = *match;
75005+ *match = newent;
75006+
75007+ return;
75008+}
75009+
75010+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
75011+{
75012+ struct conn_table_entry *match, *last = NULL;
75013+ unsigned int index;
75014+
75015+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
75016+ sig->gr_sport, sig->gr_dport,
75017+ gr_conn_table_size);
75018+
75019+ match = gr_conn_table[index];
75020+ while (match && !conn_match(match->sig,
75021+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
75022+ sig->gr_dport)) {
75023+ last = match;
75024+ match = match->next;
75025+ }
75026+
75027+ if (match) {
75028+ if (last)
75029+ last->next = match->next;
75030+ else
75031+			gr_conn_table[index] = match->next;	/* keep the rest of the chain */
75032+ kfree(match);
75033+ }
75034+
75035+ return;
75036+}
75037+
75038+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
75039+ __u16 sport, __u16 dport)
75040+{
75041+ struct conn_table_entry *match;
75042+ unsigned int index;
75043+
75044+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
75045+
75046+ match = gr_conn_table[index];
75047+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
75048+ match = match->next;
75049+
75050+ if (match)
75051+ return match->sig;
75052+ else
75053+ return NULL;
75054+}
75055+
75056+#endif
75057+
75058+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
75059+{
75060+#ifdef CONFIG_GRKERNSEC
75061+ struct signal_struct *sig = task->signal;
75062+ struct conn_table_entry *newent;
75063+
75064+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
75065+ if (newent == NULL)
75066+ return;
75067+ /* no bh lock needed since we are called with bh disabled */
75068+ spin_lock(&gr_conn_table_lock);
75069+ gr_del_task_from_ip_table_nolock(sig);
75070+ sig->gr_saddr = inet->inet_rcv_saddr;
75071+ sig->gr_daddr = inet->inet_daddr;
75072+ sig->gr_sport = inet->inet_sport;
75073+ sig->gr_dport = inet->inet_dport;
75074+ gr_add_to_task_ip_table_nolock(sig, newent);
75075+ spin_unlock(&gr_conn_table_lock);
75076+#endif
75077+ return;
75078+}
75079+
75080+void gr_del_task_from_ip_table(struct task_struct *task)
75081+{
75082+#ifdef CONFIG_GRKERNSEC
75083+ spin_lock_bh(&gr_conn_table_lock);
75084+ gr_del_task_from_ip_table_nolock(task->signal);
75085+ spin_unlock_bh(&gr_conn_table_lock);
75086+#endif
75087+ return;
75088+}
75089+
75090+void
75091+gr_attach_curr_ip(const struct sock *sk)
75092+{
75093+#ifdef CONFIG_GRKERNSEC
75094+ struct signal_struct *p, *set;
75095+ const struct inet_sock *inet = inet_sk(sk);
75096+
75097+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
75098+ return;
75099+
75100+ set = current->signal;
75101+
75102+ spin_lock_bh(&gr_conn_table_lock);
75103+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
75104+ inet->inet_dport, inet->inet_sport);
75105+ if (unlikely(p != NULL)) {
75106+ set->curr_ip = p->curr_ip;
75107+ set->used_accept = 1;
75108+ gr_del_task_from_ip_table_nolock(p);
75109+ spin_unlock_bh(&gr_conn_table_lock);
75110+ return;
75111+ }
75112+ spin_unlock_bh(&gr_conn_table_lock);
75113+
75114+ set->curr_ip = inet->inet_daddr;
75115+ set->used_accept = 1;
75116+#endif
75117+ return;
75118+}
75119+
75120+int
75121+gr_handle_sock_all(const int family, const int type, const int protocol)
75122+{
75123+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
75124+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
75125+ (family != AF_UNIX)) {
75126+ if (family == AF_INET)
75127+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
75128+ else
75129+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
75130+ return -EACCES;
75131+ }
75132+#endif
75133+ return 0;
75134+}
75135+
75136+int
75137+gr_handle_sock_server(const struct sockaddr *sck)
75138+{
75139+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
75140+ if (grsec_enable_socket_server &&
75141+ in_group_p(grsec_socket_server_gid) &&
75142+ sck && (sck->sa_family != AF_UNIX) &&
75143+ (sck->sa_family != AF_LOCAL)) {
75144+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
75145+ return -EACCES;
75146+ }
75147+#endif
75148+ return 0;
75149+}
75150+
75151+int
75152+gr_handle_sock_server_other(const struct sock *sck)
75153+{
75154+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
75155+ if (grsec_enable_socket_server &&
75156+ in_group_p(grsec_socket_server_gid) &&
75157+ sck && (sck->sk_family != AF_UNIX) &&
75158+ (sck->sk_family != AF_LOCAL)) {
75159+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
75160+ return -EACCES;
75161+ }
75162+#endif
75163+ return 0;
75164+}
75165+
75166+int
75167+gr_handle_sock_client(const struct sockaddr *sck)
75168+{
75169+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
75170+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
75171+ sck && (sck->sa_family != AF_UNIX) &&
75172+ (sck->sa_family != AF_LOCAL)) {
75173+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
75174+ return -EACCES;
75175+ }
75176+#endif
75177+ return 0;
75178+}
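The table above is a fixed-size, separately chained hash keyed on (saddr, daddr, sport, dport); 32749 is prime, which keeps the modulo distribution even. A stand-alone model of the same hash mix:

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE 32749	/* prime, as in gr_conn_table_size */

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	/* same mixing as the kernel helper above */
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

int main(void)
{
	/* two flows differing only in source port land in different slots */
	printf("%u %u\n",
	       conn_hash(0x0a000001, 0x0a000002, 1234, 80),
	       conn_hash(0x0a000001, 0x0a000002, 1235, 80));
	return 0;
}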
75179diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
75180new file mode 100644
75181index 0000000..8159888
75182--- /dev/null
75183+++ b/grsecurity/grsec_sysctl.c
75184@@ -0,0 +1,479 @@
75185+#include <linux/kernel.h>
75186+#include <linux/sched.h>
75187+#include <linux/sysctl.h>
75188+#include <linux/grsecurity.h>
75189+#include <linux/grinternal.h>
75190+
75191+int
75192+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
75193+{
75194+#ifdef CONFIG_GRKERNSEC_SYSCTL
75195+ if (dirname == NULL || name == NULL)
75196+ return 0;
75197+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
75198+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
75199+ return -EACCES;
75200+ }
75201+#endif
75202+ return 0;
75203+}
75204+
75205+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
75206+static int __maybe_unused __read_only one = 1;
75207+#endif
75208+
75209+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
75210+ defined(CONFIG_GRKERNSEC_DENYUSB)
75211+struct ctl_table grsecurity_table[] = {
75212+#ifdef CONFIG_GRKERNSEC_SYSCTL
75213+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
75214+#ifdef CONFIG_GRKERNSEC_IO
75215+ {
75216+ .procname = "disable_priv_io",
75217+ .data = &grsec_disable_privio,
75218+ .maxlen = sizeof(int),
75219+ .mode = 0600,
75220+ .proc_handler = &proc_dointvec,
75221+ },
75222+#endif
75223+#endif
75224+#ifdef CONFIG_GRKERNSEC_LINK
75225+ {
75226+ .procname = "linking_restrictions",
75227+ .data = &grsec_enable_link,
75228+ .maxlen = sizeof(int),
75229+ .mode = 0600,
75230+ .proc_handler = &proc_dointvec,
75231+ },
75232+#endif
75233+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
75234+ {
75235+ .procname = "enforce_symlinksifowner",
75236+ .data = &grsec_enable_symlinkown,
75237+ .maxlen = sizeof(int),
75238+ .mode = 0600,
75239+ .proc_handler = &proc_dointvec,
75240+ },
75241+ {
75242+ .procname = "symlinkown_gid",
75243+ .data = &grsec_symlinkown_gid,
75244+ .maxlen = sizeof(int),
75245+ .mode = 0600,
75246+ .proc_handler = &proc_dointvec,
75247+ },
75248+#endif
75249+#ifdef CONFIG_GRKERNSEC_BRUTE
75250+ {
75251+ .procname = "deter_bruteforce",
75252+ .data = &grsec_enable_brute,
75253+ .maxlen = sizeof(int),
75254+ .mode = 0600,
75255+ .proc_handler = &proc_dointvec,
75256+ },
75257+#endif
75258+#ifdef CONFIG_GRKERNSEC_FIFO
75259+ {
75260+ .procname = "fifo_restrictions",
75261+ .data = &grsec_enable_fifo,
75262+ .maxlen = sizeof(int),
75263+ .mode = 0600,
75264+ .proc_handler = &proc_dointvec,
75265+ },
75266+#endif
75267+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
75268+ {
75269+ .procname = "ptrace_readexec",
75270+ .data = &grsec_enable_ptrace_readexec,
75271+ .maxlen = sizeof(int),
75272+ .mode = 0600,
75273+ .proc_handler = &proc_dointvec,
75274+ },
75275+#endif
75276+#ifdef CONFIG_GRKERNSEC_SETXID
75277+ {
75278+ .procname = "consistent_setxid",
75279+ .data = &grsec_enable_setxid,
75280+ .maxlen = sizeof(int),
75281+ .mode = 0600,
75282+ .proc_handler = &proc_dointvec,
75283+ },
75284+#endif
75285+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75286+ {
75287+ .procname = "ip_blackhole",
75288+ .data = &grsec_enable_blackhole,
75289+ .maxlen = sizeof(int),
75290+ .mode = 0600,
75291+ .proc_handler = &proc_dointvec,
75292+ },
75293+ {
75294+ .procname = "lastack_retries",
75295+ .data = &grsec_lastack_retries,
75296+ .maxlen = sizeof(int),
75297+ .mode = 0600,
75298+ .proc_handler = &proc_dointvec,
75299+ },
75300+#endif
75301+#ifdef CONFIG_GRKERNSEC_EXECLOG
75302+ {
75303+ .procname = "exec_logging",
75304+ .data = &grsec_enable_execlog,
75305+ .maxlen = sizeof(int),
75306+ .mode = 0600,
75307+ .proc_handler = &proc_dointvec,
75308+ },
75309+#endif
75310+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
75311+ {
75312+ .procname = "rwxmap_logging",
75313+ .data = &grsec_enable_log_rwxmaps,
75314+ .maxlen = sizeof(int),
75315+ .mode = 0600,
75316+ .proc_handler = &proc_dointvec,
75317+ },
75318+#endif
75319+#ifdef CONFIG_GRKERNSEC_SIGNAL
75320+ {
75321+ .procname = "signal_logging",
75322+ .data = &grsec_enable_signal,
75323+ .maxlen = sizeof(int),
75324+ .mode = 0600,
75325+ .proc_handler = &proc_dointvec,
75326+ },
75327+#endif
75328+#ifdef CONFIG_GRKERNSEC_FORKFAIL
75329+ {
75330+ .procname = "forkfail_logging",
75331+ .data = &grsec_enable_forkfail,
75332+ .maxlen = sizeof(int),
75333+ .mode = 0600,
75334+ .proc_handler = &proc_dointvec,
75335+ },
75336+#endif
75337+#ifdef CONFIG_GRKERNSEC_TIME
75338+ {
75339+ .procname = "timechange_logging",
75340+ .data = &grsec_enable_time,
75341+ .maxlen = sizeof(int),
75342+ .mode = 0600,
75343+ .proc_handler = &proc_dointvec,
75344+ },
75345+#endif
75346+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
75347+ {
75348+ .procname = "chroot_deny_shmat",
75349+ .data = &grsec_enable_chroot_shmat,
75350+ .maxlen = sizeof(int),
75351+ .mode = 0600,
75352+ .proc_handler = &proc_dointvec,
75353+ },
75354+#endif
75355+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
75356+ {
75357+ .procname = "chroot_deny_unix",
75358+ .data = &grsec_enable_chroot_unix,
75359+ .maxlen = sizeof(int),
75360+ .mode = 0600,
75361+ .proc_handler = &proc_dointvec,
75362+ },
75363+#endif
75364+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
75365+ {
75366+ .procname = "chroot_deny_mount",
75367+ .data = &grsec_enable_chroot_mount,
75368+ .maxlen = sizeof(int),
75369+ .mode = 0600,
75370+ .proc_handler = &proc_dointvec,
75371+ },
75372+#endif
75373+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
75374+ {
75375+ .procname = "chroot_deny_fchdir",
75376+ .data = &grsec_enable_chroot_fchdir,
75377+ .maxlen = sizeof(int),
75378+ .mode = 0600,
75379+ .proc_handler = &proc_dointvec,
75380+ },
75381+#endif
75382+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
75383+ {
75384+ .procname = "chroot_deny_chroot",
75385+ .data = &grsec_enable_chroot_double,
75386+ .maxlen = sizeof(int),
75387+ .mode = 0600,
75388+ .proc_handler = &proc_dointvec,
75389+ },
75390+#endif
75391+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
75392+ {
75393+ .procname = "chroot_deny_pivot",
75394+ .data = &grsec_enable_chroot_pivot,
75395+ .maxlen = sizeof(int),
75396+ .mode = 0600,
75397+ .proc_handler = &proc_dointvec,
75398+ },
75399+#endif
75400+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
75401+ {
75402+ .procname = "chroot_enforce_chdir",
75403+ .data = &grsec_enable_chroot_chdir,
75404+ .maxlen = sizeof(int),
75405+ .mode = 0600,
75406+ .proc_handler = &proc_dointvec,
75407+ },
75408+#endif
75409+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
75410+ {
75411+ .procname = "chroot_deny_chmod",
75412+ .data = &grsec_enable_chroot_chmod,
75413+ .maxlen = sizeof(int),
75414+ .mode = 0600,
75415+ .proc_handler = &proc_dointvec,
75416+ },
75417+#endif
75418+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
75419+ {
75420+ .procname = "chroot_deny_mknod",
75421+ .data = &grsec_enable_chroot_mknod,
75422+ .maxlen = sizeof(int),
75423+ .mode = 0600,
75424+ .proc_handler = &proc_dointvec,
75425+ },
75426+#endif
75427+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
75428+ {
75429+ .procname = "chroot_restrict_nice",
75430+ .data = &grsec_enable_chroot_nice,
75431+ .maxlen = sizeof(int),
75432+ .mode = 0600,
75433+ .proc_handler = &proc_dointvec,
75434+ },
75435+#endif
75436+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
75437+ {
75438+ .procname = "chroot_execlog",
75439+ .data = &grsec_enable_chroot_execlog,
75440+ .maxlen = sizeof(int),
75441+ .mode = 0600,
75442+ .proc_handler = &proc_dointvec,
75443+ },
75444+#endif
75445+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
75446+ {
75447+ .procname = "chroot_caps",
75448+ .data = &grsec_enable_chroot_caps,
75449+ .maxlen = sizeof(int),
75450+ .mode = 0600,
75451+ .proc_handler = &proc_dointvec,
75452+ },
75453+#endif
75454+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
75455+ {
75456+ .procname = "chroot_deny_sysctl",
75457+ .data = &grsec_enable_chroot_sysctl,
75458+ .maxlen = sizeof(int),
75459+ .mode = 0600,
75460+ .proc_handler = &proc_dointvec,
75461+ },
75462+#endif
75463+#ifdef CONFIG_GRKERNSEC_TPE
75464+ {
75465+ .procname = "tpe",
75466+ .data = &grsec_enable_tpe,
75467+ .maxlen = sizeof(int),
75468+ .mode = 0600,
75469+ .proc_handler = &proc_dointvec,
75470+ },
75471+ {
75472+ .procname = "tpe_gid",
75473+ .data = &grsec_tpe_gid,
75474+ .maxlen = sizeof(int),
75475+ .mode = 0600,
75476+ .proc_handler = &proc_dointvec,
75477+ },
75478+#endif
75479+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
75480+ {
75481+ .procname = "tpe_invert",
75482+ .data = &grsec_enable_tpe_invert,
75483+ .maxlen = sizeof(int),
75484+ .mode = 0600,
75485+ .proc_handler = &proc_dointvec,
75486+ },
75487+#endif
75488+#ifdef CONFIG_GRKERNSEC_TPE_ALL
75489+ {
75490+ .procname = "tpe_restrict_all",
75491+ .data = &grsec_enable_tpe_all,
75492+ .maxlen = sizeof(int),
75493+ .mode = 0600,
75494+ .proc_handler = &proc_dointvec,
75495+ },
75496+#endif
75497+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
75498+ {
75499+ .procname = "socket_all",
75500+ .data = &grsec_enable_socket_all,
75501+ .maxlen = sizeof(int),
75502+ .mode = 0600,
75503+ .proc_handler = &proc_dointvec,
75504+ },
75505+ {
75506+ .procname = "socket_all_gid",
75507+ .data = &grsec_socket_all_gid,
75508+ .maxlen = sizeof(int),
75509+ .mode = 0600,
75510+ .proc_handler = &proc_dointvec,
75511+ },
75512+#endif
75513+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
75514+ {
75515+ .procname = "socket_client",
75516+ .data = &grsec_enable_socket_client,
75517+ .maxlen = sizeof(int),
75518+ .mode = 0600,
75519+ .proc_handler = &proc_dointvec,
75520+ },
75521+ {
75522+ .procname = "socket_client_gid",
75523+ .data = &grsec_socket_client_gid,
75524+ .maxlen = sizeof(int),
75525+ .mode = 0600,
75526+ .proc_handler = &proc_dointvec,
75527+ },
75528+#endif
75529+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
75530+ {
75531+ .procname = "socket_server",
75532+ .data = &grsec_enable_socket_server,
75533+ .maxlen = sizeof(int),
75534+ .mode = 0600,
75535+ .proc_handler = &proc_dointvec,
75536+ },
75537+ {
75538+ .procname = "socket_server_gid",
75539+ .data = &grsec_socket_server_gid,
75540+ .maxlen = sizeof(int),
75541+ .mode = 0600,
75542+ .proc_handler = &proc_dointvec,
75543+ },
75544+#endif
75545+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
75546+ {
75547+ .procname = "audit_group",
75548+ .data = &grsec_enable_group,
75549+ .maxlen = sizeof(int),
75550+ .mode = 0600,
75551+ .proc_handler = &proc_dointvec,
75552+ },
75553+ {
75554+ .procname = "audit_gid",
75555+ .data = &grsec_audit_gid,
75556+ .maxlen = sizeof(int),
75557+ .mode = 0600,
75558+ .proc_handler = &proc_dointvec,
75559+ },
75560+#endif
75561+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
75562+ {
75563+ .procname = "audit_chdir",
75564+ .data = &grsec_enable_chdir,
75565+ .maxlen = sizeof(int),
75566+ .mode = 0600,
75567+ .proc_handler = &proc_dointvec,
75568+ },
75569+#endif
75570+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
75571+ {
75572+ .procname = "audit_mount",
75573+ .data = &grsec_enable_mount,
75574+ .maxlen = sizeof(int),
75575+ .mode = 0600,
75576+ .proc_handler = &proc_dointvec,
75577+ },
75578+#endif
75579+#ifdef CONFIG_GRKERNSEC_DMESG
75580+ {
75581+ .procname = "dmesg",
75582+ .data = &grsec_enable_dmesg,
75583+ .maxlen = sizeof(int),
75584+ .mode = 0600,
75585+ .proc_handler = &proc_dointvec,
75586+ },
75587+#endif
75588+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75589+ {
75590+ .procname = "chroot_findtask",
75591+ .data = &grsec_enable_chroot_findtask,
75592+ .maxlen = sizeof(int),
75593+ .mode = 0600,
75594+ .proc_handler = &proc_dointvec,
75595+ },
75596+#endif
75597+#ifdef CONFIG_GRKERNSEC_RESLOG
75598+ {
75599+ .procname = "resource_logging",
75600+ .data = &grsec_resource_logging,
75601+ .maxlen = sizeof(int),
75602+ .mode = 0600,
75603+ .proc_handler = &proc_dointvec,
75604+ },
75605+#endif
75606+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
75607+ {
75608+ .procname = "audit_ptrace",
75609+ .data = &grsec_enable_audit_ptrace,
75610+ .maxlen = sizeof(int),
75611+ .mode = 0600,
75612+ .proc_handler = &proc_dointvec,
75613+ },
75614+#endif
75615+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
75616+ {
75617+ .procname = "harden_ptrace",
75618+ .data = &grsec_enable_harden_ptrace,
75619+ .maxlen = sizeof(int),
75620+ .mode = 0600,
75621+ .proc_handler = &proc_dointvec,
75622+ },
75623+#endif
75624+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
75625+ {
75626+ .procname = "harden_ipc",
75627+ .data = &grsec_enable_harden_ipc,
75628+ .maxlen = sizeof(int),
75629+ .mode = 0600,
75630+ .proc_handler = &proc_dointvec,
75631+ },
75632+#endif
75633+ {
75634+ .procname = "grsec_lock",
75635+ .data = &grsec_lock,
75636+ .maxlen = sizeof(int),
75637+ .mode = 0600,
75638+ .proc_handler = &proc_dointvec,
75639+ },
75640+#endif
75641+#ifdef CONFIG_GRKERNSEC_ROFS
75642+ {
75643+ .procname = "romount_protect",
75644+ .data = &grsec_enable_rofs,
75645+ .maxlen = sizeof(int),
75646+ .mode = 0600,
75647+ .proc_handler = &proc_dointvec_minmax,
75648+ .extra1 = &one,
75649+ .extra2 = &one,
75650+ },
75651+#endif
75652+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
75653+ {
75654+ .procname = "deny_new_usb",
75655+ .data = &grsec_deny_new_usb,
75656+ .maxlen = sizeof(int),
75657+ .mode = 0600,
75658+ .proc_handler = &proc_dointvec,
75659+ },
75660+#endif
75661+ { }
75662+};
75663+#endif
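A table of this shape reaches /proc/sys through the usual sysctl registration path (the patch itself wires grsecurity_table into kernel/sysctl.c). A minimal stand-alone module registering one toggle under a hypothetical kernel/example path, assuming the 3.x-era register_sysctl() API:

#include <linux/module.h>
#include <linux/sysctl.h>

static int example_toggle;
static struct ctl_table_header *hdr;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_toggle",
		.data		= &example_toggle,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __init example_init(void)
{
	/* appears as /proc/sys/kernel/example/example_toggle */
	hdr = register_sysctl("kernel/example", example_table);
	return hdr ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	unregister_sysctl_table(hdr);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");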
75664diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
75665new file mode 100644
75666index 0000000..61b514e
75667--- /dev/null
75668+++ b/grsecurity/grsec_time.c
75669@@ -0,0 +1,16 @@
75670+#include <linux/kernel.h>
75671+#include <linux/sched.h>
75672+#include <linux/grinternal.h>
75673+#include <linux/module.h>
75674+
75675+void
75676+gr_log_timechange(void)
75677+{
75678+#ifdef CONFIG_GRKERNSEC_TIME
75679+ if (grsec_enable_time)
75680+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
75681+#endif
75682+ return;
75683+}
75684+
75685+EXPORT_SYMBOL_GPL(gr_log_timechange);
75686diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
75687new file mode 100644
75688index 0000000..ee57dcf
75689--- /dev/null
75690+++ b/grsecurity/grsec_tpe.c
75691@@ -0,0 +1,73 @@
75692+#include <linux/kernel.h>
75693+#include <linux/sched.h>
75694+#include <linux/file.h>
75695+#include <linux/fs.h>
75696+#include <linux/grinternal.h>
75697+
75698+extern int gr_acl_tpe_check(void);
75699+
75700+int
75701+gr_tpe_allow(const struct file *file)
75702+{
75703+#ifdef CONFIG_GRKERNSEC
75704+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
75705+ const struct cred *cred = current_cred();
75706+ char *msg = NULL;
75707+ char *msg2 = NULL;
75708+
75709+ // never restrict root
75710+ if (gr_is_global_root(cred->uid))
75711+ return 1;
75712+
75713+ if (grsec_enable_tpe) {
75714+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
75715+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
75716+ msg = "not being in trusted group";
75717+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
75718+ msg = "being in untrusted group";
75719+#else
75720+ if (in_group_p(grsec_tpe_gid))
75721+ msg = "being in untrusted group";
75722+#endif
75723+ }
75724+ if (!msg && gr_acl_tpe_check())
75725+ msg = "being in untrusted role";
75726+
75727+ // not in any affected group/role
75728+ if (!msg)
75729+ goto next_check;
75730+
75731+ if (gr_is_global_nonroot(inode->i_uid))
75732+ msg2 = "file in non-root-owned directory";
75733+ else if (inode->i_mode & S_IWOTH)
75734+ msg2 = "file in world-writable directory";
75735+ else if (inode->i_mode & S_IWGRP)
75736+ msg2 = "file in group-writable directory";
75737+
75738+ if (msg && msg2) {
75739+ char fullmsg[70] = {0};
75740+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
75741+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
75742+ return 0;
75743+ }
75744+ msg = NULL;
75745+next_check:
75746+#ifdef CONFIG_GRKERNSEC_TPE_ALL
75747+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
75748+ return 1;
75749+
75750+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
75751+ msg = "directory not owned by user";
75752+ else if (inode->i_mode & S_IWOTH)
75753+ msg = "file in world-writable directory";
75754+ else if (inode->i_mode & S_IWGRP)
75755+ msg = "file in group-writable directory";
75756+
75757+ if (msg) {
75758+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
75759+ return 0;
75760+ }
75761+#endif
75762+#endif
75763+ return 1;
75764+}
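Stripped of logging and config plumbing, the TPE test reduces to "is the containing directory trustworthy". A pure-function restatement with the directory mode and owner passed in directly (names hypothetical):

#include <stdio.h>
#include <sys/stat.h>

/* returns a reason string if the parent directory is untrusted, else NULL */
static const char *tpe_dir_untrusted(mode_t dir_mode, uid_t dir_uid)
{
	if (dir_uid != 0)
		return "file in non-root-owned directory";
	if (dir_mode & S_IWOTH)
		return "file in world-writable directory";
	if (dir_mode & S_IWGRP)
		return "file in group-writable directory";
	return NULL;
}

int main(void)
{
	/* root-owned 0755 passes; root-owned 01777 (like /tmp) does not */
	printf("%s\n", tpe_dir_untrusted(0755, 0) ? "deny" : "allow");
	printf("%s\n", tpe_dir_untrusted(01777, 0) ? "deny" : "allow");
	return 0;
}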
75765diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
75766new file mode 100644
75767index 0000000..ae02d8e
75768--- /dev/null
75769+++ b/grsecurity/grsec_usb.c
75770@@ -0,0 +1,15 @@
75771+#include <linux/kernel.h>
75772+#include <linux/grinternal.h>
75773+#include <linux/module.h>
75774+
75775+int gr_handle_new_usb(void)
75776+{
75777+#ifdef CONFIG_GRKERNSEC_DENYUSB
75778+ if (grsec_deny_new_usb) {
75779+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
75780+ return 1;
75781+ }
75782+#endif
75783+ return 0;
75784+}
75785+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
75786diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
75787new file mode 100644
75788index 0000000..9f7b1ac
75789--- /dev/null
75790+++ b/grsecurity/grsum.c
75791@@ -0,0 +1,61 @@
75792+#include <linux/err.h>
75793+#include <linux/kernel.h>
75794+#include <linux/sched.h>
75795+#include <linux/mm.h>
75796+#include <linux/scatterlist.h>
75797+#include <linux/crypto.h>
75798+#include <linux/gracl.h>
75799+
75800+
75801+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
75802+#error "crypto and sha256 must be built into the kernel"
75803+#endif
75804+
75805+int
75806+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
75807+{
75808+ char *p;
75809+ struct crypto_hash *tfm;
75810+ struct hash_desc desc;
75811+ struct scatterlist sg;
75812+ unsigned char temp_sum[GR_SHA_LEN];
75813+ volatile int retval = 0;
75814+ volatile int dummy = 0;
75815+ unsigned int i;
75816+
75817+ sg_init_table(&sg, 1);
75818+
75819+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
75820+ if (IS_ERR(tfm)) {
75821+ /* should never happen, since sha256 should be built in */
75822+ return 1;
75823+ }
75824+
75825+ desc.tfm = tfm;
75826+ desc.flags = 0;
75827+
75828+ crypto_hash_init(&desc);
75829+
75830+ p = salt;
75831+ sg_set_buf(&sg, p, GR_SALT_LEN);
75832+ crypto_hash_update(&desc, &sg, sg.length);
75833+
75834+ p = entry->pw;
75835+ sg_set_buf(&sg, p, strlen(p));
75836+
75837+ crypto_hash_update(&desc, &sg, sg.length);
75838+
75839+ crypto_hash_final(&desc, temp_sum);
75840+
75841+ memset(entry->pw, 0, GR_PW_LEN);
75842+
75843+ for (i = 0; i < GR_SHA_LEN; i++)
75844+ if (sum[i] != temp_sum[i])
75845+ retval = 1;
75846+ else
75847+			dummy = 1;	// balance the taken branch so the compare stays constant-time
75848+
75849+ crypto_free_hash(tfm);
75850+
75851+ return retval;
75852+}
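chkpw() compares every byte and balances the mismatch branch with a dummy store so timing does not reveal how many leading bytes matched. The same idea is more often written branch-free; a user-space sketch:

#include <stdio.h>
#include <stddef.h>

static int const_time_memcmp(const unsigned char *a,
			     const unsigned char *b, size_t len)
{
	unsigned char diff = 0;
	size_t i;

	/* accumulate differences; runtime is independent of where they occur */
	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];
	return diff != 0;
}

int main(void)
{
	unsigned char x[4] = { 1, 2, 3, 4 }, y[4] = { 1, 2, 3, 5 };

	printf("%d %d\n", const_time_memcmp(x, x, 4),
	       const_time_memcmp(x, y, 4));
	return 0;
}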
75853diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
75854index 77ff547..181834f 100644
75855--- a/include/asm-generic/4level-fixup.h
75856+++ b/include/asm-generic/4level-fixup.h
75857@@ -13,8 +13,10 @@
75858 #define pmd_alloc(mm, pud, address) \
75859 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
75860 NULL: pmd_offset(pud, address))
75861+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
75862
75863 #define pud_alloc(mm, pgd, address) (pgd)
75864+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
75865 #define pud_offset(pgd, start) (pgd)
75866 #define pud_none(pud) 0
75867 #define pud_bad(pud) 0
75868diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
75869index b7babf0..97f4c4f 100644
75870--- a/include/asm-generic/atomic-long.h
75871+++ b/include/asm-generic/atomic-long.h
75872@@ -22,6 +22,12 @@
75873
75874 typedef atomic64_t atomic_long_t;
75875
75876+#ifdef CONFIG_PAX_REFCOUNT
75877+typedef atomic64_unchecked_t atomic_long_unchecked_t;
75878+#else
75879+typedef atomic64_t atomic_long_unchecked_t;
75880+#endif
75881+
75882 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
75883
75884 static inline long atomic_long_read(atomic_long_t *l)
75885@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
75886 return (long)atomic64_read(v);
75887 }
75888
75889+#ifdef CONFIG_PAX_REFCOUNT
75890+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
75891+{
75892+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75893+
75894+ return (long)atomic64_read_unchecked(v);
75895+}
75896+#endif
75897+
75898 static inline void atomic_long_set(atomic_long_t *l, long i)
75899 {
75900 atomic64_t *v = (atomic64_t *)l;
75901@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
75902 atomic64_set(v, i);
75903 }
75904
75905+#ifdef CONFIG_PAX_REFCOUNT
75906+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
75907+{
75908+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75909+
75910+ atomic64_set_unchecked(v, i);
75911+}
75912+#endif
75913+
75914 static inline void atomic_long_inc(atomic_long_t *l)
75915 {
75916 atomic64_t *v = (atomic64_t *)l;
75917@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
75918 atomic64_inc(v);
75919 }
75920
75921+#ifdef CONFIG_PAX_REFCOUNT
75922+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
75923+{
75924+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75925+
75926+ atomic64_inc_unchecked(v);
75927+}
75928+#endif
75929+
75930 static inline void atomic_long_dec(atomic_long_t *l)
75931 {
75932 atomic64_t *v = (atomic64_t *)l;
75933@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
75934 atomic64_dec(v);
75935 }
75936
75937+#ifdef CONFIG_PAX_REFCOUNT
75938+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
75939+{
75940+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75941+
75942+ atomic64_dec_unchecked(v);
75943+}
75944+#endif
75945+
75946 static inline void atomic_long_add(long i, atomic_long_t *l)
75947 {
75948 atomic64_t *v = (atomic64_t *)l;
75949@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
75950 atomic64_add(i, v);
75951 }
75952
75953+#ifdef CONFIG_PAX_REFCOUNT
75954+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
75955+{
75956+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75957+
75958+ atomic64_add_unchecked(i, v);
75959+}
75960+#endif
75961+
75962 static inline void atomic_long_sub(long i, atomic_long_t *l)
75963 {
75964 atomic64_t *v = (atomic64_t *)l;
75965@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
75966 atomic64_sub(i, v);
75967 }
75968
75969+#ifdef CONFIG_PAX_REFCOUNT
75970+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
75971+{
75972+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75973+
75974+ atomic64_sub_unchecked(i, v);
75975+}
75976+#endif
75977+
75978 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
75979 {
75980 atomic64_t *v = (atomic64_t *)l;
75981@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
75982 return atomic64_add_negative(i, v);
75983 }
75984
75985-static inline long atomic_long_add_return(long i, atomic_long_t *l)
75986+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
75987 {
75988 atomic64_t *v = (atomic64_t *)l;
75989
75990 return (long)atomic64_add_return(i, v);
75991 }
75992
75993+#ifdef CONFIG_PAX_REFCOUNT
75994+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
75995+{
75996+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
75997+
75998+ return (long)atomic64_add_return_unchecked(i, v);
75999+}
76000+#endif
76001+
76002 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
76003 {
76004 atomic64_t *v = (atomic64_t *)l;
76005@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
76006 return (long)atomic64_inc_return(v);
76007 }
76008
76009+#ifdef CONFIG_PAX_REFCOUNT
76010+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
76011+{
76012+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
76013+
76014+ return (long)atomic64_inc_return_unchecked(v);
76015+}
76016+#endif
76017+
76018 static inline long atomic_long_dec_return(atomic_long_t *l)
76019 {
76020 atomic64_t *v = (atomic64_t *)l;
76021@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
76022
76023 typedef atomic_t atomic_long_t;
76024
76025+#ifdef CONFIG_PAX_REFCOUNT
76026+typedef atomic_unchecked_t atomic_long_unchecked_t;
76027+#else
76028+typedef atomic_t atomic_long_unchecked_t;
76029+#endif
76030+
76031 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
76032 static inline long atomic_long_read(atomic_long_t *l)
76033 {
76034@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
76035 return (long)atomic_read(v);
76036 }
76037
76038+#ifdef CONFIG_PAX_REFCOUNT
76039+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
76040+{
76041+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76042+
76043+ return (long)atomic_read_unchecked(v);
76044+}
76045+#endif
76046+
76047 static inline void atomic_long_set(atomic_long_t *l, long i)
76048 {
76049 atomic_t *v = (atomic_t *)l;
76050@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
76051 atomic_set(v, i);
76052 }
76053
76054+#ifdef CONFIG_PAX_REFCOUNT
76055+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
76056+{
76057+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76058+
76059+ atomic_set_unchecked(v, i);
76060+}
76061+#endif
76062+
76063 static inline void atomic_long_inc(atomic_long_t *l)
76064 {
76065 atomic_t *v = (atomic_t *)l;
76066@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
76067 atomic_inc(v);
76068 }
76069
76070+#ifdef CONFIG_PAX_REFCOUNT
76071+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
76072+{
76073+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76074+
76075+ atomic_inc_unchecked(v);
76076+}
76077+#endif
76078+
76079 static inline void atomic_long_dec(atomic_long_t *l)
76080 {
76081 atomic_t *v = (atomic_t *)l;
76082@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
76083 atomic_dec(v);
76084 }
76085
76086+#ifdef CONFIG_PAX_REFCOUNT
76087+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
76088+{
76089+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76090+
76091+ atomic_dec_unchecked(v);
76092+}
76093+#endif
76094+
76095 static inline void atomic_long_add(long i, atomic_long_t *l)
76096 {
76097 atomic_t *v = (atomic_t *)l;
76098@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
76099 atomic_add(i, v);
76100 }
76101
76102+#ifdef CONFIG_PAX_REFCOUNT
76103+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
76104+{
76105+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76106+
76107+ atomic_add_unchecked(i, v);
76108+}
76109+#endif
76110+
76111 static inline void atomic_long_sub(long i, atomic_long_t *l)
76112 {
76113 atomic_t *v = (atomic_t *)l;
76114@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
76115 atomic_sub(i, v);
76116 }
76117
76118+#ifdef CONFIG_PAX_REFCOUNT
76119+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
76120+{
76121+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76122+
76123+ atomic_sub_unchecked(i, v);
76124+}
76125+#endif
76126+
76127 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
76128 {
76129 atomic_t *v = (atomic_t *)l;
76130@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
76131 return (long)atomic_add_return(i, v);
76132 }
76133
76134+#ifdef CONFIG_PAX_REFCOUNT
76135+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
76136+{
76137+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76138+
76139+ return (long)atomic_add_return_unchecked(i, v);
76140+}
76141+
76142+#endif
76143+
76144 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
76145 {
76146 atomic_t *v = (atomic_t *)l;
76147@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
76148 return (long)atomic_inc_return(v);
76149 }
76150
76151+#ifdef CONFIG_PAX_REFCOUNT
76152+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
76153+{
76154+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
76155+
76156+ return (long)atomic_inc_return_unchecked(v);
76157+}
76158+#endif
76159+
76160 static inline long atomic_long_dec_return(atomic_long_t *l)
76161 {
76162 atomic_t *v = (atomic_t *)l;
76163@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
76164
76165 #endif /* BITS_PER_LONG == 64 */
76166
76167+#ifdef CONFIG_PAX_REFCOUNT
76168+static inline void pax_refcount_needs_these_functions(void)
76169+{
76170+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
76171+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
76172+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
76173+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
76174+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
76175+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
76176+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
76177+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
76178+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
76179+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
76180+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
76181+#ifdef CONFIG_X86
76182+ atomic_clear_mask_unchecked(0, NULL);
76183+ atomic_set_mask_unchecked(0, NULL);
76184+#endif
76185+
76186+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
76187+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
76188+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
76189+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
76190+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
76191+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
76192+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
76193+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
76194+}
76195+#else
76196+#define atomic_read_unchecked(v) atomic_read(v)
76197+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
76198+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
76199+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
76200+#define atomic_inc_unchecked(v) atomic_inc(v)
76201+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
76202+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
76203+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
76204+#define atomic_dec_unchecked(v) atomic_dec(v)
76205+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
76206+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
76207+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
76208+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
76209+
76210+#define atomic_long_read_unchecked(v) atomic_long_read(v)
76211+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
76212+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
76213+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
76214+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
76215+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
76216+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
76217+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
76218+#endif
76219+
76220 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
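The point of the *_unchecked split: under PAX_REFCOUNT the plain atomic ops trap on signed overflow, so counters where wrap-around is harmless (statistics and the like) use the unchecked variants added above. A user-space model of the two behaviors (needs GCC 5+ or Clang for __builtin_add_overflow; names hypothetical):

#include <stdio.h>
#include <limits.h>

static long checked_add(long a, long b, int *overflowed)
{
	long r;

	/* detect signed overflow instead of silently wrapping */
	if (__builtin_add_overflow(a, b, &r)) {
		*overflowed = 1;
		return a;	/* model: refuse to move past the bound */
	}
	return r;
}

static unsigned long unchecked_add(unsigned long a, unsigned long b)
{
	return a + b;		/* unsigned wrap-around is well-defined */
}

int main(void)
{
	int ovf = 0;

	printf("%ld ovf=%d\n", checked_add(LONG_MAX, 1, &ovf), ovf);
	printf("%lu\n", unchecked_add(~0UL, 1));	/* wraps to 0 */
	return 0;
}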
76221diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
76222index 33bd2de..f31bff97 100644
76223--- a/include/asm-generic/atomic.h
76224+++ b/include/asm-generic/atomic.h
76225@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
76226 * Atomically clears the bits set in @mask from @v
76227 */
76228 #ifndef atomic_clear_mask
76229-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
76230+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
76231 {
76232 unsigned long flags;
76233
76234diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
76235index b18ce4f..2ee2843 100644
76236--- a/include/asm-generic/atomic64.h
76237+++ b/include/asm-generic/atomic64.h
76238@@ -16,6 +16,8 @@ typedef struct {
76239 long long counter;
76240 } atomic64_t;
76241
76242+typedef atomic64_t atomic64_unchecked_t;
76243+
76244 #define ATOMIC64_INIT(i) { (i) }
76245
76246 extern long long atomic64_read(const atomic64_t *v);
76247@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
76248 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
76249 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
76250
76251+#define atomic64_read_unchecked(v) atomic64_read(v)
76252+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
76253+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
76254+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
76255+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
76256+#define atomic64_inc_unchecked(v) atomic64_inc(v)
76257+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
76258+#define atomic64_dec_unchecked(v) atomic64_dec(v)
76259+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
76260+
76261 #endif /* _ASM_GENERIC_ATOMIC64_H */
76262diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
76263index a60a7cc..0fe12f2 100644
76264--- a/include/asm-generic/bitops/__fls.h
76265+++ b/include/asm-generic/bitops/__fls.h
76266@@ -9,7 +9,7 @@
76267 *
76268 * Undefined if no set bit exists, so code should check against 0 first.
76269 */
76270-static __always_inline unsigned long __fls(unsigned long word)
76271+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
76272 {
76273 int num = BITS_PER_LONG - 1;
76274
76275diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
76276index 0576d1f..dad6c71 100644
76277--- a/include/asm-generic/bitops/fls.h
76278+++ b/include/asm-generic/bitops/fls.h
76279@@ -9,7 +9,7 @@
76280 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
76281 */
76282
76283-static __always_inline int fls(int x)
76284+static __always_inline int __intentional_overflow(-1) fls(int x)
76285 {
76286 int r = 32;
76287
76288diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
76289index b097cf8..3d40e14 100644
76290--- a/include/asm-generic/bitops/fls64.h
76291+++ b/include/asm-generic/bitops/fls64.h
76292@@ -15,7 +15,7 @@
76293 * at position 64.
76294 */
76295 #if BITS_PER_LONG == 32
76296-static __always_inline int fls64(__u64 x)
76297+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
76298 {
76299 __u32 h = x >> 32;
76300 if (h)
76301@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
76302 return fls(x);
76303 }
76304 #elif BITS_PER_LONG == 64
76305-static __always_inline int fls64(__u64 x)
76306+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
76307 {
76308 if (x == 0)
76309 return 0;
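The contract these annotations exempt from the size-overflow plugin: fls() returns the 1-based index of the most significant set bit, with fls(0) == 0. A portable user-space model:

#include <stdio.h>

/* find last (most significant) set bit, 1-based; 0 when no bits are set */
static int my_fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* matches the kernel contract: fls(0)=0, fls(1)=1, fls(0x80000000)=32 */
	printf("%d %d %d\n", my_fls(0), my_fls(1), my_fls(0x80000000u));
	return 0;
}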
76310diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
76311index 1bfcfe5..e04c5c9 100644
76312--- a/include/asm-generic/cache.h
76313+++ b/include/asm-generic/cache.h
76314@@ -6,7 +6,7 @@
76315 * cache lines need to provide their own cache.h.
76316 */
76317
76318-#define L1_CACHE_SHIFT 5
76319-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
76320+#define L1_CACHE_SHIFT 5UL
76321+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
76322
76323 #endif /* __ASM_GENERIC_CACHE_H */
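The UL suffix matters because expressions built on the macro otherwise do their arithmetic in 32-bit int/unsigned int and can wrap before being widened. A small demonstration (second line assumes LP64):

#include <stdio.h>

#define SHIFT		5
#define BYTES_U		(1U << SHIFT)	/* 32-bit unsigned constant */
#define BYTES_UL	(1UL << SHIFT)	/* full-width unsigned long */

int main(void)
{
	printf("%lu\n", (unsigned long)(BYTES_U << 28));	/* wraps to 0 in 32 bits */
	printf("%lu\n", BYTES_UL << 28);			/* 1UL << 33 = 8589934592 */
	return 0;
}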
76324diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
76325index 0d68a1e..b74a761 100644
76326--- a/include/asm-generic/emergency-restart.h
76327+++ b/include/asm-generic/emergency-restart.h
76328@@ -1,7 +1,7 @@
76329 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
76330 #define _ASM_GENERIC_EMERGENCY_RESTART_H
76331
76332-static inline void machine_emergency_restart(void)
76333+static inline __noreturn void machine_emergency_restart(void)
76334 {
76335 machine_restart(NULL);
76336 }
76337diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
76338index 90f99c7..00ce236 100644
76339--- a/include/asm-generic/kmap_types.h
76340+++ b/include/asm-generic/kmap_types.h
76341@@ -2,9 +2,9 @@
76342 #define _ASM_GENERIC_KMAP_TYPES_H
76343
76344 #ifdef __WITH_KM_FENCE
76345-# define KM_TYPE_NR 41
76346+# define KM_TYPE_NR 42
76347 #else
76348-# define KM_TYPE_NR 20
76349+# define KM_TYPE_NR 21
76350 #endif
76351
76352 #endif
76353diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
76354index 9ceb03b..62b0b8f 100644
76355--- a/include/asm-generic/local.h
76356+++ b/include/asm-generic/local.h
76357@@ -23,24 +23,37 @@ typedef struct
76358 atomic_long_t a;
76359 } local_t;
76360
76361+typedef struct {
76362+ atomic_long_unchecked_t a;
76363+} local_unchecked_t;
76364+
76365 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
76366
76367 #define local_read(l) atomic_long_read(&(l)->a)
76368+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
76369 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
76370+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
76371 #define local_inc(l) atomic_long_inc(&(l)->a)
76372+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
76373 #define local_dec(l) atomic_long_dec(&(l)->a)
76374+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
76375 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
76376+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
76377 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
76378+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
76379
76380 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
76381 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
76382 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
76383 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
76384 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
76385+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
76386 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
76387 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
76388+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
76389
76390 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
76391+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
76392 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
76393 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
76394 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
76395diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
76396index 725612b..9cc513a 100644
76397--- a/include/asm-generic/pgtable-nopmd.h
76398+++ b/include/asm-generic/pgtable-nopmd.h
76399@@ -1,14 +1,19 @@
76400 #ifndef _PGTABLE_NOPMD_H
76401 #define _PGTABLE_NOPMD_H
76402
76403-#ifndef __ASSEMBLY__
76404-
76405 #include <asm-generic/pgtable-nopud.h>
76406
76407-struct mm_struct;
76408-
76409 #define __PAGETABLE_PMD_FOLDED
76410
76411+#define PMD_SHIFT PUD_SHIFT
76412+#define PTRS_PER_PMD 1
76413+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
76414+#define PMD_MASK (~(PMD_SIZE-1))
76415+
76416+#ifndef __ASSEMBLY__
76417+
76418+struct mm_struct;
76419+
76420 /*
76421 * Having the pmd type consist of a pud gets the size right, and allows
76422 * us to conceptually access the pud entry that this pmd is folded into
76423@@ -16,11 +21,6 @@ struct mm_struct;
76424 */
76425 typedef struct { pud_t pud; } pmd_t;
76426
76427-#define PMD_SHIFT PUD_SHIFT
76428-#define PTRS_PER_PMD 1
76429-#define PMD_SIZE (1UL << PMD_SHIFT)
76430-#define PMD_MASK (~(PMD_SIZE-1))
76431-
76432 /*
76433 * The "pud_xxx()" functions here are trivial for a folded two-level
76434 * setup: the pmd is never bad, and a pmd always exists (as it's folded
76435diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
76436index 810431d..0ec4804f 100644
76437--- a/include/asm-generic/pgtable-nopud.h
76438+++ b/include/asm-generic/pgtable-nopud.h
76439@@ -1,10 +1,15 @@
76440 #ifndef _PGTABLE_NOPUD_H
76441 #define _PGTABLE_NOPUD_H
76442
76443-#ifndef __ASSEMBLY__
76444-
76445 #define __PAGETABLE_PUD_FOLDED
76446
76447+#define PUD_SHIFT PGDIR_SHIFT
76448+#define PTRS_PER_PUD 1
76449+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
76450+#define PUD_MASK (~(PUD_SIZE-1))
76451+
76452+#ifndef __ASSEMBLY__
76453+
76454 /*
76455 * Having the pud type consist of a pgd gets the size right, and allows
76456 * us to conceptually access the pgd entry that this pud is folded into
76457@@ -12,11 +17,6 @@
76458 */
76459 typedef struct { pgd_t pgd; } pud_t;
76460
76461-#define PUD_SHIFT PGDIR_SHIFT
76462-#define PTRS_PER_PUD 1
76463-#define PUD_SIZE (1UL << PUD_SHIFT)
76464-#define PUD_MASK (~(PUD_SIZE-1))
76465-
76466 /*
76467 * The "pgd_xxx()" functions here are trivial for a folded two-level
76468 * setup: the pud is never bad, and a pud always exists (as it's folded
76469@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
76470 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
76471
76472 #define pgd_populate(mm, pgd, pud) do { } while (0)
76473+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
76474 /*
76475 * (puds are folded into pgds so this doesn't get actually called,
76476 * but the define is needed for a generic inline function.)
76477diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
76478index 8e4f41d..c5e9afd 100644
76479--- a/include/asm-generic/pgtable.h
76480+++ b/include/asm-generic/pgtable.h
76481@@ -748,6 +748,22 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
76482 }
76483 #endif /* CONFIG_NUMA_BALANCING */
76484
76485+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
76486+#ifdef CONFIG_PAX_KERNEXEC
76487+#error KERNEXEC requires pax_open_kernel
76488+#else
76489+static inline unsigned long pax_open_kernel(void) { return 0; }
76490+#endif
76491+#endif
76492+
76493+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
76494+#ifdef CONFIG_PAX_KERNEXEC
76495+#error KERNEXEC requires pax_close_kernel
76496+#else
76497+static inline unsigned long pax_close_kernel(void) { return 0; }
76498+#endif
76499+#endif
76500+
76501 #endif /* CONFIG_MMU */
76502
76503 #endif /* !__ASSEMBLY__ */
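The stubs above let generic code bracket writes to otherwise read-only kernel data without caring whether KERNEXEC is active. An illustrative kernel-context shape of a caller (helper name hypothetical, not a standalone program):

static void patch_readonly_word(unsigned long *ro_ptr, unsigned long val)
{
	pax_open_kernel();	/* generic stub above just returns 0 */
	*ro_ptr = val;		/* window where the target is writable */
	pax_close_kernel();	/* restore write protection */
}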
76504diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
76505index dc1269c..48a4f51 100644
76506--- a/include/asm-generic/uaccess.h
76507+++ b/include/asm-generic/uaccess.h
76508@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
76509 return __clear_user(to, n);
76510 }
76511
76512+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
76513+#ifdef CONFIG_PAX_MEMORY_UDEREF
76514+#error UDEREF requires pax_open_userland
76515+#else
76516+static inline unsigned long pax_open_userland(void) { return 0; }
76517+#endif
76518+#endif
76519+
76520+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
76521+#ifdef CONFIG_PAX_MEMORY_UDEREF
76522+#error UDEREF requires pax_close_userland
76523+#else
76524+static inline unsigned long pax_close_userland(void) { return 0; }
76525+#endif
76526+#endif
76527+
76528 #endif /* __ASM_GENERIC_UACCESS_H */
76529diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
76530index bc2121f..2f41f9a 100644
76531--- a/include/asm-generic/vmlinux.lds.h
76532+++ b/include/asm-generic/vmlinux.lds.h
76533@@ -232,6 +232,7 @@
76534 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
76535 VMLINUX_SYMBOL(__start_rodata) = .; \
76536 *(.rodata) *(.rodata.*) \
76537+ *(.data..read_only) \
76538 *(__vermagic) /* Kernel version magic */ \
76539 . = ALIGN(8); \
76540 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
76541@@ -716,17 +717,18 @@
76542 * section in the linker script will go there too. @phdr should have
76543 * a leading colon.
76544 *
76545- * Note that this macros defines __per_cpu_load as an absolute symbol.
76546+ * Note that this macro defines per_cpu_load as an absolute symbol.
76547 * If there is no need to put the percpu section at a predetermined
76548 * address, use PERCPU_SECTION.
76549 */
76550 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
76551- VMLINUX_SYMBOL(__per_cpu_load) = .; \
76552- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
76553+ per_cpu_load = .; \
76554+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
76555 - LOAD_OFFSET) { \
76556+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
76557 PERCPU_INPUT(cacheline) \
76558 } phdr \
76559- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
76560+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
76561
76562 /**
76563 * PERCPU_SECTION - define output section for percpu area, simple version
76564diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
76565index e73c19e..5b89e00 100644
76566--- a/include/crypto/algapi.h
76567+++ b/include/crypto/algapi.h
76568@@ -34,7 +34,7 @@ struct crypto_type {
76569 unsigned int maskclear;
76570 unsigned int maskset;
76571 unsigned int tfmsize;
76572-};
76573+} __do_const;
76574
76575 struct crypto_instance {
76576 struct crypto_alg alg;
76577diff --git a/include/drm/drmP.h b/include/drm/drmP.h
76578index 1d4a920..da65658 100644
76579--- a/include/drm/drmP.h
76580+++ b/include/drm/drmP.h
76581@@ -66,6 +66,7 @@
76582 #include <linux/workqueue.h>
76583 #include <linux/poll.h>
76584 #include <asm/pgalloc.h>
76585+#include <asm/local.h>
76586 #include <drm/drm.h>
76587 #include <drm/drm_sarea.h>
76588 #include <drm/drm_vma_manager.h>
76589@@ -278,10 +279,12 @@ do { \
76590 * \param cmd command.
76591 * \param arg argument.
76592 */
76593-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
76594+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
76595+ struct drm_file *file_priv);
76596+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
76597 struct drm_file *file_priv);
76598
76599-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
76600+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
76601 unsigned long arg);
76602
76603 #define DRM_IOCTL_NR(n) _IOC_NR(n)
76604@@ -297,10 +300,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
76605 struct drm_ioctl_desc {
76606 unsigned int cmd;
76607 int flags;
76608- drm_ioctl_t *func;
76609+ drm_ioctl_t func;
76610 unsigned int cmd_drv;
76611 const char *name;
76612-};
76613+} __do_const;
76614
76615 /**
76616 * Creates a driver or general drm_ioctl_desc array entry for the given
76617@@ -1013,7 +1016,8 @@ struct drm_info_list {
76618 int (*show)(struct seq_file*, void*); /** show callback */
76619 u32 driver_features; /**< Required driver features for this entry */
76620 void *data;
76621-};
76622+} __do_const;
76623+typedef struct drm_info_list __no_const drm_info_list_no_const;
76624
76625 /**
76626 * debugfs node structure. This structure represents a debugfs file.
76627@@ -1097,7 +1101,7 @@ struct drm_device {
76628
76629 /** \name Usage Counters */
76630 /*@{ */
76631- int open_count; /**< Outstanding files open */
76632+ local_t open_count; /**< Outstanding files open */
76633 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
76634 atomic_t vma_count; /**< Outstanding vma areas open */
76635 int buf_use; /**< Buffers in use -- cannot alloc */
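For reference, a minimal sketch of what the int-to-local_t conversion of open_count implies for call sites, assuming the PaX rationale that the counter only moves under the DRM global mutex (function names here are hypothetical):

    #include <asm/local.h>

    static local_t open_count = LOCAL_INIT(0);

    /* hypothetical open path: was open_count++ */
    static int drm_open_sketch(void)
    {
            if (local_inc_return(&open_count) == 1) {
                    /* first opener: one-time device setup */
            }
            return 0;
    }

    /* hypothetical release path: was --open_count == 0 */
    static void drm_release_sketch(void)
    {
            if (local_dec_and_test(&open_count)) {
                    /* last closer: tear the device down */
            }
    }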
76636diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
76637index ef6ad3a..be34b16 100644
76638--- a/include/drm/drm_crtc_helper.h
76639+++ b/include/drm/drm_crtc_helper.h
76640@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
76641 struct drm_connector *connector);
76642 /* disable encoder when not in use - more explicit than dpms off */
76643 void (*disable)(struct drm_encoder *encoder);
76644-};
76645+} __no_const;
76646
76647 /**
76648 * drm_connector_helper_funcs - helper operations for connectors
76649diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
76650index 940ece4..8cb727f 100644
76651--- a/include/drm/i915_pciids.h
76652+++ b/include/drm/i915_pciids.h
76653@@ -37,7 +37,7 @@
76654 */
76655 #define INTEL_VGA_DEVICE(id, info) { \
76656 0x8086, id, \
76657- ~0, ~0, \
76658+ PCI_ANY_ID, PCI_ANY_ID, \
76659 0x030000, 0xff0000, \
76660 (unsigned long) info }
76661
76662diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
76663index 72dcbe8..8db58d7 100644
76664--- a/include/drm/ttm/ttm_memory.h
76665+++ b/include/drm/ttm/ttm_memory.h
76666@@ -48,7 +48,7 @@
76667
76668 struct ttm_mem_shrink {
76669 int (*do_shrink) (struct ttm_mem_shrink *);
76670-};
76671+} __no_const;
76672
76673 /**
76674 * struct ttm_mem_global - Global memory accounting structure.
76675diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
76676index d1f61bf..2239439 100644
76677--- a/include/drm/ttm/ttm_page_alloc.h
76678+++ b/include/drm/ttm/ttm_page_alloc.h
76679@@ -78,6 +78,7 @@ void ttm_dma_page_alloc_fini(void);
76680 */
76681 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
76682
76683+struct device;
76684 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
76685 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
76686
76687diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
76688index 4b840e8..155d235 100644
76689--- a/include/keys/asymmetric-subtype.h
76690+++ b/include/keys/asymmetric-subtype.h
76691@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
76692 /* Verify the signature on a key of this subtype (optional) */
76693 int (*verify_signature)(const struct key *key,
76694 const struct public_key_signature *sig);
76695-};
76696+} __do_const;
76697
76698 /**
76699 * asymmetric_key_subtype - Get the subtype from an asymmetric key
76700diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
76701index c1da539..1dcec55 100644
76702--- a/include/linux/atmdev.h
76703+++ b/include/linux/atmdev.h
76704@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
76705 #endif
76706
76707 struct k_atm_aal_stats {
76708-#define __HANDLE_ITEM(i) atomic_t i
76709+#define __HANDLE_ITEM(i) atomic_unchecked_t i
76710 __AAL_STAT_ITEMS
76711 #undef __HANDLE_ITEM
76712 };
76713@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
76714 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
76715 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
76716 struct module *owner;
76717-};
76718+} __do_const;
76719
76720 struct atmphy_ops {
76721 int (*start)(struct atm_dev *dev);
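The atomic_unchecked_t substitution above is the PaX REFCOUNT opt-out: atomic_t arithmetic is instrumented to trap reference-count overflows, while pure statistics such as the AAL counters may legitimately wrap. A minimal sketch, assuming the _unchecked accessors mirror the regular names:

    #include <linux/atomic.h>

    static atomic_t refs;                 /* refcount: overflow trapped */
    static atomic_unchecked_t rx_errors;  /* statistic: free to wrap */

    static void on_rx_error_sketch(void)
    {
            atomic_inc_unchecked(&rx_errors);   /* never instrumented */
    }

    static int rx_error_count_sketch(void)
    {
            return atomic_read_unchecked(&rx_errors);
    }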
76722diff --git a/include/linux/audit.h b/include/linux/audit.h
76723index bf1ef22..2a55e1b 100644
76724--- a/include/linux/audit.h
76725+++ b/include/linux/audit.h
76726@@ -195,7 +195,7 @@ static inline void audit_ptrace(struct task_struct *t)
76727 extern unsigned int audit_serial(void);
76728 extern int auditsc_get_stamp(struct audit_context *ctx,
76729 struct timespec *t, unsigned int *serial);
76730-extern int audit_set_loginuid(kuid_t loginuid);
76731+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
76732
76733 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
76734 {
76735diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
76736index fd8bf32..2cccd5a 100644
76737--- a/include/linux/binfmts.h
76738+++ b/include/linux/binfmts.h
76739@@ -74,8 +74,10 @@ struct linux_binfmt {
76740 int (*load_binary)(struct linux_binprm *);
76741 int (*load_shlib)(struct file *);
76742 int (*core_dump)(struct coredump_params *cprm);
76743+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
76744+ void (*handle_mmap)(struct file *);
76745 unsigned long min_coredump; /* minimal dump size */
76746-};
76747+} __do_const;
76748
76749 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
76750
76751diff --git a/include/linux/bitops.h b/include/linux/bitops.h
76752index abc9ca7..e54ee27 100644
76753--- a/include/linux/bitops.h
76754+++ b/include/linux/bitops.h
76755@@ -102,7 +102,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
76756 * @word: value to rotate
76757 * @shift: bits to roll
76758 */
76759-static inline __u32 rol32(__u32 word, unsigned int shift)
76760+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
76761 {
76762 return (word << shift) | (word >> (32 - shift));
76763 }
76764@@ -112,7 +112,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
76765 * @word: value to rotate
76766 * @shift: bits to roll
76767 */
76768-static inline __u32 ror32(__u32 word, unsigned int shift)
76769+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
76770 {
76771 return (word >> shift) | (word << (32 - shift));
76772 }
76773@@ -168,7 +168,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
76774 return (__s32)(value << shift) >> shift;
76775 }
76776
76777-static inline unsigned fls_long(unsigned long l)
76778+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
76779 {
76780 if (sizeof(l) == 4)
76781 return fls(l);
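__intentional_overflow(-1) whitelists an entire function for the size_overflow plugin; rotations are the canonical case, since discarding the shifted-out high bits is the whole point. A self-contained userspace restatement (shift must stay in 1..31, as in the kernel version):

    #include <stdint.h>
    #include <stdio.h>

    static inline uint32_t rol32(uint32_t word, unsigned int shift)
    {
            /* the left shift deliberately drops high bits: the
             * "overflow" the plugin would otherwise flag */
            return (word << shift) | (word >> (32 - shift));
    }

    int main(void)
    {
            printf("%08x\n", rol32(0x80000001u, 1)); /* prints 00000003 */
            return 0;
    }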
76782diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
76783index 1b135d4..59fc876 100644
76784--- a/include/linux/blkdev.h
76785+++ b/include/linux/blkdev.h
76786@@ -1578,7 +1578,7 @@ struct block_device_operations {
76787 /* this callback is with swap_lock and sometimes page table lock held */
76788 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
76789 struct module *owner;
76790-};
76791+} __do_const;
76792
76793 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
76794 unsigned long);
76795diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
76796index afc1343..9735539 100644
76797--- a/include/linux/blktrace_api.h
76798+++ b/include/linux/blktrace_api.h
76799@@ -25,7 +25,7 @@ struct blk_trace {
76800 struct dentry *dropped_file;
76801 struct dentry *msg_file;
76802 struct list_head running_list;
76803- atomic_t dropped;
76804+ atomic_unchecked_t dropped;
76805 };
76806
76807 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
76808diff --git a/include/linux/cache.h b/include/linux/cache.h
76809index 4c57065..40346da 100644
76810--- a/include/linux/cache.h
76811+++ b/include/linux/cache.h
76812@@ -16,6 +16,14 @@
76813 #define __read_mostly
76814 #endif
76815
76816+#ifndef __read_only
76817+#ifdef CONFIG_PAX_KERNEXEC
76818+#error KERNEXEC requires __read_only
76819+#else
76820+#define __read_only __read_mostly
76821+#endif
76822+#endif
76823+
76824 #ifndef ____cacheline_aligned
76825 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
76826 #endif
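Where an architecture implements KERNEXEC, __read_only data sits in a section that is write-protected once init finishes; otherwise it degrades to __read_mostly. A minimal sketch, assuming the PaX pax_open_kernel()/pax_close_kernel() helpers, of the rare post-init update:

    static int sysctl_knob __read_only = 1;  /* plain writes OK during __init only */

    /* hypothetical updater for after boot */
    static void set_knob_sketch(int val)
    {
            pax_open_kernel();   /* temporarily lift the write protection */
            sysctl_knob = val;
            pax_close_kernel();
    }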
76827diff --git a/include/linux/capability.h b/include/linux/capability.h
76828index a6ee1f9..e1ca49d 100644
76829--- a/include/linux/capability.h
76830+++ b/include/linux/capability.h
76831@@ -212,8 +212,13 @@ extern bool capable(int cap);
76832 extern bool ns_capable(struct user_namespace *ns, int cap);
76833 extern bool inode_capable(const struct inode *inode, int cap);
76834 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
76835+extern bool capable_nolog(int cap);
76836+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
76837+extern bool inode_capable_nolog(const struct inode *inode, int cap);
76838
76839 /* audit system wants to get cap info from files as well */
76840 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
76841
76842+extern int is_privileged_binary(const struct dentry *dentry);
76843+
76844 #endif /* !_LINUX_CAPABILITY_H */
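The _nolog variants declared above perform the same checks as capable()/ns_capable()/inode_capable() but skip grsecurity's capability auditing, which suits speculative checks that routinely fail for unprivileged callers. A sketch (helper name hypothetical):

    #include <linux/capability.h>

    static bool may_take_fast_path_sketch(void)
    {
            /* expected to fail for normal users; must not spam the log */
            return capable_nolog(CAP_SYS_ADMIN);
    }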
76845diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
76846index 8609d57..86e4d79 100644
76847--- a/include/linux/cdrom.h
76848+++ b/include/linux/cdrom.h
76849@@ -87,7 +87,6 @@ struct cdrom_device_ops {
76850
76851 /* driver specifications */
76852 const int capability; /* capability flags */
76853- int n_minors; /* number of active minor devices */
76854 /* handle uniform packets for scsi type devices (scsi,atapi) */
76855 int (*generic_packet) (struct cdrom_device_info *,
76856 struct packet_command *);
76857diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
76858index 4ce9056..86caac6 100644
76859--- a/include/linux/cleancache.h
76860+++ b/include/linux/cleancache.h
76861@@ -31,7 +31,7 @@ struct cleancache_ops {
76862 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
76863 void (*invalidate_inode)(int, struct cleancache_filekey);
76864 void (*invalidate_fs)(int);
76865-};
76866+} __no_const;
76867
76868 extern struct cleancache_ops *
76869 cleancache_register_ops(struct cleancache_ops *ops);
76870diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
76871index 7e59253..d6e4cae 100644
76872--- a/include/linux/clk-provider.h
76873+++ b/include/linux/clk-provider.h
76874@@ -141,6 +141,7 @@ struct clk_ops {
76875 unsigned long);
76876 void (*init)(struct clk_hw *hw);
76877 };
76878+typedef struct clk_ops __no_const clk_ops_no_const;
76879
76880 /**
76881 * struct clk_init_data - holds init data that's common to all clocks and is
76882diff --git a/include/linux/compat.h b/include/linux/compat.h
76883index 19f6003..90b64f4 100644
76884--- a/include/linux/compat.h
76885+++ b/include/linux/compat.h
76886@@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
76887 compat_size_t __user *len_ptr);
76888
76889 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
76890-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
76891+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
76892 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
76893 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
76894 compat_ssize_t msgsz, int msgflg);
76895@@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child,
76896 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
76897 compat_ulong_t addr, compat_ulong_t data);
76898 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
76899- compat_long_t addr, compat_long_t data);
76900+ compat_ulong_t addr, compat_ulong_t data);
76901
76902 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
76903 /*
76904diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
76905index 2507fd2..55203f8 100644
76906--- a/include/linux/compiler-gcc4.h
76907+++ b/include/linux/compiler-gcc4.h
76908@@ -39,9 +39,34 @@
76909 # define __compiletime_warning(message) __attribute__((warning(message)))
76910 # define __compiletime_error(message) __attribute__((error(message)))
76911 #endif /* __CHECKER__ */
76912+
76913+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
76914+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
76915+#define __bos0(ptr) __bos((ptr), 0)
76916+#define __bos1(ptr) __bos((ptr), 1)
76917 #endif /* GCC_VERSION >= 40300 */
76918
76919 #if GCC_VERSION >= 40500
76920+
76921+#ifdef RANDSTRUCT_PLUGIN
76922+#define __randomize_layout __attribute__((randomize_layout))
76923+#define __no_randomize_layout __attribute__((no_randomize_layout))
76924+#endif
76925+
76926+#ifdef CONSTIFY_PLUGIN
76927+#define __no_const __attribute__((no_const))
76928+#define __do_const __attribute__((do_const))
76929+#endif
76930+
76931+#ifdef SIZE_OVERFLOW_PLUGIN
76932+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
76933+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
76934+#endif
76935+
76936+#ifdef LATENT_ENTROPY_PLUGIN
76937+#define __latent_entropy __attribute__((latent_entropy))
76938+#endif
76939+
76940 /*
76941 * Mark a position in code as unreachable. This can be used to
76942 * suppress control flow warnings after asm blocks that transfer
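Each attribute block above is only compiled in when the matching GCC plugin (randstruct, constify, size_overflow, latent_entropy) is active; the compiler.h hunk below supplies empty fallbacks. A sketch of the constify pattern these hunks apply throughout the patch, using a hypothetical ops struct:

    struct foo_ops {
            int (*probe)(void);
            void (*remove)(void);
    } __do_const;                          /* plugin forces instances const */

    typedef struct foo_ops __no_const foo_ops_no_const;

    static struct foo_ops default_ops = {  /* constified, lands in .rodata */
            .probe  = NULL,
            .remove = NULL,
    };

    static foo_ops_no_const runtime_ops;   /* rare instance that must stay writable */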
76943diff --git a/include/linux/compiler.h b/include/linux/compiler.h
76944index 92669cd..cc564c0 100644
76945--- a/include/linux/compiler.h
76946+++ b/include/linux/compiler.h
76947@@ -5,11 +5,14 @@
76948
76949 #ifdef __CHECKER__
76950 # define __user __attribute__((noderef, address_space(1)))
76951+# define __force_user __force __user
76952 # define __kernel __attribute__((address_space(0)))
76953+# define __force_kernel __force __kernel
76954 # define __safe __attribute__((safe))
76955 # define __force __attribute__((force))
76956 # define __nocast __attribute__((nocast))
76957 # define __iomem __attribute__((noderef, address_space(2)))
76958+# define __force_iomem __force __iomem
76959 # define __must_hold(x) __attribute__((context(x,1,1)))
76960 # define __acquires(x) __attribute__((context(x,0,1)))
76961 # define __releases(x) __attribute__((context(x,1,0)))
76962@@ -17,20 +20,37 @@
76963 # define __release(x) __context__(x,-1)
76964 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
76965 # define __percpu __attribute__((noderef, address_space(3)))
76966+# define __force_percpu __force __percpu
76967 #ifdef CONFIG_SPARSE_RCU_POINTER
76968 # define __rcu __attribute__((noderef, address_space(4)))
76969+# define __force_rcu __force __rcu
76970 #else
76971 # define __rcu
76972+# define __force_rcu
76973 #endif
76974 extern void __chk_user_ptr(const volatile void __user *);
76975 extern void __chk_io_ptr(const volatile void __iomem *);
76976 #else
76977-# define __user
76978-# define __kernel
76979+# ifdef CHECKER_PLUGIN
76980+//# define __user
76981+//# define __force_user
76982+//# define __kernel
76983+//# define __force_kernel
76984+# else
76985+# ifdef STRUCTLEAK_PLUGIN
76986+# define __user __attribute__((user))
76987+# else
76988+# define __user
76989+# endif
76990+# define __force_user
76991+# define __kernel
76992+# define __force_kernel
76993+# endif
76994 # define __safe
76995 # define __force
76996 # define __nocast
76997 # define __iomem
76998+# define __force_iomem
76999 # define __chk_user_ptr(x) (void)0
77000 # define __chk_io_ptr(x) (void)0
77001 # define __builtin_warning(x, y...) (1)
77002@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
77003 # define __release(x) (void)0
77004 # define __cond_lock(x,c) (c)
77005 # define __percpu
77006+# define __force_percpu
77007 # define __rcu
77008+# define __force_rcu
77009 #endif
77010
77011 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
77012@@ -275,6 +297,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
77013 # define __attribute_const__ /* unimplemented */
77014 #endif
77015
77016+#ifndef __randomize_layout
77017+# define __randomize_layout
77018+#endif
77019+
77020+#ifndef __no_randomize_layout
77021+# define __no_randomize_layout
77022+#endif
77023+
77024+#ifndef __no_const
77025+# define __no_const
77026+#endif
77027+
77028+#ifndef __do_const
77029+# define __do_const
77030+#endif
77031+
77032+#ifndef __size_overflow
77033+# define __size_overflow(...)
77034+#endif
77035+
77036+#ifndef __intentional_overflow
77037+# define __intentional_overflow(...)
77038+#endif
77039+
77040+#ifndef __latent_entropy
77041+# define __latent_entropy
77042+#endif
77043+
77044 /*
77045 * Tell gcc if a function is cold. The compiler will assume any path
77046 * directly leading to the call is unlikely.
77047@@ -284,6 +334,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
77048 #define __cold
77049 #endif
77050
77051+#ifndef __alloc_size
77052+#define __alloc_size(...)
77053+#endif
77054+
77055+#ifndef __bos
77056+#define __bos(ptr, arg)
77057+#endif
77058+
77059+#ifndef __bos0
77060+#define __bos0(ptr)
77061+#endif
77062+
77063+#ifndef __bos1
77064+#define __bos1(ptr)
77065+#endif
77066+
77067 /* Simple shorthand for a section definition */
77068 #ifndef __section
77069 # define __section(S) __attribute__ ((__section__(#S)))
77070@@ -349,7 +415,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
77071 * use is to mediate communication between process-level code and irq/NMI
77072 * handlers, all running on the same CPU.
77073 */
77074-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
77075+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
77076+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
77077
77078 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
77079 #ifdef CONFIG_KPROBES
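Casting through a pointer-to-const makes plain ACCESS_ONCE() read-only, so any write through it becomes a compile error and writers have to spell out ACCESS_ONCE_RW(). A self-contained restatement:

    #define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    static int flag;

    static void sketch(void)
    {
            int v = ACCESS_ONCE(flag);     /* reads still work */
            ACCESS_ONCE_RW(flag) = v + 1;  /* writes must opt in */
            /* ACCESS_ONCE(flag) = 0;         no longer compiles */
    }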
77080diff --git a/include/linux/completion.h b/include/linux/completion.h
77081index 5d5aaae..0ea9b84 100644
77082--- a/include/linux/completion.h
77083+++ b/include/linux/completion.h
77084@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
77085
77086 extern void wait_for_completion(struct completion *);
77087 extern void wait_for_completion_io(struct completion *);
77088-extern int wait_for_completion_interruptible(struct completion *x);
77089-extern int wait_for_completion_killable(struct completion *x);
77090+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
77091+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
77092 extern unsigned long wait_for_completion_timeout(struct completion *x,
77093- unsigned long timeout);
77094+ unsigned long timeout) __intentional_overflow(-1);
77095 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
77096- unsigned long timeout);
77097+ unsigned long timeout) __intentional_overflow(-1);
77098 extern long wait_for_completion_interruptible_timeout(
77099- struct completion *x, unsigned long timeout);
77100+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
77101 extern long wait_for_completion_killable_timeout(
77102- struct completion *x, unsigned long timeout);
77103+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
77104 extern bool try_wait_for_completion(struct completion *x);
77105 extern bool completion_done(struct completion *x);
77106
77107diff --git a/include/linux/configfs.h b/include/linux/configfs.h
77108index 34025df..d94bbbc 100644
77109--- a/include/linux/configfs.h
77110+++ b/include/linux/configfs.h
77111@@ -125,7 +125,7 @@ struct configfs_attribute {
77112 const char *ca_name;
77113 struct module *ca_owner;
77114 umode_t ca_mode;
77115-};
77116+} __do_const;
77117
77118 /*
77119 * Users often need to create attribute structures for their configurable
77120diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
77121index dc196bb..c55a50f 100644
77122--- a/include/linux/cpufreq.h
77123+++ b/include/linux/cpufreq.h
77124@@ -189,6 +189,7 @@ struct global_attr {
77125 ssize_t (*store)(struct kobject *a, struct attribute *b,
77126 const char *c, size_t count);
77127 };
77128+typedef struct global_attr __no_const global_attr_no_const;
77129
77130 #define define_one_global_ro(_name) \
77131 static struct global_attr _name = \
77132@@ -225,7 +226,7 @@ struct cpufreq_driver {
77133 int (*suspend) (struct cpufreq_policy *policy);
77134 int (*resume) (struct cpufreq_policy *policy);
77135 struct freq_attr **attr;
77136-};
77137+} __do_const;
77138
77139 /* flags */
77140 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
77141diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
77142index 50fcbb0..9d2dbd9 100644
77143--- a/include/linux/cpuidle.h
77144+++ b/include/linux/cpuidle.h
77145@@ -50,7 +50,8 @@ struct cpuidle_state {
77146 int index);
77147
77148 int (*enter_dead) (struct cpuidle_device *dev, int index);
77149-};
77150+} __do_const;
77151+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
77152
77153 /* Idle State Flags */
77154 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
77155@@ -192,7 +193,7 @@ struct cpuidle_governor {
77156 void (*reflect) (struct cpuidle_device *dev, int index);
77157
77158 struct module *owner;
77159-};
77160+} __do_const;
77161
77162 #ifdef CONFIG_CPU_IDLE
77163 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
77164diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
77165index d08e4d2..95fad61 100644
77166--- a/include/linux/cpumask.h
77167+++ b/include/linux/cpumask.h
77168@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
77169 }
77170
77171 /* Valid inputs for n are -1 and 0. */
77172-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
77173+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
77174 {
77175 return n+1;
77176 }
77177
77178-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
77179+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
77180 {
77181 return n+1;
77182 }
77183
77184-static inline unsigned int cpumask_next_and(int n,
77185+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
77186 const struct cpumask *srcp,
77187 const struct cpumask *andp)
77188 {
77189@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
77190 *
77191 * Returns >= nr_cpu_ids if no further cpus set.
77192 */
77193-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
77194+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
77195 {
77196 /* -1 is a legal arg here. */
77197 if (n != -1)
77198@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
77199 *
77200 * Returns >= nr_cpu_ids if no further cpus unset.
77201 */
77202-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
77203+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
77204 {
77205 /* -1 is a legal arg here. */
77206 if (n != -1)
77207@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
77208 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
77209 }
77210
77211-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
77212+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
77213 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
77214
77215 /**
77216diff --git a/include/linux/cred.h b/include/linux/cred.h
77217index 04421e8..117e17a 100644
77218--- a/include/linux/cred.h
77219+++ b/include/linux/cred.h
77220@@ -35,7 +35,7 @@ struct group_info {
77221 int nblocks;
77222 kgid_t small_block[NGROUPS_SMALL];
77223 kgid_t *blocks[0];
77224-};
77225+} __randomize_layout;
77226
77227 /**
77228 * get_group_info - Get a reference to a group info structure
77229@@ -136,7 +136,7 @@ struct cred {
77230 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
77231 struct group_info *group_info; /* supplementary groups for euid/fsgid */
77232 struct rcu_head rcu; /* RCU deletion hook */
77233-};
77234+} __randomize_layout;
77235
77236 extern void __put_cred(struct cred *);
77237 extern void exit_creds(struct task_struct *);
77238@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
77239 static inline void validate_process_creds(void)
77240 {
77241 }
77242+static inline void validate_task_creds(struct task_struct *task)
77243+{
77244+}
77245 #endif
77246
77247 /**
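__randomize_layout lets the randstruct plugin shuffle field order at build time, so code touching such structs must name fields explicitly; positional initializers and hard-coded offsets silently break. A sketch with a hypothetical struct:

    struct creds_like {
            int   uid;
            int   gid;
            void *security;
    } __randomize_layout;

    /* safe: designated initializers are immune to reordering */
    static struct creds_like a = { .uid = 0, .gid = 0, .security = NULL };

    /* unsafe under randomization, the field order is no longer fixed:
     *   static struct creds_like b = { 0, 0, NULL };
     */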
77248diff --git a/include/linux/crypto.h b/include/linux/crypto.h
77249index b92eadf..b4ecdc1 100644
77250--- a/include/linux/crypto.h
77251+++ b/include/linux/crypto.h
77252@@ -373,7 +373,7 @@ struct cipher_tfm {
77253 const u8 *key, unsigned int keylen);
77254 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
77255 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
77256-};
77257+} __no_const;
77258
77259 struct hash_tfm {
77260 int (*init)(struct hash_desc *desc);
77261@@ -394,13 +394,13 @@ struct compress_tfm {
77262 int (*cot_decompress)(struct crypto_tfm *tfm,
77263 const u8 *src, unsigned int slen,
77264 u8 *dst, unsigned int *dlen);
77265-};
77266+} __no_const;
77267
77268 struct rng_tfm {
77269 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
77270 unsigned int dlen);
77271 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
77272-};
77273+} __no_const;
77274
77275 #define crt_ablkcipher crt_u.ablkcipher
77276 #define crt_aead crt_u.aead
77277diff --git a/include/linux/ctype.h b/include/linux/ctype.h
77278index 653589e..4ef254a 100644
77279--- a/include/linux/ctype.h
77280+++ b/include/linux/ctype.h
77281@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
77282 * Fast implementation of tolower() for internal usage. Do not use in your
77283 * code.
77284 */
77285-static inline char _tolower(const char c)
77286+static inline unsigned char _tolower(const unsigned char c)
77287 {
77288 return c | 0x20;
77289 }
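The signedness change matters for bytes at or above 0x80: with a plain (often signed) char they go negative and `c | 0x20` sign-extends, while unsigned char keeps the result in 0..255. A runnable userspace illustration:

    #include <stdio.h>

    static inline unsigned char _tolower(const unsigned char c)
    {
            return c | 0x20;
    }

    int main(void)
    {
            printf("%c\n", _tolower('A'));       /* a */
            printf("0x%02x\n", _tolower(0xC0));  /* 0xe0, no sign extension */
            return 0;
    }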
77290diff --git a/include/linux/dcache.h b/include/linux/dcache.h
77291index bf72e9a..4ca7927 100644
77292--- a/include/linux/dcache.h
77293+++ b/include/linux/dcache.h
77294@@ -133,7 +133,7 @@ struct dentry {
77295 } d_u;
77296 struct list_head d_subdirs; /* our children */
77297 struct hlist_node d_alias; /* inode alias list */
77298-};
77299+} __randomize_layout;
77300
77301 /*
77302 * dentry->d_lock spinlock nesting subclasses:
77303diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
77304index 7925bf0..d5143d2 100644
77305--- a/include/linux/decompress/mm.h
77306+++ b/include/linux/decompress/mm.h
77307@@ -77,7 +77,7 @@ static void free(void *where)
77308 * warnings when not needed (indeed large_malloc / large_free are not
77309 * needed by inflate */
77310
77311-#define malloc(a) kmalloc(a, GFP_KERNEL)
77312+#define malloc(a) kmalloc((a), GFP_KERNEL)
77313 #define free(a) kfree(a)
77314
77315 #define large_malloc(a) vmalloc(a)
77316diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
77317index d48dc00..211ee54 100644
77318--- a/include/linux/devfreq.h
77319+++ b/include/linux/devfreq.h
77320@@ -114,7 +114,7 @@ struct devfreq_governor {
77321 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
77322 int (*event_handler)(struct devfreq *devfreq,
77323 unsigned int event, void *data);
77324-};
77325+} __do_const;
77326
77327 /**
77328 * struct devfreq - Device devfreq structure
77329diff --git a/include/linux/device.h b/include/linux/device.h
77330index 952b010..d5b7691 100644
77331--- a/include/linux/device.h
77332+++ b/include/linux/device.h
77333@@ -310,7 +310,7 @@ struct subsys_interface {
77334 struct list_head node;
77335 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
77336 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
77337-};
77338+} __do_const;
77339
77340 int subsys_interface_register(struct subsys_interface *sif);
77341 void subsys_interface_unregister(struct subsys_interface *sif);
77342@@ -506,7 +506,7 @@ struct device_type {
77343 void (*release)(struct device *dev);
77344
77345 const struct dev_pm_ops *pm;
77346-};
77347+} __do_const;
77348
77349 /* interface for exporting device attributes */
77350 struct device_attribute {
77351@@ -516,11 +516,12 @@ struct device_attribute {
77352 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
77353 const char *buf, size_t count);
77354 };
77355+typedef struct device_attribute __no_const device_attribute_no_const;
77356
77357 struct dev_ext_attribute {
77358 struct device_attribute attr;
77359 void *var;
77360-};
77361+} __do_const;
77362
77363 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
77364 char *buf);
77365diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
77366index fd4aee2..1f28db9 100644
77367--- a/include/linux/dma-mapping.h
77368+++ b/include/linux/dma-mapping.h
77369@@ -54,7 +54,7 @@ struct dma_map_ops {
77370 u64 (*get_required_mask)(struct device *dev);
77371 #endif
77372 int is_phys;
77373-};
77374+} __do_const;
77375
77376 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
77377
77378diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
77379index 41cf0c3..f3b771c 100644
77380--- a/include/linux/dmaengine.h
77381+++ b/include/linux/dmaengine.h
77382@@ -1114,9 +1114,9 @@ struct dma_pinned_list {
77383 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
77384 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
77385
77386-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
77387+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
77388 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
77389-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
77390+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
77391 struct dma_pinned_list *pinned_list, struct page *page,
77392 unsigned int offset, size_t len);
77393
77394diff --git a/include/linux/efi.h b/include/linux/efi.h
77395index 11ce678..7b8c69c 100644
77396--- a/include/linux/efi.h
77397+++ b/include/linux/efi.h
77398@@ -764,6 +764,7 @@ struct efivar_operations {
77399 efi_set_variable_t *set_variable;
77400 efi_query_variable_store_t *query_variable_store;
77401 };
77402+typedef struct efivar_operations __no_const efivar_operations_no_const;
77403
77404 struct efivars {
77405 /*
77406diff --git a/include/linux/elf.h b/include/linux/elf.h
77407index 67a5fa7..b817372 100644
77408--- a/include/linux/elf.h
77409+++ b/include/linux/elf.h
77410@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
77411 #define elf_note elf32_note
77412 #define elf_addr_t Elf32_Off
77413 #define Elf_Half Elf32_Half
77414+#define elf_dyn Elf32_Dyn
77415
77416 #else
77417
77418@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
77419 #define elf_note elf64_note
77420 #define elf_addr_t Elf64_Off
77421 #define Elf_Half Elf64_Half
77422+#define elf_dyn Elf64_Dyn
77423
77424 #endif
77425
77426diff --git a/include/linux/err.h b/include/linux/err.h
77427index 15f92e0..e825a8e 100644
77428--- a/include/linux/err.h
77429+++ b/include/linux/err.h
77430@@ -19,12 +19,12 @@
77431
77432 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
77433
77434-static inline void * __must_check ERR_PTR(long error)
77435+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
77436 {
77437 return (void *) error;
77438 }
77439
77440-static inline long __must_check PTR_ERR(__force const void *ptr)
77441+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
77442 {
77443 return (long) ptr;
77444 }
77445diff --git a/include/linux/extcon.h b/include/linux/extcon.h
77446index 21c59af..6057a03 100644
77447--- a/include/linux/extcon.h
77448+++ b/include/linux/extcon.h
77449@@ -135,7 +135,7 @@ struct extcon_dev {
77450 /* /sys/class/extcon/.../mutually_exclusive/... */
77451 struct attribute_group attr_g_muex;
77452 struct attribute **attrs_muex;
77453- struct device_attribute *d_attrs_muex;
77454+ device_attribute_no_const *d_attrs_muex;
77455 };
77456
77457 /**
77458diff --git a/include/linux/fb.h b/include/linux/fb.h
77459index 70c4836..ff3daec 100644
77460--- a/include/linux/fb.h
77461+++ b/include/linux/fb.h
77462@@ -304,7 +304,7 @@ struct fb_ops {
77463 /* called at KDB enter and leave time to prepare the console */
77464 int (*fb_debug_enter)(struct fb_info *info);
77465 int (*fb_debug_leave)(struct fb_info *info);
77466-};
77467+} __do_const;
77468
77469 #ifdef CONFIG_FB_TILEBLITTING
77470 #define FB_TILE_CURSOR_NONE 0
77471diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
77472index 085197b..0fa6f0b 100644
77473--- a/include/linux/fdtable.h
77474+++ b/include/linux/fdtable.h
77475@@ -95,7 +95,7 @@ struct files_struct *get_files_struct(struct task_struct *);
77476 void put_files_struct(struct files_struct *fs);
77477 void reset_files_struct(struct files_struct *);
77478 int unshare_files(struct files_struct **);
77479-struct files_struct *dup_fd(struct files_struct *, int *);
77480+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
77481 void do_close_on_exec(struct files_struct *);
77482 int iterate_fd(struct files_struct *, unsigned,
77483 int (*)(const void *, struct file *, unsigned),
77484diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
77485index 8293262..2b3b8bd 100644
77486--- a/include/linux/frontswap.h
77487+++ b/include/linux/frontswap.h
77488@@ -11,7 +11,7 @@ struct frontswap_ops {
77489 int (*load)(unsigned, pgoff_t, struct page *);
77490 void (*invalidate_page)(unsigned, pgoff_t);
77491 void (*invalidate_area)(unsigned);
77492-};
77493+} __no_const;
77494
77495 extern bool frontswap_enabled;
77496 extern struct frontswap_ops *
77497diff --git a/include/linux/fs.h b/include/linux/fs.h
77498index 121f11f..0f2a863 100644
77499--- a/include/linux/fs.h
77500+++ b/include/linux/fs.h
77501@@ -423,7 +423,7 @@ struct address_space {
77502 spinlock_t private_lock; /* for use by the address_space */
77503 struct list_head private_list; /* ditto */
77504 void *private_data; /* ditto */
77505-} __attribute__((aligned(sizeof(long))));
77506+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
77507 /*
77508 * On most architectures that alignment is already the case; but
77509 * must be enforced here for CRIS, to let the least significant bit
77510@@ -466,7 +466,7 @@ struct block_device {
77511 int bd_fsfreeze_count;
77512 /* Mutex for freeze */
77513 struct mutex bd_fsfreeze_mutex;
77514-};
77515+} __randomize_layout;
77516
77517 /*
77518 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
77519@@ -610,7 +610,7 @@ struct inode {
77520 atomic_t i_readcount; /* struct files open RO */
77521 #endif
77522 void *i_private; /* fs or device private pointer */
77523-};
77524+} __randomize_layout;
77525
77526 static inline int inode_unhashed(struct inode *inode)
77527 {
77528@@ -808,7 +808,7 @@ struct file {
77529 #ifdef CONFIG_DEBUG_WRITECOUNT
77530 unsigned long f_mnt_write_state;
77531 #endif
77532-};
77533+} __randomize_layout;
77534
77535 struct file_handle {
77536 __u32 handle_bytes;
77537@@ -978,7 +978,7 @@ struct file_lock {
77538 int state; /* state of grant or error if -ve */
77539 } afs;
77540 } fl_u;
77541-};
77542+} __randomize_layout;
77543
77544 /* The following constant reflects the upper bound of the file/locking space */
77545 #ifndef OFFSET_MAX
77546@@ -1325,7 +1325,7 @@ struct super_block {
77547 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
77548 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
77549 struct rcu_head rcu;
77550-};
77551+} __randomize_layout;
77552
77553 extern struct timespec current_fs_time(struct super_block *sb);
77554
77555@@ -1547,7 +1547,8 @@ struct file_operations {
77556 long (*fallocate)(struct file *file, int mode, loff_t offset,
77557 loff_t len);
77558 int (*show_fdinfo)(struct seq_file *m, struct file *f);
77559-};
77560+} __do_const __randomize_layout;
77561+typedef struct file_operations __no_const file_operations_no_const;
77562
77563 struct inode_operations {
77564 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
77565@@ -2808,4 +2809,14 @@ static inline bool dir_relax(struct inode *inode)
77566 return !IS_DEADDIR(inode);
77567 }
77568
77569+static inline bool is_sidechannel_device(const struct inode *inode)
77570+{
77571+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
77572+ umode_t mode = inode->i_mode;
77573+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
77574+#else
77575+ return false;
77576+#endif
77577+}
77578+
77579 #endif /* _LINUX_FS_H */
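is_sidechannel_device() flags character and block devices that are world-readable or world-writable; the fsnotify.h hunk further below uses it to suppress access/modify events on such nodes, denying watchers a timing side channel. A userspace restatement of the predicate:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>

    static bool is_sidechannel_mode(mode_t mode)
    {
            return (S_ISCHR(mode) || S_ISBLK(mode)) &&
                   (mode & (S_IROTH | S_IWOTH));
    }

    int main(void)
    {
            printf("%d\n", is_sidechannel_mode(S_IFCHR | 0666)); /* 1 */
            printf("%d\n", is_sidechannel_mode(S_IFCHR | 0600)); /* 0 */
            return 0;
    }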
77580diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
77581index 0efc3e6..fd23610 100644
77582--- a/include/linux/fs_struct.h
77583+++ b/include/linux/fs_struct.h
77584@@ -6,13 +6,13 @@
77585 #include <linux/seqlock.h>
77586
77587 struct fs_struct {
77588- int users;
77589+ atomic_t users;
77590 spinlock_t lock;
77591 seqcount_t seq;
77592 int umask;
77593 int in_exec;
77594 struct path root, pwd;
77595-};
77596+} __randomize_layout;
77597
77598 extern struct kmem_cache *fs_cachep;
77599
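With users turned into an atomic_t, the reference count can be taken and dropped without holding fs->lock, and PaX REFCOUNT instruments it against overflow. A sketch of the implied call-site change (struct and helper names hypothetical):

    #include <linux/atomic.h>

    struct fs_struct_like {
            atomic_t users;
            /* ... */
    };

    static void get_fs_sketch(struct fs_struct_like *fs)
    {
            atomic_inc(&fs->users);                 /* was: fs->users++ */
    }

    static int put_fs_sketch(struct fs_struct_like *fs)
    {
            return atomic_dec_and_test(&fs->users); /* was: --fs->users == 0 */
    }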
77600diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
77601index 7714849..a4a5c7a 100644
77602--- a/include/linux/fscache-cache.h
77603+++ b/include/linux/fscache-cache.h
77604@@ -113,7 +113,7 @@ struct fscache_operation {
77605 fscache_operation_release_t release;
77606 };
77607
77608-extern atomic_t fscache_op_debug_id;
77609+extern atomic_unchecked_t fscache_op_debug_id;
77610 extern void fscache_op_work_func(struct work_struct *work);
77611
77612 extern void fscache_enqueue_operation(struct fscache_operation *);
77613@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
77614 INIT_WORK(&op->work, fscache_op_work_func);
77615 atomic_set(&op->usage, 1);
77616 op->state = FSCACHE_OP_ST_INITIALISED;
77617- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
77618+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
77619 op->processor = processor;
77620 op->release = release;
77621 INIT_LIST_HEAD(&op->pend_link);
77622diff --git a/include/linux/fscache.h b/include/linux/fscache.h
77623index 115bb81..e7b812b 100644
77624--- a/include/linux/fscache.h
77625+++ b/include/linux/fscache.h
77626@@ -152,7 +152,7 @@ struct fscache_cookie_def {
77627 * - this is mandatory for any object that may have data
77628 */
77629 void (*now_uncached)(void *cookie_netfs_data);
77630-};
77631+} __do_const;
77632
77633 /*
77634 * fscache cached network filesystem type
77635diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
77636index 1c804b0..1432c2b 100644
77637--- a/include/linux/fsnotify.h
77638+++ b/include/linux/fsnotify.h
77639@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
77640 struct inode *inode = file_inode(file);
77641 __u32 mask = FS_ACCESS;
77642
77643+ if (is_sidechannel_device(inode))
77644+ return;
77645+
77646 if (S_ISDIR(inode->i_mode))
77647 mask |= FS_ISDIR;
77648
77649@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
77650 struct inode *inode = file_inode(file);
77651 __u32 mask = FS_MODIFY;
77652
77653+ if (is_sidechannel_device(inode))
77654+ return;
77655+
77656 if (S_ISDIR(inode->i_mode))
77657 mask |= FS_ISDIR;
77658
77659@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
77660 */
77661 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
77662 {
77663- return kstrdup(name, GFP_KERNEL);
77664+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
77665 }
77666
77667 /*
77668diff --git a/include/linux/genhd.h b/include/linux/genhd.h
77669index 9f3c275..8bdff5d 100644
77670--- a/include/linux/genhd.h
77671+++ b/include/linux/genhd.h
77672@@ -194,7 +194,7 @@ struct gendisk {
77673 struct kobject *slave_dir;
77674
77675 struct timer_rand_state *random;
77676- atomic_t sync_io; /* RAID */
77677+ atomic_unchecked_t sync_io; /* RAID */
77678 struct disk_events *ev;
77679 #ifdef CONFIG_BLK_DEV_INTEGRITY
77680 struct blk_integrity *integrity;
77681@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
77682 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
77683
77684 /* drivers/char/random.c */
77685-extern void add_disk_randomness(struct gendisk *disk);
77686+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
77687 extern void rand_initialize_disk(struct gendisk *disk);
77688
77689 static inline sector_t get_start_sect(struct block_device *bdev)
77690diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
77691index c0894dd..2fbf10c 100644
77692--- a/include/linux/genl_magic_func.h
77693+++ b/include/linux/genl_magic_func.h
77694@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
77695 },
77696
77697 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
77698-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
77699+static struct genl_ops ZZZ_genl_ops[] = {
77700 #include GENL_MAGIC_INCLUDE_FILE
77701 };
77702
77703diff --git a/include/linux/gfp.h b/include/linux/gfp.h
77704index 9b4dd49..61fd41d 100644
77705--- a/include/linux/gfp.h
77706+++ b/include/linux/gfp.h
77707@@ -35,6 +35,13 @@ struct vm_area_struct;
77708 #define ___GFP_NO_KSWAPD 0x400000u
77709 #define ___GFP_OTHER_NODE 0x800000u
77710 #define ___GFP_WRITE 0x1000000u
77711+
77712+#ifdef CONFIG_PAX_USERCOPY_SLABS
77713+#define ___GFP_USERCOPY 0x2000000u
77714+#else
77715+#define ___GFP_USERCOPY 0
77716+#endif
77717+
77718 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
77719
77720 /*
77721@@ -92,6 +99,7 @@ struct vm_area_struct;
77722 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
77723 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
77724 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
77725+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
77726
77727 /*
77728 * This may seem redundant, but it's a way of annotating false positives vs.
77729@@ -99,7 +107,7 @@ struct vm_area_struct;
77730 */
77731 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
77732
77733-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
77734+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
77735 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
77736
77737 /* This equals 0, but use constants in case they ever change */
77738@@ -153,6 +161,8 @@ struct vm_area_struct;
77739 /* 4GB DMA on some platforms */
77740 #define GFP_DMA32 __GFP_DMA32
77741
77742+#define GFP_USERCOPY __GFP_USERCOPY
77743+
77744 /* Convert GFP flags to their corresponding migrate type */
77745 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
77746 {
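___GFP_USERCOPY claims bit 25, which is why __GFP_BITS_SHIFT grows from 25 to 26: every flag has to sit below the shift to survive masking with __GFP_BITS_MASK. A build-time restatement of that invariant (userspace-style, values copied from the hunk, names prefixed to mark them as a sketch):

    #define SKETCH___GFP_USERCOPY   0x2000000u  /* bit 25 */
    #define SKETCH___GFP_BITS_SHIFT 26

    _Static_assert(SKETCH___GFP_USERCOPY < (1u << SKETCH___GFP_BITS_SHIFT),
                   "usercopy flag must fit under the GFP bits shift");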
77747diff --git a/include/linux/gracl.h b/include/linux/gracl.h
77748new file mode 100644
77749index 0000000..edb2cb6
77750--- /dev/null
77751+++ b/include/linux/gracl.h
77752@@ -0,0 +1,340 @@
77753+#ifndef GR_ACL_H
77754+#define GR_ACL_H
77755+
77756+#include <linux/grdefs.h>
77757+#include <linux/resource.h>
77758+#include <linux/capability.h>
77759+#include <linux/dcache.h>
77760+#include <asm/resource.h>
77761+
77762+/* Major status information */
77763+
77764+#define GR_VERSION "grsecurity 3.0"
77765+#define GRSECURITY_VERSION 0x3000
77766+
77767+enum {
77768+ GR_SHUTDOWN = 0,
77769+ GR_ENABLE = 1,
77770+ GR_SPROLE = 2,
77771+ GR_OLDRELOAD = 3,
77772+ GR_SEGVMOD = 4,
77773+ GR_STATUS = 5,
77774+ GR_UNSPROLE = 6,
77775+ GR_PASSSET = 7,
77776+ GR_SPROLEPAM = 8,
77777+ GR_RELOAD = 9,
77778+};
77779+
77780+/* Password setup definitions
77781+ * kernel/grhash.c */
77782+enum {
77783+ GR_PW_LEN = 128,
77784+ GR_SALT_LEN = 16,
77785+ GR_SHA_LEN = 32,
77786+};
77787+
77788+enum {
77789+ GR_SPROLE_LEN = 64,
77790+};
77791+
77792+enum {
77793+ GR_NO_GLOB = 0,
77794+ GR_REG_GLOB,
77795+ GR_CREATE_GLOB
77796+};
77797+
77798+#define GR_NLIMITS 32
77799+
77800+/* Begin Data Structures */
77801+
77802+struct sprole_pw {
77803+ unsigned char *rolename;
77804+ unsigned char salt[GR_SALT_LEN];
77805+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
77806+};
77807+
77808+struct name_entry {
77809+ __u32 key;
77810+ ino_t inode;
77811+ dev_t device;
77812+ char *name;
77813+ __u16 len;
77814+ __u8 deleted;
77815+ struct name_entry *prev;
77816+ struct name_entry *next;
77817+};
77818+
77819+struct inodev_entry {
77820+ struct name_entry *nentry;
77821+ struct inodev_entry *prev;
77822+ struct inodev_entry *next;
77823+};
77824+
77825+struct acl_role_db {
77826+ struct acl_role_label **r_hash;
77827+ __u32 r_size;
77828+};
77829+
77830+struct inodev_db {
77831+ struct inodev_entry **i_hash;
77832+ __u32 i_size;
77833+};
77834+
77835+struct name_db {
77836+ struct name_entry **n_hash;
77837+ __u32 n_size;
77838+};
77839+
77840+struct crash_uid {
77841+ uid_t uid;
77842+ unsigned long expires;
77843+};
77844+
77845+struct gr_hash_struct {
77846+ void **table;
77847+ void **nametable;
77848+ void *first;
77849+ __u32 table_size;
77850+ __u32 used_size;
77851+ int type;
77852+};
77853+
77854+/* Userspace Grsecurity ACL data structures */
77855+
77856+struct acl_subject_label {
77857+ char *filename;
77858+ ino_t inode;
77859+ dev_t device;
77860+ __u32 mode;
77861+ kernel_cap_t cap_mask;
77862+ kernel_cap_t cap_lower;
77863+ kernel_cap_t cap_invert_audit;
77864+
77865+ struct rlimit res[GR_NLIMITS];
77866+ __u32 resmask;
77867+
77868+ __u8 user_trans_type;
77869+ __u8 group_trans_type;
77870+ uid_t *user_transitions;
77871+ gid_t *group_transitions;
77872+ __u16 user_trans_num;
77873+ __u16 group_trans_num;
77874+
77875+ __u32 sock_families[2];
77876+ __u32 ip_proto[8];
77877+ __u32 ip_type;
77878+ struct acl_ip_label **ips;
77879+ __u32 ip_num;
77880+ __u32 inaddr_any_override;
77881+
77882+ __u32 crashes;
77883+ unsigned long expires;
77884+
77885+ struct acl_subject_label *parent_subject;
77886+ struct gr_hash_struct *hash;
77887+ struct acl_subject_label *prev;
77888+ struct acl_subject_label *next;
77889+
77890+ struct acl_object_label **obj_hash;
77891+ __u32 obj_hash_size;
77892+ __u16 pax_flags;
77893+};
77894+
77895+struct role_allowed_ip {
77896+ __u32 addr;
77897+ __u32 netmask;
77898+
77899+ struct role_allowed_ip *prev;
77900+ struct role_allowed_ip *next;
77901+};
77902+
77903+struct role_transition {
77904+ char *rolename;
77905+
77906+ struct role_transition *prev;
77907+ struct role_transition *next;
77908+};
77909+
77910+struct acl_role_label {
77911+ char *rolename;
77912+ uid_t uidgid;
77913+ __u16 roletype;
77914+
77915+ __u16 auth_attempts;
77916+ unsigned long expires;
77917+
77918+ struct acl_subject_label *root_label;
77919+ struct gr_hash_struct *hash;
77920+
77921+ struct acl_role_label *prev;
77922+ struct acl_role_label *next;
77923+
77924+ struct role_transition *transitions;
77925+ struct role_allowed_ip *allowed_ips;
77926+ uid_t *domain_children;
77927+ __u16 domain_child_num;
77928+
77929+ umode_t umask;
77930+
77931+ struct acl_subject_label **subj_hash;
77932+ __u32 subj_hash_size;
77933+};
77934+
77935+struct user_acl_role_db {
77936+ struct acl_role_label **r_table;
77937+ __u32 num_pointers; /* Number of allocations to track */
77938+ __u32 num_roles; /* Number of roles */
77939+ __u32 num_domain_children; /* Number of domain children */
77940+ __u32 num_subjects; /* Number of subjects */
77941+ __u32 num_objects; /* Number of objects */
77942+};
77943+
77944+struct acl_object_label {
77945+ char *filename;
77946+ ino_t inode;
77947+ dev_t device;
77948+ __u32 mode;
77949+
77950+ struct acl_subject_label *nested;
77951+ struct acl_object_label *globbed;
77952+
77953+ /* next two structures not used */
77954+
77955+ struct acl_object_label *prev;
77956+ struct acl_object_label *next;
77957+};
77958+
77959+struct acl_ip_label {
77960+ char *iface;
77961+ __u32 addr;
77962+ __u32 netmask;
77963+ __u16 low, high;
77964+ __u8 mode;
77965+ __u32 type;
77966+ __u32 proto[8];
77967+
77968+ /* next two structures not used */
77969+
77970+ struct acl_ip_label *prev;
77971+ struct acl_ip_label *next;
77972+};
77973+
77974+struct gr_arg {
77975+ struct user_acl_role_db role_db;
77976+ unsigned char pw[GR_PW_LEN];
77977+ unsigned char salt[GR_SALT_LEN];
77978+ unsigned char sum[GR_SHA_LEN];
77979+ unsigned char sp_role[GR_SPROLE_LEN];
77980+ struct sprole_pw *sprole_pws;
77981+ dev_t segv_device;
77982+ ino_t segv_inode;
77983+ uid_t segv_uid;
77984+ __u16 num_sprole_pws;
77985+ __u16 mode;
77986+};
77987+
77988+struct gr_arg_wrapper {
77989+ struct gr_arg *arg;
77990+ __u32 version;
77991+ __u32 size;
77992+};
77993+
77994+struct subject_map {
77995+ struct acl_subject_label *user;
77996+ struct acl_subject_label *kernel;
77997+ struct subject_map *prev;
77998+ struct subject_map *next;
77999+};
78000+
78001+struct acl_subj_map_db {
78002+ struct subject_map **s_hash;
78003+ __u32 s_size;
78004+};
78005+
78006+struct gr_policy_state {
78007+ struct sprole_pw **acl_special_roles;
78008+ __u16 num_sprole_pws;
78009+ struct acl_role_label *kernel_role;
78010+ struct acl_role_label *role_list;
78011+ struct acl_role_label *default_role;
78012+ struct acl_role_db acl_role_set;
78013+ struct acl_subj_map_db subj_map_set;
78014+ struct name_db name_set;
78015+ struct inodev_db inodev_set;
78016+};
78017+
78018+struct gr_alloc_state {
78019+ unsigned long alloc_stack_next;
78020+ unsigned long alloc_stack_size;
78021+ void **alloc_stack;
78022+};
78023+
78024+struct gr_reload_state {
78025+ struct gr_policy_state oldpolicy;
78026+ struct gr_alloc_state oldalloc;
78027+ struct gr_policy_state newpolicy;
78028+ struct gr_alloc_state newalloc;
78029+ struct gr_policy_state *oldpolicy_ptr;
78030+ struct gr_alloc_state *oldalloc_ptr;
78031+ unsigned char oldmode;
78032+};
78033+
78034+/* End Data Structures Section */
78035+
78036+/* Hash functions generated by empirical testing by Brad Spengler
78037+ Makes good use of the low bits of the inode. Generally 0-1 times
78038+ in loop for successful match. 0-3 for unsuccessful match.
78039+ Shift/add algorithm with modulus of table size and an XOR*/
78040+
78041+static __inline__ unsigned int
78042+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
78043+{
78044+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
78045+}
78046+
78047+static __inline__ unsigned int
78048+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
78049+{
78050+ return ((const unsigned long)userp % sz);
78051+}
78052+
78053+static __inline__ unsigned int
78054+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
78055+{
78056+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
78057+}
78058+
78059+static __inline__ unsigned int
78060+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
78061+{
78062+ return full_name_hash((const unsigned char *)name, len) % sz;
78063+}
78064+
78065+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
78066+ subj = NULL; \
78067+ iter = 0; \
78068+ while (iter < role->subj_hash_size) { \
78069+ if (subj == NULL) \
78070+ subj = role->subj_hash[iter]; \
78071+ if (subj == NULL) { \
78072+ iter++; \
78073+ continue; \
78074+ }
78075+
78076+#define FOR_EACH_SUBJECT_END(subj,iter) \
78077+ subj = subj->next; \
78078+ if (subj == NULL) \
78079+ iter++; \
78080+ }
78081+
78082+
78083+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
78084+ subj = role->hash->first; \
78085+ while (subj != NULL) {
78086+
78087+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
78088+ subj = subj->next; \
78089+ }
78090+
78091+#endif
78092+
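The inline hash helpers pick a bucket (gr_fhash() for inode/device pairs, gr_nhash() for names), and the FOR_EACH_SUBJECT macros walk every chain of a role's subject hash table. A sketch of the intended iteration, with visit() hypothetical:

    static void visit_all_subjects(struct acl_role_label *role)
    {
            struct acl_subject_label *subj;
            unsigned int iter;

            FOR_EACH_SUBJECT_START(role, subj, iter)
                    visit(subj);   /* each hashed subject, exactly once */
            FOR_EACH_SUBJECT_END(subj, iter)
    }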
78093diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
78094new file mode 100644
78095index 0000000..33ebd1f
78096--- /dev/null
78097+++ b/include/linux/gracl_compat.h
78098@@ -0,0 +1,156 @@
78099+#ifndef GR_ACL_COMPAT_H
78100+#define GR_ACL_COMPAT_H
78101+
78102+#include <linux/resource.h>
78103+#include <asm/resource.h>
78104+
78105+struct sprole_pw_compat {
78106+ compat_uptr_t rolename;
78107+ unsigned char salt[GR_SALT_LEN];
78108+ unsigned char sum[GR_SHA_LEN];
78109+};
78110+
78111+struct gr_hash_struct_compat {
78112+ compat_uptr_t table;
78113+ compat_uptr_t nametable;
78114+ compat_uptr_t first;
78115+ __u32 table_size;
78116+ __u32 used_size;
78117+ int type;
78118+};
78119+
78120+struct acl_subject_label_compat {
78121+ compat_uptr_t filename;
78122+ compat_ino_t inode;
78123+ __u32 device;
78124+ __u32 mode;
78125+ kernel_cap_t cap_mask;
78126+ kernel_cap_t cap_lower;
78127+ kernel_cap_t cap_invert_audit;
78128+
78129+ struct compat_rlimit res[GR_NLIMITS];
78130+ __u32 resmask;
78131+
78132+ __u8 user_trans_type;
78133+ __u8 group_trans_type;
78134+ compat_uptr_t user_transitions;
78135+ compat_uptr_t group_transitions;
78136+ __u16 user_trans_num;
78137+ __u16 group_trans_num;
78138+
78139+ __u32 sock_families[2];
78140+ __u32 ip_proto[8];
78141+ __u32 ip_type;
78142+ compat_uptr_t ips;
78143+ __u32 ip_num;
78144+ __u32 inaddr_any_override;
78145+
78146+ __u32 crashes;
78147+ compat_ulong_t expires;
78148+
78149+ compat_uptr_t parent_subject;
78150+ compat_uptr_t hash;
78151+ compat_uptr_t prev;
78152+ compat_uptr_t next;
78153+
78154+ compat_uptr_t obj_hash;
78155+ __u32 obj_hash_size;
78156+ __u16 pax_flags;
78157+};
78158+
78159+struct role_allowed_ip_compat {
78160+ __u32 addr;
78161+ __u32 netmask;
78162+
78163+ compat_uptr_t prev;
78164+ compat_uptr_t next;
78165+};
78166+
78167+struct role_transition_compat {
78168+ compat_uptr_t rolename;
78169+
78170+ compat_uptr_t prev;
78171+ compat_uptr_t next;
78172+};
78173+
78174+struct acl_role_label_compat {
78175+ compat_uptr_t rolename;
78176+ uid_t uidgid;
78177+ __u16 roletype;
78178+
78179+ __u16 auth_attempts;
78180+ compat_ulong_t expires;
78181+
78182+ compat_uptr_t root_label;
78183+ compat_uptr_t hash;
78184+
78185+ compat_uptr_t prev;
78186+ compat_uptr_t next;
78187+
78188+ compat_uptr_t transitions;
78189+ compat_uptr_t allowed_ips;
78190+ compat_uptr_t domain_children;
78191+ __u16 domain_child_num;
78192+
78193+ umode_t umask;
78194+
78195+ compat_uptr_t subj_hash;
78196+ __u32 subj_hash_size;
78197+};
78198+
78199+struct user_acl_role_db_compat {
78200+ compat_uptr_t r_table;
78201+ __u32 num_pointers;
78202+ __u32 num_roles;
78203+ __u32 num_domain_children;
78204+ __u32 num_subjects;
78205+ __u32 num_objects;
78206+};
78207+
78208+struct acl_object_label_compat {
78209+ compat_uptr_t filename;
78210+ compat_ino_t inode;
78211+ __u32 device;
78212+ __u32 mode;
78213+
78214+ compat_uptr_t nested;
78215+ compat_uptr_t globbed;
78216+
78217+ compat_uptr_t prev;
78218+ compat_uptr_t next;
78219+};
78220+
78221+struct acl_ip_label_compat {
78222+ compat_uptr_t iface;
78223+ __u32 addr;
78224+ __u32 netmask;
78225+ __u16 low, high;
78226+ __u8 mode;
78227+ __u32 type;
78228+ __u32 proto[8];
78229+
78230+ compat_uptr_t prev;
78231+ compat_uptr_t next;
78232+};
78233+
78234+struct gr_arg_compat {
78235+ struct user_acl_role_db_compat role_db;
78236+ unsigned char pw[GR_PW_LEN];
78237+ unsigned char salt[GR_SALT_LEN];
78238+ unsigned char sum[GR_SHA_LEN];
78239+ unsigned char sp_role[GR_SPROLE_LEN];
78240+ compat_uptr_t sprole_pws;
78241+ __u32 segv_device;
78242+ compat_ino_t segv_inode;
78243+ uid_t segv_uid;
78244+ __u16 num_sprole_pws;
78245+ __u16 mode;
78246+};
78247+
78248+struct gr_arg_wrapper_compat {
78249+ compat_uptr_t arg;
78250+ __u32 version;
78251+ __u32 size;
78252+};
78253+
78254+#endif
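Every struct here mirrors its native counterpart with pointers shrunk to compat_uptr_t and longs to compat_ulong_t, so a 32-bit userspace image can be decoded by a 64-bit kernel; compat_ptr() then widens each handle. A hedged sketch of the wrapper conversion (routine name hypothetical):

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    static int fetch_wrapper_compat_sketch(struct gr_arg_wrapper *out,
                                           const void __user *buf)
    {
            struct gr_arg_wrapper_compat w;

            if (copy_from_user(&w, buf, sizeof(w)))
                    return -EFAULT;

            out->arg     = compat_ptr(w.arg);  /* widen the 32-bit pointer */
            out->version = w.version;
            out->size    = w.size;
            return 0;
    }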
78255diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
78256new file mode 100644
78257index 0000000..323ecf2
78258--- /dev/null
78259+++ b/include/linux/gralloc.h
78260@@ -0,0 +1,9 @@
78261+#ifndef __GRALLOC_H
78262+#define __GRALLOC_H
78263+
78264+void acl_free_all(void);
78265+int acl_alloc_stack_init(unsigned long size);
78266+void *acl_alloc(unsigned long len);
78267+void *acl_alloc_num(unsigned long num, unsigned long len);
78268+
78269+#endif
78270diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
78271new file mode 100644
78272index 0000000..be66033
78273--- /dev/null
78274+++ b/include/linux/grdefs.h
78275@@ -0,0 +1,140 @@
78276+#ifndef GRDEFS_H
78277+#define GRDEFS_H
78278+
78279+/* Begin grsecurity status declarations */
78280+
78281+enum {
78282+ GR_READY = 0x01,
78283+ GR_STATUS_INIT = 0x00 // disabled state
78284+};
78285+
78286+/* Begin ACL declarations */
78287+
78288+/* Role flags */
78289+
78290+enum {
78291+ GR_ROLE_USER = 0x0001,
78292+ GR_ROLE_GROUP = 0x0002,
78293+ GR_ROLE_DEFAULT = 0x0004,
78294+ GR_ROLE_SPECIAL = 0x0008,
78295+ GR_ROLE_AUTH = 0x0010,
78296+ GR_ROLE_NOPW = 0x0020,
78297+ GR_ROLE_GOD = 0x0040,
78298+ GR_ROLE_LEARN = 0x0080,
78299+ GR_ROLE_TPE = 0x0100,
78300+ GR_ROLE_DOMAIN = 0x0200,
78301+ GR_ROLE_PAM = 0x0400,
78302+ GR_ROLE_PERSIST = 0x0800
78303+};
78304+
78305+/* ACL Subject and Object mode flags */
78306+enum {
78307+ GR_DELETED = 0x80000000
78308+};
78309+
78310+/* ACL Object-only mode flags */
78311+enum {
78312+ GR_READ = 0x00000001,
78313+ GR_APPEND = 0x00000002,
78314+ GR_WRITE = 0x00000004,
78315+ GR_EXEC = 0x00000008,
78316+ GR_FIND = 0x00000010,
78317+ GR_INHERIT = 0x00000020,
78318+ GR_SETID = 0x00000040,
78319+ GR_CREATE = 0x00000080,
78320+ GR_DELETE = 0x00000100,
78321+ GR_LINK = 0x00000200,
78322+ GR_AUDIT_READ = 0x00000400,
78323+ GR_AUDIT_APPEND = 0x00000800,
78324+ GR_AUDIT_WRITE = 0x00001000,
78325+ GR_AUDIT_EXEC = 0x00002000,
78326+ GR_AUDIT_FIND = 0x00004000,
78327+ GR_AUDIT_INHERIT= 0x00008000,
78328+ GR_AUDIT_SETID = 0x00010000,
78329+ GR_AUDIT_CREATE = 0x00020000,
78330+ GR_AUDIT_DELETE = 0x00040000,
78331+ GR_AUDIT_LINK = 0x00080000,
78332+ GR_PTRACERD = 0x00100000,
78333+ GR_NOPTRACE = 0x00200000,
78334+ GR_SUPPRESS = 0x00400000,
78335+ GR_NOLEARN = 0x00800000,
78336+ GR_INIT_TRANSFER= 0x01000000
78337+};
78338+
78339+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
78340+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
78341+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
78342+
78343+/* ACL subject-only mode flags */
78344+enum {
78345+ GR_KILL = 0x00000001,
78346+ GR_VIEW = 0x00000002,
78347+ GR_PROTECTED = 0x00000004,
78348+ GR_LEARN = 0x00000008,
78349+ GR_OVERRIDE = 0x00000010,
78350+ /* just a placeholder, this mode is only used in userspace */
78351+ GR_DUMMY = 0x00000020,
78352+ GR_PROTSHM = 0x00000040,
78353+ GR_KILLPROC = 0x00000080,
78354+ GR_KILLIPPROC = 0x00000100,
78355+ /* just a placeholder, this mode is only used in userspace */
78356+ GR_NOTROJAN = 0x00000200,
78357+ GR_PROTPROCFD = 0x00000400,
78358+ GR_PROCACCT = 0x00000800,
78359+ GR_RELAXPTRACE = 0x00001000,
78360+ //GR_NESTED = 0x00002000,
78361+ GR_INHERITLEARN = 0x00004000,
78362+ GR_PROCFIND = 0x00008000,
78363+ GR_POVERRIDE = 0x00010000,
78364+ GR_KERNELAUTH = 0x00020000,
78365+ GR_ATSECURE = 0x00040000,
78366+ GR_SHMEXEC = 0x00080000
78367+};
78368+
78369+enum {
78370+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
78371+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
78372+ GR_PAX_ENABLE_MPROTECT = 0x0004,
78373+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
78374+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
78375+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
78376+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
78377+ GR_PAX_DISABLE_MPROTECT = 0x0400,
78378+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
78379+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
78380+};
78381+
78382+enum {
78383+ GR_ID_USER = 0x01,
78384+ GR_ID_GROUP = 0x02,
78385+};
78386+
78387+enum {
78388+ GR_ID_ALLOW = 0x01,
78389+ GR_ID_DENY = 0x02,
78390+};
78391+
78392+#define GR_CRASH_RES 31
78393+#define GR_UIDTABLE_MAX 500
78394+
78395+/* begin resource learning section */
78396+enum {
78397+ GR_RLIM_CPU_BUMP = 60,
78398+ GR_RLIM_FSIZE_BUMP = 50000,
78399+ GR_RLIM_DATA_BUMP = 10000,
78400+ GR_RLIM_STACK_BUMP = 1000,
78401+ GR_RLIM_CORE_BUMP = 10000,
78402+ GR_RLIM_RSS_BUMP = 500000,
78403+ GR_RLIM_NPROC_BUMP = 1,
78404+ GR_RLIM_NOFILE_BUMP = 5,
78405+ GR_RLIM_MEMLOCK_BUMP = 50000,
78406+ GR_RLIM_AS_BUMP = 500000,
78407+ GR_RLIM_LOCKS_BUMP = 2,
78408+ GR_RLIM_SIGPENDING_BUMP = 5,
78409+ GR_RLIM_MSGQUEUE_BUMP = 10000,
78410+ GR_RLIM_NICE_BUMP = 1,
78411+ GR_RLIM_RTPRIO_BUMP = 1,
78412+ GR_RLIM_RTTIME_BUMP = 1000000
78413+};
78414+
78415+#endif
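
Worth noting about the object-mode flags above: each GR_AUDIT_* value is its base permission shifted left by 10 (GR_READ 0x1 maps to GR_AUDIT_READ 0x400, through GR_LINK 0x200 mapping to GR_AUDIT_LINK 0x80000). A standalone check of that invariant, which the to_gr_audit() helper declared in grinternal.h below can rely on (its kernel implementation is not shown in this section):

#include <stdio.h>

#define GR_READ        0x00000001u
#define GR_WRITE       0x00000004u
#define GR_AUDIT_READ  0x00000400u
#define GR_AUDIT_WRITE 0x00001000u

/* each GR_AUDIT_* bit sits exactly 10 bits left of its base flag */
static unsigned int audit_bits(unsigned int reqmode)
{
	return reqmode << 10;
}

int main(void)
{
	unsigned int req = GR_READ | GR_WRITE;

	printf("req=%#x audit=%#x\n", req, audit_bits(req));
	return (audit_bits(GR_READ) == GR_AUDIT_READ &&
		audit_bits(GR_WRITE) == GR_AUDIT_WRITE) ? 0 : 1;
}
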
78416diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
78417new file mode 100644
78418index 0000000..d25522e
78419--- /dev/null
78420+++ b/include/linux/grinternal.h
78421@@ -0,0 +1,229 @@
78422+#ifndef __GRINTERNAL_H
78423+#define __GRINTERNAL_H
78424+
78425+#ifdef CONFIG_GRKERNSEC
78426+
78427+#include <linux/fs.h>
78428+#include <linux/mnt_namespace.h>
78429+#include <linux/nsproxy.h>
78430+#include <linux/gracl.h>
78431+#include <linux/grdefs.h>
78432+#include <linux/grmsg.h>
78433+
78434+void gr_add_learn_entry(const char *fmt, ...)
78435+ __attribute__ ((format (printf, 1, 2)));
78436+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
78437+ const struct vfsmount *mnt);
78438+__u32 gr_check_create(const struct dentry *new_dentry,
78439+ const struct dentry *parent,
78440+ const struct vfsmount *mnt, const __u32 mode);
78441+int gr_check_protected_task(const struct task_struct *task);
78442+__u32 to_gr_audit(const __u32 reqmode);
78443+int gr_set_acls(const int type);
78444+int gr_acl_is_enabled(void);
78445+char gr_roletype_to_char(void);
78446+
78447+void gr_handle_alertkill(struct task_struct *task);
78448+char *gr_to_filename(const struct dentry *dentry,
78449+ const struct vfsmount *mnt);
78450+char *gr_to_filename1(const struct dentry *dentry,
78451+ const struct vfsmount *mnt);
78452+char *gr_to_filename2(const struct dentry *dentry,
78453+ const struct vfsmount *mnt);
78454+char *gr_to_filename3(const struct dentry *dentry,
78455+ const struct vfsmount *mnt);
78456+
78457+extern int grsec_enable_ptrace_readexec;
78458+extern int grsec_enable_harden_ptrace;
78459+extern int grsec_enable_link;
78460+extern int grsec_enable_fifo;
78461+extern int grsec_enable_execve;
78462+extern int grsec_enable_shm;
78463+extern int grsec_enable_execlog;
78464+extern int grsec_enable_signal;
78465+extern int grsec_enable_audit_ptrace;
78466+extern int grsec_enable_forkfail;
78467+extern int grsec_enable_time;
78468+extern int grsec_enable_rofs;
78469+extern int grsec_deny_new_usb;
78470+extern int grsec_enable_chroot_shmat;
78471+extern int grsec_enable_chroot_mount;
78472+extern int grsec_enable_chroot_double;
78473+extern int grsec_enable_chroot_pivot;
78474+extern int grsec_enable_chroot_chdir;
78475+extern int grsec_enable_chroot_chmod;
78476+extern int grsec_enable_chroot_mknod;
78477+extern int grsec_enable_chroot_fchdir;
78478+extern int grsec_enable_chroot_nice;
78479+extern int grsec_enable_chroot_execlog;
78480+extern int grsec_enable_chroot_caps;
78481+extern int grsec_enable_chroot_sysctl;
78482+extern int grsec_enable_chroot_unix;
78483+extern int grsec_enable_symlinkown;
78484+extern kgid_t grsec_symlinkown_gid;
78485+extern int grsec_enable_tpe;
78486+extern kgid_t grsec_tpe_gid;
78487+extern int grsec_enable_tpe_all;
78488+extern int grsec_enable_tpe_invert;
78489+extern int grsec_enable_socket_all;
78490+extern kgid_t grsec_socket_all_gid;
78491+extern int grsec_enable_socket_client;
78492+extern kgid_t grsec_socket_client_gid;
78493+extern int grsec_enable_socket_server;
78494+extern kgid_t grsec_socket_server_gid;
78495+extern kgid_t grsec_audit_gid;
78496+extern int grsec_enable_group;
78497+extern int grsec_enable_log_rwxmaps;
78498+extern int grsec_enable_mount;
78499+extern int grsec_enable_chdir;
78500+extern int grsec_resource_logging;
78501+extern int grsec_enable_blackhole;
78502+extern int grsec_lastack_retries;
78503+extern int grsec_enable_brute;
78504+extern int grsec_enable_harden_ipc;
78505+extern int grsec_lock;
78506+
78507+extern spinlock_t grsec_alert_lock;
78508+extern unsigned long grsec_alert_wtime;
78509+extern unsigned long grsec_alert_fyet;
78510+
78511+extern spinlock_t grsec_audit_lock;
78512+
78513+extern rwlock_t grsec_exec_file_lock;
78514+
78515+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
78516+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
78517+ (tsk)->exec_file->f_path.mnt) : "/")
78518+
78519+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
78520+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
78521+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
78522+
78523+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
78524+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
78525+ (tsk)->exec_file->f_path.mnt) : "/")
78526+
78527+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
78528+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
78529+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
78530+
78531+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
78532+
78533+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
78534+
78535+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
78536+{
78537+ if (file1 && file2) {
78538+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
78539+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
78540+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
78541+ return true;
78542+ }
78543+
78544+ return false;
78545+}
78546+
78547+#define GR_CHROOT_CAPS {{ \
78548+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
78549+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
78550+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
78551+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
78552+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
78553+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
78554+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
78555+
78556+#define security_learn(normal_msg,args...) \
78557+({ \
78558+ read_lock(&grsec_exec_file_lock); \
78559+ gr_add_learn_entry(normal_msg "\n", ## args); \
78560+ read_unlock(&grsec_exec_file_lock); \
78561+})
78562+
78563+enum {
78564+ GR_DO_AUDIT,
78565+ GR_DONT_AUDIT,
78566+ /* used for non-audit messages that we shouldn't kill the task on */
78567+ GR_DONT_AUDIT_GOOD
78568+};
78569+
78570+enum {
78571+ GR_TTYSNIFF,
78572+ GR_RBAC,
78573+ GR_RBAC_STR,
78574+ GR_STR_RBAC,
78575+ GR_RBAC_MODE2,
78576+ GR_RBAC_MODE3,
78577+ GR_FILENAME,
78578+ GR_SYSCTL_HIDDEN,
78579+ GR_NOARGS,
78580+ GR_ONE_INT,
78581+ GR_ONE_INT_TWO_STR,
78582+ GR_ONE_STR,
78583+ GR_STR_INT,
78584+ GR_TWO_STR_INT,
78585+ GR_TWO_INT,
78586+ GR_TWO_U64,
78587+ GR_THREE_INT,
78588+ GR_FIVE_INT_TWO_STR,
78589+ GR_TWO_STR,
78590+ GR_THREE_STR,
78591+ GR_FOUR_STR,
78592+ GR_STR_FILENAME,
78593+ GR_FILENAME_STR,
78594+ GR_FILENAME_TWO_INT,
78595+ GR_FILENAME_TWO_INT_STR,
78596+ GR_TEXTREL,
78597+ GR_PTRACE,
78598+ GR_RESOURCE,
78599+ GR_CAP,
78600+ GR_SIG,
78601+ GR_SIG2,
78602+ GR_CRASH1,
78603+ GR_CRASH2,
78604+ GR_PSACCT,
78605+ GR_RWXMAP,
78606+ GR_RWXMAPVMA
78607+};
78608+
78609+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
78610+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
78611+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
78612+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
78613+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
78614+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
78615+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
78616+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
78617+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
78618+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
78619+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
78620+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
78621+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
78622+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
78623+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
78624+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
78625+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
78626+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
78627+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
78628+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
78629+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
78630+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
78631+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
78632+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
78633+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
78634+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
78635+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
78636+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
78637+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
78638+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
78639+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
78640+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
78641+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
78642+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
78643+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
78644+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
78645+
78646+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
78647+
78648+#endif
78649+
78650+#endif
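
All of the gr_log_* wrappers above funnel into gr_log_varargs() with one tag from the argtype enum, and the tag tells the logger how to pull its arguments back off the va_list. A reduced standalone model of that dispatch, with only two tags and invented output:

#include <stdarg.h>
#include <stdio.h>

enum { GR_ONE_STR, GR_STR_INT };

static void log_varargs(const char *msg, int argtypes, ...)
{
	va_list ap;

	va_start(ap, argtypes);
	switch (argtypes) {
	case GR_ONE_STR:
		printf(msg, va_arg(ap, const char *));
		break;
	case GR_STR_INT: {
		/* pull arguments out in sequence before formatting */
		const char *s = va_arg(ap, const char *);
		int n = va_arg(ap, int);

		printf(msg, s, n);
		break;
	}
	}
	va_end(ap);
	putchar('\n');
}

int main(void)
{
	log_varargs("denied %s", GR_ONE_STR, "mount");
	log_varargs("denied %s by uid %d", GR_STR_INT, "ptrace", 1000);
	return 0;
}
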
78651diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
78652new file mode 100644
78653index 0000000..ba93581
78654--- /dev/null
78655+++ b/include/linux/grmsg.h
78656@@ -0,0 +1,116 @@
78657+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
78658+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
78659+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
78660+#define GR_STOPMOD_MSG "denied modification of module state by "
78661+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
78662+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
78663+#define GR_IOPERM_MSG "denied use of ioperm() by "
78664+#define GR_IOPL_MSG "denied use of iopl() by "
78665+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
78666+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
78667+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
78668+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
78669+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
78670+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
78671+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
78672+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
78673+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
78674+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
78675+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
78676+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
78677+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
78678+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
78679+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
78680+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
78681+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
78682+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
78683+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
78684+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
78685+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
78686+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
78687+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
78688+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
78689+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
78690+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
78691+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
78692+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
78693+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
78694+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
78695+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
78696+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
78697+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
78698+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
78699+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
78700+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
78701+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
78702+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
78703+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
78704+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
78705+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
78706+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
78707+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
78708+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
78709+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
78710+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
78711+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
78712+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
78713+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
78714+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
78715+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
78716+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
78717+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
78718+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
78719+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
78720+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
78721+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
78722+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
78723+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
78724+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
78725+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
78726+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
78727+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
78728+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
78729+#define GR_FAILFORK_MSG "failed fork with errno %s by "
78730+#define GR_NICE_CHROOT_MSG "denied priority change by "
78731+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
78732+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
78733+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
78734+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
78735+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
78736+#define GR_TIME_MSG "time set by "
78737+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
78738+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
78739+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
78740+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
78741+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
78742+#define GR_BIND_MSG "denied bind() by "
78743+#define GR_CONNECT_MSG "denied connect() by "
78744+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
78745+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
78746+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
78747+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
78748+#define GR_CAP_ACL_MSG "use of %s denied for "
78749+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
78750+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
78751+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
78752+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
78753+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
78754+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
78755+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
78756+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
78757+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
78758+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
78759+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
78760+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
78761+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
78762+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
78763+#define GR_VM86_MSG "denied use of vm86 by "
78764+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
78765+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
78766+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
78767+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
78768+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
78769+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
78770+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
78771+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
78772+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
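
Nearly every format string above ends in "by " (or embeds DEFAULTSECMSG) because the logger appends the acting task's identity after the event-specific text; that assembly happens in grsec_log.c elsewhere in the patch. A standalone illustration of the two-stage concatenation, with the identity fields invented:

#include <stdio.h>

#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
#define DEMO_IDENT "%s[%s:%d] uid/euid:%u/%u"

int main(void)
{
	char buf[1024];
	int len;

	/* event-specific prefix first ... */
	len = snprintf(buf, sizeof(buf), GR_CHDIR_AUDIT_MSG, "/tmp");
	/* ... then the task identity appended after "by " */
	snprintf(buf + len, sizeof(buf) - len, DEMO_IDENT,
		 "bash", "bash", 4242, 1000u, 1000u);
	puts(buf);	/* chdir to /tmp by bash[bash:4242] uid/euid:1000/1000 */
	return 0;
}
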
78773diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
78774new file mode 100644
78775index 0000000..8108301
78776--- /dev/null
78777+++ b/include/linux/grsecurity.h
78778@@ -0,0 +1,246 @@
78779+#ifndef GR_SECURITY_H
78780+#define GR_SECURITY_H
78781+#include <linux/fs.h>
78782+#include <linux/fs_struct.h>
78783+#include <linux/binfmts.h>
78784+#include <linux/gracl.h>
78785+
78786+/* notify of brain-dead configs */
78787+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78788+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
78789+#endif
78790+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
78791+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
78792+#endif
78793+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
78794+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
78795+#endif
78796+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
78797+#error "CONFIG_PAX enabled, but no PaX options are enabled."
78798+#endif
78799+
78800+int gr_handle_new_usb(void);
78801+
78802+void gr_handle_brute_attach(int dumpable);
78803+void gr_handle_brute_check(void);
78804+void gr_handle_kernel_exploit(void);
78805+
78806+char gr_roletype_to_char(void);
78807+
78808+int gr_acl_enable_at_secure(void);
78809+
78810+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
78811+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
78812+
78813+void gr_del_task_from_ip_table(struct task_struct *p);
78814+
78815+int gr_pid_is_chrooted(struct task_struct *p);
78816+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
78817+int gr_handle_chroot_nice(void);
78818+int gr_handle_chroot_sysctl(const int op);
78819+int gr_handle_chroot_setpriority(struct task_struct *p,
78820+ const int niceval);
78821+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
78822+int gr_handle_chroot_chroot(const struct dentry *dentry,
78823+ const struct vfsmount *mnt);
78824+void gr_handle_chroot_chdir(const struct path *path);
78825+int gr_handle_chroot_chmod(const struct dentry *dentry,
78826+ const struct vfsmount *mnt, const int mode);
78827+int gr_handle_chroot_mknod(const struct dentry *dentry,
78828+ const struct vfsmount *mnt, const int mode);
78829+int gr_handle_chroot_mount(const struct dentry *dentry,
78830+ const struct vfsmount *mnt,
78831+ const char *dev_name);
78832+int gr_handle_chroot_pivot(void);
78833+int gr_handle_chroot_unix(const pid_t pid);
78834+
78835+int gr_handle_rawio(const struct inode *inode);
78836+
78837+void gr_handle_ioperm(void);
78838+void gr_handle_iopl(void);
78839+void gr_handle_msr_write(void);
78840+
78841+umode_t gr_acl_umask(void);
78842+
78843+int gr_tpe_allow(const struct file *file);
78844+
78845+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
78846+void gr_clear_chroot_entries(struct task_struct *task);
78847+
78848+void gr_log_forkfail(const int retval);
78849+void gr_log_timechange(void);
78850+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
78851+void gr_log_chdir(const struct dentry *dentry,
78852+ const struct vfsmount *mnt);
78853+void gr_log_chroot_exec(const struct dentry *dentry,
78854+ const struct vfsmount *mnt);
78855+void gr_log_remount(const char *devname, const int retval);
78856+void gr_log_unmount(const char *devname, const int retval);
78857+void gr_log_mount(const char *from, const char *to, const int retval);
78858+void gr_log_textrel(struct vm_area_struct *vma);
78859+void gr_log_ptgnustack(struct file *file);
78860+void gr_log_rwxmmap(struct file *file);
78861+void gr_log_rwxmprotect(struct vm_area_struct *vma);
78862+
78863+int gr_handle_follow_link(const struct inode *parent,
78864+ const struct inode *inode,
78865+ const struct dentry *dentry,
78866+ const struct vfsmount *mnt);
78867+int gr_handle_fifo(const struct dentry *dentry,
78868+ const struct vfsmount *mnt,
78869+ const struct dentry *dir, const int flag,
78870+ const int acc_mode);
78871+int gr_handle_hardlink(const struct dentry *dentry,
78872+ const struct vfsmount *mnt,
78873+ struct inode *inode,
78874+ const int mode, const struct filename *to);
78875+
78876+int gr_is_capable(const int cap);
78877+int gr_is_capable_nolog(const int cap);
78878+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
78879+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
78880+
78881+void gr_copy_label(struct task_struct *tsk);
78882+void gr_handle_crash(struct task_struct *task, const int sig);
78883+int gr_handle_signal(const struct task_struct *p, const int sig);
78884+int gr_check_crash_uid(const kuid_t uid);
78885+int gr_check_protected_task(const struct task_struct *task);
78886+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
78887+int gr_acl_handle_mmap(const struct file *file,
78888+ const unsigned long prot);
78889+int gr_acl_handle_mprotect(const struct file *file,
78890+ const unsigned long prot);
78891+int gr_check_hidden_task(const struct task_struct *tsk);
78892+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
78893+ const struct vfsmount *mnt);
78894+__u32 gr_acl_handle_utime(const struct dentry *dentry,
78895+ const struct vfsmount *mnt);
78896+__u32 gr_acl_handle_access(const struct dentry *dentry,
78897+ const struct vfsmount *mnt, const int fmode);
78898+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
78899+ const struct vfsmount *mnt, umode_t *mode);
78900+__u32 gr_acl_handle_chown(const struct dentry *dentry,
78901+ const struct vfsmount *mnt);
78902+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
78903+ const struct vfsmount *mnt);
78904+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
78905+ const struct vfsmount *mnt);
78906+int gr_handle_ptrace(struct task_struct *task, const long request);
78907+int gr_handle_proc_ptrace(struct task_struct *task);
78908+__u32 gr_acl_handle_execve(const struct dentry *dentry,
78909+ const struct vfsmount *mnt);
78910+int gr_check_crash_exec(const struct file *filp);
78911+int gr_acl_is_enabled(void);
78912+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
78913+ const kgid_t gid);
78914+int gr_set_proc_label(const struct dentry *dentry,
78915+ const struct vfsmount *mnt,
78916+ const int unsafe_flags);
78917+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
78918+ const struct vfsmount *mnt);
78919+__u32 gr_acl_handle_open(const struct dentry *dentry,
78920+ const struct vfsmount *mnt, int acc_mode);
78921+__u32 gr_acl_handle_creat(const struct dentry *dentry,
78922+ const struct dentry *p_dentry,
78923+ const struct vfsmount *p_mnt,
78924+ int open_flags, int acc_mode, const int imode);
78925+void gr_handle_create(const struct dentry *dentry,
78926+ const struct vfsmount *mnt);
78927+void gr_handle_proc_create(const struct dentry *dentry,
78928+ const struct inode *inode);
78929+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
78930+ const struct dentry *parent_dentry,
78931+ const struct vfsmount *parent_mnt,
78932+ const int mode);
78933+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
78934+ const struct dentry *parent_dentry,
78935+ const struct vfsmount *parent_mnt);
78936+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
78937+ const struct vfsmount *mnt);
78938+void gr_handle_delete(const ino_t ino, const dev_t dev);
78939+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
78940+ const struct vfsmount *mnt);
78941+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
78942+ const struct dentry *parent_dentry,
78943+ const struct vfsmount *parent_mnt,
78944+ const struct filename *from);
78945+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
78946+ const struct dentry *parent_dentry,
78947+ const struct vfsmount *parent_mnt,
78948+ const struct dentry *old_dentry,
78949+ const struct vfsmount *old_mnt, const struct filename *to);
78950+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
78951+int gr_acl_handle_rename(struct dentry *new_dentry,
78952+ struct dentry *parent_dentry,
78953+ const struct vfsmount *parent_mnt,
78954+ struct dentry *old_dentry,
78955+ struct inode *old_parent_inode,
78956+ struct vfsmount *old_mnt, const struct filename *newname);
78957+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
78958+ struct dentry *old_dentry,
78959+ struct dentry *new_dentry,
78960+ struct vfsmount *mnt, const __u8 replace);
78961+__u32 gr_check_link(const struct dentry *new_dentry,
78962+ const struct dentry *parent_dentry,
78963+ const struct vfsmount *parent_mnt,
78964+ const struct dentry *old_dentry,
78965+ const struct vfsmount *old_mnt);
78966+int gr_acl_handle_filldir(const struct file *file, const char *name,
78967+ const unsigned int namelen, const ino_t ino);
78968+
78969+__u32 gr_acl_handle_unix(const struct dentry *dentry,
78970+ const struct vfsmount *mnt);
78971+void gr_acl_handle_exit(void);
78972+void gr_acl_handle_psacct(struct task_struct *task, const long code);
78973+int gr_acl_handle_procpidmem(const struct task_struct *task);
78974+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
78975+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
78976+void gr_audit_ptrace(struct task_struct *task);
78977+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
78978+void gr_put_exec_file(struct task_struct *task);
78979+
78980+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
78981+
78982+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
78983+extern void gr_learn_resource(const struct task_struct *task, const int res,
78984+ const unsigned long wanted, const int gt);
78985+#else
78986+static inline void gr_learn_resource(const struct task_struct *task, const int res,
78987+ const unsigned long wanted, const int gt)
78988+{
78989+}
78990+#endif
78991+
78992+#ifdef CONFIG_GRKERNSEC_RESLOG
78993+extern void gr_log_resource(const struct task_struct *task, const int res,
78994+ const unsigned long wanted, const int gt);
78995+#else
78996+static inline void gr_log_resource(const struct task_struct *task, const int res,
78997+ const unsigned long wanted, const int gt)
78998+{
78999+}
79000+#endif
79001+
79002+#ifdef CONFIG_GRKERNSEC
79003+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
79004+void gr_handle_vm86(void);
79005+void gr_handle_mem_readwrite(u64 from, u64 to);
79006+
79007+void gr_log_badprocpid(const char *entry);
79008+
79009+extern int grsec_enable_dmesg;
79010+extern int grsec_disable_privio;
79011+
79012+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
79013+extern kgid_t grsec_proc_gid;
79014+#endif
79015+
79016+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79017+extern int grsec_enable_chroot_findtask;
79018+#endif
79019+#ifdef CONFIG_GRKERNSEC_SETXID
79020+extern int grsec_enable_setxid;
79021+#endif
79022+#endif
79023+
79024+#endif
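
These declarations are the hook points the rest of the patch wires into VFS, mm, and process code. A hedged sketch of one such call site, using the chmod-related hooks above; the surrounding function and the return conventions noted in comments are assumptions, and the real wiring lives in fs/ hunks not shown here.

#include <linux/errno.h>
#include <linux/path.h>
#include <linux/grsecurity.h>

static int demo_chmod_checks(struct path *path, umode_t mode)
{
	/* chroot restriction: nonzero return assumed to mean "deny" */
	if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode))
		return -EPERM;

	/* RBAC check: zero mask assumed to mean the subject lacks rights;
	 * note the hook takes the mode by pointer and may rewrite it */
	if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode))
		return -EACCES;

	return 0;
}
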
79025diff --git a/include/linux/grsock.h b/include/linux/grsock.h
79026new file mode 100644
79027index 0000000..e7ffaaf
79028--- /dev/null
79029+++ b/include/linux/grsock.h
79030@@ -0,0 +1,19 @@
79031+#ifndef __GRSOCK_H
79032+#define __GRSOCK_H
79033+
79034+extern void gr_attach_curr_ip(const struct sock *sk);
79035+extern int gr_handle_sock_all(const int family, const int type,
79036+ const int protocol);
79037+extern int gr_handle_sock_server(const struct sockaddr *sck);
79038+extern int gr_handle_sock_server_other(const struct sock *sck);
79039+extern int gr_handle_sock_client(const struct sockaddr *sck);
79040+extern int gr_search_connect(struct socket * sock,
79041+ struct sockaddr_in * addr);
79042+extern int gr_search_bind(struct socket * sock,
79043+ struct sockaddr_in * addr);
79044+extern int gr_search_listen(struct socket * sock);
79045+extern int gr_search_accept(struct socket * sock);
79046+extern int gr_search_socket(const int domain, const int type,
79047+ const int protocol);
79048+
79049+#endif
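
A sketch of how the socket hooks above would stack in a socket(2) entry point. The ordering and the return conventions in the comments are assumptions; the actual call sites are in net/socket.c hunks elsewhere in the patch.

#include <linux/errno.h>
#include <linux/grsock.h>

static int demo_socket_checks(int family, int type, int protocol)
{
	/* RBAC subject policy: zero return assumed to mean "deny" */
	if (!gr_search_socket(family, type, protocol))
		return -EACCES;

	/* group-based socket_all sysctl: nonzero assumed to mean "deny" */
	if (gr_handle_sock_all(family, type, protocol))
		return -EACCES;

	return 0;
}
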
79050diff --git a/include/linux/highmem.h b/include/linux/highmem.h
79051index 7fb31da..08b5114 100644
79052--- a/include/linux/highmem.h
79053+++ b/include/linux/highmem.h
79054@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
79055 kunmap_atomic(kaddr);
79056 }
79057
79058+static inline void sanitize_highpage(struct page *page)
79059+{
79060+ void *kaddr;
79061+ unsigned long flags;
79062+
79063+ local_irq_save(flags);
79064+ kaddr = kmap_atomic(page);
79065+ clear_page(kaddr);
79066+ kunmap_atomic(kaddr);
79067+ local_irq_restore(flags);
79068+}
79069+
79070 static inline void zero_user_segments(struct page *page,
79071 unsigned start1, unsigned end1,
79072 unsigned start2, unsigned end2)
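
Unlike clear_highpage() just above it, the new sanitize_highpage() brackets its atomic kmap with local_irq_save(), which suits the page-free paths that PaX's MEMORY_SANITIZE feature (wired up elsewhere in this patch) hooks. A sketch of a caller scrubbing a higher-order block on free; the function itself is invented.

#include <linux/highmem.h>

static void demo_scrub_block(struct page *page, unsigned int order)
{
	unsigned long i;

	for (i = 0; i < (1UL << order); i++)
		sanitize_highpage(page + i);	/* zero each constituent page */
}
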
79073diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
79074index 1c7b89a..7dda400 100644
79075--- a/include/linux/hwmon-sysfs.h
79076+++ b/include/linux/hwmon-sysfs.h
79077@@ -25,7 +25,8 @@
79078 struct sensor_device_attribute{
79079 struct device_attribute dev_attr;
79080 int index;
79081-};
79082+} __do_const;
79083+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
79084 #define to_sensor_dev_attr(_dev_attr) \
79085 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
79086
79087@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
79088 struct device_attribute dev_attr;
79089 u8 index;
79090 u8 nr;
79091-};
79092+} __do_const;
79093+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
79094 #define to_sensor_dev_attr_2(_dev_attr) \
79095 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
79096
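
With the constify gcc plugin, __do_const turns every instance of the struct read-only after initialization; the _no_const typedef is the opt-out for a driver that legitimately rewrites an attribute at runtime, for instance to set .index per detected channel. A hedged sketch; the names and probe flow are invented.

#include <linux/stat.h>
#include <linux/hwmon-sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return 0;	/* stub */
}

/* ordinary static attribute: constified, read-only after boot */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, demo_show, NULL, 0);

/* mutable variant for a driver that fills .index in at probe time */
static sensor_device_attribute_no_const demo_dyn_attr;

static void demo_probe(int channel)
{
	demo_dyn_attr = sensor_dev_attr_temp1_input;	/* struct copy */
	demo_dyn_attr.index = channel;			/* legal: not constified */
}
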
79097diff --git a/include/linux/i2c.h b/include/linux/i2c.h
79098index d9c8dbd3..def6e5a 100644
79099--- a/include/linux/i2c.h
79100+++ b/include/linux/i2c.h
79101@@ -364,6 +364,7 @@ struct i2c_algorithm {
79102 /* To determine what the adapter supports */
79103 u32 (*functionality) (struct i2c_adapter *);
79104 };
79105+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
79106
79107 /**
79108 * struct i2c_bus_recovery_info - I2C bus recovery information
79109diff --git a/include/linux/i2o.h b/include/linux/i2o.h
79110index d23c3c2..eb63c81 100644
79111--- a/include/linux/i2o.h
79112+++ b/include/linux/i2o.h
79113@@ -565,7 +565,7 @@ struct i2o_controller {
79114 struct i2o_device *exec; /* Executive */
79115 #if BITS_PER_LONG == 64
79116 spinlock_t context_list_lock; /* lock for context_list */
79117- atomic_t context_list_counter; /* needed for unique contexts */
79118+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
79119 struct list_head context_list; /* list of context id's
79120 and pointers */
79121 #endif
79122diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
79123index aff7ad8..3942bbd 100644
79124--- a/include/linux/if_pppox.h
79125+++ b/include/linux/if_pppox.h
79126@@ -76,7 +76,7 @@ struct pppox_proto {
79127 int (*ioctl)(struct socket *sock, unsigned int cmd,
79128 unsigned long arg);
79129 struct module *owner;
79130-};
79131+} __do_const;
79132
79133 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
79134 extern void unregister_pppox_proto(int proto_num);
79135diff --git a/include/linux/init.h b/include/linux/init.h
79136index 8e68a64..3f977a0 100644
79137--- a/include/linux/init.h
79138+++ b/include/linux/init.h
79139@@ -37,9 +37,17 @@
79140 * section.
79141 */
79142
79143+#define add_init_latent_entropy __latent_entropy
79144+
79145+#ifdef CONFIG_MEMORY_HOTPLUG
79146+#define add_meminit_latent_entropy
79147+#else
79148+#define add_meminit_latent_entropy __latent_entropy
79149+#endif
79150+
79151 /* These are for everybody (although not all archs will actually
79152 discard it in modules) */
79153-#define __init __section(.init.text) __cold notrace
79154+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
79155 #define __initdata __section(.init.data)
79156 #define __initconst __constsection(.init.rodata)
79157 #define __exitdata __section(.exit.data)
79158@@ -100,7 +108,7 @@
79159 #define __cpuexitconst
79160
79161 /* Used for MEMORY_HOTPLUG */
79162-#define __meminit __section(.meminit.text) __cold notrace
79163+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
79164 #define __meminitdata __section(.meminit.data)
79165 #define __meminitconst __constsection(.meminit.rodata)
79166 #define __memexit __section(.memexit.text) __exitused __cold notrace
79167diff --git a/include/linux/init_task.h b/include/linux/init_task.h
79168index b0ed422..d79ea23 100644
79169--- a/include/linux/init_task.h
79170+++ b/include/linux/init_task.h
79171@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
79172
79173 #define INIT_TASK_COMM "swapper"
79174
79175+#ifdef CONFIG_X86
79176+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
79177+#else
79178+#define INIT_TASK_THREAD_INFO
79179+#endif
79180+
79181 /*
79182 * INIT_TASK is used to set up the first task table, touch at
79183 * your own risk!. Base=0, limit=0x1fffff (=2MB)
79184@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
79185 RCU_POINTER_INITIALIZER(cred, &init_cred), \
79186 .comm = INIT_TASK_COMM, \
79187 .thread = INIT_THREAD, \
79188+ INIT_TASK_THREAD_INFO \
79189 .fs = &init_fs, \
79190 .files = &init_files, \
79191 .signal = &init_signals, \
79192diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
79193index db43b58..5d5084b 100644
79194--- a/include/linux/interrupt.h
79195+++ b/include/linux/interrupt.h
79196@@ -360,7 +360,7 @@ enum
79197 /* map softirq index to softirq name. update 'softirq_to_name' in
79198 * kernel/softirq.c when adding a new softirq.
79199 */
79200-extern char *softirq_to_name[NR_SOFTIRQS];
79201+extern const char * const softirq_to_name[NR_SOFTIRQS];
79202
79203 /* softirq mask and active fields moved to irq_cpustat_t in
79204 * asm/hardirq.h to get better cache usage. KAO
79205@@ -368,8 +368,8 @@ extern char *softirq_to_name[NR_SOFTIRQS];
79206
79207 struct softirq_action
79208 {
79209- void (*action)(struct softirq_action *);
79210-};
79211+ void (*action)(void);
79212+} __no_const;
79213
79214 asmlinkage void do_softirq(void);
79215 asmlinkage void __do_softirq(void);
79216@@ -383,7 +383,7 @@ static inline void do_softirq_own_stack(void)
79217 }
79218 #endif
79219
79220-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
79221+extern void open_softirq(int nr, void (*action)(void));
79222 extern void softirq_init(void);
79223 extern void __raise_softirq_irqoff(unsigned int nr);
79224
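
The struct softirq_action pointer was effectively unused by handlers, so dropping it shrinks every action to a plain void(void) and lets the table be hardened. What a handler and its registration look like after the change; the softirq slot and names are illustrative only (registering on a live slot would of course displace the real handler).

#include <linux/init.h>
#include <linux/interrupt.h>

static void demo_softirq_action(void)	/* new signature: no argument */
{
	/* deferred work runs here, with interrupts enabled */
}

static int __init demo_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, demo_softirq_action);
	raise_softirq(TASKLET_SOFTIRQ);		/* queue it on this CPU */
	return 0;
}
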
79225diff --git a/include/linux/iommu.h b/include/linux/iommu.h
79226index a444c79..8c41ea9 100644
79227--- a/include/linux/iommu.h
79228+++ b/include/linux/iommu.h
79229@@ -130,7 +130,7 @@ struct iommu_ops {
79230 u32 (*domain_get_windows)(struct iommu_domain *domain);
79231
79232 unsigned long pgsize_bitmap;
79233-};
79234+} __do_const;
79235
79236 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
79237 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
79238diff --git a/include/linux/ioport.h b/include/linux/ioport.h
79239index 89b7c24..382af74 100644
79240--- a/include/linux/ioport.h
79241+++ b/include/linux/ioport.h
79242@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
79243 int adjust_resource(struct resource *res, resource_size_t start,
79244 resource_size_t size);
79245 resource_size_t resource_alignment(struct resource *res);
79246-static inline resource_size_t resource_size(const struct resource *res)
79247+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
79248 {
79249 return res->end - res->start + 1;
79250 }
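
The __intentional_overflow(-1) annotation exempts a function from the size-overflow plugin's instrumentation. resource_size() needs it because, for a resource spanning the whole address space, the end - start + 1 computation wraps by design. A standalone demonstration, with a 32-bit resource_size_t assumed:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0x00000000u, end = 0xffffffffu;
	uint32_t size = end - start + 1;	/* wraps to 0 intentionally */

	printf("size=%#x\n", size);		/* prints size=0 */
	return 0;
}
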
79251diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
79252index d6ad91f..f10f279 100644
79253--- a/include/linux/ipc_namespace.h
79254+++ b/include/linux/ipc_namespace.h
79255@@ -70,7 +70,7 @@ struct ipc_namespace {
79256 struct user_namespace *user_ns;
79257
79258 unsigned int proc_inum;
79259-};
79260+} __randomize_layout;
79261
79262 extern struct ipc_namespace init_ipc_ns;
79263 extern atomic_t nr_ipc_ns;
79264diff --git a/include/linux/irq.h b/include/linux/irq.h
79265index 7dc1003..407327b 100644
79266--- a/include/linux/irq.h
79267+++ b/include/linux/irq.h
79268@@ -338,7 +338,8 @@ struct irq_chip {
79269 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
79270
79271 unsigned long flags;
79272-};
79273+} __do_const;
79274+typedef struct irq_chip __no_const irq_chip_no_const;
79275
79276 /*
79277 * irq_chip specific flags
79278diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
79279index cac496b..ffa0567 100644
79280--- a/include/linux/irqchip/arm-gic.h
79281+++ b/include/linux/irqchip/arm-gic.h
79282@@ -61,9 +61,11 @@
79283
79284 #ifndef __ASSEMBLY__
79285
79286+#include <linux/irq.h>
79287+
79288 struct device_node;
79289
79290-extern struct irq_chip gic_arch_extn;
79291+extern irq_chip_no_const gic_arch_extn;
79292
79293 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
79294 u32 offset, struct device_node *);
79295diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
79296index d235e88..8ccbe74 100644
79297--- a/include/linux/jiffies.h
79298+++ b/include/linux/jiffies.h
79299@@ -292,14 +292,14 @@ extern unsigned long preset_lpj;
79300 /*
79301 * Convert various time units to each other:
79302 */
79303-extern unsigned int jiffies_to_msecs(const unsigned long j);
79304-extern unsigned int jiffies_to_usecs(const unsigned long j);
79305-extern unsigned long msecs_to_jiffies(const unsigned int m);
79306-extern unsigned long usecs_to_jiffies(const unsigned int u);
79307-extern unsigned long timespec_to_jiffies(const struct timespec *value);
79308+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
79309+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
79310+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
79311+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
79312+extern unsigned long timespec_to_jiffies(const struct timespec *value) __intentional_overflow(-1);
79313 extern void jiffies_to_timespec(const unsigned long jiffies,
79314 struct timespec *value);
79315-extern unsigned long timeval_to_jiffies(const struct timeval *value);
79316+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
79317 extern void jiffies_to_timeval(const unsigned long jiffies,
79318 struct timeval *value);
79319
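
The time-conversion helpers get the same exemption: their intermediate multiplications can exceed the target range, and the real implementations clamp rather than trap. A simplified standalone model; the kernel versions special-case HZ divisibility and are not this naive.

#include <stdio.h>

#define DEMO_HZ 1000u
#define DEMO_MAX_JIFFY_OFFSET ((~0UL >> 1) - 1)

static unsigned long demo_msecs_to_jiffies(unsigned int m)
{
	unsigned long j = (unsigned long)m * DEMO_HZ / 1000u;

	/* clamp instead of trapping on overflow, as the kernel does */
	return j > DEMO_MAX_JIFFY_OFFSET ? DEMO_MAX_JIFFY_OFFSET : j;
}

int main(void)
{
	printf("%lu\n", demo_msecs_to_jiffies(2000));	/* 2000 at HZ=1000 */
	return 0;
}
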
79320diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
79321index 6883e19..e854fcb 100644
79322--- a/include/linux/kallsyms.h
79323+++ b/include/linux/kallsyms.h
79324@@ -15,7 +15,8 @@
79325
79326 struct module;
79327
79328-#ifdef CONFIG_KALLSYMS
79329+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
79330+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
79331 /* Lookup the address for a symbol. Returns 0 if not found. */
79332 unsigned long kallsyms_lookup_name(const char *name);
79333
79334@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
79335 /* Stupid that this does nothing, but I didn't create this mess. */
79336 #define __print_symbol(fmt, addr)
79337 #endif /*CONFIG_KALLSYMS*/
79338+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
79339+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
79340+extern unsigned long kallsyms_lookup_name(const char *name);
79341+extern void __print_symbol(const char *fmt, unsigned long address);
79342+extern int sprint_backtrace(char *buffer, unsigned long address);
79343+extern int sprint_symbol(char *buffer, unsigned long address);
79344+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
79345+const char *kallsyms_lookup(unsigned long addr,
79346+ unsigned long *symbolsize,
79347+ unsigned long *offset,
79348+ char **modname, char *namebuf);
79349+extern int kallsyms_lookup_size_offset(unsigned long addr,
79350+ unsigned long *symbolsize,
79351+ unsigned long *offset);
79352+#endif
79353
79354 /* This macro allows us to keep printk typechecking */
79355 static __printf(1, 2)
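
The intent of the double guard above: with GRKERNSEC_HIDESYM, the symbol-lookup API collapses to the CONFIG_KALLSYMS=n stubs for every translation unit except the few whitelisted files named in the comment, which announce themselves before including the header. A sketch of how such a file opts in; the define-before-include mechanism is inferred from the guard, not shown in this hunk.

/* e.g. near the top of kernel/kallsyms.c under this patch */
#define __INCLUDED_BY_HIDESYM 1
#include <linux/kallsyms.h>
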
79356diff --git a/include/linux/key-type.h b/include/linux/key-type.h
79357index a74c3a8..28d3f21 100644
79358--- a/include/linux/key-type.h
79359+++ b/include/linux/key-type.h
79360@@ -131,7 +131,7 @@ struct key_type {
79361 /* internal fields */
79362 struct list_head link; /* link in types list */
79363 struct lock_class_key lock_class; /* key->sem lock class */
79364-};
79365+} __do_const;
79366
79367 extern struct key_type key_type_keyring;
79368
79369diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
79370index dfb4f2f..7927e62 100644
79371--- a/include/linux/kgdb.h
79372+++ b/include/linux/kgdb.h
79373@@ -52,7 +52,7 @@ extern int kgdb_connected;
79374 extern int kgdb_io_module_registered;
79375
79376 extern atomic_t kgdb_setting_breakpoint;
79377-extern atomic_t kgdb_cpu_doing_single_step;
79378+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
79379
79380 extern struct task_struct *kgdb_usethread;
79381 extern struct task_struct *kgdb_contthread;
79382@@ -254,7 +254,7 @@ struct kgdb_arch {
79383 void (*correct_hw_break)(void);
79384
79385 void (*enable_nmi)(bool on);
79386-};
79387+} __do_const;
79388
79389 /**
79390 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
79391@@ -279,7 +279,7 @@ struct kgdb_io {
79392 void (*pre_exception) (void);
79393 void (*post_exception) (void);
79394 int is_console;
79395-};
79396+} __do_const;
79397
79398 extern struct kgdb_arch arch_kgdb_ops;
79399
79400diff --git a/include/linux/kmod.h b/include/linux/kmod.h
79401index 0555cc6..40116ce 100644
79402--- a/include/linux/kmod.h
79403+++ b/include/linux/kmod.h
79404@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
79405 * usually useless though. */
79406 extern __printf(2, 3)
79407 int __request_module(bool wait, const char *name, ...);
79408+extern __printf(3, 4)
79409+int ___request_module(bool wait, char *param_name, const char *name, ...);
79410 #define request_module(mod...) __request_module(true, mod)
79411 #define request_module_nowait(mod...) __request_module(false, mod)
79412 #define try_then_request_module(x, mod...) \
79413@@ -57,6 +59,9 @@ struct subprocess_info {
79414 struct work_struct work;
79415 struct completion *complete;
79416 char *path;
79417+#ifdef CONFIG_GRKERNSEC
79418+ char *origpath;
79419+#endif
79420 char **argv;
79421 char **envp;
79422 int wait;
79423diff --git a/include/linux/kobject.h b/include/linux/kobject.h
79424index e7ba650..0af3acb 100644
79425--- a/include/linux/kobject.h
79426+++ b/include/linux/kobject.h
79427@@ -116,7 +116,7 @@ struct kobj_type {
79428 struct attribute **default_attrs;
79429 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
79430 const void *(*namespace)(struct kobject *kobj);
79431-};
79432+} __do_const;
79433
79434 struct kobj_uevent_env {
79435 char *envp[UEVENT_NUM_ENVP];
79436@@ -139,6 +139,7 @@ struct kobj_attribute {
79437 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
79438 const char *buf, size_t count);
79439 };
79440+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
79441
79442 extern const struct sysfs_ops kobj_sysfs_ops;
79443
79444@@ -166,7 +167,7 @@ struct kset {
79445 spinlock_t list_lock;
79446 struct kobject kobj;
79447 const struct kset_uevent_ops *uevent_ops;
79448-};
79449+} __randomize_layout;
79450
79451 extern void kset_init(struct kset *kset);
79452 extern int __must_check kset_register(struct kset *kset);
79453diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
79454index df32d25..fb52e27 100644
79455--- a/include/linux/kobject_ns.h
79456+++ b/include/linux/kobject_ns.h
79457@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
79458 const void *(*netlink_ns)(struct sock *sk);
79459 const void *(*initial_ns)(void);
79460 void (*drop_ns)(void *);
79461-};
79462+} __do_const;
79463
79464 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
79465 int kobj_ns_type_registered(enum kobj_ns_type type);
79466diff --git a/include/linux/kref.h b/include/linux/kref.h
79467index 484604d..0f6c5b6 100644
79468--- a/include/linux/kref.h
79469+++ b/include/linux/kref.h
79470@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
79471 static inline int kref_sub(struct kref *kref, unsigned int count,
79472 void (*release)(struct kref *kref))
79473 {
79474- WARN_ON(release == NULL);
79475+ BUG_ON(release == NULL);
79476
79477 if (atomic_sub_and_test((int) count, &kref->refcount)) {
79478 release(kref);
79479diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
79480index 9523d2a..16c0424 100644
79481--- a/include/linux/kvm_host.h
79482+++ b/include/linux/kvm_host.h
79483@@ -457,7 +457,7 @@ static inline void kvm_irqfd_exit(void)
79484 {
79485 }
79486 #endif
79487-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79488+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
79489 struct module *module);
79490 void kvm_exit(void);
79491
79492@@ -632,7 +632,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
79493 struct kvm_guest_debug *dbg);
79494 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
79495
79496-int kvm_arch_init(void *opaque);
79497+int kvm_arch_init(const void *opaque);
79498 void kvm_arch_exit(void);
79499
79500 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
79501diff --git a/include/linux/libata.h b/include/linux/libata.h
79502index bec6dbe..2873d64 100644
79503--- a/include/linux/libata.h
79504+++ b/include/linux/libata.h
79505@@ -975,7 +975,7 @@ struct ata_port_operations {
79506 * fields must be pointers.
79507 */
79508 const struct ata_port_operations *inherits;
79509-};
79510+} __do_const;
79511
79512 struct ata_port_info {
79513 unsigned long flags;
79514diff --git a/include/linux/linkage.h b/include/linux/linkage.h
79515index d3e8ad2..a949f68 100644
79516--- a/include/linux/linkage.h
79517+++ b/include/linux/linkage.h
79518@@ -31,6 +31,7 @@
79519 #endif
79520
79521 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
79522+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
79523 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
79524
79525 /*
79526diff --git a/include/linux/list.h b/include/linux/list.h
79527index ef95941..82db65a 100644
79528--- a/include/linux/list.h
79529+++ b/include/linux/list.h
79530@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
79531 extern void list_del(struct list_head *entry);
79532 #endif
79533
79534+extern void __pax_list_add(struct list_head *new,
79535+ struct list_head *prev,
79536+ struct list_head *next);
79537+static inline void pax_list_add(struct list_head *new, struct list_head *head)
79538+{
79539+ __pax_list_add(new, head, head->next);
79540+}
79541+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
79542+{
79543+ __pax_list_add(new, head->prev, head);
79544+}
79545+extern void pax_list_del(struct list_head *entry);
79546+
79547 /**
79548 * list_replace - replace old entry by new one
79549 * @old : the element to be replaced
79550@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
79551 INIT_LIST_HEAD(entry);
79552 }
79553
79554+extern void pax_list_del_init(struct list_head *entry);
79555+
79556 /**
79557 * list_move - delete from one list and add as another's head
79558 * @list: the entry to move
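
The pax_list_* helpers mirror list_add(), list_add_tail(), and list_del() exactly, but their out-of-line implementations (added elsewhere in this patch) write through the read-only protection that KERNEXEC/constification places on certain global lists. Usage is a drop-in swap wherever a node may live in write-protected memory; a minimal sketch:

#include <linux/list.h>

static LIST_HEAD(demo_head);
static struct list_head demo_node;	/* imagine this lives in protected data */

static void demo(void)
{
	pax_list_add_tail(&demo_node, &demo_head);	/* like list_add_tail() */
	pax_list_del(&demo_node);			/* like list_del() */
}
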
79559diff --git a/include/linux/math64.h b/include/linux/math64.h
79560index c45c089..298841c 100644
79561--- a/include/linux/math64.h
79562+++ b/include/linux/math64.h
79563@@ -15,7 +15,7 @@
79564 * This is commonly provided by 32bit archs to provide an optimized 64bit
79565 * divide.
79566 */
79567-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
79568+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
79569 {
79570 *remainder = dividend % divisor;
79571 return dividend / divisor;
79572@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
79573 /**
79574 * div64_u64 - unsigned 64bit divide with 64bit divisor
79575 */
79576-static inline u64 div64_u64(u64 dividend, u64 divisor)
79577+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
79578 {
79579 return dividend / divisor;
79580 }
79581@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
79582 #define div64_ul(x, y) div_u64((x), (y))
79583
79584 #ifndef div_u64_rem
79585-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
79586+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
79587 {
79588 *remainder = do_div(dividend, divisor);
79589 return dividend;
79590@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
79591 #endif
79592
79593 #ifndef div64_u64
79594-extern u64 div64_u64(u64 dividend, u64 divisor);
79595+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
79596 #endif
79597
79598 #ifndef div64_s64
79599@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
79600 * divide.
79601 */
79602 #ifndef div_u64
79603-static inline u64 div_u64(u64 dividend, u32 divisor)
79604+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
79605 {
79606 u32 remainder;
79607 return div_u64_rem(dividend, divisor, &remainder);
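
Note: __intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin that ships with this patch set; it exempts these division helpers from overflow instrumentation, since their truncating 64/32 arithmetic is by design. Callers are unaffected — a hypothetical example:

#include <linux/math64.h>

/* Illustrative helper, not from the patch: explicit 64/32 division
 * so 32-bit hosts avoid pulling in a libgcc 64/64 divide. */
static u64 mean_bytes(u64 total_bytes, u32 samples)
{
        return samples ? div_u64(total_bytes, samples) : 0;
}
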
79608diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
79609index 9fe426b..8148be6 100644
79610--- a/include/linux/mempolicy.h
79611+++ b/include/linux/mempolicy.h
79612@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
79613 }
79614
79615 #define vma_policy(vma) ((vma)->vm_policy)
79616+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
79617+{
79618+ vma->vm_policy = pol;
79619+}
79620
79621 static inline void mpol_get(struct mempolicy *pol)
79622 {
79623@@ -241,6 +245,9 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
79624 }
79625
79626 #define vma_policy(vma) NULL
79627+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
79628+{
79629+}
79630
79631 static inline int
79632 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
79633diff --git a/include/linux/mm.h b/include/linux/mm.h
79634index 0ab5439..2859c61 100644
79635--- a/include/linux/mm.h
79636+++ b/include/linux/mm.h
79637@@ -117,6 +117,11 @@ extern unsigned int kobjsize(const void *objp);
79638 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
79639 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
79640 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
79641+
79642+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
79643+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
79644+#endif
79645+
79646 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
79647
79648 #ifdef CONFIG_MEM_SOFT_DIRTY
79649@@ -219,8 +224,8 @@ struct vm_operations_struct {
79650 /* called by access_process_vm when get_user_pages() fails, typically
79651 * for use by special VMAs that can switch between memory and hardware
79652 */
79653- int (*access)(struct vm_area_struct *vma, unsigned long addr,
79654- void *buf, int len, int write);
79655+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
79656+ void *buf, size_t len, int write);
79657 #ifdef CONFIG_NUMA
79658 /*
79659 * set_policy() op must add a reference to any non-NULL @new mempolicy
79660@@ -250,6 +255,7 @@ struct vm_operations_struct {
79661 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
79662 unsigned long size, pgoff_t pgoff);
79663 };
79664+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
79665
79666 struct mmu_gather;
79667 struct inode;
79668@@ -1064,8 +1070,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
79669 unsigned long *pfn);
79670 int follow_phys(struct vm_area_struct *vma, unsigned long address,
79671 unsigned int flags, unsigned long *prot, resource_size_t *phys);
79672-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
79673- void *buf, int len, int write);
79674+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
79675+ void *buf, size_t len, int write);
79676
79677 static inline void unmap_shared_mapping_range(struct address_space *mapping,
79678 loff_t const holebegin, loff_t const holelen)
79679@@ -1104,9 +1110,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
79680 }
79681 #endif
79682
79683-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
79684-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
79685- void *buf, int len, int write);
79686+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
79687+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
79688+ void *buf, size_t len, int write);
79689
79690 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
79691 unsigned long start, unsigned long nr_pages,
79692@@ -1138,34 +1144,6 @@ int set_page_dirty(struct page *page);
79693 int set_page_dirty_lock(struct page *page);
79694 int clear_page_dirty_for_io(struct page *page);
79695
79696-/* Is the vma a continuation of the stack vma above it? */
79697-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
79698-{
79699- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
79700-}
79701-
79702-static inline int stack_guard_page_start(struct vm_area_struct *vma,
79703- unsigned long addr)
79704-{
79705- return (vma->vm_flags & VM_GROWSDOWN) &&
79706- (vma->vm_start == addr) &&
79707- !vma_growsdown(vma->vm_prev, addr);
79708-}
79709-
79710-/* Is the vma a continuation of the stack vma below it? */
79711-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
79712-{
79713- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
79714-}
79715-
79716-static inline int stack_guard_page_end(struct vm_area_struct *vma,
79717- unsigned long addr)
79718-{
79719- return (vma->vm_flags & VM_GROWSUP) &&
79720- (vma->vm_end == addr) &&
79721- !vma_growsup(vma->vm_next, addr);
79722-}
79723-
79724 extern pid_t
79725 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
79726
79727@@ -1265,6 +1243,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
79728 }
79729 #endif
79730
79731+#ifdef CONFIG_MMU
79732+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
79733+#else
79734+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
79735+{
79736+ return __pgprot(0);
79737+}
79738+#endif
79739+
79740 int vma_wants_writenotify(struct vm_area_struct *vma);
79741
79742 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
79743@@ -1283,8 +1270,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
79744 {
79745 return 0;
79746 }
79747+
79748+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
79749+ unsigned long address)
79750+{
79751+ return 0;
79752+}
79753 #else
79754 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
79755+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
79756 #endif
79757
79758 #ifdef __PAGETABLE_PMD_FOLDED
79759@@ -1293,8 +1287,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
79760 {
79761 return 0;
79762 }
79763+
79764+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
79765+ unsigned long address)
79766+{
79767+ return 0;
79768+}
79769 #else
79770 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
79771+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
79772 #endif
79773
79774 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
79775@@ -1312,11 +1313,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
79776 NULL: pud_offset(pgd, address);
79777 }
79778
79779+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
79780+{
79781+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
79782+ NULL: pud_offset(pgd, address);
79783+}
79784+
79785 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
79786 {
79787 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
79788 NULL: pmd_offset(pud, address);
79789 }
79790+
79791+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
79792+{
79793+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
79794+ NULL: pmd_offset(pud, address);
79795+}
79796 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
79797
79798 #if USE_SPLIT_PTE_PTLOCKS
79799@@ -1694,7 +1707,7 @@ extern int install_special_mapping(struct mm_struct *mm,
79800 unsigned long addr, unsigned long len,
79801 unsigned long flags, struct page **pages);
79802
79803-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
79804+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
79805
79806 extern unsigned long mmap_region(struct file *file, unsigned long addr,
79807 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
79808@@ -1702,6 +1715,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
79809 unsigned long len, unsigned long prot, unsigned long flags,
79810 unsigned long pgoff, unsigned long *populate);
79811 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
79812+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
79813
79814 #ifdef CONFIG_MMU
79815 extern int __mm_populate(unsigned long addr, unsigned long len,
79816@@ -1730,10 +1744,11 @@ struct vm_unmapped_area_info {
79817 unsigned long high_limit;
79818 unsigned long align_mask;
79819 unsigned long align_offset;
79820+ unsigned long threadstack_offset;
79821 };
79822
79823-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
79824-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
79825+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
79826+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
79827
79828 /*
79829 * Search for an unmapped address range.
79830@@ -1745,7 +1760,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
79831 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
79832 */
79833 static inline unsigned long
79834-vm_unmapped_area(struct vm_unmapped_area_info *info)
79835+vm_unmapped_area(const struct vm_unmapped_area_info *info)
79836 {
79837 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
79838 return unmapped_area(info);
79839@@ -1808,6 +1823,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
79840 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
79841 struct vm_area_struct **pprev);
79842
79843+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
79844+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
79845+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
79846+
79847 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
79848 NULL if none. Assume start_addr < end_addr. */
79849 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
79850@@ -1836,15 +1855,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
79851 return vma;
79852 }
79853
79854-#ifdef CONFIG_MMU
79855-pgprot_t vm_get_page_prot(unsigned long vm_flags);
79856-#else
79857-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
79858-{
79859- return __pgprot(0);
79860-}
79861-#endif
79862-
79863 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
79864 unsigned long change_prot_numa(struct vm_area_struct *vma,
79865 unsigned long start, unsigned long end);
79866@@ -1896,6 +1906,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
79867 static inline void vm_stat_account(struct mm_struct *mm,
79868 unsigned long flags, struct file *file, long pages)
79869 {
79870+
79871+#ifdef CONFIG_PAX_RANDMMAP
79872+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
79873+#endif
79874+
79875 mm->total_vm += pages;
79876 }
79877 #endif /* CONFIG_PROC_FS */
79878@@ -1977,7 +1992,7 @@ extern int unpoison_memory(unsigned long pfn);
79879 extern int sysctl_memory_failure_early_kill;
79880 extern int sysctl_memory_failure_recovery;
79881 extern void shake_page(struct page *p, int access);
79882-extern atomic_long_t num_poisoned_pages;
79883+extern atomic_long_unchecked_t num_poisoned_pages;
79884 extern int soft_offline_page(struct page *page, int flags);
79885
79886 extern void dump_page(struct page *page);
79887@@ -2014,5 +2029,11 @@ void __init setup_nr_node_ids(void);
79888 static inline void setup_nr_node_ids(void) {}
79889 #endif
79890
79891+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79892+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
79893+#else
79894+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
79895+#endif
79896+
79897 #endif /* __KERNEL__ */
79898 #endif /* _LINUX_MM_H */
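
Note: beyond the ssize_t/size_t conversions and the relocated vm_get_page_prot() prototype, the key addition above is the threadstack_offset field in struct vm_unmapped_area_info, which the search functions now receive as const. A sketch of how an arch get_unmapped_area() might fill the extended struct — the limits below are placeholders, and gr_rand_threadstack_offset() is the helper declared in the sched.h hunk further down:

#include <linux/mm.h>
#include <linux/sched.h>

static unsigned long pick_range(struct file *filp, unsigned long len,
                                unsigned long flags)
{
        struct vm_unmapped_area_info info;

        info.flags = 0;                       /* bottom-up search */
        info.length = len;
        info.low_limit = current->mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = 0;
        info.align_offset = 0;
        /* new field, consumed by the GRKERNSEC_RAND_THREADSTACK logic */
        info.threadstack_offset = gr_rand_threadstack_offset(current->mm,
                                                             filp, flags);
        return vm_unmapped_area(&info);
}
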
79899diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
79900index 290901a..e99b01c 100644
79901--- a/include/linux/mm_types.h
79902+++ b/include/linux/mm_types.h
79903@@ -307,7 +307,9 @@ struct vm_area_struct {
79904 #ifdef CONFIG_NUMA
79905 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
79906 #endif
79907-};
79908+
79909+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
79910+} __randomize_layout;
79911
79912 struct core_thread {
79913 struct task_struct *task;
79914@@ -453,7 +455,25 @@ struct mm_struct {
79915 bool tlb_flush_pending;
79916 #endif
79917 struct uprobes_state uprobes_state;
79918-};
79919+
79920+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
79921+ unsigned long pax_flags;
79922+#endif
79923+
79924+#ifdef CONFIG_PAX_DLRESOLVE
79925+ unsigned long call_dl_resolve;
79926+#endif
79927+
79928+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
79929+ unsigned long call_syscall;
79930+#endif
79931+
79932+#ifdef CONFIG_PAX_ASLR
79933+ unsigned long delta_mmap; /* randomized offset */
79934+ unsigned long delta_stack; /* randomized offset */
79935+#endif
79936+
79937+} __randomize_layout;
79938
79939 static inline void mm_init_cpumask(struct mm_struct *mm)
79940 {
79941diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
79942index c5d5278..f0b68c8 100644
79943--- a/include/linux/mmiotrace.h
79944+++ b/include/linux/mmiotrace.h
79945@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
79946 /* Called from ioremap.c */
79947 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
79948 void __iomem *addr);
79949-extern void mmiotrace_iounmap(volatile void __iomem *addr);
79950+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
79951
79952 /* For anyone to insert markers. Remember trailing newline. */
79953 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
79954@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
79955 {
79956 }
79957
79958-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
79959+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
79960 {
79961 }
79962
79963diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
79964index bd791e4..8617c34f 100644
79965--- a/include/linux/mmzone.h
79966+++ b/include/linux/mmzone.h
79967@@ -396,7 +396,7 @@ struct zone {
79968 unsigned long flags; /* zone flags, see below */
79969
79970 /* Zone statistics */
79971- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79972+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79973
79974 /*
79975 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
79976diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
79977index 45e9214..a7227d6 100644
79978--- a/include/linux/mod_devicetable.h
79979+++ b/include/linux/mod_devicetable.h
79980@@ -13,7 +13,7 @@
79981 typedef unsigned long kernel_ulong_t;
79982 #endif
79983
79984-#define PCI_ANY_ID (~0)
79985+#define PCI_ANY_ID ((__u16)~0)
79986
79987 struct pci_device_id {
79988 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
79989@@ -139,7 +139,7 @@ struct usb_device_id {
79990 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
79991 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
79992
79993-#define HID_ANY_ID (~0)
79994+#define HID_ANY_ID (~0U)
79995 #define HID_BUS_ANY 0xffff
79996 #define HID_GROUP_ANY 0x0000
79997
79998@@ -467,7 +467,7 @@ struct dmi_system_id {
79999 const char *ident;
80000 struct dmi_strmatch matches[4];
80001 void *driver_data;
80002-};
80003+} __do_const;
80004 /*
80005 * struct dmi_device_id appears during expansion of
80006 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
80007diff --git a/include/linux/module.h b/include/linux/module.h
80008index 15cd6b1..f6e2e6a 100644
80009--- a/include/linux/module.h
80010+++ b/include/linux/module.h
80011@@ -17,9 +17,11 @@
80012 #include <linux/moduleparam.h>
80013 #include <linux/tracepoint.h>
80014 #include <linux/export.h>
80015+#include <linux/fs.h>
80016
80017 #include <linux/percpu.h>
80018 #include <asm/module.h>
80019+#include <asm/pgtable.h>
80020
80021 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
80022 #define MODULE_SIG_STRING "~Module signature appended~\n"
80023@@ -43,7 +45,7 @@ struct module_kobject {
80024 struct kobject *drivers_dir;
80025 struct module_param_attrs *mp;
80026 struct completion *kobj_completion;
80027-};
80028+} __randomize_layout;
80029
80030 struct module_attribute {
80031 struct attribute attr;
80032@@ -55,12 +57,13 @@ struct module_attribute {
80033 int (*test)(struct module *);
80034 void (*free)(struct module *);
80035 };
80036+typedef struct module_attribute __no_const module_attribute_no_const;
80037
80038 struct module_version_attribute {
80039 struct module_attribute mattr;
80040 const char *module_name;
80041 const char *version;
80042-} __attribute__ ((__aligned__(sizeof(void *))));
80043+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
80044
80045 extern ssize_t __modver_version_show(struct module_attribute *,
80046 struct module_kobject *, char *);
80047@@ -238,7 +241,7 @@ struct module
80048
80049 /* Sysfs stuff. */
80050 struct module_kobject mkobj;
80051- struct module_attribute *modinfo_attrs;
80052+ module_attribute_no_const *modinfo_attrs;
80053 const char *version;
80054 const char *srcversion;
80055 struct kobject *holders_dir;
80056@@ -287,19 +290,16 @@ struct module
80057 int (*init)(void);
80058
80059 /* If this is non-NULL, vfree after init() returns */
80060- void *module_init;
80061+ void *module_init_rx, *module_init_rw;
80062
80063 /* Here is the actual code + data, vfree'd on unload. */
80064- void *module_core;
80065+ void *module_core_rx, *module_core_rw;
80066
80067 /* Here are the sizes of the init and core sections */
80068- unsigned int init_size, core_size;
80069+ unsigned int init_size_rw, core_size_rw;
80070
80071 /* The size of the executable code in each section. */
80072- unsigned int init_text_size, core_text_size;
80073-
80074- /* Size of RO sections of the module (text+rodata) */
80075- unsigned int init_ro_size, core_ro_size;
80076+ unsigned int init_size_rx, core_size_rx;
80077
80078 /* Arch-specific module values */
80079 struct mod_arch_specific arch;
80080@@ -355,6 +355,10 @@ struct module
80081 #ifdef CONFIG_EVENT_TRACING
80082 struct ftrace_event_call **trace_events;
80083 unsigned int num_trace_events;
80084+ struct file_operations trace_id;
80085+ struct file_operations trace_enable;
80086+ struct file_operations trace_format;
80087+ struct file_operations trace_filter;
80088 #endif
80089 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
80090 unsigned int num_ftrace_callsites;
80091@@ -378,7 +382,7 @@ struct module
80092 ctor_fn_t *ctors;
80093 unsigned int num_ctors;
80094 #endif
80095-};
80096+} __randomize_layout;
80097 #ifndef MODULE_ARCH_INIT
80098 #define MODULE_ARCH_INIT {}
80099 #endif
80100@@ -399,16 +403,46 @@ bool is_module_address(unsigned long addr);
80101 bool is_module_percpu_address(unsigned long addr);
80102 bool is_module_text_address(unsigned long addr);
80103
80104+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
80105+{
80106+
80107+#ifdef CONFIG_PAX_KERNEXEC
80108+ if (ktla_ktva(addr) >= (unsigned long)start &&
80109+ ktla_ktva(addr) < (unsigned long)start + size)
80110+ return 1;
80111+#endif
80112+
80113+ return ((void *)addr >= start && (void *)addr < start + size);
80114+}
80115+
80116+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
80117+{
80118+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
80119+}
80120+
80121+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
80122+{
80123+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
80124+}
80125+
80126+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
80127+{
80128+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
80129+}
80130+
80131+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
80132+{
80133+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
80134+}
80135+
80136 static inline int within_module_core(unsigned long addr, const struct module *mod)
80137 {
80138- return (unsigned long)mod->module_core <= addr &&
80139- addr < (unsigned long)mod->module_core + mod->core_size;
80140+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
80141 }
80142
80143 static inline int within_module_init(unsigned long addr, const struct module *mod)
80144 {
80145- return (unsigned long)mod->module_init <= addr &&
80146- addr < (unsigned long)mod->module_init + mod->init_size;
80147+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
80148 }
80149
80150 /* Search for module by name: must hold module_mutex. */
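
Note: with the module core/init regions split into rx and rw halves, the old containment checks become unions of the per-region helpers, and within_module_range() additionally matches the ktla_ktva()-translated address so KERNEXEC's alternate kernel text mapping is covered. Illustrative classification helper (not from the patch):

#include <linux/module.h>

static const char *classify_mod_addr(unsigned long addr,
                                     const struct module *mod)
{
        if (within_module_core_rx(addr, mod))
                return "core text/rodata";
        if (within_module_core_rw(addr, mod))
                return "core data";
        if (within_module_init_rx(addr, mod))
                return "init text";
        if (within_module_init_rw(addr, mod))
                return "init data";
        return "not in this module";
}
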
80151diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
80152index 560ca53..ef621ef 100644
80153--- a/include/linux/moduleloader.h
80154+++ b/include/linux/moduleloader.h
80155@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
80156 sections. Returns NULL on failure. */
80157 void *module_alloc(unsigned long size);
80158
80159+#ifdef CONFIG_PAX_KERNEXEC
80160+void *module_alloc_exec(unsigned long size);
80161+#else
80162+#define module_alloc_exec(x) module_alloc(x)
80163+#endif
80164+
80165 /* Free memory returned from module_alloc. */
80166 void module_free(struct module *mod, void *module_region);
80167
80168+#ifdef CONFIG_PAX_KERNEXEC
80169+void module_free_exec(struct module *mod, void *module_region);
80170+#else
80171+#define module_free_exec(x, y) module_free((x), (y))
80172+#endif
80173+
80174 /*
80175 * Apply the given relocation to the (simplified) ELF. Return -error
80176 * or 0.
80177@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
80178 unsigned int relsec,
80179 struct module *me)
80180 {
80181+#ifdef CONFIG_MODULES
80182 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
80183+#endif
80184 return -ENOEXEC;
80185 }
80186 #endif
80187@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
80188 unsigned int relsec,
80189 struct module *me)
80190 {
80191+#ifdef CONFIG_MODULES
80192 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
80193+#endif
80194 return -ENOEXEC;
80195 }
80196 #endif
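
Note: module_alloc_exec()/module_free_exec() give the loader a dedicated allocator for the executable (rx) regions under PAX_KERNEXEC and collapse to the plain helpers otherwise, so generic code can call them unconditionally. A hypothetical sketch of allocating the split regions introduced in the module.h hunk above:

#include <linux/moduleloader.h>
#include <linux/errno.h>

static int alloc_module_regions(struct module *mod,
                                unsigned long text_size,
                                unsigned long data_size)
{
        void *text = module_alloc_exec(text_size); /* rx under KERNEXEC */
        void *data = module_alloc(data_size);      /* rw, as before */

        if (!text || !data) {
                if (text)
                        module_free_exec(mod, text);
                if (data)
                        module_free(mod, data);
                return -ENOMEM;
        }

        mod->module_core_rx = text;
        mod->module_core_rw = data;
        return 0;
}
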
80197diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
80198index c3eb102..073c4a6 100644
80199--- a/include/linux/moduleparam.h
80200+++ b/include/linux/moduleparam.h
80201@@ -295,7 +295,7 @@ static inline void __kernel_param_unlock(void)
80202 * @len is usually just sizeof(string).
80203 */
80204 #define module_param_string(name, string, len, perm) \
80205- static const struct kparam_string __param_string_##name \
80206+ static const struct kparam_string __param_string_##name __used \
80207 = { len, string }; \
80208 __module_param_call(MODULE_PARAM_PREFIX, name, \
80209 &param_ops_string, \
80210@@ -434,7 +434,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
80211 */
80212 #define module_param_array_named(name, array, type, nump, perm) \
80213 param_check_##type(name, &(array)[0]); \
80214- static const struct kparam_array __param_arr_##name \
80215+ static const struct kparam_array __param_arr_##name __used \
80216 = { .max = ARRAY_SIZE(array), .num = nump, \
80217 .ops = &param_ops_##type, \
80218 .elemsize = sizeof(array[0]), .elem = array }; \
80219diff --git a/include/linux/mount.h b/include/linux/mount.h
80220index 371d346..fba2819 100644
80221--- a/include/linux/mount.h
80222+++ b/include/linux/mount.h
80223@@ -56,7 +56,7 @@ struct vfsmount {
80224 struct dentry *mnt_root; /* root of the mounted tree */
80225 struct super_block *mnt_sb; /* pointer to superblock */
80226 int mnt_flags;
80227-};
80228+} __randomize_layout;
80229
80230 struct file; /* forward dec */
80231
80232diff --git a/include/linux/namei.h b/include/linux/namei.h
80233index 492de72..1bddcd4 100644
80234--- a/include/linux/namei.h
80235+++ b/include/linux/namei.h
80236@@ -19,7 +19,7 @@ struct nameidata {
80237 unsigned seq, m_seq;
80238 int last_type;
80239 unsigned depth;
80240- char *saved_names[MAX_NESTED_LINKS + 1];
80241+ const char *saved_names[MAX_NESTED_LINKS + 1];
80242 };
80243
80244 /*
80245@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
80246
80247 extern void nd_jump_link(struct nameidata *nd, struct path *path);
80248
80249-static inline void nd_set_link(struct nameidata *nd, char *path)
80250+static inline void nd_set_link(struct nameidata *nd, const char *path)
80251 {
80252 nd->saved_names[nd->depth] = path;
80253 }
80254
80255-static inline char *nd_get_link(struct nameidata *nd)
80256+static inline const char *nd_get_link(const struct nameidata *nd)
80257 {
80258 return nd->saved_names[nd->depth];
80259 }
80260diff --git a/include/linux/net.h b/include/linux/net.h
80261index 69be3e6..0fb422d 100644
80262--- a/include/linux/net.h
80263+++ b/include/linux/net.h
80264@@ -192,7 +192,7 @@ struct net_proto_family {
80265 int (*create)(struct net *net, struct socket *sock,
80266 int protocol, int kern);
80267 struct module *owner;
80268-};
80269+} __do_const;
80270
80271 struct iovec;
80272 struct kvec;
80273diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
80274index 2177a6b..67fc561 100644
80275--- a/include/linux/netdevice.h
80276+++ b/include/linux/netdevice.h
80277@@ -1129,6 +1129,7 @@ struct net_device_ops {
80278 struct net_device *dev,
80279 void *priv);
80280 };
80281+typedef struct net_device_ops __no_const net_device_ops_no_const;
80282
80283 /*
80284 * The DEVICE structure.
80285@@ -1211,7 +1212,7 @@ struct net_device {
80286 int iflink;
80287
80288 struct net_device_stats stats;
80289- atomic_long_t rx_dropped; /* dropped packets by core network
80290+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
80291 * Do not use this in drivers.
80292 */
80293
80294diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
80295index 2077489..a15e561 100644
80296--- a/include/linux/netfilter.h
80297+++ b/include/linux/netfilter.h
80298@@ -84,7 +84,7 @@ struct nf_sockopt_ops {
80299 #endif
80300 /* Use the module struct to lock set/get code in place */
80301 struct module *owner;
80302-};
80303+} __do_const;
80304
80305 /* Function to register/unregister hook points. */
80306 int nf_register_hook(struct nf_hook_ops *reg);
80307diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
80308index 28c7436..2d6156a 100644
80309--- a/include/linux/netfilter/nfnetlink.h
80310+++ b/include/linux/netfilter/nfnetlink.h
80311@@ -19,7 +19,7 @@ struct nfnl_callback {
80312 const struct nlattr * const cda[]);
80313 const struct nla_policy *policy; /* netlink attribute policy */
80314 const u_int16_t attr_count; /* number of nlattr's */
80315-};
80316+} __do_const;
80317
80318 struct nfnetlink_subsystem {
80319 const char *name;
80320diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
80321new file mode 100644
80322index 0000000..33f4af8
80323--- /dev/null
80324+++ b/include/linux/netfilter/xt_gradm.h
80325@@ -0,0 +1,9 @@
80326+#ifndef _LINUX_NETFILTER_XT_GRADM_H
80327+#define _LINUX_NETFILTER_XT_GRADM_H 1
80328+
80329+struct xt_gradm_mtinfo {
80330+ __u16 flags;
80331+ __u16 invflags;
80332+};
80333+
80334+#endif
80335diff --git a/include/linux/nls.h b/include/linux/nls.h
80336index 5dc635f..35f5e11 100644
80337--- a/include/linux/nls.h
80338+++ b/include/linux/nls.h
80339@@ -31,7 +31,7 @@ struct nls_table {
80340 const unsigned char *charset2upper;
80341 struct module *owner;
80342 struct nls_table *next;
80343-};
80344+} __do_const;
80345
80346 /* this value hold the maximum octet of charset */
80347 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
80348diff --git a/include/linux/notifier.h b/include/linux/notifier.h
80349index d14a4c3..a078786 100644
80350--- a/include/linux/notifier.h
80351+++ b/include/linux/notifier.h
80352@@ -54,7 +54,8 @@ struct notifier_block {
80353 notifier_fn_t notifier_call;
80354 struct notifier_block __rcu *next;
80355 int priority;
80356-};
80357+} __do_const;
80358+typedef struct notifier_block __no_const notifier_block_no_const;
80359
80360 struct atomic_notifier_head {
80361 spinlock_t lock;
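
Note: this __do_const plus *_no_const typedef pairing is the constify plugin's recurring idiom throughout the patch: the plugin turns every instance of the annotated structure into read-only data, and the typedef opts out the few instances that must legitimately be written at runtime. Illustrative declarations (the callback and names are hypothetical):

#include <linux/notifier.h>

static int my_cb(struct notifier_block *nb, unsigned long event, void *data)
{
        return NOTIFY_OK;                 /* hypothetical callback */
}

/* Plugin-constified: this instance ends up in read-only memory. */
static struct notifier_block boot_nb __used = {
        .notifier_call = my_cb,
};

/* Opt-out via the typedef: fields may still be assigned at runtime. */
static notifier_block_no_const runtime_nb __used;

static void choose_callback(void)
{
        runtime_nb.notifier_call = my_cb; /* set late, hence __no_const */
}
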
80362diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
80363index b2a0f15..4d7da32 100644
80364--- a/include/linux/oprofile.h
80365+++ b/include/linux/oprofile.h
80366@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
80367 int oprofilefs_create_ro_ulong(struct dentry * root,
80368 char const * name, ulong * val);
80369
80370-/** Create a file for read-only access to an atomic_t. */
80371+/** Create a file for read-only access to an atomic_unchecked_t. */
80372 int oprofilefs_create_ro_atomic(struct dentry * root,
80373- char const * name, atomic_t * val);
80374+ char const * name, atomic_unchecked_t * val);
80375
80376 /** create a directory */
80377 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
80378diff --git a/include/linux/padata.h b/include/linux/padata.h
80379index 4386946..f50c615 100644
80380--- a/include/linux/padata.h
80381+++ b/include/linux/padata.h
80382@@ -129,7 +129,7 @@ struct parallel_data {
80383 struct padata_serial_queue __percpu *squeue;
80384 atomic_t reorder_objects;
80385 atomic_t refcnt;
80386- atomic_t seq_nr;
80387+ atomic_unchecked_t seq_nr;
80388 struct padata_cpumask cpumask;
80389 spinlock_t lock ____cacheline_aligned;
80390 unsigned int processed;
80391diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
80392index a2e2f1d..8a391d2 100644
80393--- a/include/linux/pci_hotplug.h
80394+++ b/include/linux/pci_hotplug.h
80395@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
80396 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
80397 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
80398 int (*reset_slot) (struct hotplug_slot *slot, int probe);
80399-};
80400+} __do_const;
80401+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
80402
80403 /**
80404 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
80405diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
80406index 2e069d1..27054b8 100644
80407--- a/include/linux/perf_event.h
80408+++ b/include/linux/perf_event.h
80409@@ -327,8 +327,8 @@ struct perf_event {
80410
80411 enum perf_event_active_state state;
80412 unsigned int attach_state;
80413- local64_t count;
80414- atomic64_t child_count;
80415+ local64_t count; /* PaX: fix it one day */
80416+ atomic64_unchecked_t child_count;
80417
80418 /*
80419 * These are the total time in nanoseconds that the event
80420@@ -379,8 +379,8 @@ struct perf_event {
80421 * These accumulate total time (in nanoseconds) that children
80422 * events have been enabled and running, respectively.
80423 */
80424- atomic64_t child_total_time_enabled;
80425- atomic64_t child_total_time_running;
80426+ atomic64_unchecked_t child_total_time_enabled;
80427+ atomic64_unchecked_t child_total_time_running;
80428
80429 /*
80430 * Protect attach/detach and child_list:
80431@@ -707,7 +707,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
80432 entry->ip[entry->nr++] = ip;
80433 }
80434
80435-extern int sysctl_perf_event_paranoid;
80436+extern int sysctl_perf_event_legitimately_concerned;
80437 extern int sysctl_perf_event_mlock;
80438 extern int sysctl_perf_event_sample_rate;
80439 extern int sysctl_perf_cpu_time_max_percent;
80440@@ -722,19 +722,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
80441 loff_t *ppos);
80442
80443
80444+static inline bool perf_paranoid_any(void)
80445+{
80446+ return sysctl_perf_event_legitimately_concerned > 2;
80447+}
80448+
80449 static inline bool perf_paranoid_tracepoint_raw(void)
80450 {
80451- return sysctl_perf_event_paranoid > -1;
80452+ return sysctl_perf_event_legitimately_concerned > -1;
80453 }
80454
80455 static inline bool perf_paranoid_cpu(void)
80456 {
80457- return sysctl_perf_event_paranoid > 0;
80458+ return sysctl_perf_event_legitimately_concerned > 0;
80459 }
80460
80461 static inline bool perf_paranoid_kernel(void)
80462 {
80463- return sysctl_perf_event_paranoid > 1;
80464+ return sysctl_perf_event_legitimately_concerned > 1;
80465 }
80466
80467 extern void perf_event_init(void);
80468@@ -850,7 +855,7 @@ struct perf_pmu_events_attr {
80469 struct device_attribute attr;
80470 u64 id;
80471 const char *event_str;
80472-};
80473+} __do_const;
80474
80475 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
80476 static struct perf_pmu_events_attr _var = { \
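
Note: the sysctl rename preserves the existing semantics (-1 open, then progressively stricter at 0, 1, 2) and adds a fourth threshold: above 2, perf_paranoid_any() fires, which the patch uses to gate perf_event_open() for unprivileged callers. A sketch of the resulting policy checks (the function name and exact error codes are illustrative):

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/errno.h>

static int perf_open_policy(bool is_kernel_event)
{
        if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
                return -EACCES;      /* new >2 level: admin only, period */
        if (is_kernel_event &&
            perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
                return -EACCES;      /* level > 1: no kernel profiling */
        return 0;
}
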
80477diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
80478index 7246ef3..1539ea4 100644
80479--- a/include/linux/pid_namespace.h
80480+++ b/include/linux/pid_namespace.h
80481@@ -43,7 +43,7 @@ struct pid_namespace {
80482 int hide_pid;
80483 int reboot; /* group exit code if this pidns was rebooted */
80484 unsigned int proc_inum;
80485-};
80486+} __randomize_layout;
80487
80488 extern struct pid_namespace init_pid_ns;
80489
80490diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
80491index ab57526..94598804 100644
80492--- a/include/linux/pipe_fs_i.h
80493+++ b/include/linux/pipe_fs_i.h
80494@@ -47,10 +47,10 @@ struct pipe_inode_info {
80495 struct mutex mutex;
80496 wait_queue_head_t wait;
80497 unsigned int nrbufs, curbuf, buffers;
80498- unsigned int readers;
80499- unsigned int writers;
80500- unsigned int files;
80501- unsigned int waiting_writers;
80502+ atomic_t readers;
80503+ atomic_t writers;
80504+ atomic_t files;
80505+ atomic_t waiting_writers;
80506 unsigned int r_counter;
80507 unsigned int w_counter;
80508 struct page *tmp_page;
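
Note: converting readers/writers/files/waiting_writers to atomic_t hardens these reference counts against overflow-based refcount attacks, and obliges the pipe code patched elsewhere in this file to use the atomic API instead of plain increments and tests. A simplified sketch of the kind of conversion a release path needs (not the patch's actual fs/pipe.c code):

#include <linux/pipe_fs_i.h>
#include <linux/atomic.h>
#include <linux/wait.h>

static void release_reader(struct pipe_inode_info *pipe)
{
        /* was: if (!--pipe->readers && !pipe->writers) ... */
        if (atomic_dec_and_test(&pipe->readers) &&
            !atomic_read(&pipe->writers))
                wake_up_interruptible(&pipe->wait);
}
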
80509diff --git a/include/linux/pm.h b/include/linux/pm.h
80510index a224c7f..92d8a97 100644
80511--- a/include/linux/pm.h
80512+++ b/include/linux/pm.h
80513@@ -576,6 +576,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
80514 struct dev_pm_domain {
80515 struct dev_pm_ops ops;
80516 };
80517+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
80518
80519 /*
80520 * The PM_EVENT_ messages are also used by drivers implementing the legacy
80521diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
80522index 7c1d252..0e7061d 100644
80523--- a/include/linux/pm_domain.h
80524+++ b/include/linux/pm_domain.h
80525@@ -44,11 +44,11 @@ struct gpd_dev_ops {
80526 int (*thaw_early)(struct device *dev);
80527 int (*thaw)(struct device *dev);
80528 bool (*active_wakeup)(struct device *dev);
80529-};
80530+} __no_const;
80531
80532 struct gpd_cpu_data {
80533 unsigned int saved_exit_latency;
80534- struct cpuidle_state *idle_state;
80535+ cpuidle_state_no_const *idle_state;
80536 };
80537
80538 struct generic_pm_domain {
80539diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
80540index 6fa7cea..7bf6415 100644
80541--- a/include/linux/pm_runtime.h
80542+++ b/include/linux/pm_runtime.h
80543@@ -103,7 +103,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
80544
80545 static inline void pm_runtime_mark_last_busy(struct device *dev)
80546 {
80547- ACCESS_ONCE(dev->power.last_busy) = jiffies;
80548+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
80549 }
80550
80551 #else /* !CONFIG_PM_RUNTIME */
80552diff --git a/include/linux/pnp.h b/include/linux/pnp.h
80553index 195aafc..49a7bc2 100644
80554--- a/include/linux/pnp.h
80555+++ b/include/linux/pnp.h
80556@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
80557 struct pnp_fixup {
80558 char id[7];
80559 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
80560-};
80561+} __do_const;
80562
80563 /* config parameters */
80564 #define PNP_CONFIG_NORMAL 0x0001
80565diff --git a/include/linux/poison.h b/include/linux/poison.h
80566index 2110a81..13a11bb 100644
80567--- a/include/linux/poison.h
80568+++ b/include/linux/poison.h
80569@@ -19,8 +19,8 @@
80570 * under normal circumstances, used to verify that nobody uses
80571 * non-initialized list entries.
80572 */
80573-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
80574-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
80575+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
80576+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
80577
80578 /********** include/linux/timer.h **********/
80579 /*
80580diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
80581index d8b187c3..9a9257a 100644
80582--- a/include/linux/power/smartreflex.h
80583+++ b/include/linux/power/smartreflex.h
80584@@ -238,7 +238,7 @@ struct omap_sr_class_data {
80585 int (*notify)(struct omap_sr *sr, u32 status);
80586 u8 notify_flags;
80587 u8 class_type;
80588-};
80589+} __do_const;
80590
80591 /**
80592 * struct omap_sr_nvalue_table - Smartreflex n-target value info
80593diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
80594index 4ea1d37..80f4b33 100644
80595--- a/include/linux/ppp-comp.h
80596+++ b/include/linux/ppp-comp.h
80597@@ -84,7 +84,7 @@ struct compressor {
80598 struct module *owner;
80599 /* Extra skb space needed by the compressor algorithm */
80600 unsigned int comp_extra;
80601-};
80602+} __do_const;
80603
80604 /*
80605 * The return value from decompress routine is the length of the
80606diff --git a/include/linux/preempt.h b/include/linux/preempt.h
80607index a3d9dc8..8af9922 100644
80608--- a/include/linux/preempt.h
80609+++ b/include/linux/preempt.h
80610@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
80611 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
80612 #endif
80613
80614+#define raw_preempt_count_add(val) __preempt_count_add(val)
80615+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
80616+
80617 #define __preempt_count_inc() __preempt_count_add(1)
80618 #define __preempt_count_dec() __preempt_count_sub(1)
80619
80620 #define preempt_count_inc() preempt_count_add(1)
80621+#define raw_preempt_count_inc() raw_preempt_count_add(1)
80622 #define preempt_count_dec() preempt_count_sub(1)
80623+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
80624
80625 #ifdef CONFIG_PREEMPT_COUNT
80626
80627@@ -41,6 +46,12 @@ do { \
80628 barrier(); \
80629 } while (0)
80630
80631+#define raw_preempt_disable() \
80632+do { \
80633+ raw_preempt_count_inc(); \
80634+ barrier(); \
80635+} while (0)
80636+
80637 #define sched_preempt_enable_no_resched() \
80638 do { \
80639 barrier(); \
80640@@ -49,6 +60,12 @@ do { \
80641
80642 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
80643
80644+#define raw_preempt_enable_no_resched() \
80645+do { \
80646+ barrier(); \
80647+ raw_preempt_count_dec(); \
80648+} while (0)
80649+
80650 #ifdef CONFIG_PREEMPT
80651 #define preempt_enable() \
80652 do { \
80653@@ -105,8 +122,10 @@ do { \
80654 * region.
80655 */
80656 #define preempt_disable() barrier()
80657+#define raw_preempt_disable() barrier()
80658 #define sched_preempt_enable_no_resched() barrier()
80659 #define preempt_enable_no_resched() barrier()
80660+#define raw_preempt_enable_no_resched() barrier()
80661 #define preempt_enable() barrier()
80662 #define preempt_check_resched() do { } while (0)
80663
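
Note: the raw_ variants manipulate the preempt count through the __preempt_count primitives directly, bypassing preempt_count_add()/sub() and the debugging/tracing hooks those can carry, for low-level paths where the instrumented versions must not be re-entered; in the !CONFIG_PREEMPT_COUNT case they degrade to plain barriers like their non-raw counterparts. Minimal illustrative use (names are hypothetical):

#include <linux/preempt.h>

static unsigned long read_percpu_cursor(unsigned long *cursor)
{
        unsigned long v;

        raw_preempt_disable();          /* no debug/tracer hooks */
        v = *cursor;
        raw_preempt_enable_no_resched();
        return v;
}
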
80664diff --git a/include/linux/printk.h b/include/linux/printk.h
80665index 6949258..7c4730e 100644
80666--- a/include/linux/printk.h
80667+++ b/include/linux/printk.h
80668@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
80669 void early_printk(const char *s, ...) { }
80670 #endif
80671
80672+extern int kptr_restrict;
80673+
80674 #ifdef CONFIG_PRINTK
80675 asmlinkage __printf(5, 0)
80676 int vprintk_emit(int facility, int level,
80677@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
80678
80679 extern int printk_delay_msec;
80680 extern int dmesg_restrict;
80681-extern int kptr_restrict;
80682
80683 extern void wake_up_klogd(void);
80684
80685diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
80686index 608e60a..c26f864 100644
80687--- a/include/linux/proc_fs.h
80688+++ b/include/linux/proc_fs.h
80689@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
80690 return proc_create_data(name, mode, parent, proc_fops, NULL);
80691 }
80692
80693+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
80694+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
80695+{
80696+#ifdef CONFIG_GRKERNSEC_PROC_USER
80697+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
80698+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80699+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
80700+#else
80701+ return proc_create_data(name, mode, parent, proc_fops, NULL);
80702+#endif
80703+}
80704+
80705+
80706 extern void proc_set_size(struct proc_dir_entry *, loff_t);
80707 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
80708 extern void *PDE_DATA(const struct inode *);
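
Note: proc_create_grsec() centralizes the GRKERNSEC_PROC_USER/USERGROUP policy: whatever mode the caller requests, the entry is silently tightened to 0400 (root only) or 0440 (root plus the configured group) when those options are enabled. Callers substitute it for proc_create() — hypothetical example:

#include <linux/proc_fs.h>
#include <linux/errno.h>
#include <linux/init.h>

static const struct file_operations stats_fops; /* placeholder fops */

static int __init stats_proc_init(void)
{
        /* asks for 0444; honored only without GRKERNSEC_PROC_* */
        if (!proc_create_grsec("stats", 0444, NULL, &stats_fops))
                return -ENOMEM;
        return 0;
}
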
80709diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
80710index 34a1e10..70f6bde 100644
80711--- a/include/linux/proc_ns.h
80712+++ b/include/linux/proc_ns.h
80713@@ -14,7 +14,7 @@ struct proc_ns_operations {
80714 void (*put)(void *ns);
80715 int (*install)(struct nsproxy *nsproxy, void *ns);
80716 unsigned int (*inum)(void *ns);
80717-};
80718+} __do_const __randomize_layout;
80719
80720 struct proc_ns {
80721 void *ns;
80722diff --git a/include/linux/quota.h b/include/linux/quota.h
80723index cc7494a..1e27036 100644
80724--- a/include/linux/quota.h
80725+++ b/include/linux/quota.h
80726@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
80727
80728 extern bool qid_eq(struct kqid left, struct kqid right);
80729 extern bool qid_lt(struct kqid left, struct kqid right);
80730-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
80731+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
80732 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
80733 extern bool qid_valid(struct kqid qid);
80734
80735diff --git a/include/linux/random.h b/include/linux/random.h
80736index 4002b3d..d5ad855 100644
80737--- a/include/linux/random.h
80738+++ b/include/linux/random.h
80739@@ -10,9 +10,19 @@
80740
80741
80742 extern void add_device_randomness(const void *, unsigned int);
80743+
80744+static inline void add_latent_entropy(void)
80745+{
80746+
80747+#ifdef LATENT_ENTROPY_PLUGIN
80748+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
80749+#endif
80750+
80751+}
80752+
80753 extern void add_input_randomness(unsigned int type, unsigned int code,
80754- unsigned int value);
80755-extern void add_interrupt_randomness(int irq, int irq_flags);
80756+ unsigned int value) __latent_entropy;
80757+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
80758
80759 extern void get_random_bytes(void *buf, int nbytes);
80760 extern void get_random_bytes_arch(void *buf, int nbytes);
80761@@ -23,10 +33,10 @@ extern int random_int_secret_init(void);
80762 extern const struct file_operations random_fops, urandom_fops;
80763 #endif
80764
80765-unsigned int get_random_int(void);
80766+unsigned int __intentional_overflow(-1) get_random_int(void);
80767 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
80768
80769-u32 prandom_u32(void);
80770+u32 prandom_u32(void) __intentional_overflow(-1);
80771 void prandom_bytes(void *buf, int nbytes);
80772 void prandom_seed(u32 seed);
80773 void prandom_reseed_late(void);
80774@@ -38,6 +48,11 @@ struct rnd_state {
80775 u32 prandom_u32_state(struct rnd_state *state);
80776 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
80777
80778+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
80779+{
80780+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
80781+}
80782+
80783 /*
80784 * Handle minimum values for seeds
80785 */
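
Note: pax_get_random_long() assembles a long from prandom_u32() — one draw on 32-bit (the sizeof(long) > 4 branch folds away) and a second draw for the high word on 64-bit. It is cheap, non-cryptographic randomness for ASLR-style decisions, and the __intentional_overflow(-1) markers again exempt the deliberately wrapping arithmetic from the size_overflow plugin. A sketch in the style of the patch's delta_mmap computation (the mask width is a placeholder):

#include <linux/random.h>
#include <asm/page.h>

static unsigned long randomize_delta(unsigned int bits)
{
        /* keep 'bits' low bits of entropy, then page-align */
        return (pax_get_random_long() & ((1UL << bits) - 1)) << PAGE_SHIFT;
}
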
80786diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
80787index fea49b5..2ac22bb 100644
80788--- a/include/linux/rbtree_augmented.h
80789+++ b/include/linux/rbtree_augmented.h
80790@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
80791 old->rbaugmented = rbcompute(old); \
80792 } \
80793 rbstatic const struct rb_augment_callbacks rbname = { \
80794- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
80795+ .propagate = rbname ## _propagate, \
80796+ .copy = rbname ## _copy, \
80797+ .rotate = rbname ## _rotate \
80798 };
80799
80800
80801diff --git a/include/linux/rculist.h b/include/linux/rculist.h
80802index 45a0a9e..e83788e 100644
80803--- a/include/linux/rculist.h
80804+++ b/include/linux/rculist.h
80805@@ -29,8 +29,8 @@
80806 */
80807 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
80808 {
80809- ACCESS_ONCE(list->next) = list;
80810- ACCESS_ONCE(list->prev) = list;
80811+ ACCESS_ONCE_RW(list->next) = list;
80812+ ACCESS_ONCE_RW(list->prev) = list;
80813 }
80814
80815 /*
80816@@ -59,6 +59,9 @@ extern void __list_add_rcu(struct list_head *new,
80817 struct list_head *prev, struct list_head *next);
80818 #endif
80819
80820+extern void __pax_list_add_rcu(struct list_head *new,
80821+ struct list_head *prev, struct list_head *next);
80822+
80823 /**
80824 * list_add_rcu - add a new entry to rcu-protected list
80825 * @new: new entry to be added
80826@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
80827 __list_add_rcu(new, head, head->next);
80828 }
80829
80830+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
80831+{
80832+ __pax_list_add_rcu(new, head, head->next);
80833+}
80834+
80835 /**
80836 * list_add_tail_rcu - add a new entry to rcu-protected list
80837 * @new: new entry to be added
80838@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
80839 __list_add_rcu(new, head->prev, head);
80840 }
80841
80842+static inline void pax_list_add_tail_rcu(struct list_head *new,
80843+ struct list_head *head)
80844+{
80845+ __pax_list_add_rcu(new, head->prev, head);
80846+}
80847+
80848 /**
80849 * list_del_rcu - deletes entry from list without re-initialization
80850 * @entry: the element to delete from the list.
80851@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
80852 entry->prev = LIST_POISON2;
80853 }
80854
80855+extern void pax_list_del_rcu(struct list_head *entry);
80856+
80857 /**
80858 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
80859 * @n: the element to delete from the hash list.
80860diff --git a/include/linux/reboot.h b/include/linux/reboot.h
80861index 9e7db9e..7d4fd72 100644
80862--- a/include/linux/reboot.h
80863+++ b/include/linux/reboot.h
80864@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
80865 */
80866
80867 extern void migrate_to_reboot_cpu(void);
80868-extern void machine_restart(char *cmd);
80869-extern void machine_halt(void);
80870-extern void machine_power_off(void);
80871+extern void machine_restart(char *cmd) __noreturn;
80872+extern void machine_halt(void) __noreturn;
80873+extern void machine_power_off(void) __noreturn;
80874
80875 extern void machine_shutdown(void);
80876 struct pt_regs;
80877@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
80878 */
80879
80880 extern void kernel_restart_prepare(char *cmd);
80881-extern void kernel_restart(char *cmd);
80882-extern void kernel_halt(void);
80883-extern void kernel_power_off(void);
80884+extern void kernel_restart(char *cmd) __noreturn;
80885+extern void kernel_halt(void) __noreturn;
80886+extern void kernel_power_off(void) __noreturn;
80887
80888 extern int C_A_D; /* for sysctl */
80889 void ctrl_alt_del(void);
80890@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
80891 * Emergency restart, callable from an interrupt handler.
80892 */
80893
80894-extern void emergency_restart(void);
80895+extern void emergency_restart(void) __noreturn;
80896 #include <asm/emergency-restart.h>
80897
80898 #endif /* _LINUX_REBOOT_H */
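
Note: annotating the halt/restart entry points __noreturn lets the compiler discard unreachable code after the calls and quiets control-flow warnings in callers; it also presumes the arch implementations genuinely never return. Hypothetical caller:

#include <linux/reboot.h>
#include <linux/printk.h>

static void fatal_shutdown(const char *why)
{
        pr_emerg("fatal: %s, powering off\n", why);
        kernel_power_off();
        /* not reached: kernel_power_off() is now __noreturn */
}
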
80899diff --git a/include/linux/regset.h b/include/linux/regset.h
80900index 8e0c9fe..ac4d221 100644
80901--- a/include/linux/regset.h
80902+++ b/include/linux/regset.h
80903@@ -161,7 +161,8 @@ struct user_regset {
80904 unsigned int align;
80905 unsigned int bias;
80906 unsigned int core_note_type;
80907-};
80908+} __do_const;
80909+typedef struct user_regset __no_const user_regset_no_const;
80910
80911 /**
80912 * struct user_regset_view - available regsets
80913diff --git a/include/linux/relay.h b/include/linux/relay.h
80914index d7c8359..818daf5 100644
80915--- a/include/linux/relay.h
80916+++ b/include/linux/relay.h
80917@@ -157,7 +157,7 @@ struct rchan_callbacks
80918 * The callback should return 0 if successful, negative if not.
80919 */
80920 int (*remove_buf_file)(struct dentry *dentry);
80921-};
80922+} __no_const;
80923
80924 /*
80925 * CONFIG_RELAY kernel API, kernel/relay.c
80926diff --git a/include/linux/rio.h b/include/linux/rio.h
80927index b71d573..2f940bd 100644
80928--- a/include/linux/rio.h
80929+++ b/include/linux/rio.h
80930@@ -355,7 +355,7 @@ struct rio_ops {
80931 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
80932 u64 rstart, u32 size, u32 flags);
80933 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
80934-};
80935+} __no_const;
80936
80937 #define RIO_RESOURCE_MEM 0x00000100
80938 #define RIO_RESOURCE_DOORBELL 0x00000200
80939diff --git a/include/linux/rmap.h b/include/linux/rmap.h
80940index 6dacb93..6174423 100644
80941--- a/include/linux/rmap.h
80942+++ b/include/linux/rmap.h
80943@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
80944 void anon_vma_init(void); /* create anon_vma_cachep */
80945 int anon_vma_prepare(struct vm_area_struct *);
80946 void unlink_anon_vmas(struct vm_area_struct *);
80947-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
80948-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
80949+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
80950+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
80951
80952 static inline void anon_vma_merge(struct vm_area_struct *vma,
80953 struct vm_area_struct *next)
80954diff --git a/include/linux/sched.h b/include/linux/sched.h
80955index 53f97eb..1d90705 100644
80956--- a/include/linux/sched.h
80957+++ b/include/linux/sched.h
80958@@ -63,6 +63,7 @@ struct bio_list;
80959 struct fs_struct;
80960 struct perf_event_context;
80961 struct blk_plug;
80962+struct linux_binprm;
80963
80964 /*
80965 * List of flags we want to share for kernel threads,
80966@@ -304,7 +305,7 @@ extern char __sched_text_start[], __sched_text_end[];
80967 extern int in_sched_functions(unsigned long addr);
80968
80969 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
80970-extern signed long schedule_timeout(signed long timeout);
80971+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
80972 extern signed long schedule_timeout_interruptible(signed long timeout);
80973 extern signed long schedule_timeout_killable(signed long timeout);
80974 extern signed long schedule_timeout_uninterruptible(signed long timeout);
80975@@ -315,6 +316,19 @@ struct nsproxy;
80976 struct user_namespace;
80977
80978 #ifdef CONFIG_MMU
80979+
80980+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
80981+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
80982+#else
80983+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
80984+{
80985+ return 0;
80986+}
80987+#endif
80988+
80989+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
80990+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
80991+
80992 extern void arch_pick_mmap_layout(struct mm_struct *mm);
80993 extern unsigned long
80994 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
80995@@ -600,6 +614,17 @@ struct signal_struct {
80996 #ifdef CONFIG_TASKSTATS
80997 struct taskstats *stats;
80998 #endif
80999+
81000+#ifdef CONFIG_GRKERNSEC
81001+ u32 curr_ip;
81002+ u32 saved_ip;
81003+ u32 gr_saddr;
81004+ u32 gr_daddr;
81005+ u16 gr_sport;
81006+ u16 gr_dport;
81007+ u8 used_accept:1;
81008+#endif
81009+
81010 #ifdef CONFIG_AUDIT
81011 unsigned audit_tty;
81012 unsigned audit_tty_log_passwd;
81013@@ -626,7 +651,7 @@ struct signal_struct {
81014 struct mutex cred_guard_mutex; /* guard against foreign influences on
81015 * credential calculations
81016 * (notably. ptrace) */
81017-};
81018+} __randomize_layout;
81019
81020 /*
81021 * Bits in flags field of signal_struct.
81022@@ -680,6 +705,14 @@ struct user_struct {
81023 struct key *session_keyring; /* UID's default session keyring */
81024 #endif
81025
81026+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
81027+ unsigned char kernel_banned;
81028+#endif
81029+#ifdef CONFIG_GRKERNSEC_BRUTE
81030+ unsigned char suid_banned;
81031+ unsigned long suid_ban_expires;
81032+#endif
81033+
81034 /* Hash table maintenance information */
81035 struct hlist_node uidhash_node;
81036 kuid_t uid;
81037@@ -687,7 +720,7 @@ struct user_struct {
81038 #ifdef CONFIG_PERF_EVENTS
81039 atomic_long_t locked_vm;
81040 #endif
81041-};
81042+} __randomize_layout;
81043
81044 extern int uids_sysfs_init(void);
81045
81046@@ -1162,8 +1195,8 @@ struct task_struct {
81047 struct list_head thread_group;
81048
81049 struct completion *vfork_done; /* for vfork() */
81050- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
81051- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
81052+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
81053+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
81054
81055 cputime_t utime, stime, utimescaled, stimescaled;
81056 cputime_t gtime;
81057@@ -1188,11 +1221,6 @@ struct task_struct {
81058 struct task_cputime cputime_expires;
81059 struct list_head cpu_timers[3];
81060
81061-/* process credentials */
81062- const struct cred __rcu *real_cred; /* objective and real subjective task
81063- * credentials (COW) */
81064- const struct cred __rcu *cred; /* effective (overridable) subjective task
81065- * credentials (COW) */
81066 char comm[TASK_COMM_LEN]; /* executable name excluding path
81067 - access with [gs]et_task_comm (which lock
81068 it with task_lock())
81069@@ -1209,6 +1237,10 @@ struct task_struct {
81070 #endif
81071 /* CPU-specific state of this task */
81072 struct thread_struct thread;
81073+/* thread_info moved to task_struct */
81074+#ifdef CONFIG_X86
81075+ struct thread_info tinfo;
81076+#endif
81077 /* filesystem information */
81078 struct fs_struct *fs;
81079 /* open file information */
81080@@ -1282,6 +1314,10 @@ struct task_struct {
81081 gfp_t lockdep_reclaim_gfp;
81082 #endif
81083
81084+/* process credentials */
81085+ const struct cred __rcu *real_cred; /* objective and real subjective task
81086+ * credentials (COW) */
81087+
81088 /* journalling filesystem info */
81089 void *journal_info;
81090
81091@@ -1320,6 +1356,10 @@ struct task_struct {
81092 /* cg_list protected by css_set_lock and tsk->alloc_lock */
81093 struct list_head cg_list;
81094 #endif
81095+
81096+ const struct cred __rcu *cred; /* effective (overridable) subjective task
81097+ * credentials (COW) */
81098+
81099 #ifdef CONFIG_FUTEX
81100 struct robust_list_head __user *robust_list;
81101 #ifdef CONFIG_COMPAT
81102@@ -1454,7 +1494,78 @@ struct task_struct {
81103 unsigned int sequential_io;
81104 unsigned int sequential_io_avg;
81105 #endif
81106-};
81107+
81108+#ifdef CONFIG_GRKERNSEC
81109+ /* grsecurity */
81110+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
81111+ u64 exec_id;
81112+#endif
81113+#ifdef CONFIG_GRKERNSEC_SETXID
81114+ const struct cred *delayed_cred;
81115+#endif
81116+ struct dentry *gr_chroot_dentry;
81117+ struct acl_subject_label *acl;
81118+ struct acl_subject_label *tmpacl;
81119+ struct acl_role_label *role;
81120+ struct file *exec_file;
81121+ unsigned long brute_expires;
81122+ u16 acl_role_id;
81123+ u8 inherited;
81124+ /* is this the task that authenticated to the special role */
81125+ u8 acl_sp_role;
81126+ u8 is_writable;
81127+ u8 brute;
81128+ u8 gr_is_chrooted;
81129+#endif
81130+
81131+} __randomize_layout;
81132+
81133+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
81134+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
81135+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
81136+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
81137+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
81138+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
81139+
81140+#ifdef CONFIG_PAX_SOFTMODE
81141+extern int pax_softmode;
81142+#endif
81143+
81144+extern int pax_check_flags(unsigned long *);
81145+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
81146+
81147+/* if tsk != current then task_lock must be held on it */
81148+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
81149+static inline unsigned long pax_get_flags(struct task_struct *tsk)
81150+{
81151+ if (likely(tsk->mm))
81152+ return tsk->mm->pax_flags;
81153+ else
81154+ return 0UL;
81155+}
81156+
81157+/* if tsk != current then task_lock must be held on it */
81158+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
81159+{
81160+ if (likely(tsk->mm)) {
81161+ tsk->mm->pax_flags = flags;
81162+ return 0;
81163+ }
81164+ return -EINVAL;
81165+}
81166+#endif
81167+
81168+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
81169+extern void pax_set_initial_flags(struct linux_binprm *bprm);
81170+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
81171+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
81172+#endif
81173+
81174+struct path;
81175+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
81176+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
81177+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
81178+extern void pax_report_refcount_overflow(struct pt_regs *regs);
81179
81180 /* Future-safe accessor for struct task_struct's cpus_allowed. */
81181 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
81182@@ -1531,7 +1642,7 @@ struct pid_namespace;
81183 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
81184 struct pid_namespace *ns);
81185
81186-static inline pid_t task_pid_nr(struct task_struct *tsk)
81187+static inline pid_t task_pid_nr(const struct task_struct *tsk)
81188 {
81189 return tsk->pid;
81190 }
81191@@ -1981,7 +2092,9 @@ void yield(void);
81192 extern struct exec_domain default_exec_domain;
81193
81194 union thread_union {
81195+#ifndef CONFIG_X86
81196 struct thread_info thread_info;
81197+#endif
81198 unsigned long stack[THREAD_SIZE/sizeof(long)];
81199 };
81200
81201@@ -2014,6 +2127,7 @@ extern struct pid_namespace init_pid_ns;
81202 */
81203
81204 extern struct task_struct *find_task_by_vpid(pid_t nr);
81205+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
81206 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
81207 struct pid_namespace *ns);
81208
81209@@ -2178,7 +2292,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
81210 extern void exit_itimers(struct signal_struct *);
81211 extern void flush_itimer_signals(void);
81212
81213-extern void do_group_exit(int);
81214+extern __noreturn void do_group_exit(int);
81215
81216 extern int allow_signal(int);
81217 extern int disallow_signal(int);
81218@@ -2369,9 +2483,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
81219
81220 #endif
81221
81222-static inline int object_is_on_stack(void *obj)
81223+static inline int object_starts_on_stack(void *obj)
81224 {
81225- void *stack = task_stack_page(current);
81226+ const void *stack = task_stack_page(current);
81227
81228 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
81229 }
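[annotation] The pax_get_flags()/pax_set_flags() helpers added above read and write mm->pax_flags under the locking contract stated in their comments. A minimal sketch of a conforming caller; pax_disable_mprotect() is a hypothetical name, not part of the patch:

/* Hypothetical caller honoring the "task_lock must be held if tsk != current"
 * contract documented above. Illustrative only. */
static long pax_disable_mprotect(struct task_struct *tsk)
{
	unsigned long flags;
	long ret;

	task_lock(tsk);
	flags = pax_get_flags(tsk);		/* 0UL for kernel threads (no mm) */
	ret = pax_set_flags(tsk, flags & ~MF_PAX_MPROTECT);
	task_unlock(tsk);
	return ret;				/* -EINVAL if tsk has no mm */
}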
81230diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
81231index e3347c5..f682891 100644
81232--- a/include/linux/sched/sysctl.h
81233+++ b/include/linux/sched/sysctl.h
81234@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
81235 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
81236
81237 extern int sysctl_max_map_count;
81238+extern unsigned long sysctl_heap_stack_gap;
81239
81240 extern unsigned int sysctl_sched_latency;
81241 extern unsigned int sysctl_sched_min_granularity;
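[annotation] sysctl_heap_stack_gap is the tunable behind the check_heap_stack_gap()/skip_heap_stack_gap() declarations added to sched.h above. A sketch of the assumed usage inside an arch_get_unmapped_area()-style search loop (assumption: skip_heap_stack_gap() returns the next candidate address past the VMA that failed the gap check; the real consumers are the arch mmap hunks elsewhere in the patch):

	addr = TASK_UNMAPPED_BASE;
	offset = gr_rand_threadstack_offset(mm, filp, flags);
	for (vma = find_vma(mm, addr); ; vma = find_vma(mm, addr)) {
		if (check_heap_stack_gap(vma, addr, len, offset))
			return addr;	/* enough clearance before the next VMA */
		addr = skip_heap_stack_gap(vma, len, offset);
	}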
81242diff --git a/include/linux/security.h b/include/linux/security.h
81243index 5623a7f..b352409 100644
81244--- a/include/linux/security.h
81245+++ b/include/linux/security.h
81246@@ -27,6 +27,7 @@
81247 #include <linux/slab.h>
81248 #include <linux/err.h>
81249 #include <linux/string.h>
81250+#include <linux/grsecurity.h>
81251
81252 struct linux_binprm;
81253 struct cred;
81254@@ -116,8 +117,6 @@ struct seq_file;
81255
81256 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
81257
81258-void reset_security_ops(void);
81259-
81260 #ifdef CONFIG_MMU
81261 extern unsigned long mmap_min_addr;
81262 extern unsigned long dac_mmap_min_addr;
81263diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
81264index dc368b8..e895209 100644
81265--- a/include/linux/semaphore.h
81266+++ b/include/linux/semaphore.h
81267@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
81268 }
81269
81270 extern void down(struct semaphore *sem);
81271-extern int __must_check down_interruptible(struct semaphore *sem);
81272+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
81273 extern int __must_check down_killable(struct semaphore *sem);
81274 extern int __must_check down_trylock(struct semaphore *sem);
81275 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
81276diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
81277index 52e0097..09625ef 100644
81278--- a/include/linux/seq_file.h
81279+++ b/include/linux/seq_file.h
81280@@ -27,6 +27,9 @@ struct seq_file {
81281 struct mutex lock;
81282 const struct seq_operations *op;
81283 int poll_event;
81284+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
81285+ u64 exec_id;
81286+#endif
81287 #ifdef CONFIG_USER_NS
81288 struct user_namespace *user_ns;
81289 #endif
81290@@ -39,6 +42,7 @@ struct seq_operations {
81291 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
81292 int (*show) (struct seq_file *m, void *v);
81293 };
81294+typedef struct seq_operations __no_const seq_operations_no_const;
81295
81296 #define SEQ_SKIP 1
81297
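[annotation] seq_operations_no_const is the first of several __no_const typedefs in this patch: the constify plugin makes ops structures read-only by default, and the typedef is the opt-out for instances that genuinely need runtime writes. A sketch with illustrative names:

static seq_operations_no_const my_seq_ops;	/* stays writable under constify */

static int my_proc_init(void)
{
	my_seq_ops.start = my_start;	/* runtime assignments like these would */
	my_seq_ops.next  = my_next;	/* be rejected on a constified instance  */
	my_seq_ops.stop  = my_stop;
	my_seq_ops.show  = my_show;
	return 0;
}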
81298diff --git a/include/linux/shm.h b/include/linux/shm.h
81299index 429c199..4d42e38 100644
81300--- a/include/linux/shm.h
81301+++ b/include/linux/shm.h
81302@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
81303
81304 /* The task created the shm object. NULL if the task is dead. */
81305 struct task_struct *shm_creator;
81306+#ifdef CONFIG_GRKERNSEC
81307+ time_t shm_createtime;
81308+ pid_t shm_lapid;
81309+#endif
81310 };
81311
81312 /* shm_mode upper byte flags */
81313diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
81314index 37cb679..dbaebc0 100644
81315--- a/include/linux/skbuff.h
81316+++ b/include/linux/skbuff.h
81317@@ -643,7 +643,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
81318 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
81319 int node);
81320 struct sk_buff *build_skb(void *data, unsigned int frag_size);
81321-static inline struct sk_buff *alloc_skb(unsigned int size,
81322+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
81323 gfp_t priority)
81324 {
81325 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
81326@@ -750,7 +750,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
81327 */
81328 static inline int skb_queue_empty(const struct sk_buff_head *list)
81329 {
81330- return list->next == (struct sk_buff *)list;
81331+ return list->next == (const struct sk_buff *)list;
81332 }
81333
81334 /**
81335@@ -763,7 +763,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
81336 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
81337 const struct sk_buff *skb)
81338 {
81339- return skb->next == (struct sk_buff *)list;
81340+ return skb->next == (const struct sk_buff *)list;
81341 }
81342
81343 /**
81344@@ -776,7 +776,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
81345 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
81346 const struct sk_buff *skb)
81347 {
81348- return skb->prev == (struct sk_buff *)list;
81349+ return skb->prev == (const struct sk_buff *)list;
81350 }
81351
81352 /**
81353@@ -1686,7 +1686,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
81354 return skb->inner_transport_header - skb->inner_network_header;
81355 }
81356
81357-static inline int skb_network_offset(const struct sk_buff *skb)
81358+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
81359 {
81360 return skb_network_header(skb) - skb->data;
81361 }
81362@@ -1746,7 +1746,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
81363 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
81364 */
81365 #ifndef NET_SKB_PAD
81366-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
81367+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
81368 #endif
81369
81370 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
81371@@ -2345,7 +2345,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
81372 int *err);
81373 unsigned int datagram_poll(struct file *file, struct socket *sock,
81374 struct poll_table_struct *wait);
81375-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
81376+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
81377 struct iovec *to, int size);
81378 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
81379 struct iovec *iov);
81380@@ -2618,6 +2618,9 @@ static inline void nf_reset(struct sk_buff *skb)
81381 nf_bridge_put(skb->nf_bridge);
81382 skb->nf_bridge = NULL;
81383 #endif
81384+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
81385+ skb->nf_trace = 0;
81386+#endif
81387 }
81388
81389 static inline void nf_reset_trace(struct sk_buff *skb)
81390diff --git a/include/linux/slab.h b/include/linux/slab.h
81391index 1e2f4fe..df49ca6 100644
81392--- a/include/linux/slab.h
81393+++ b/include/linux/slab.h
81394@@ -14,15 +14,29 @@
81395 #include <linux/gfp.h>
81396 #include <linux/types.h>
81397 #include <linux/workqueue.h>
81398-
81399+#include <linux/err.h>
81400
81401 /*
81402 * Flags to pass to kmem_cache_create().
81403 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
81404 */
81405 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
81406+
81407+#ifdef CONFIG_PAX_USERCOPY_SLABS
81408+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
81409+#else
81410+#define SLAB_USERCOPY 0x00000000UL
81411+#endif
81412+
81413 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
81414 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
81415+
81416+#ifdef CONFIG_PAX_MEMORY_SANITIZE
81417+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
81418+#else
81419+#define SLAB_NO_SANITIZE 0x00000000UL
81420+#endif
81421+
81422 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
81423 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
81424 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
81425@@ -98,10 +112,13 @@
81426 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
81427 * Both make kfree a no-op.
81428 */
81429-#define ZERO_SIZE_PTR ((void *)16)
81430+#define ZERO_SIZE_PTR \
81431+({ \
81432+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
81433+ (void *)(-MAX_ERRNO-1L); \
81434+})
81435
81436-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
81437- (unsigned long)ZERO_SIZE_PTR)
81438+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
81439
81440 #include <linux/kmemleak.h>
81441
81442@@ -142,6 +159,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
81443 void kfree(const void *);
81444 void kzfree(const void *);
81445 size_t ksize(const void *);
81446+const char *check_heap_object(const void *ptr, unsigned long n);
81447+bool is_usercopy_object(const void *ptr);
81448
81449 /*
81450 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
81451@@ -174,7 +193,7 @@ struct kmem_cache {
81452 unsigned int align; /* Alignment as calculated */
81453 unsigned long flags; /* Active flags on the slab */
81454 const char *name; /* Slab name for sysfs */
81455- int refcount; /* Use counter */
81456+ atomic_t refcount; /* Use counter */
81457 void (*ctor)(void *); /* Called on object slot creation */
81458 struct list_head list; /* List of all slab caches on the system */
81459 };
81460@@ -248,6 +267,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
81461 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
81462 #endif
81463
81464+#ifdef CONFIG_PAX_USERCOPY_SLABS
81465+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
81466+#endif
81467+
81468 /*
81469 * Figure out which kmalloc slab an allocation of a certain size
81470 * belongs to.
81471@@ -256,7 +279,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
81472 * 2 = 120 .. 192 bytes
81473 * n = 2^(n-1) .. 2^n -1
81474 */
81475-static __always_inline int kmalloc_index(size_t size)
81476+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
81477 {
81478 if (!size)
81479 return 0;
81480@@ -299,11 +322,11 @@ static __always_inline int kmalloc_index(size_t size)
81481 }
81482 #endif /* !CONFIG_SLOB */
81483
81484-void *__kmalloc(size_t size, gfp_t flags);
81485+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
81486 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
81487
81488 #ifdef CONFIG_NUMA
81489-void *__kmalloc_node(size_t size, gfp_t flags, int node);
81490+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
81491 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
81492 #else
81493 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
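[annotation] The relocated ZERO_SIZE_PTR sits immediately below the ERR_PTR range (-MAX_ERRNO..-1), so the rewritten ZERO_OR_NULL_PTR() catches NULL, ZERO_SIZE_PTR and every error pointer in a single unsigned comparison. A standalone userspace model (assuming MAX_ERRNO == 4095, as in the kernel):

#include <stdio.h>

#define MAX_ERRNO	4095
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	printf("%d\n", ZERO_OR_NULL_PTR((void *)0));	 /* 1: 0-1 wraps to ULONG_MAX */
	printf("%d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* 1: lower bound of range   */
	printf("%d\n", ZERO_OR_NULL_PTR((void *)-1L));	 /* 1: an ERR_PTR value       */
	printf("%d\n", ZERO_OR_NULL_PTR((void *)16));	 /* 0: the old ZERO_SIZE_PTR  */
	return 0;					 /*    is now an ordinary ptr */
}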
81494diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
81495index 09bfffb..4fc80fb 100644
81496--- a/include/linux/slab_def.h
81497+++ b/include/linux/slab_def.h
81498@@ -36,7 +36,7 @@ struct kmem_cache {
81499 /* 4) cache creation/removal */
81500 const char *name;
81501 struct list_head list;
81502- int refcount;
81503+ atomic_t refcount;
81504 int object_size;
81505 int align;
81506
81507@@ -52,10 +52,14 @@ struct kmem_cache {
81508 unsigned long node_allocs;
81509 unsigned long node_frees;
81510 unsigned long node_overflow;
81511- atomic_t allochit;
81512- atomic_t allocmiss;
81513- atomic_t freehit;
81514- atomic_t freemiss;
81515+ atomic_unchecked_t allochit;
81516+ atomic_unchecked_t allocmiss;
81517+ atomic_unchecked_t freehit;
81518+ atomic_unchecked_t freemiss;
81519+#ifdef CONFIG_PAX_MEMORY_SANITIZE
81520+ atomic_unchecked_t sanitized;
81521+ atomic_unchecked_t not_sanitized;
81522+#endif
81523
81524 /*
81525 * If debugging is enabled, then the allocator can add additional
81526diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
81527index f56bfa9..8378a26 100644
81528--- a/include/linux/slub_def.h
81529+++ b/include/linux/slub_def.h
81530@@ -74,7 +74,7 @@ struct kmem_cache {
81531 struct kmem_cache_order_objects max;
81532 struct kmem_cache_order_objects min;
81533 gfp_t allocflags; /* gfp flags to use on each alloc */
81534- int refcount; /* Refcount for slab cache destroy */
81535+ atomic_t refcount; /* Refcount for slab cache destroy */
81536 void (*ctor)(void *);
81537 int inuse; /* Offset to metadata */
81538 int align; /* Alignment */
81539diff --git a/include/linux/smp.h b/include/linux/smp.h
81540index 5da22ee..71d8a28 100644
81541--- a/include/linux/smp.h
81542+++ b/include/linux/smp.h
81543@@ -176,7 +176,9 @@ static inline void kick_all_cpus_sync(void) { }
81544 #endif
81545
81546 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
81547+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
81548 #define put_cpu() preempt_enable()
81549+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
81550
81551 /*
81552 * Callback to arch code if there's nosmp or maxcpus=0 on the
81553diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
81554index 54f91d3..be2c379 100644
81555--- a/include/linux/sock_diag.h
81556+++ b/include/linux/sock_diag.h
81557@@ -11,7 +11,7 @@ struct sock;
81558 struct sock_diag_handler {
81559 __u8 family;
81560 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
81561-};
81562+} __do_const;
81563
81564 int sock_diag_register(const struct sock_diag_handler *h);
81565 void sock_diag_unregister(const struct sock_diag_handler *h);
81566diff --git a/include/linux/sonet.h b/include/linux/sonet.h
81567index 680f9a3..f13aeb0 100644
81568--- a/include/linux/sonet.h
81569+++ b/include/linux/sonet.h
81570@@ -7,7 +7,7 @@
81571 #include <uapi/linux/sonet.h>
81572
81573 struct k_sonet_stats {
81574-#define __HANDLE_ITEM(i) atomic_t i
81575+#define __HANDLE_ITEM(i) atomic_unchecked_t i
81576 __SONET_ITEMS
81577 #undef __HANDLE_ITEM
81578 };
81579diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
81580index 07d8e53..dc934c9 100644
81581--- a/include/linux/sunrpc/addr.h
81582+++ b/include/linux/sunrpc/addr.h
81583@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
81584 {
81585 switch (sap->sa_family) {
81586 case AF_INET:
81587- return ntohs(((struct sockaddr_in *)sap)->sin_port);
81588+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
81589 case AF_INET6:
81590- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
81591+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
81592 }
81593 return 0;
81594 }
81595@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
81596 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
81597 const struct sockaddr *src)
81598 {
81599- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
81600+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
81601 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
81602
81603 dsin->sin_family = ssin->sin_family;
81604@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
81605 if (sa->sa_family != AF_INET6)
81606 return 0;
81607
81608- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
81609+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
81610 }
81611
81612 #endif /* _LINUX_SUNRPC_ADDR_H */
81613diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
81614index 8af2804..c7414ef 100644
81615--- a/include/linux/sunrpc/clnt.h
81616+++ b/include/linux/sunrpc/clnt.h
81617@@ -97,7 +97,7 @@ struct rpc_procinfo {
81618 unsigned int p_timer; /* Which RTT timer to use */
81619 u32 p_statidx; /* Which procedure to account */
81620 const char * p_name; /* name of procedure */
81621-};
81622+} __do_const;
81623
81624 #ifdef __KERNEL__
81625
81626diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
81627index 6eecfc2..7ada79d 100644
81628--- a/include/linux/sunrpc/svc.h
81629+++ b/include/linux/sunrpc/svc.h
81630@@ -410,7 +410,7 @@ struct svc_procedure {
81631 unsigned int pc_count; /* call count */
81632 unsigned int pc_cachetype; /* cache info (NFS) */
81633 unsigned int pc_xdrressize; /* maximum size of XDR reply */
81634-};
81635+} __do_const;
81636
81637 /*
81638 * Function prototypes.
81639diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
81640index 0b8e3e6..33e0a01 100644
81641--- a/include/linux/sunrpc/svc_rdma.h
81642+++ b/include/linux/sunrpc/svc_rdma.h
81643@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
81644 extern unsigned int svcrdma_max_requests;
81645 extern unsigned int svcrdma_max_req_size;
81646
81647-extern atomic_t rdma_stat_recv;
81648-extern atomic_t rdma_stat_read;
81649-extern atomic_t rdma_stat_write;
81650-extern atomic_t rdma_stat_sq_starve;
81651-extern atomic_t rdma_stat_rq_starve;
81652-extern atomic_t rdma_stat_rq_poll;
81653-extern atomic_t rdma_stat_rq_prod;
81654-extern atomic_t rdma_stat_sq_poll;
81655-extern atomic_t rdma_stat_sq_prod;
81656+extern atomic_unchecked_t rdma_stat_recv;
81657+extern atomic_unchecked_t rdma_stat_read;
81658+extern atomic_unchecked_t rdma_stat_write;
81659+extern atomic_unchecked_t rdma_stat_sq_starve;
81660+extern atomic_unchecked_t rdma_stat_rq_starve;
81661+extern atomic_unchecked_t rdma_stat_rq_poll;
81662+extern atomic_unchecked_t rdma_stat_rq_prod;
81663+extern atomic_unchecked_t rdma_stat_sq_poll;
81664+extern atomic_unchecked_t rdma_stat_sq_prod;
81665
81666 #define RPCRDMA_VERSION 1
81667
81668diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
81669index 8d71d65..f79586e 100644
81670--- a/include/linux/sunrpc/svcauth.h
81671+++ b/include/linux/sunrpc/svcauth.h
81672@@ -120,7 +120,7 @@ struct auth_ops {
81673 int (*release)(struct svc_rqst *rq);
81674 void (*domain_release)(struct auth_domain *);
81675 int (*set_client)(struct svc_rqst *rq);
81676-};
81677+} __do_const;
81678
81679 #define SVC_GARBAGE 1
81680 #define SVC_SYSERR 2
81681diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
81682index a5ffd32..0935dea 100644
81683--- a/include/linux/swiotlb.h
81684+++ b/include/linux/swiotlb.h
81685@@ -60,7 +60,8 @@ extern void
81686
81687 extern void
81688 swiotlb_free_coherent(struct device *hwdev, size_t size,
81689- void *vaddr, dma_addr_t dma_handle);
81690+ void *vaddr, dma_addr_t dma_handle,
81691+ struct dma_attrs *attrs);
81692
81693 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
81694 unsigned long offset, size_t size,
81695diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
81696index 94273bb..c2e05fc 100644
81697--- a/include/linux/syscalls.h
81698+++ b/include/linux/syscalls.h
81699@@ -97,8 +97,14 @@ struct sigaltstack;
81700 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
81701
81702 #define __SC_DECL(t, a) t a
81703+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
81704 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
81705-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
81706+#define __SC_LONG(t, a) __typeof( \
81707+ __builtin_choose_expr( \
81708+ sizeof(t) > sizeof(int), \
81709+ (t) 0, \
81710+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
81711+ )) a
81712 #define __SC_CAST(t, a) (t) a
81713 #define __SC_ARGS(t, a) a
81714 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
81715@@ -363,11 +369,11 @@ asmlinkage long sys_sync(void);
81716 asmlinkage long sys_fsync(unsigned int fd);
81717 asmlinkage long sys_fdatasync(unsigned int fd);
81718 asmlinkage long sys_bdflush(int func, long data);
81719-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
81720- char __user *type, unsigned long flags,
81721+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
81722+ const char __user *type, unsigned long flags,
81723 void __user *data);
81724-asmlinkage long sys_umount(char __user *name, int flags);
81725-asmlinkage long sys_oldumount(char __user *name);
81726+asmlinkage long sys_umount(const char __user *name, int flags);
81727+asmlinkage long sys_oldumount(const char __user *name);
81728 asmlinkage long sys_truncate(const char __user *path, long length);
81729 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
81730 asmlinkage long sys_stat(const char __user *filename,
81731@@ -579,7 +585,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
81732 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
81733 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
81734 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
81735- struct sockaddr __user *, int);
81736+ struct sockaddr __user *, int) __intentional_overflow(0);
81737 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
81738 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
81739 unsigned int vlen, unsigned flags);
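[annotation] The __SC_LONG rewrite chooses each syscall argument's register-slot type by size and signedness instead of flattening everything to long: types wider than int keep their own type, and narrower unsigned types (matched by the new __TYPE_IS_U) get an unsigned long slot. A userspace model of the signedness distinction this encodes (assumes 64-bit long; the practical consumer is the size_overflow plugin's value-range analysis, not runtime behavior):

#include <stdio.h>

int main(void)
{
	unsigned int len = 0x90000000u;		/* high bit set */
	long old_slot = (int)len;		/* signed slot: sign-extends */
	unsigned long new_slot = len;		/* unsigned slot: zero-extends */
	printf("%ld\n%lu\n", old_slot, new_slot);
	/* -1879048192 vs 2415919104: the unsigned slot preserves the value
	 * range a u32 length parameter actually has. */
	return 0;
}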
81740diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
81741index 27b3b0b..e093dd9 100644
81742--- a/include/linux/syscore_ops.h
81743+++ b/include/linux/syscore_ops.h
81744@@ -16,7 +16,7 @@ struct syscore_ops {
81745 int (*suspend)(void);
81746 void (*resume)(void);
81747 void (*shutdown)(void);
81748-};
81749+} __do_const;
81750
81751 extern void register_syscore_ops(struct syscore_ops *ops);
81752 extern void unregister_syscore_ops(struct syscore_ops *ops);
81753diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
81754index 14a8ff2..fa95f3a 100644
81755--- a/include/linux/sysctl.h
81756+++ b/include/linux/sysctl.h
81757@@ -34,13 +34,13 @@ struct ctl_table_root;
81758 struct ctl_table_header;
81759 struct ctl_dir;
81760
81761-typedef struct ctl_table ctl_table;
81762-
81763 typedef int proc_handler (struct ctl_table *ctl, int write,
81764 void __user *buffer, size_t *lenp, loff_t *ppos);
81765
81766 extern int proc_dostring(struct ctl_table *, int,
81767 void __user *, size_t *, loff_t *);
81768+extern int proc_dostring_modpriv(struct ctl_table *, int,
81769+ void __user *, size_t *, loff_t *);
81770 extern int proc_dointvec(struct ctl_table *, int,
81771 void __user *, size_t *, loff_t *);
81772 extern int proc_dointvec_minmax(struct ctl_table *, int,
81773@@ -115,7 +115,9 @@ struct ctl_table
81774 struct ctl_table_poll *poll;
81775 void *extra1;
81776 void *extra2;
81777-};
81778+} __do_const __randomize_layout;
81779+typedef struct ctl_table __no_const ctl_table_no_const;
81780+typedef struct ctl_table ctl_table;
81781
81782 struct ctl_node {
81783 struct rb_node node;
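[annotation] With struct ctl_table now __do_const (plus __randomize_layout), tables composed or patched at runtime need the ctl_table_no_const typedef; the netns_ipvs hunk further down (lblc_ctl_table/lblcr_ctl_table) is exactly this case. An illustrative sketch, names hypothetical:

static int my_value;

static ctl_table_no_const my_table[] = {	/* writable: patched at runtime */
	{
		.procname	= "my_knob",
		.data		= &my_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static void my_point_at(void *per_ns_value)
{
	my_table[0].data = per_ns_value;	/* the write that mandates no_const */
}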
81784diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
81785index 6695040..3d4192d 100644
81786--- a/include/linux/sysfs.h
81787+++ b/include/linux/sysfs.h
81788@@ -33,7 +33,8 @@ struct attribute {
81789 struct lock_class_key *key;
81790 struct lock_class_key skey;
81791 #endif
81792-};
81793+} __do_const;
81794+typedef struct attribute __no_const attribute_no_const;
81795
81796 /**
81797 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
81798@@ -62,7 +63,8 @@ struct attribute_group {
81799 struct attribute *, int);
81800 struct attribute **attrs;
81801 struct bin_attribute **bin_attrs;
81802-};
81803+} __do_const;
81804+typedef struct attribute_group __no_const attribute_group_no_const;
81805
81806 /**
81807 * Use these macros to make defining attributes easier. See include/linux/device.h
81808@@ -126,7 +128,8 @@ struct bin_attribute {
81809 char *, loff_t, size_t);
81810 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
81811 struct vm_area_struct *vma);
81812-};
81813+} __do_const;
81814+typedef struct bin_attribute __no_const bin_attribute_no_const;
81815
81816 /**
81817 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
81818diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
81819index 387fa7d..3fcde6b 100644
81820--- a/include/linux/sysrq.h
81821+++ b/include/linux/sysrq.h
81822@@ -16,6 +16,7 @@
81823
81824 #include <linux/errno.h>
81825 #include <linux/types.h>
81826+#include <linux/compiler.h>
81827
81828 /* Possible values of bitmask for enabling sysrq functions */
81829 /* 0x0001 is reserved for enable everything */
81830@@ -33,7 +34,7 @@ struct sysrq_key_op {
81831 char *help_msg;
81832 char *action_msg;
81833 int enable_mask;
81834-};
81835+} __do_const;
81836
81837 #ifdef CONFIG_MAGIC_SYSRQ
81838
81839diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
81840index fddbe20..0312de8 100644
81841--- a/include/linux/thread_info.h
81842+++ b/include/linux/thread_info.h
81843@@ -161,6 +161,15 @@ static inline bool test_and_clear_restore_sigmask(void)
81844 #error "no set_restore_sigmask() provided and default one won't work"
81845 #endif
81846
81847+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
81848+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
81849+{
81850+#ifndef CONFIG_PAX_USERCOPY_DEBUG
81851+ if (!__builtin_constant_p(n))
81852+#endif
81853+ __check_object_size(ptr, n, to_user);
81854+}
81855+
81856 #endif /* __KERNEL__ */
81857
81858 #endif /* _LINUX_THREAD_INFO_H */
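[annotation] check_object_size() is the PAX_USERCOPY entry point: the __builtin_constant_p() gate skips the comparatively expensive __check_object_size() for compile-time-constant sizes, which the toolchain plugins can vet statically, unless CONFIG_PAX_USERCOPY_DEBUG compiles the gate out and checks everything. Sketch of the intended call-site pattern (the wrapper name is illustrative):

static inline unsigned long
my_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	check_object_size(to, n, false);	/* false: data flows into the kernel */
	return __copy_from_user(to, from, n);
}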
81859diff --git a/include/linux/tty.h b/include/linux/tty.h
81860index 97d660e..6356755 100644
81861--- a/include/linux/tty.h
81862+++ b/include/linux/tty.h
81863@@ -196,7 +196,7 @@ struct tty_port {
81864 const struct tty_port_operations *ops; /* Port operations */
81865 spinlock_t lock; /* Lock protecting tty field */
81866 int blocked_open; /* Waiting to open */
81867- int count; /* Usage count */
81868+ atomic_t count; /* Usage count */
81869 wait_queue_head_t open_wait; /* Open waiters */
81870 wait_queue_head_t close_wait; /* Close waiters */
81871 wait_queue_head_t delta_msr_wait; /* Modem status change */
81872@@ -278,7 +278,7 @@ struct tty_struct {
81873 /* If the tty has a pending do_SAK, queue it here - akpm */
81874 struct work_struct SAK_work;
81875 struct tty_port *port;
81876-};
81877+} __randomize_layout;
81878
81879 /* Each of a tty's open files has private_data pointing to tty_file_private */
81880 struct tty_file_private {
81881@@ -545,7 +545,7 @@ extern int tty_port_open(struct tty_port *port,
81882 struct tty_struct *tty, struct file *filp);
81883 static inline int tty_port_users(struct tty_port *port)
81884 {
81885- return port->count + port->blocked_open;
81886+ return atomic_read(&port->count) + port->blocked_open;
81887 }
81888
81889 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
81890diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
81891index 756a609..f61242d 100644
81892--- a/include/linux/tty_driver.h
81893+++ b/include/linux/tty_driver.h
81894@@ -285,7 +285,7 @@ struct tty_operations {
81895 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
81896 #endif
81897 const struct file_operations *proc_fops;
81898-};
81899+} __do_const;
81900
81901 struct tty_driver {
81902 int magic; /* magic number for this structure */
81903@@ -319,7 +319,7 @@ struct tty_driver {
81904
81905 const struct tty_operations *ops;
81906 struct list_head tty_drivers;
81907-};
81908+} __randomize_layout;
81909
81910 extern struct list_head tty_drivers;
81911
81912diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
81913index f15c898..207b7d1 100644
81914--- a/include/linux/tty_ldisc.h
81915+++ b/include/linux/tty_ldisc.h
81916@@ -211,7 +211,7 @@ struct tty_ldisc_ops {
81917
81918 struct module *owner;
81919
81920- int refcount;
81921+ atomic_t refcount;
81922 };
81923
81924 struct tty_ldisc {
81925diff --git a/include/linux/types.h b/include/linux/types.h
81926index 4d118ba..c3ee9bf 100644
81927--- a/include/linux/types.h
81928+++ b/include/linux/types.h
81929@@ -176,10 +176,26 @@ typedef struct {
81930 int counter;
81931 } atomic_t;
81932
81933+#ifdef CONFIG_PAX_REFCOUNT
81934+typedef struct {
81935+ int counter;
81936+} atomic_unchecked_t;
81937+#else
81938+typedef atomic_t atomic_unchecked_t;
81939+#endif
81940+
81941 #ifdef CONFIG_64BIT
81942 typedef struct {
81943 long counter;
81944 } atomic64_t;
81945+
81946+#ifdef CONFIG_PAX_REFCOUNT
81947+typedef struct {
81948+ long counter;
81949+} atomic64_unchecked_t;
81950+#else
81951+typedef atomic64_t atomic64_unchecked_t;
81952+#endif
81953 #endif
81954
81955 struct list_head {
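[annotation] atomic_unchecked_t is the escape hatch from PAX_REFCOUNT's overflow trapping: same layout as atomic_t, but manipulated through the *_unchecked operations supplied by the per-arch atomic hunks elsewhere in this patch. It is used for counters where wraparound is harmless, the recurring cases in the hunks above and below being statistics and sequence numbers (sonet stats, rdma_stat_*, urbnum, flow_cache_genid, autobind_name). Minimal sketch:

static atomic_unchecked_t my_rx_count = ATOMIC_INIT(0);

static void my_rx(void)
{
	atomic_inc_unchecked(&my_rx_count);	/* may wrap: intentionally untrapped */
}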
81956diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
81957index 9d8cf05..0ed74dd 100644
81958--- a/include/linux/uaccess.h
81959+++ b/include/linux/uaccess.h
81960@@ -72,11 +72,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
81961 long ret; \
81962 mm_segment_t old_fs = get_fs(); \
81963 \
81964- set_fs(KERNEL_DS); \
81965 pagefault_disable(); \
81966- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
81967- pagefault_enable(); \
81968+ set_fs(KERNEL_DS); \
81969+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
81970 set_fs(old_fs); \
81971+ pagefault_enable(); \
81972 ret; \
81973 })
81974
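[annotation] The swap above nests the widened KERNEL_DS window strictly inside the pagefault-disabled region, so the address-limit override is never live across a serviced fault or a preemption point:

/*  before:                         after:
 *    set_fs(KERNEL_DS);              pagefault_disable();
 *    pagefault_disable();            set_fs(KERNEL_DS);
 *    ...__copy_from_user...          ...__copy_from_user...
 *    pagefault_enable();             set_fs(old_fs);
 *    set_fs(old_fs);                 pagefault_enable();
 */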
81975diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
81976index 8e522cbc..aa8572d 100644
81977--- a/include/linux/uidgid.h
81978+++ b/include/linux/uidgid.h
81979@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
81980
81981 #endif /* CONFIG_USER_NS */
81982
81983+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
81984+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
81985+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
81986+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
81987+
81988 #endif /* _LINUX_UIDGID_H */
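[annotation] GR_GLOBAL_UID()/GR_GLOBAL_GID() collapse namespace-aware kuid_t/kgid_t values back to global ids, and the gr_is_global_* macros compare directly against GLOBAL_ROOT_UID. An illustrative caller (the function name is hypothetical):

static void my_audit(const struct cred *cred)
{
	pr_info("request from global uid %u\n", GR_GLOBAL_UID(cred->uid));
	if (gr_is_global_nonroot(cred->uid))
		pr_info("caller is not global root\n");
}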
81989diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
81990index 99c1b4d..562e6f3 100644
81991--- a/include/linux/unaligned/access_ok.h
81992+++ b/include/linux/unaligned/access_ok.h
81993@@ -4,34 +4,34 @@
81994 #include <linux/kernel.h>
81995 #include <asm/byteorder.h>
81996
81997-static inline u16 get_unaligned_le16(const void *p)
81998+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
81999 {
82000- return le16_to_cpup((__le16 *)p);
82001+ return le16_to_cpup((const __le16 *)p);
82002 }
82003
82004-static inline u32 get_unaligned_le32(const void *p)
82005+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
82006 {
82007- return le32_to_cpup((__le32 *)p);
82008+ return le32_to_cpup((const __le32 *)p);
82009 }
82010
82011-static inline u64 get_unaligned_le64(const void *p)
82012+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
82013 {
82014- return le64_to_cpup((__le64 *)p);
82015+ return le64_to_cpup((const __le64 *)p);
82016 }
82017
82018-static inline u16 get_unaligned_be16(const void *p)
82019+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
82020 {
82021- return be16_to_cpup((__be16 *)p);
82022+ return be16_to_cpup((const __be16 *)p);
82023 }
82024
82025-static inline u32 get_unaligned_be32(const void *p)
82026+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
82027 {
82028- return be32_to_cpup((__be32 *)p);
82029+ return be32_to_cpup((const __be32 *)p);
82030 }
82031
82032-static inline u64 get_unaligned_be64(const void *p)
82033+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
82034 {
82035- return be64_to_cpup((__be64 *)p);
82036+ return be64_to_cpup((const __be64 *)p);
82037 }
82038
82039 static inline void put_unaligned_le16(u16 val, void *p)
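[annotation] This hunk adds const-correct casts and marks the getters __intentional_overflow(-1) so the size_overflow plugin treats arithmetic on their results as intentionally wrap-tolerant. access_ok.h is the variant for arches where unaligned dereference is safe, so the kernel getter simply dereferences; a runnable userspace model computing the same value byte-wise:

#include <stdint.h>
#include <stdio.h>

static uint16_t model_get_unaligned_le16(const void *p)
{
	const uint8_t *b = p;
	return (uint16_t)(b[0] | (b[1] << 8));	/* little-endian, any alignment */
}

int main(void)
{
	uint8_t buf[3] = { 0xff, 0x34, 0x12 };
	printf("0x%04x\n", model_get_unaligned_le16(buf + 1));	/* 0x1234 */
	return 0;
}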
82040diff --git a/include/linux/usb.h b/include/linux/usb.h
82041index 7454865..29f4bfa 100644
82042--- a/include/linux/usb.h
82043+++ b/include/linux/usb.h
82044@@ -563,7 +563,7 @@ struct usb_device {
82045 int maxchild;
82046
82047 u32 quirks;
82048- atomic_t urbnum;
82049+ atomic_unchecked_t urbnum;
82050
82051 unsigned long active_duration;
82052
82053@@ -1641,7 +1641,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
82054
82055 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
82056 __u8 request, __u8 requesttype, __u16 value, __u16 index,
82057- void *data, __u16 size, int timeout);
82058+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
82059 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
82060 void *data, int len, int *actual_length, int timeout);
82061 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
82062diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
82063index e452ba6..78f8e80 100644
82064--- a/include/linux/usb/renesas_usbhs.h
82065+++ b/include/linux/usb/renesas_usbhs.h
82066@@ -39,7 +39,7 @@ enum {
82067 */
82068 struct renesas_usbhs_driver_callback {
82069 int (*notify_hotplug)(struct platform_device *pdev);
82070-};
82071+} __no_const;
82072
82073 /*
82074 * callback functions for platform
82075diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
82076index 4836ba3..603f6ee 100644
82077--- a/include/linux/user_namespace.h
82078+++ b/include/linux/user_namespace.h
82079@@ -33,7 +33,7 @@ struct user_namespace {
82080 struct key *persistent_keyring_register;
82081 struct rw_semaphore persistent_keyring_register_sem;
82082 #endif
82083-};
82084+} __randomize_layout;
82085
82086 extern struct user_namespace init_user_ns;
82087
82088diff --git a/include/linux/utsname.h b/include/linux/utsname.h
82089index 239e277..22a5cf5 100644
82090--- a/include/linux/utsname.h
82091+++ b/include/linux/utsname.h
82092@@ -24,7 +24,7 @@ struct uts_namespace {
82093 struct new_utsname name;
82094 struct user_namespace *user_ns;
82095 unsigned int proc_inum;
82096-};
82097+} __randomize_layout;
82098 extern struct uts_namespace init_uts_ns;
82099
82100 #ifdef CONFIG_UTS_NS
82101diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
82102index 6f8fbcf..4efc177 100644
82103--- a/include/linux/vermagic.h
82104+++ b/include/linux/vermagic.h
82105@@ -25,9 +25,42 @@
82106 #define MODULE_ARCH_VERMAGIC ""
82107 #endif
82108
82109+#ifdef CONFIG_PAX_REFCOUNT
82110+#define MODULE_PAX_REFCOUNT "REFCOUNT "
82111+#else
82112+#define MODULE_PAX_REFCOUNT ""
82113+#endif
82114+
82115+#ifdef CONSTIFY_PLUGIN
82116+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
82117+#else
82118+#define MODULE_CONSTIFY_PLUGIN ""
82119+#endif
82120+
82121+#ifdef STACKLEAK_PLUGIN
82122+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
82123+#else
82124+#define MODULE_STACKLEAK_PLUGIN ""
82125+#endif
82126+
82127+#ifdef RANDSTRUCT_PLUGIN
82128+#include <generated/randomize_layout_hash.h>
82129+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
82130+#else
82131+#define MODULE_RANDSTRUCT_PLUGIN
82132+#endif
82133+
82134+#ifdef CONFIG_GRKERNSEC
82135+#define MODULE_GRSEC "GRSEC "
82136+#else
82137+#define MODULE_GRSEC ""
82138+#endif
82139+
82140 #define VERMAGIC_STRING \
82141 UTS_RELEASE " " \
82142 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
82143 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
82144- MODULE_ARCH_VERMAGIC
82145+ MODULE_ARCH_VERMAGIC \
82146+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
82147+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
82148
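[annotation] The added components make the plugin and feature set part of module compatibility checking, which is deliberate: REFCOUNT, CONSTIFY and RANDSTRUCT each change the kernel ABI, so a module built without them must not load. On a kernel with GRKERNSEC, PAX_REFCOUNT, the constify plugin and randstruct enabled, the resulting string would look roughly like this (illustrative; the hash is RANDSTRUCT_HASHED_SEED from the generated header):

	"3.13.7-grsec SMP mod_unload REFCOUNT CONSTIFY_PLUGIN GRSEC RANDSTRUCT_PLUGIN_<hash>"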
82149diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
82150index 502073a..a7de024 100644
82151--- a/include/linux/vga_switcheroo.h
82152+++ b/include/linux/vga_switcheroo.h
82153@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
82154
82155 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
82156
82157-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
82158-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
82159+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
82160+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
82161 #else
82162
82163 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
82164@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
82165
82166 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
82167
82168-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
82169-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
82170+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
82171+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
82172
82173 #endif
82174 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
82175diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
82176index 4b8a891..cb8df6e 100644
82177--- a/include/linux/vmalloc.h
82178+++ b/include/linux/vmalloc.h
82179@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
82180 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
82181 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
82182 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
82183+
82184+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
82185+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
82186+#endif
82187+
82188 /* bits [20..32] reserved for arch specific ioremap internals */
82189
82190 /*
82191@@ -142,7 +147,7 @@ extern void free_vm_area(struct vm_struct *area);
82192
82193 /* for /dev/kmem */
82194 extern long vread(char *buf, char *addr, unsigned long count);
82195-extern long vwrite(char *buf, char *addr, unsigned long count);
82196+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
82197
82198 /*
82199 * Internals. Dont't use..
82200diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
82201index a67b384..f52a537 100644
82202--- a/include/linux/vmstat.h
82203+++ b/include/linux/vmstat.h
82204@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
82205 /*
82206 * Zone based page accounting with per cpu differentials.
82207 */
82208-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
82209+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
82210
82211 static inline void zone_page_state_add(long x, struct zone *zone,
82212 enum zone_stat_item item)
82213 {
82214- atomic_long_add(x, &zone->vm_stat[item]);
82215- atomic_long_add(x, &vm_stat[item]);
82216+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
82217+ atomic_long_add_unchecked(x, &vm_stat[item]);
82218 }
82219
82220-static inline unsigned long global_page_state(enum zone_stat_item item)
82221+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
82222 {
82223- long x = atomic_long_read(&vm_stat[item]);
82224+ long x = atomic_long_read_unchecked(&vm_stat[item]);
82225 #ifdef CONFIG_SMP
82226 if (x < 0)
82227 x = 0;
82228@@ -109,10 +109,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
82229 return x;
82230 }
82231
82232-static inline unsigned long zone_page_state(struct zone *zone,
82233+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
82234 enum zone_stat_item item)
82235 {
82236- long x = atomic_long_read(&zone->vm_stat[item]);
82237+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
82238 #ifdef CONFIG_SMP
82239 if (x < 0)
82240 x = 0;
82241@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
82242 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
82243 enum zone_stat_item item)
82244 {
82245- long x = atomic_long_read(&zone->vm_stat[item]);
82246+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
82247
82248 #ifdef CONFIG_SMP
82249 int cpu;
82250@@ -218,8 +218,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
82251
82252 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
82253 {
82254- atomic_long_inc(&zone->vm_stat[item]);
82255- atomic_long_inc(&vm_stat[item]);
82256+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
82257+ atomic_long_inc_unchecked(&vm_stat[item]);
82258 }
82259
82260 static inline void __inc_zone_page_state(struct page *page,
82261@@ -230,8 +230,8 @@ static inline void __inc_zone_page_state(struct page *page,
82262
82263 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
82264 {
82265- atomic_long_dec(&zone->vm_stat[item]);
82266- atomic_long_dec(&vm_stat[item]);
82267+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
82268+ atomic_long_dec_unchecked(&vm_stat[item]);
82269 }
82270
82271 static inline void __dec_zone_page_state(struct page *page,
82272diff --git a/include/linux/xattr.h b/include/linux/xattr.h
82273index 91b0a68..0e9adf6 100644
82274--- a/include/linux/xattr.h
82275+++ b/include/linux/xattr.h
82276@@ -28,7 +28,7 @@ struct xattr_handler {
82277 size_t size, int handler_flags);
82278 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
82279 size_t size, int flags, int handler_flags);
82280-};
82281+} __do_const;
82282
82283 struct xattr {
82284 const char *name;
82285@@ -37,6 +37,9 @@ struct xattr {
82286 };
82287
82288 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
82289+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
82290+ssize_t pax_getxattr(struct dentry *, void *, size_t);
82291+#endif
82292 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
82293 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
82294 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
82295diff --git a/include/linux/zlib.h b/include/linux/zlib.h
82296index 9c5a6b4..09c9438 100644
82297--- a/include/linux/zlib.h
82298+++ b/include/linux/zlib.h
82299@@ -31,6 +31,7 @@
82300 #define _ZLIB_H
82301
82302 #include <linux/zconf.h>
82303+#include <linux/compiler.h>
82304
82305 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
82306 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
82307@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
82308
82309 /* basic functions */
82310
82311-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
82312+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
82313 /*
82314 Returns the number of bytes that needs to be allocated for a per-
82315 stream workspace with the specified parameters. A pointer to this
82316diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
82317index c768c9f..bdcaa5a 100644
82318--- a/include/media/v4l2-dev.h
82319+++ b/include/media/v4l2-dev.h
82320@@ -76,7 +76,7 @@ struct v4l2_file_operations {
82321 int (*mmap) (struct file *, struct vm_area_struct *);
82322 int (*open) (struct file *);
82323 int (*release) (struct file *);
82324-};
82325+} __do_const;
82326
82327 /*
82328 * Newer version of video_device, handled by videodev2.c
82329diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
82330index c9b1593..a572459 100644
82331--- a/include/media/v4l2-device.h
82332+++ b/include/media/v4l2-device.h
82333@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
82334 this function returns 0. If the name ends with a digit (e.g. cx18),
82335 then the name will be set to cx18-0 since cx180 looks really odd. */
82336 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
82337- atomic_t *instance);
82338+ atomic_unchecked_t *instance);
82339
82340 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
82341 Since the parent disappears this ensures that v4l2_dev doesn't have an
82342diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
82343index 9a36d92..0aafe2a 100644
82344--- a/include/net/9p/transport.h
82345+++ b/include/net/9p/transport.h
82346@@ -60,7 +60,7 @@ struct p9_trans_module {
82347 int (*cancel) (struct p9_client *, struct p9_req_t *req);
82348 int (*zc_request)(struct p9_client *, struct p9_req_t *,
82349 char *, char *, int , int, int, int);
82350-};
82351+} __do_const;
82352
82353 void v9fs_register_trans(struct p9_trans_module *m);
82354 void v9fs_unregister_trans(struct p9_trans_module *m);
82355diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
82356index c853b16d..37fccb7 100644
82357--- a/include/net/bluetooth/l2cap.h
82358+++ b/include/net/bluetooth/l2cap.h
82359@@ -557,7 +557,7 @@ struct l2cap_ops {
82360 long (*get_sndtimeo) (struct l2cap_chan *chan);
82361 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
82362 unsigned long len, int nb);
82363-};
82364+} __do_const;
82365
82366 struct l2cap_conn {
82367 struct hci_conn *hcon;
82368diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
82369index f2ae33d..c457cf0 100644
82370--- a/include/net/caif/cfctrl.h
82371+++ b/include/net/caif/cfctrl.h
82372@@ -52,7 +52,7 @@ struct cfctrl_rsp {
82373 void (*radioset_rsp)(void);
82374 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
82375 struct cflayer *client_layer);
82376-};
82377+} __no_const;
82378
82379 /* Link Setup Parameters for CAIF-Links. */
82380 struct cfctrl_link_param {
82381@@ -101,8 +101,8 @@ struct cfctrl_request_info {
82382 struct cfctrl {
82383 struct cfsrvl serv;
82384 struct cfctrl_rsp res;
82385- atomic_t req_seq_no;
82386- atomic_t rsp_seq_no;
82387+ atomic_unchecked_t req_seq_no;
82388+ atomic_unchecked_t rsp_seq_no;
82389 struct list_head list;
82390 /* Protects from simultaneous access to first_req list */
82391 spinlock_t info_list_lock;
82392diff --git a/include/net/flow.h b/include/net/flow.h
82393index 65ce471..b7bbe9b 100644
82394--- a/include/net/flow.h
82395+++ b/include/net/flow.h
82396@@ -222,6 +222,6 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
82397
82398 void flow_cache_flush(void);
82399 void flow_cache_flush_deferred(void);
82400-extern atomic_t flow_cache_genid;
82401+extern atomic_unchecked_t flow_cache_genid;
82402
82403 #endif
82404diff --git a/include/net/genetlink.h b/include/net/genetlink.h
82405index 1b177ed..a24a138 100644
82406--- a/include/net/genetlink.h
82407+++ b/include/net/genetlink.h
82408@@ -118,7 +118,7 @@ struct genl_ops {
82409 u8 cmd;
82410 u8 internal_flags;
82411 u8 flags;
82412-};
82413+} __do_const;
82414
82415 int __genl_register_family(struct genl_family *family);
82416
82417diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
82418index 734d9b5..48a9a4b 100644
82419--- a/include/net/gro_cells.h
82420+++ b/include/net/gro_cells.h
82421@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
82422 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
82423
82424 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
82425- atomic_long_inc(&dev->rx_dropped);
82426+ atomic_long_inc_unchecked(&dev->rx_dropped);
82427 kfree_skb(skb);
82428 return;
82429 }
82430diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
82431index c55aeed..b3393f4 100644
82432--- a/include/net/inet_connection_sock.h
82433+++ b/include/net/inet_connection_sock.h
82434@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
82435 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
82436 int (*bind_conflict)(const struct sock *sk,
82437 const struct inet_bind_bucket *tb, bool relax);
82438-};
82439+} __do_const;
82440
82441 /** inet_connection_sock - INET connection oriented sock
82442 *
82443diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
82444index f4e127a..c3d5e9c 100644
82445--- a/include/net/inetpeer.h
82446+++ b/include/net/inetpeer.h
82447@@ -47,8 +47,8 @@ struct inet_peer {
82448 */
82449 union {
82450 struct {
82451- atomic_t rid; /* Frag reception counter */
82452- atomic_t ip_id_count; /* IP ID for the next packet */
82453+ atomic_unchecked_t rid; /* Frag reception counter */
82454+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
82455 };
82456 struct rcu_head rcu;
82457 struct inet_peer *gc_next;
82458@@ -178,16 +178,13 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
82459 /* can be called with or without local BH being disabled */
82460 static inline int inet_getid(struct inet_peer *p, int more)
82461 {
82462- int old, new;
82463+ int id;
82464 more++;
82465 inet_peer_refcheck(p);
82466- do {
82467- old = atomic_read(&p->ip_id_count);
82468- new = old + more;
82469- if (!new)
82470- new = 1;
82471- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
82472- return new;
82473+ id = atomic_add_return_unchecked(more, &p->ip_id_count);
82474+ if (!id)
82475+ id = atomic_inc_return_unchecked(&p->ip_id_count);
82476+ return id;
82477 }
82478
82479 #endif /* _NET_INETPEER_H */
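[annotation] The inet_getid() rewrite replaces the cmpxchg retry loop with one unchecked add: the IP ID is a wrap-by-design counter (hence the unchecked ops under PAX_REFCOUNT), and the only invariant kept is that 0 is never handed out. A single-threaded userspace model of the zero-skip (the kernel version tolerates SMP interleaving in the rare zero case):

#include <stdio.h>

static unsigned int ip_id_count = (unsigned int)-2;	/* near wrap, for the demo */

static int model_inet_getid(int more)
{
	int id = (int)(ip_id_count += more + 1);	/* atomic_add_return_unchecked */
	if (!id)
		id = (int)(++ip_id_count);		/* atomic_inc_return_unchecked */
	return id;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%d\n", model_inet_getid(0));	/* -1, 1, 2, 3: zero skipped */
	return 0;
}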
82480diff --git a/include/net/ip.h b/include/net/ip.h
82481index 5a25f36..2e73203 100644
82482--- a/include/net/ip.h
82483+++ b/include/net/ip.h
82484@@ -219,7 +219,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
82485
82486 void inet_get_local_port_range(struct net *net, int *low, int *high);
82487
82488-extern unsigned long *sysctl_local_reserved_ports;
82489+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
82490 static inline int inet_is_reserved_local_port(int port)
82491 {
82492 return test_bit(port, sysctl_local_reserved_ports);
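[annotation] Turning sysctl_local_reserved_ports from a runtime-allocated pointer into a fixed bitmap removes a writable global pointer from the inet_is_reserved_local_port() path (a pointer that would otherwise be a redirection target). The array size is one bit per possible port:

	65536 ports / 8 bits per byte = 8192 bytes
	8192 / sizeof(unsigned long)  = 1024 elements on 64-bit, 2048 on 32-bit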
82493diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
82494index 9922093..a1755d6 100644
82495--- a/include/net/ip_fib.h
82496+++ b/include/net/ip_fib.h
82497@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
82498
82499 #define FIB_RES_SADDR(net, res) \
82500 ((FIB_RES_NH(res).nh_saddr_genid == \
82501- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
82502+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
82503 FIB_RES_NH(res).nh_saddr : \
82504 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
82505 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
82506diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
82507index 5679d92..2e7a690 100644
82508--- a/include/net/ip_vs.h
82509+++ b/include/net/ip_vs.h
82510@@ -558,7 +558,7 @@ struct ip_vs_conn {
82511 struct ip_vs_conn *control; /* Master control connection */
82512 atomic_t n_control; /* Number of controlled ones */
82513 struct ip_vs_dest *dest; /* real server */
82514- atomic_t in_pkts; /* incoming packet counter */
82515+ atomic_unchecked_t in_pkts; /* incoming packet counter */
82516
82517 /* packet transmitter for different forwarding methods. If it
82518 mangles the packet, it must return NF_DROP or better NF_STOLEN,
82519@@ -705,7 +705,7 @@ struct ip_vs_dest {
82520 __be16 port; /* port number of the server */
82521 union nf_inet_addr addr; /* IP address of the server */
82522 volatile unsigned int flags; /* dest status flags */
82523- atomic_t conn_flags; /* flags to copy to conn */
82524+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
82525 atomic_t weight; /* server weight */
82526
82527 atomic_t refcnt; /* reference counter */
82528@@ -960,11 +960,11 @@ struct netns_ipvs {
82529 /* ip_vs_lblc */
82530 int sysctl_lblc_expiration;
82531 struct ctl_table_header *lblc_ctl_header;
82532- struct ctl_table *lblc_ctl_table;
82533+ ctl_table_no_const *lblc_ctl_table;
82534 /* ip_vs_lblcr */
82535 int sysctl_lblcr_expiration;
82536 struct ctl_table_header *lblcr_ctl_header;
82537- struct ctl_table *lblcr_ctl_table;
82538+ ctl_table_no_const *lblcr_ctl_table;
82539 /* ip_vs_est */
82540 struct list_head est_list; /* estimator list */
82541 spinlock_t est_lock;
82542diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
82543index 0224402..dafaf94a 100644
82544--- a/include/net/irda/ircomm_tty.h
82545+++ b/include/net/irda/ircomm_tty.h
82546@@ -35,6 +35,7 @@
82547 #include <linux/termios.h>
82548 #include <linux/timer.h>
82549 #include <linux/tty.h> /* struct tty_struct */
82550+#include <asm/local.h>
82551
82552 #include <net/irda/irias_object.h>
82553 #include <net/irda/ircomm_core.h>
82554diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
82555index 714cc9a..ea05f3e 100644
82556--- a/include/net/iucv/af_iucv.h
82557+++ b/include/net/iucv/af_iucv.h
82558@@ -149,7 +149,7 @@ struct iucv_skb_cb {
82559 struct iucv_sock_list {
82560 struct hlist_head head;
82561 rwlock_t lock;
82562- atomic_t autobind_name;
82563+ atomic_unchecked_t autobind_name;
82564 };
82565
82566 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
82567diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
82568index f3be818..bf46196 100644
82569--- a/include/net/llc_c_ac.h
82570+++ b/include/net/llc_c_ac.h
82571@@ -87,7 +87,7 @@
82572 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
82573 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
82574
82575-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
82576+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
82577
82578 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
82579 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
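
Note where the const lands: it qualifies the pointer itself, so every table
built out of llc_conn_action_t entries is immutable after initialization and
can be placed in a read-only section, which is the property the constify
plugin depends on. A small sketch of the pattern with illustrative names:

/* the "* const" makes each table slot unwritable */
typedef int (* const action_t)(int arg);

static int act_log(int arg)  { return arg; }
static int act_drop(int arg) { return -arg; }

/* "actions[0] = act_drop;" would now be a compile-time error */
static action_t actions[] = { act_log, act_drop };

static int run_first(int arg)
{
        return actions[0](arg);
}
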
82580diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
82581index 3948cf1..83b28c4 100644
82582--- a/include/net/llc_c_ev.h
82583+++ b/include/net/llc_c_ev.h
82584@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
82585 return (struct llc_conn_state_ev *)skb->cb;
82586 }
82587
82588-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
82589-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
82590+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
82591+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
82592
82593 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
82594 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
82595diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
82596index 0e79cfb..f46db31 100644
82597--- a/include/net/llc_c_st.h
82598+++ b/include/net/llc_c_st.h
82599@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
82600 u8 next_state;
82601 llc_conn_ev_qfyr_t *ev_qualifiers;
82602 llc_conn_action_t *ev_actions;
82603-};
82604+} __do_const;
82605
82606 struct llc_conn_state {
82607 u8 current_state;
82608diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
82609index a61b98c..aade1eb 100644
82610--- a/include/net/llc_s_ac.h
82611+++ b/include/net/llc_s_ac.h
82612@@ -23,7 +23,7 @@
82613 #define SAP_ACT_TEST_IND 9
82614
82615 /* All action functions must look like this */
82616-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
82617+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
82618
82619 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
82620 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
82621diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
82622index 567c681..cd73ac0 100644
82623--- a/include/net/llc_s_st.h
82624+++ b/include/net/llc_s_st.h
82625@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
82626 llc_sap_ev_t ev;
82627 u8 next_state;
82628 llc_sap_action_t *ev_actions;
82629-};
82630+} __do_const;
82631
82632 struct llc_sap_state {
82633 u8 curr_state;
82634diff --git a/include/net/mac80211.h b/include/net/mac80211.h
82635index 7ceed99..d3ffaa2 100644
82636--- a/include/net/mac80211.h
82637+++ b/include/net/mac80211.h
82638@@ -4407,7 +4407,7 @@ struct rate_control_ops {
82639 void (*add_sta_debugfs)(void *priv, void *priv_sta,
82640 struct dentry *dir);
82641 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
82642-};
82643+} __do_const;
82644
82645 static inline int rate_supported(struct ieee80211_sta *sta,
82646 enum ieee80211_band band,
82647diff --git a/include/net/neighbour.h b/include/net/neighbour.h
82648index 536501a..47b7982 100644
82649--- a/include/net/neighbour.h
82650+++ b/include/net/neighbour.h
82651@@ -123,7 +123,7 @@ struct neigh_ops {
82652 void (*error_report)(struct neighbour *, struct sk_buff *);
82653 int (*output)(struct neighbour *, struct sk_buff *);
82654 int (*connected_output)(struct neighbour *, struct sk_buff *);
82655-};
82656+} __do_const;
82657
82658 struct pneigh_entry {
82659 struct pneigh_entry *next;
82660@@ -163,7 +163,6 @@ struct neigh_table {
82661 void (*proxy_redo)(struct sk_buff *skb);
82662 char *id;
82663 struct neigh_parms parms;
82664- /* HACK. gc_* should follow parms without a gap! */
82665 int gc_interval;
82666 int gc_thresh1;
82667 int gc_thresh2;
82668@@ -178,7 +177,7 @@ struct neigh_table {
82669 struct neigh_statistics __percpu *stats;
82670 struct neigh_hash_table __rcu *nht;
82671 struct pneigh_entry **phash_buckets;
82672-};
82673+} __randomize_layout;
82674
82675 #define NEIGH_PRIV_ALIGN sizeof(long long)
82676 #define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
82677diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
82678index da68c9a..c4a0720 100644
82679--- a/include/net/net_namespace.h
82680+++ b/include/net/net_namespace.h
82681@@ -124,8 +124,8 @@ struct net {
82682 struct netns_ipvs *ipvs;
82683 #endif
82684 struct sock *diag_nlsk;
82685- atomic_t fnhe_genid;
82686-};
82687+ atomic_unchecked_t fnhe_genid;
82688+} __randomize_layout;
82689
82690 /*
82691 * ifindex generation is per-net namespace, and loopback is
82692@@ -281,7 +281,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
82693 #define __net_init __init
82694 #define __net_exit __exit_refok
82695 #define __net_initdata __initdata
82696+#ifdef CONSTIFY_PLUGIN
82697 #define __net_initconst __initconst
82698+#else
82699+#define __net_initconst __initdata
82700+#endif
82701 #endif
82702
82703 struct pernet_operations {
82704@@ -291,7 +295,7 @@ struct pernet_operations {
82705 void (*exit_batch)(struct list_head *net_exit_list);
82706 int *id;
82707 size_t size;
82708-};
82709+} __do_const;
82710
82711 /*
82712 * Use these carefully. If you implement a network device and it
82713@@ -339,23 +343,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
82714
82715 static inline int rt_genid_ipv4(struct net *net)
82716 {
82717- return atomic_read(&net->ipv4.rt_genid);
82718+ return atomic_read_unchecked(&net->ipv4.rt_genid);
82719 }
82720
82721 static inline void rt_genid_bump_ipv4(struct net *net)
82722 {
82723- atomic_inc(&net->ipv4.rt_genid);
82724+ atomic_inc_unchecked(&net->ipv4.rt_genid);
82725 }
82726
82727 #if IS_ENABLED(CONFIG_IPV6)
82728 static inline int rt_genid_ipv6(struct net *net)
82729 {
82730- return atomic_read(&net->ipv6.rt_genid);
82731+ return atomic_read_unchecked(&net->ipv6.rt_genid);
82732 }
82733
82734 static inline void rt_genid_bump_ipv6(struct net *net)
82735 {
82736- atomic_inc(&net->ipv6.rt_genid);
82737+ atomic_inc_unchecked(&net->ipv6.rt_genid);
82738 }
82739 #else
82740 static inline int rt_genid_ipv6(struct net *net)
82741@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
82742
82743 static inline int fnhe_genid(struct net *net)
82744 {
82745- return atomic_read(&net->fnhe_genid);
82746+ return atomic_read_unchecked(&net->fnhe_genid);
82747 }
82748
82749 static inline void fnhe_genid_bump(struct net *net)
82750 {
82751- atomic_inc(&net->fnhe_genid);
82752+ atomic_inc_unchecked(&net->fnhe_genid);
82753 }
82754
82755 #endif /* __NET_NET_NAMESPACE_H */
82756diff --git a/include/net/netdma.h b/include/net/netdma.h
82757index 8ba8ce2..99b7fff 100644
82758--- a/include/net/netdma.h
82759+++ b/include/net/netdma.h
82760@@ -24,7 +24,7 @@
82761 #include <linux/dmaengine.h>
82762 #include <linux/skbuff.h>
82763
82764-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
82765+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
82766 struct sk_buff *skb, int offset, struct iovec *to,
82767 size_t len, struct dma_pinned_list *pinned_list);
82768
82769diff --git a/include/net/netlink.h b/include/net/netlink.h
82770index 2b47eaa..6d5bcc2 100644
82771--- a/include/net/netlink.h
82772+++ b/include/net/netlink.h
82773@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
82774 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
82775 {
82776 if (mark)
82777- skb_trim(skb, (unsigned char *) mark - skb->data);
82778+ skb_trim(skb, (const unsigned char *) mark - skb->data);
82779 }
82780
82781 /**
82782diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
82783index c9c0c53..53f24c3 100644
82784--- a/include/net/netns/conntrack.h
82785+++ b/include/net/netns/conntrack.h
82786@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
82787 struct nf_proto_net {
82788 #ifdef CONFIG_SYSCTL
82789 struct ctl_table_header *ctl_table_header;
82790- struct ctl_table *ctl_table;
82791+ ctl_table_no_const *ctl_table;
82792 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
82793 struct ctl_table_header *ctl_compat_header;
82794- struct ctl_table *ctl_compat_table;
82795+ ctl_table_no_const *ctl_compat_table;
82796 #endif
82797 #endif
82798 unsigned int users;
82799@@ -58,7 +58,7 @@ struct nf_ip_net {
82800 struct nf_icmp_net icmpv6;
82801 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
82802 struct ctl_table_header *ctl_table_header;
82803- struct ctl_table *ctl_table;
82804+ ctl_table_no_const *ctl_table;
82805 #endif
82806 };
82807
82808diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
82809index ee520cb..9a0fd88 100644
82810--- a/include/net/netns/ipv4.h
82811+++ b/include/net/netns/ipv4.h
82812@@ -72,7 +72,7 @@ struct netns_ipv4 {
82813
82814 kgid_t sysctl_ping_group_range[2];
82815
82816- atomic_t dev_addr_genid;
82817+ atomic_unchecked_t dev_addr_genid;
82818
82819 #ifdef CONFIG_IP_MROUTE
82820 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
82821@@ -82,6 +82,6 @@ struct netns_ipv4 {
82822 struct fib_rules_ops *mr_rules_ops;
82823 #endif
82824 #endif
82825- atomic_t rt_genid;
82826+ atomic_unchecked_t rt_genid;
82827 };
82828 #endif
82829diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
82830index 0fb2401..477d81c 100644
82831--- a/include/net/netns/ipv6.h
82832+++ b/include/net/netns/ipv6.h
82833@@ -71,8 +71,8 @@ struct netns_ipv6 {
82834 struct fib_rules_ops *mr6_rules_ops;
82835 #endif
82836 #endif
82837- atomic_t dev_addr_genid;
82838- atomic_t rt_genid;
82839+ atomic_unchecked_t dev_addr_genid;
82840+ atomic_unchecked_t rt_genid;
82841 };
82842
82843 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
82844diff --git a/include/net/ping.h b/include/net/ping.h
82845index 90f4841..74446a8 100644
82846--- a/include/net/ping.h
82847+++ b/include/net/ping.h
82848@@ -56,7 +56,7 @@ struct ping_iter_state {
82849 extern struct proto ping_prot;
82850 extern struct ping_table ping_table;
82851 #if IS_ENABLED(CONFIG_IPV6)
82852-extern struct pingv6_ops pingv6_ops;
82853+extern struct pingv6_ops *pingv6_ops;
82854 #endif
82855
82856 struct pingfakehdr {
82857diff --git a/include/net/protocol.h b/include/net/protocol.h
82858index fbf7676..a5e21c3 100644
82859--- a/include/net/protocol.h
82860+++ b/include/net/protocol.h
82861@@ -44,7 +44,7 @@ struct net_protocol {
82862 void (*err_handler)(struct sk_buff *skb, u32 info);
82863 unsigned int no_policy:1,
82864 netns_ok:1;
82865-};
82866+} __do_const;
82867
82868 #if IS_ENABLED(CONFIG_IPV6)
82869 struct inet6_protocol {
82870@@ -57,7 +57,7 @@ struct inet6_protocol {
82871 u8 type, u8 code, int offset,
82872 __be32 info);
82873 unsigned int flags; /* INET6_PROTO_xxx */
82874-};
82875+} __do_const;
82876
82877 #define INET6_PROTO_NOPOLICY 0x1
82878 #define INET6_PROTO_FINAL 0x2
82879diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
82880index bb13a18..e734116 100644
82881--- a/include/net/rtnetlink.h
82882+++ b/include/net/rtnetlink.h
82883@@ -79,7 +79,7 @@ struct rtnl_link_ops {
82884 const struct net_device *dev);
82885 unsigned int (*get_num_tx_queues)(void);
82886 unsigned int (*get_num_rx_queues)(void);
82887-};
82888+} __do_const;
82889
82890 int __rtnl_link_register(struct rtnl_link_ops *ops);
82891 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
82892diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
82893index 6bd44fe..96f364e 100644
82894--- a/include/net/sctp/checksum.h
82895+++ b/include/net/sctp/checksum.h
82896@@ -62,8 +62,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
82897 unsigned int offset)
82898 {
82899 struct sctphdr *sh = sctp_hdr(skb);
82900- __le32 ret, old = sh->checksum;
82901- const struct skb_checksum_ops ops = {
82902+ __le32 ret, old = sh->checksum;
82903+ static const struct skb_checksum_ops ops = {
82904 .update = sctp_csum_update,
82905 .combine = sctp_csum_combine,
82906 };
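
The added keyword matters more than it looks: without static, the const ops
struct lives in the caller's stack frame and its function pointers are
rebuilt, writable, on every call; with static const there is a single
instance in .rodata and nothing on the stack to aim at. A standalone sketch
of the pattern with made-up ops:

struct csum_ops {
        unsigned int (*update)(const void *p, int len, unsigned int sum);
        unsigned int (*combine)(unsigned int a, unsigned int b,
                                int offset, int len);
};

static unsigned int upd(const void *p, int len, unsigned int sum)
{
        (void)p; (void)len;
        return sum;
}

static unsigned int comb(unsigned int a, unsigned int b, int offset, int len)
{
        (void)offset; (void)len;
        return a ^ b;
}

/* one read-only instance, built at compile time, shared by every call */
static const struct csum_ops ops = {
        .update  = upd,
        .combine = comb,
};

static unsigned int checksum(const void *buf, int len)
{
        return ops.combine(ops.update(buf, len, 0), 0, 0, len);
}
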
82907diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
82908index 4ef75af..5aa073a 100644
82909--- a/include/net/sctp/sm.h
82910+++ b/include/net/sctp/sm.h
82911@@ -81,7 +81,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
82912 typedef struct {
82913 sctp_state_fn_t *fn;
82914 const char *name;
82915-} sctp_sm_table_entry_t;
82916+} __do_const sctp_sm_table_entry_t;
82917
82918 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
82919 * currently in use.
82920@@ -293,7 +293,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
82921 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
82922
82923 /* Extern declarations for major data structures. */
82924-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
82925+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
82926
82927
82928 /* Get the size of a DATA chunk payload. */
82929diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
82930index 0a248b3..4dcbe5c 100644
82931--- a/include/net/sctp/structs.h
82932+++ b/include/net/sctp/structs.h
82933@@ -508,7 +508,7 @@ struct sctp_pf {
82934 struct sctp_association *asoc);
82935 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
82936 struct sctp_af *af;
82937-};
82938+} __do_const;
82939
82940
82941 /* Structure to track chunk fragments that have been acked, but peer
82942diff --git a/include/net/sock.h b/include/net/sock.h
82943index 2ef3c3e..e02013e 100644
82944--- a/include/net/sock.h
82945+++ b/include/net/sock.h
82946@@ -348,7 +348,7 @@ struct sock {
82947 unsigned int sk_napi_id;
82948 unsigned int sk_ll_usec;
82949 #endif
82950- atomic_t sk_drops;
82951+ atomic_unchecked_t sk_drops;
82952 int sk_rcvbuf;
82953
82954 struct sk_filter __rcu *sk_filter;
82955@@ -1209,7 +1209,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
82956 return ret >> PAGE_SHIFT;
82957 }
82958
82959-static inline long
82960+static inline long __intentional_overflow(-1)
82961 sk_memory_allocated(const struct sock *sk)
82962 {
82963 struct proto *prot = sk->sk_prot;
82964@@ -1813,7 +1813,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
82965 }
82966
82967 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
82968- char __user *from, char *to,
82969+ char __user *from, unsigned char *to,
82970 int copy, int offset)
82971 {
82972 if (skb->ip_summed == CHECKSUM_NONE) {
82973@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
82974 }
82975 }
82976
82977-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
82978+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
82979
82980 /**
82981 * sk_page_frag - return an appropriate page_frag
82982diff --git a/include/net/tcp.h b/include/net/tcp.h
82983index 9250d62..10a7f03 100644
82984--- a/include/net/tcp.h
82985+++ b/include/net/tcp.h
82986@@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
82987 #ifdef CONFIG_SYN_COOKIES
82988 #include <linux/ktime.h>
82989
82990-/* Syncookies use a monotonic timer which increments every 64 seconds.
82991+/* Syncookies use a monotonic timer which increments every 60 seconds.
82992 * This counter is used both as a hash input and partially encoded into
82993 * the cookie value. A cookie is only validated further if the delta
82994 * between the current counter value and the encoded one is less than this,
82995- * i.e. a sent cookie is valid only at most for 128 seconds (or less if
82996+ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
82997 * the counter advances immediately after a cookie is generated).
82998 */
82999 #define MAX_SYNCOOKIE_AGE 2
83000
83001 static inline u32 tcp_cookie_time(void)
83002 {
83003- struct timespec now;
83004- getnstimeofday(&now);
83005- return now.tv_sec >> 6; /* 64 seconds granularity */
83006+ u64 val = get_jiffies_64();
83007+
83008+ do_div(val, 60 * HZ);
83009+ return val;
83010 }
83011
83012 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
83013@@ -540,7 +541,7 @@ void tcp_retransmit_timer(struct sock *sk);
83014 void tcp_xmit_retransmit_queue(struct sock *);
83015 void tcp_simple_retransmit(struct sock *);
83016 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
83017-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
83018+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
83019
83020 void tcp_send_probe0(struct sock *);
83021 void tcp_send_partial(struct sock *);
83022@@ -711,8 +712,8 @@ struct tcp_skb_cb {
83023 struct inet6_skb_parm h6;
83024 #endif
83025 } header; /* For incoming frames */
83026- __u32 seq; /* Starting sequence number */
83027- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
83028+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
83029+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
83030 __u32 when; /* used to compute rtt's */
83031 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
83032
83033@@ -726,7 +727,7 @@ struct tcp_skb_cb {
83034
83035 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
83036 /* 1 byte hole */
83037- __u32 ack_seq; /* Sequence number ACK'd */
83038+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
83039 };
83040
83041 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
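
The tcp_cookie_time() rewrite above swaps wall-clock time, which an
administrator or an NTP step can move in either direction, for the monotonic
jiffies counter, and tightens the bucket from 64 s to 60 s; with
MAX_SYNCOOKIE_AGE of 2, a cookie is accepted for at most 2*60 = 120 s.
A userspace sketch of the new computation; HZ and the jiffies variable stand
in for the kernel's CONFIG_HZ and get_jiffies_64():

#include <stdint.h>

#define HZ 250                  /* assumption: a typical CONFIG_HZ */

static uint64_t jiffies;        /* stand-in for get_jiffies_64() */

static uint32_t cookie_time(void)
{
        /* 60-second buckets; monotonic, so never jumps backwards */
        return (uint32_t)(jiffies / (60 * HZ));
}
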
83042diff --git a/include/net/xfrm.h b/include/net/xfrm.h
83043index 6b82fdf..14d74d2 100644
83044--- a/include/net/xfrm.h
83045+++ b/include/net/xfrm.h
83046@@ -287,7 +287,6 @@ struct xfrm_dst;
83047 struct xfrm_policy_afinfo {
83048 unsigned short family;
83049 struct dst_ops *dst_ops;
83050- void (*garbage_collect)(struct net *net);
83051 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
83052 const xfrm_address_t *saddr,
83053 const xfrm_address_t *daddr);
83054@@ -305,7 +304,7 @@ struct xfrm_policy_afinfo {
83055 struct net_device *dev,
83056 const struct flowi *fl);
83057 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
83058-};
83059+} __do_const;
83060
83061 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
83062 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
83063@@ -344,7 +343,7 @@ struct xfrm_state_afinfo {
83064 int (*transport_finish)(struct sk_buff *skb,
83065 int async);
83066 void (*local_error)(struct sk_buff *skb, u32 mtu);
83067-};
83068+} __do_const;
83069
83070 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
83071 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
83072@@ -429,7 +428,7 @@ struct xfrm_mode {
83073 struct module *owner;
83074 unsigned int encap;
83075 int flags;
83076-};
83077+} __do_const;
83078
83079 /* Flags for xfrm_mode. */
83080 enum {
83081@@ -526,7 +525,7 @@ struct xfrm_policy {
83082 struct timer_list timer;
83083
83084 struct flow_cache_object flo;
83085- atomic_t genid;
83086+ atomic_unchecked_t genid;
83087 u32 priority;
83088 u32 index;
83089 struct xfrm_mark mark;
83090@@ -1166,6 +1165,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
83091 }
83092
83093 void xfrm_garbage_collect(struct net *net);
83094+void xfrm_garbage_collect_deferred(struct net *net);
83095
83096 #else
83097
83098@@ -1204,6 +1204,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
83099 static inline void xfrm_garbage_collect(struct net *net)
83100 {
83101 }
83102+static inline void xfrm_garbage_collect_deferred(struct net *net)
83103+{
83104+}
83105 #endif
83106
83107 static __inline__
83108diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
83109index 1017e0b..227aa4d 100644
83110--- a/include/rdma/iw_cm.h
83111+++ b/include/rdma/iw_cm.h
83112@@ -122,7 +122,7 @@ struct iw_cm_verbs {
83113 int backlog);
83114
83115 int (*destroy_listen)(struct iw_cm_id *cm_id);
83116-};
83117+} __no_const;
83118
83119 /**
83120 * iw_create_cm_id - Create an IW CM identifier.
83121diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
83122index 52beadf..598734c 100644
83123--- a/include/scsi/libfc.h
83124+++ b/include/scsi/libfc.h
83125@@ -771,6 +771,7 @@ struct libfc_function_template {
83126 */
83127 void (*disc_stop_final) (struct fc_lport *);
83128 };
83129+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
83130
83131 /**
83132 * struct fc_disc - Discovery context
83133@@ -875,7 +876,7 @@ struct fc_lport {
83134 struct fc_vport *vport;
83135
83136 /* Operational Information */
83137- struct libfc_function_template tt;
83138+ libfc_function_template_no_const tt;
83139 u8 link_up;
83140 u8 qfull;
83141 enum fc_lport_state state;
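
This introduces the recurring *_no_const escape hatch: the constify plugin
turns ops-style structures const kernel-wide, and the rare instance that
genuinely must be written at runtime (here the tt template that libfc fills
in per-lport) is declared through a __no_const typedef instead. A sketch of
the pattern; __no_const is a plugin attribute, stubbed to nothing here so the
example compiles without the plugin:

#define __no_const      /* plugin attribute: exempt from constification */

struct ops_template {
        int (*start)(void);
        int (*stop)(void);
};

/* same layout, but instances of this typedef stay writable */
typedef struct ops_template __no_const ops_template_no_const;

struct port {
        ops_template_no_const tt;       /* filled in at runtime */
};
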
83142diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
83143index d65fbec..f80fef2 100644
83144--- a/include/scsi/scsi_device.h
83145+++ b/include/scsi/scsi_device.h
83146@@ -180,9 +180,9 @@ struct scsi_device {
83147 unsigned int max_device_blocked; /* what device_blocked counts down from */
83148 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
83149
83150- atomic_t iorequest_cnt;
83151- atomic_t iodone_cnt;
83152- atomic_t ioerr_cnt;
83153+ atomic_unchecked_t iorequest_cnt;
83154+ atomic_unchecked_t iodone_cnt;
83155+ atomic_unchecked_t ioerr_cnt;
83156
83157 struct device sdev_gendev,
83158 sdev_dev;
83159diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
83160index b797e8f..8e2c3aa 100644
83161--- a/include/scsi/scsi_transport_fc.h
83162+++ b/include/scsi/scsi_transport_fc.h
83163@@ -751,7 +751,8 @@ struct fc_function_template {
83164 unsigned long show_host_system_hostname:1;
83165
83166 unsigned long disable_target_scan:1;
83167-};
83168+} __do_const;
83169+typedef struct fc_function_template __no_const fc_function_template_no_const;
83170
83171
83172 /**
83173diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
83174index ae6c3b8..fd748ac 100644
83175--- a/include/sound/compress_driver.h
83176+++ b/include/sound/compress_driver.h
83177@@ -128,7 +128,7 @@ struct snd_compr_ops {
83178 struct snd_compr_caps *caps);
83179 int (*get_codec_caps) (struct snd_compr_stream *stream,
83180 struct snd_compr_codec_caps *codec);
83181-};
83182+} __no_const;
83183
83184 /**
83185 * struct snd_compr: Compressed device
83186diff --git a/include/sound/soc.h b/include/sound/soc.h
83187index 1f741cb..8cefc08 100644
83188--- a/include/sound/soc.h
83189+++ b/include/sound/soc.h
83190@@ -763,7 +763,7 @@ struct snd_soc_codec_driver {
83191 /* probe ordering - for components with runtime dependencies */
83192 int probe_order;
83193 int remove_order;
83194-};
83195+} __do_const;
83196
83197 /* SoC platform interface */
83198 struct snd_soc_platform_driver {
83199@@ -809,7 +809,7 @@ struct snd_soc_platform_driver {
83200 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
83201 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
83202 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
83203-};
83204+} __do_const;
83205
83206 struct snd_soc_platform {
83207 const char *name;
83208diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
83209index e3569f8..6544ffd 100644
83210--- a/include/target/target_core_base.h
83211+++ b/include/target/target_core_base.h
83212@@ -687,7 +687,7 @@ struct se_device {
83213 atomic_long_t write_bytes;
83214 /* Active commands on this virtual SE device */
83215 atomic_t simple_cmds;
83216- atomic_t dev_ordered_id;
83217+ atomic_unchecked_t dev_ordered_id;
83218 atomic_t dev_ordered_sync;
83219 atomic_t dev_qf_count;
83220 int export_count;
83221diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
83222new file mode 100644
83223index 0000000..fb634b7
83224--- /dev/null
83225+++ b/include/trace/events/fs.h
83226@@ -0,0 +1,53 @@
83227+#undef TRACE_SYSTEM
83228+#define TRACE_SYSTEM fs
83229+
83230+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
83231+#define _TRACE_FS_H
83232+
83233+#include <linux/fs.h>
83234+#include <linux/tracepoint.h>
83235+
83236+TRACE_EVENT(do_sys_open,
83237+
83238+ TP_PROTO(const char *filename, int flags, int mode),
83239+
83240+ TP_ARGS(filename, flags, mode),
83241+
83242+ TP_STRUCT__entry(
83243+ __string( filename, filename )
83244+ __field( int, flags )
83245+ __field( int, mode )
83246+ ),
83247+
83248+ TP_fast_assign(
83249+ __assign_str(filename, filename);
83250+ __entry->flags = flags;
83251+ __entry->mode = mode;
83252+ ),
83253+
83254+ TP_printk("\"%s\" %x %o",
83255+ __get_str(filename), __entry->flags, __entry->mode)
83256+);
83257+
83258+TRACE_EVENT(open_exec,
83259+
83260+ TP_PROTO(const char *filename),
83261+
83262+ TP_ARGS(filename),
83263+
83264+ TP_STRUCT__entry(
83265+ __string( filename, filename )
83266+ ),
83267+
83268+ TP_fast_assign(
83269+ __assign_str(filename, filename);
83270+ ),
83271+
83272+ TP_printk("\"%s\"",
83273+ __get_str(filename))
83274+);
83275+
83276+#endif /* _TRACE_FS_H */
83277+
83278+/* This part must be outside protection */
83279+#include <trace/define_trace.h>
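
Each TRACE_EVENT() above expands into a trace_<event>() inline with the
TP_PROTO signature, plus registration glue when CREATE_TRACE_POINTS is
defined in exactly one translation unit. A kernel-context sketch of firing
the two new events; the calling functions are illustrative:

#include <trace/events/fs.h>

static void report_open(const char *filename, int flags, int mode)
{
        /* compiles down to a no-op unless the event is enabled */
        trace_do_sys_open(filename, flags, mode);
}

static void report_exec(const char *filename)
{
        trace_open_exec(filename);
}
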
83280diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
83281index 1c09820..7f5ec79 100644
83282--- a/include/trace/events/irq.h
83283+++ b/include/trace/events/irq.h
83284@@ -36,7 +36,7 @@ struct softirq_action;
83285 */
83286 TRACE_EVENT(irq_handler_entry,
83287
83288- TP_PROTO(int irq, struct irqaction *action),
83289+ TP_PROTO(int irq, const struct irqaction *action),
83290
83291 TP_ARGS(irq, action),
83292
83293@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
83294 */
83295 TRACE_EVENT(irq_handler_exit,
83296
83297- TP_PROTO(int irq, struct irqaction *action, int ret),
83298+ TP_PROTO(int irq, const struct irqaction *action, int ret),
83299
83300 TP_ARGS(irq, action, ret),
83301
83302diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
83303index 7caf44c..23c6f27 100644
83304--- a/include/uapi/linux/a.out.h
83305+++ b/include/uapi/linux/a.out.h
83306@@ -39,6 +39,14 @@ enum machine_type {
83307 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
83308 };
83309
83310+/* Constants for the N_FLAGS field */
83311+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
83312+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
83313+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
83314+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
83315+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
83316+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
83317+
83318 #if !defined (N_MAGIC)
83319 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
83320 #endif
83321diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
83322index d876736..ccce5c0 100644
83323--- a/include/uapi/linux/byteorder/little_endian.h
83324+++ b/include/uapi/linux/byteorder/little_endian.h
83325@@ -42,51 +42,51 @@
83326
83327 static inline __le64 __cpu_to_le64p(const __u64 *p)
83328 {
83329- return (__force __le64)*p;
83330+ return (__force const __le64)*p;
83331 }
83332-static inline __u64 __le64_to_cpup(const __le64 *p)
83333+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
83334 {
83335- return (__force __u64)*p;
83336+ return (__force const __u64)*p;
83337 }
83338 static inline __le32 __cpu_to_le32p(const __u32 *p)
83339 {
83340- return (__force __le32)*p;
83341+ return (__force const __le32)*p;
83342 }
83343 static inline __u32 __le32_to_cpup(const __le32 *p)
83344 {
83345- return (__force __u32)*p;
83346+ return (__force const __u32)*p;
83347 }
83348 static inline __le16 __cpu_to_le16p(const __u16 *p)
83349 {
83350- return (__force __le16)*p;
83351+ return (__force const __le16)*p;
83352 }
83353 static inline __u16 __le16_to_cpup(const __le16 *p)
83354 {
83355- return (__force __u16)*p;
83356+ return (__force const __u16)*p;
83357 }
83358 static inline __be64 __cpu_to_be64p(const __u64 *p)
83359 {
83360- return (__force __be64)__swab64p(p);
83361+ return (__force const __be64)__swab64p(p);
83362 }
83363 static inline __u64 __be64_to_cpup(const __be64 *p)
83364 {
83365- return __swab64p((__u64 *)p);
83366+ return __swab64p((const __u64 *)p);
83367 }
83368 static inline __be32 __cpu_to_be32p(const __u32 *p)
83369 {
83370- return (__force __be32)__swab32p(p);
83371+ return (__force const __be32)__swab32p(p);
83372 }
83373-static inline __u32 __be32_to_cpup(const __be32 *p)
83374+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
83375 {
83376- return __swab32p((__u32 *)p);
83377+ return __swab32p((const __u32 *)p);
83378 }
83379 static inline __be16 __cpu_to_be16p(const __u16 *p)
83380 {
83381- return (__force __be16)__swab16p(p);
83382+ return (__force const __be16)__swab16p(p);
83383 }
83384 static inline __u16 __be16_to_cpup(const __be16 *p)
83385 {
83386- return __swab16p((__u16 *)p);
83387+ return __swab16p((const __u16 *)p);
83388 }
83389 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
83390 #define __le64_to_cpus(x) do { (void)(x); } while (0)
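
__intentional_overflow(-1) blanket-exempts these helpers from the
size_overflow plugin: byte-swapping legitimately produces values outside any
range the plugin could infer for the input. For reference, a standalone
version of the swap that __be32_to_cpup() performs on a little-endian host:

#include <stdint.h>

static uint32_t swab32(uint32_t v)
{
        /* 0x11223344 -> 0x44332211 */
        return  (v >> 24)                |
               ((v >>  8) & 0x0000ff00u) |
               ((v <<  8) & 0x00ff0000u) |
                (v << 24);
}
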
83391diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
83392index ef6103b..d4e65dd 100644
83393--- a/include/uapi/linux/elf.h
83394+++ b/include/uapi/linux/elf.h
83395@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
83396 #define PT_GNU_EH_FRAME 0x6474e550
83397
83398 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
83399+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
83400+
83401+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
83402+
83403+/* Constants for the e_flags field */
83404+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
83405+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
83406+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
83407+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
83408+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
83409+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
83410
83411 /*
83412 * Extended Numbering
83413@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
83414 #define DT_DEBUG 21
83415 #define DT_TEXTREL 22
83416 #define DT_JMPREL 23
83417+#define DT_FLAGS 30
83418+ #define DF_TEXTREL 0x00000004
83419 #define DT_ENCODING 32
83420 #define OLD_DT_LOOS 0x60000000
83421 #define DT_LOOS 0x6000000d
83422@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
83423 #define PF_W 0x2
83424 #define PF_X 0x1
83425
83426+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
83427+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
83428+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
83429+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
83430+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
83431+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
83432+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
83433+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
83434+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
83435+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
83436+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
83437+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
83438+
83439 typedef struct elf32_phdr{
83440 Elf32_Word p_type;
83441 Elf32_Off p_offset;
83442@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
83443 #define EI_OSABI 7
83444 #define EI_PAD 8
83445
83446+#define EI_PAX 14
83447+
83448 #define ELFMAG0 0x7f /* EI_MAG */
83449 #define ELFMAG1 'E'
83450 #define ELFMAG2 'L'
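
PT_PAX_FLAGS defines a vendor program header (PT_LOOS + 0x5041580, i.e.
0x65041580) whose p_flags field carries the per-binary enable/disable bits
listed above. A userspace sketch that scans a 32-bit ELF's program headers
for it; the trailing-underscore names avoid clashing with toolchains that
already ship the real macros, and phdr/phnum are assumed to be mapped
already:

#include <elf.h>
#include <stdio.h>

#define PT_PAX_FLAGS_   (0x60000000 + 0x5041580)        /* PT_LOOS based */
#define PF_MPROTECT_    (1U << 8)

static void show_pax(const Elf32_Phdr *phdr, unsigned int phnum)
{
        for (unsigned int i = 0; i < phnum; i++) {
                if (phdr[i].p_type != PT_PAX_FLAGS_)
                        continue;
                printf("MPROTECT %s\n",
                       (phdr[i].p_flags & PF_MPROTECT_) ? "on" : "not forced");
                return;
        }
        puts("no PT_PAX_FLAGS header");
}
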
83451diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
83452index aa169c4..6a2771d 100644
83453--- a/include/uapi/linux/personality.h
83454+++ b/include/uapi/linux/personality.h
83455@@ -30,6 +30,7 @@ enum {
83456 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
83457 ADDR_NO_RANDOMIZE | \
83458 ADDR_COMPAT_LAYOUT | \
83459+ ADDR_LIMIT_3GB | \
83460 MMAP_PAGE_ZERO)
83461
83462 /*
83463diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
83464index 7530e74..e714828 100644
83465--- a/include/uapi/linux/screen_info.h
83466+++ b/include/uapi/linux/screen_info.h
83467@@ -43,7 +43,8 @@ struct screen_info {
83468 __u16 pages; /* 0x32 */
83469 __u16 vesa_attributes; /* 0x34 */
83470 __u32 capabilities; /* 0x36 */
83471- __u8 _reserved[6]; /* 0x3a */
83472+ __u16 vesapm_size; /* 0x3a */
83473+ __u8 _reserved[4]; /* 0x3c */
83474 } __attribute__((packed));
83475
83476 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
83477diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
83478index 0e011eb..82681b1 100644
83479--- a/include/uapi/linux/swab.h
83480+++ b/include/uapi/linux/swab.h
83481@@ -43,7 +43,7 @@
83482 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
83483 */
83484
83485-static inline __attribute_const__ __u16 __fswab16(__u16 val)
83486+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
83487 {
83488 #ifdef __HAVE_BUILTIN_BSWAP16__
83489 return __builtin_bswap16(val);
83490@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
83491 #endif
83492 }
83493
83494-static inline __attribute_const__ __u32 __fswab32(__u32 val)
83495+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
83496 {
83497 #ifdef __HAVE_BUILTIN_BSWAP32__
83498 return __builtin_bswap32(val);
83499@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
83500 #endif
83501 }
83502
83503-static inline __attribute_const__ __u64 __fswab64(__u64 val)
83504+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
83505 {
83506 #ifdef __HAVE_BUILTIN_BSWAP64__
83507 return __builtin_bswap64(val);
83508diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
83509index 6d67213..552fdd9 100644
83510--- a/include/uapi/linux/sysctl.h
83511+++ b/include/uapi/linux/sysctl.h
83512@@ -155,8 +155,6 @@ enum
83513 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
83514 };
83515
83516-
83517-
83518 /* CTL_VM names: */
83519 enum
83520 {
83521diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
83522index 437f1b0..0eeb38d 100644
83523--- a/include/uapi/linux/videodev2.h
83524+++ b/include/uapi/linux/videodev2.h
83525@@ -1227,7 +1227,7 @@ struct v4l2_ext_control {
83526 union {
83527 __s32 value;
83528 __s64 value64;
83529- char *string;
83530+ char __user *string;
83531 };
83532 } __attribute__ ((packed));
83533
83534diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
83535index e4629b9..6958086 100644
83536--- a/include/uapi/linux/xattr.h
83537+++ b/include/uapi/linux/xattr.h
83538@@ -63,5 +63,9 @@
83539 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
83540 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
83541
83542+/* User namespace */
83543+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
83544+#define XATTR_PAX_FLAGS_SUFFIX "flags"
83545+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
83546
83547 #endif /* _UAPI_LINUX_XATTR_H */
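
The three macros concatenate to the literal attribute name "user.pax.flags",
giving PaX a filesystem-independent place to keep per-binary flags. A sketch
of stamping a binary from userspace; the flag-string encoding is the PaX
userspace tools' convention rather than anything this header defines, so it
is taken verbatim from the command line here:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
        if (argc != 3) {
                fprintf(stderr, "usage: %s <binary> <flags>\n", argv[0]);
                return 1;
        }
        if (setxattr(argv[1], "user.pax.flags", argv[2],
                     strlen(argv[2]), 0) != 0) {
                perror("setxattr");
                return 1;
        }
        return 0;
}
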
83548diff --git a/include/video/udlfb.h b/include/video/udlfb.h
83549index f9466fa..f4e2b81 100644
83550--- a/include/video/udlfb.h
83551+++ b/include/video/udlfb.h
83552@@ -53,10 +53,10 @@ struct dlfb_data {
83553 u32 pseudo_palette[256];
83554 int blank_mode; /*one of FB_BLANK_ */
83555 /* blit-only rendering path metrics, exposed through sysfs */
83556- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
83557- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
83558- atomic_t bytes_sent; /* to usb, after compression including overhead */
83559- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
83560+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
83561+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
83562+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
83563+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
83564 };
83565
83566 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
83567diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
83568index 30f5362..8ed8ac9 100644
83569--- a/include/video/uvesafb.h
83570+++ b/include/video/uvesafb.h
83571@@ -122,6 +122,7 @@ struct uvesafb_par {
83572 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
83573 u8 pmi_setpal; /* PMI for palette changes */
83574 u16 *pmi_base; /* protected mode interface location */
83575+ u8 *pmi_code; /* protected mode code location */
83576 void *pmi_start;
83577 void *pmi_pal;
83578 u8 *vbe_state_orig; /*
83579diff --git a/init/Kconfig b/init/Kconfig
83580index 4e5d96a..93cd8a1 100644
83581--- a/init/Kconfig
83582+++ b/init/Kconfig
83583@@ -1079,6 +1079,7 @@ endif # CGROUPS
83584
83585 config CHECKPOINT_RESTORE
83586 bool "Checkpoint/restore support" if EXPERT
83587+ depends on !GRKERNSEC
83588 default n
83589 help
83590 Enables additional kernel features in a sake of checkpoint/restore.
83591@@ -1550,7 +1551,7 @@ config SLUB_DEBUG
83592
83593 config COMPAT_BRK
83594 bool "Disable heap randomization"
83595- default y
83596+ default n
83597 help
83598 Randomizing heap placement makes heap exploits harder, but it
83599 also breaks ancient binaries (including anything libc5 based).
83600@@ -1838,7 +1839,7 @@ config INIT_ALL_POSSIBLE
83601 config STOP_MACHINE
83602 bool
83603 default y
83604- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
83605+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
83606 help
83607 Need stop_machine() primitive.
83608
83609diff --git a/init/Makefile b/init/Makefile
83610index 7bc47ee..6da2dc7 100644
83611--- a/init/Makefile
83612+++ b/init/Makefile
83613@@ -2,6 +2,9 @@
83614 # Makefile for the linux kernel.
83615 #
83616
83617+ccflags-y := $(GCC_PLUGINS_CFLAGS)
83618+asflags-y := $(GCC_PLUGINS_AFLAGS)
83619+
83620 obj-y := main.o version.o mounts.o
83621 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
83622 obj-y += noinitramfs.o
83623diff --git a/init/do_mounts.c b/init/do_mounts.c
83624index 8e5addc..c96ea61 100644
83625--- a/init/do_mounts.c
83626+++ b/init/do_mounts.c
83627@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
83628 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
83629 {
83630 struct super_block *s;
83631- int err = sys_mount(name, "/root", fs, flags, data);
83632+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
83633 if (err)
83634 return err;
83635
83636- sys_chdir("/root");
83637+ sys_chdir((const char __force_user *)"/root");
83638 s = current->fs->pwd.dentry->d_sb;
83639 ROOT_DEV = s->s_dev;
83640 printk(KERN_INFO
83641@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
83642 va_start(args, fmt);
83643 vsprintf(buf, fmt, args);
83644 va_end(args);
83645- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
83646+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
83647 if (fd >= 0) {
83648 sys_ioctl(fd, FDEJECT, 0);
83649 sys_close(fd);
83650 }
83651 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
83652- fd = sys_open("/dev/console", O_RDWR, 0);
83653+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
83654 if (fd >= 0) {
83655 sys_ioctl(fd, TCGETS, (long)&termios);
83656 termios.c_lflag &= ~ICANON;
83657 sys_ioctl(fd, TCSETSF, (long)&termios);
83658- sys_read(fd, &c, 1);
83659+ sys_read(fd, (char __user *)&c, 1);
83660 termios.c_lflag |= ICANON;
83661 sys_ioctl(fd, TCSETSF, (long)&termios);
83662 sys_close(fd);
83663@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
83664 mount_root();
83665 out:
83666 devtmpfs_mount("dev");
83667- sys_mount(".", "/", NULL, MS_MOVE, NULL);
83668- sys_chroot(".");
83669+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
83670+ sys_chroot((const char __force_user *)".");
83671 }
83672
83673 static bool is_tmpfs;
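
The __force_user casts throughout this file exist because, under
PAX_MEMORY_UDEREF, the __user annotation stops being a sparse-only fiction:
kernel and user address spaces are really split, so early-boot code passing
kernel string literals into syscall entry points must mark each crossing
explicitly. A standalone sketch of the annotation machinery with a stub
syscall; all names are illustrative:

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force        __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user    __force __user

static long my_sys_mkdir(const char __user *path, int mode)
{
        (void)path; (void)mode;         /* stub standing in for sys_mkdir() */
        return 0;
}

int main(void)
{
        /* "/old" is a kernel-space literal in the real code; the explicit
         * cast marks the crossing as audited instead of a checker warning */
        my_sys_mkdir((const char __force_user *)"/old", 0700);
        return 0;
}
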
83674diff --git a/init/do_mounts.h b/init/do_mounts.h
83675index f5b978a..69dbfe8 100644
83676--- a/init/do_mounts.h
83677+++ b/init/do_mounts.h
83678@@ -15,15 +15,15 @@ extern int root_mountflags;
83679
83680 static inline int create_dev(char *name, dev_t dev)
83681 {
83682- sys_unlink(name);
83683- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
83684+ sys_unlink((char __force_user *)name);
83685+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
83686 }
83687
83688 #if BITS_PER_LONG == 32
83689 static inline u32 bstat(char *name)
83690 {
83691 struct stat64 stat;
83692- if (sys_stat64(name, &stat) != 0)
83693+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
83694 return 0;
83695 if (!S_ISBLK(stat.st_mode))
83696 return 0;
83697@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
83698 static inline u32 bstat(char *name)
83699 {
83700 struct stat stat;
83701- if (sys_newstat(name, &stat) != 0)
83702+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
83703 return 0;
83704 if (!S_ISBLK(stat.st_mode))
83705 return 0;
83706diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
83707index 3e0878e..8a9d7a0 100644
83708--- a/init/do_mounts_initrd.c
83709+++ b/init/do_mounts_initrd.c
83710@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
83711 {
83712 sys_unshare(CLONE_FS | CLONE_FILES);
83713 /* stdin/stdout/stderr for /linuxrc */
83714- sys_open("/dev/console", O_RDWR, 0);
83715+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
83716 sys_dup(0);
83717 sys_dup(0);
83718 /* move initrd over / and chdir/chroot in initrd root */
83719- sys_chdir("/root");
83720- sys_mount(".", "/", NULL, MS_MOVE, NULL);
83721- sys_chroot(".");
83722+ sys_chdir((const char __force_user *)"/root");
83723+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
83724+ sys_chroot((const char __force_user *)".");
83725 sys_setsid();
83726 return 0;
83727 }
83728@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
83729 create_dev("/dev/root.old", Root_RAM0);
83730 /* mount initrd on rootfs' /root */
83731 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
83732- sys_mkdir("/old", 0700);
83733- sys_chdir("/old");
83734+ sys_mkdir((const char __force_user *)"/old", 0700);
83735+ sys_chdir((const char __force_user *)"/old");
83736
83737 /* try loading default modules from initrd */
83738 load_default_modules();
83739@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
83740 current->flags &= ~PF_FREEZER_SKIP;
83741
83742 /* move initrd to rootfs' /old */
83743- sys_mount("..", ".", NULL, MS_MOVE, NULL);
83744+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
83745 /* switch root and cwd back to / of rootfs */
83746- sys_chroot("..");
83747+ sys_chroot((const char __force_user *)"..");
83748
83749 if (new_decode_dev(real_root_dev) == Root_RAM0) {
83750- sys_chdir("/old");
83751+ sys_chdir((const char __force_user *)"/old");
83752 return;
83753 }
83754
83755- sys_chdir("/");
83756+ sys_chdir((const char __force_user *)"/");
83757 ROOT_DEV = new_decode_dev(real_root_dev);
83758 mount_root();
83759
83760 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
83761- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
83762+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
83763 if (!error)
83764 printk("okay\n");
83765 else {
83766- int fd = sys_open("/dev/root.old", O_RDWR, 0);
83767+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
83768 if (error == -ENOENT)
83769 printk("/initrd does not exist. Ignored.\n");
83770 else
83771 printk("failed\n");
83772 printk(KERN_NOTICE "Unmounting old root\n");
83773- sys_umount("/old", MNT_DETACH);
83774+ sys_umount((char __force_user *)"/old", MNT_DETACH);
83775 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
83776 if (fd < 0) {
83777 error = fd;
83778@@ -127,11 +127,11 @@ int __init initrd_load(void)
83779 * mounted in the normal path.
83780 */
83781 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
83782- sys_unlink("/initrd.image");
83783+ sys_unlink((const char __force_user *)"/initrd.image");
83784 handle_initrd();
83785 return 1;
83786 }
83787 }
83788- sys_unlink("/initrd.image");
83789+ sys_unlink((const char __force_user *)"/initrd.image");
83790 return 0;
83791 }
83792diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
83793index 8cb6db5..d729f50 100644
83794--- a/init/do_mounts_md.c
83795+++ b/init/do_mounts_md.c
83796@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
83797 partitioned ? "_d" : "", minor,
83798 md_setup_args[ent].device_names);
83799
83800- fd = sys_open(name, 0, 0);
83801+ fd = sys_open((char __force_user *)name, 0, 0);
83802 if (fd < 0) {
83803 printk(KERN_ERR "md: open failed - cannot start "
83804 "array %s\n", name);
83805@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
83806 * array without it
83807 */
83808 sys_close(fd);
83809- fd = sys_open(name, 0, 0);
83810+ fd = sys_open((char __force_user *)name, 0, 0);
83811 sys_ioctl(fd, BLKRRPART, 0);
83812 }
83813 sys_close(fd);
83814@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
83815
83816 wait_for_device_probe();
83817
83818- fd = sys_open("/dev/md0", 0, 0);
83819+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
83820 if (fd >= 0) {
83821 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
83822 sys_close(fd);
83823diff --git a/init/init_task.c b/init/init_task.c
83824index ba0a7f36..2bcf1d5 100644
83825--- a/init/init_task.c
83826+++ b/init/init_task.c
83827@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
83828 * Initial thread structure. Alignment of this is handled by a special
83829 * linker map entry.
83830 */
83831+#ifdef CONFIG_X86
83832+union thread_union init_thread_union __init_task_data;
83833+#else
83834 union thread_union init_thread_union __init_task_data =
83835 { INIT_THREAD_INFO(init_task) };
83836+#endif
83837diff --git a/init/initramfs.c b/init/initramfs.c
83838index a67ef9d..2d17ed9 100644
83839--- a/init/initramfs.c
83840+++ b/init/initramfs.c
83841@@ -84,7 +84,7 @@ static void __init free_hash(void)
83842 }
83843 }
83844
83845-static long __init do_utime(char *filename, time_t mtime)
83846+static long __init do_utime(char __force_user *filename, time_t mtime)
83847 {
83848 struct timespec t[2];
83849
83850@@ -119,7 +119,7 @@ static void __init dir_utime(void)
83851 struct dir_entry *de, *tmp;
83852 list_for_each_entry_safe(de, tmp, &dir_list, list) {
83853 list_del(&de->list);
83854- do_utime(de->name, de->mtime);
83855+ do_utime((char __force_user *)de->name, de->mtime);
83856 kfree(de->name);
83857 kfree(de);
83858 }
83859@@ -281,7 +281,7 @@ static int __init maybe_link(void)
83860 if (nlink >= 2) {
83861 char *old = find_link(major, minor, ino, mode, collected);
83862 if (old)
83863- return (sys_link(old, collected) < 0) ? -1 : 1;
83864+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
83865 }
83866 return 0;
83867 }
83868@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
83869 {
83870 struct stat st;
83871
83872- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
83873+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
83874 if (S_ISDIR(st.st_mode))
83875- sys_rmdir(path);
83876+ sys_rmdir((char __force_user *)path);
83877 else
83878- sys_unlink(path);
83879+ sys_unlink((char __force_user *)path);
83880 }
83881 }
83882
83883@@ -315,7 +315,7 @@ static int __init do_name(void)
83884 int openflags = O_WRONLY|O_CREAT;
83885 if (ml != 1)
83886 openflags |= O_TRUNC;
83887- wfd = sys_open(collected, openflags, mode);
83888+ wfd = sys_open((char __force_user *)collected, openflags, mode);
83889
83890 if (wfd >= 0) {
83891 sys_fchown(wfd, uid, gid);
83892@@ -327,17 +327,17 @@ static int __init do_name(void)
83893 }
83894 }
83895 } else if (S_ISDIR(mode)) {
83896- sys_mkdir(collected, mode);
83897- sys_chown(collected, uid, gid);
83898- sys_chmod(collected, mode);
83899+ sys_mkdir((char __force_user *)collected, mode);
83900+ sys_chown((char __force_user *)collected, uid, gid);
83901+ sys_chmod((char __force_user *)collected, mode);
83902 dir_add(collected, mtime);
83903 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
83904 S_ISFIFO(mode) || S_ISSOCK(mode)) {
83905 if (maybe_link() == 0) {
83906- sys_mknod(collected, mode, rdev);
83907- sys_chown(collected, uid, gid);
83908- sys_chmod(collected, mode);
83909- do_utime(collected, mtime);
83910+ sys_mknod((char __force_user *)collected, mode, rdev);
83911+ sys_chown((char __force_user *)collected, uid, gid);
83912+ sys_chmod((char __force_user *)collected, mode);
83913+ do_utime((char __force_user *)collected, mtime);
83914 }
83915 }
83916 return 0;
83917@@ -346,15 +346,15 @@ static int __init do_name(void)
83918 static int __init do_copy(void)
83919 {
83920 if (count >= body_len) {
83921- sys_write(wfd, victim, body_len);
83922+ sys_write(wfd, (char __force_user *)victim, body_len);
83923 sys_close(wfd);
83924- do_utime(vcollected, mtime);
83925+ do_utime((char __force_user *)vcollected, mtime);
83926 kfree(vcollected);
83927 eat(body_len);
83928 state = SkipIt;
83929 return 0;
83930 } else {
83931- sys_write(wfd, victim, count);
83932+ sys_write(wfd, (char __force_user *)victim, count);
83933 body_len -= count;
83934 eat(count);
83935 return 1;
83936@@ -365,9 +365,9 @@ static int __init do_symlink(void)
83937 {
83938 collected[N_ALIGN(name_len) + body_len] = '\0';
83939 clean_path(collected, 0);
83940- sys_symlink(collected + N_ALIGN(name_len), collected);
83941- sys_lchown(collected, uid, gid);
83942- do_utime(collected, mtime);
83943+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
83944+ sys_lchown((char __force_user *)collected, uid, gid);
83945+ do_utime((char __force_user *)collected, mtime);
83946 state = SkipIt;
83947 next_state = Reset;
83948 return 0;
83949@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
83950 {
83951 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
83952 if (err)
83953- panic(err); /* Failed to decompress INTERNAL initramfs */
83954+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
83955 if (initrd_start) {
83956 #ifdef CONFIG_BLK_DEV_RAM
83957 int fd;
83958diff --git a/init/main.c b/init/main.c
83959index febc511..f0851763 100644
83960--- a/init/main.c
83961+++ b/init/main.c
83962@@ -103,6 +103,8 @@ static inline void mark_rodata_ro(void) { }
83963 extern void tc_init(void);
83964 #endif
83965
83966+extern void grsecurity_init(void);
83967+
83968 /*
83969 * Debug helper: via this flag we know that we are in 'early bootup code'
83970 * where only the boot processor is running with IRQ disabled. This means
83971@@ -164,6 +166,75 @@ static int __init set_reset_devices(char *str)
83972
83973 __setup("reset_devices", set_reset_devices);
83974
83975+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83976+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
83977+static int __init setup_grsec_proc_gid(char *str)
83978+{
83979+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
83980+ return 1;
83981+}
83982+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
83983+#endif
83984+
83985+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
83986+unsigned long pax_user_shadow_base __read_only;
83987+EXPORT_SYMBOL(pax_user_shadow_base);
83988+extern char pax_enter_kernel_user[];
83989+extern char pax_exit_kernel_user[];
83990+#endif
83991+
83992+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
83993+static int __init setup_pax_nouderef(char *str)
83994+{
83995+#ifdef CONFIG_X86_32
83996+ unsigned int cpu;
83997+ struct desc_struct *gdt;
83998+
83999+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
84000+ gdt = get_cpu_gdt_table(cpu);
84001+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
84002+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
84003+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
84004+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
84005+ }
84006+ loadsegment(ds, __KERNEL_DS);
84007+ loadsegment(es, __KERNEL_DS);
84008+ loadsegment(ss, __KERNEL_DS);
84009+#else
84010+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
84011+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
84012+ clone_pgd_mask = ~(pgdval_t)0UL;
84013+ pax_user_shadow_base = 0UL;
84014+ setup_clear_cpu_cap(X86_FEATURE_PCID);
84015+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
84016+#endif
84017+
84018+ return 0;
84019+}
84020+early_param("pax_nouderef", setup_pax_nouderef);
84021+
84022+#ifdef CONFIG_X86_64
84023+static int __init setup_pax_weakuderef(char *str)
84024+{
84025+ if (clone_pgd_mask != ~(pgdval_t)0UL)
84026+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
84027+ return 1;
84028+}
84029+__setup("pax_weakuderef", setup_pax_weakuderef);
84030+#endif
84031+#endif
84032+
84033+#ifdef CONFIG_PAX_SOFTMODE
84034+int pax_softmode;
84035+
84036+static int __init setup_pax_softmode(char *str)
84037+{
84038+ get_option(&str, &pax_softmode);
84039+ return 1;
84040+}
84041+__setup("pax_softmode=", setup_pax_softmode);
84042+#endif
84043+
84044 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
84045 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
84046 static const char *panic_later, *panic_param;
84047@@ -691,25 +762,24 @@ int __init_or_module do_one_initcall(initcall_t fn)
84048 {
84049 int count = preempt_count();
84050 int ret;
84051- char msgbuf[64];
84052+ const char *msg1 = "", *msg2 = "";
84053
84054 if (initcall_debug)
84055 ret = do_one_initcall_debug(fn);
84056 else
84057 ret = fn();
84058
84059- msgbuf[0] = 0;
84060-
84061 if (preempt_count() != count) {
84062- sprintf(msgbuf, "preemption imbalance ");
84063+ msg1 = " preemption imbalance";
84064 preempt_count_set(count);
84065 }
84066 if (irqs_disabled()) {
84067- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
84068+ msg2 = " disabled interrupts";
84069 local_irq_enable();
84070 }
84071- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
84072+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
84073
84074+ add_latent_entropy();
84075 return ret;
84076 }
84077
84078@@ -816,8 +886,8 @@ static int run_init_process(const char *init_filename)
84079 {
84080 argv_init[0] = init_filename;
84081 return do_execve(init_filename,
84082- (const char __user *const __user *)argv_init,
84083- (const char __user *const __user *)envp_init);
84084+ (const char __user *const __force_user *)argv_init,
84085+ (const char __user *const __force_user *)envp_init);
84086 }
84087
84088 static int try_to_run_init_process(const char *init_filename)
84089@@ -834,6 +904,10 @@ static int try_to_run_init_process(const char *init_filename)
84090 return ret;
84091 }
84092
84093+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84094+extern int gr_init_ran;
84095+#endif
84096+
84097 static noinline void __init kernel_init_freeable(void);
84098
84099 static int __ref kernel_init(void *unused)
84100@@ -858,6 +932,11 @@ static int __ref kernel_init(void *unused)
84101 ramdisk_execute_command, ret);
84102 }
84103
84104+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84105+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
84106+ gr_init_ran = 1;
84107+#endif
84108+
84109 /*
84110 * We try each of these until one succeeds.
84111 *
84112@@ -913,7 +992,7 @@ static noinline void __init kernel_init_freeable(void)
84113 do_basic_setup();
84114
84115 /* Open the /dev/console on the rootfs, this should never fail */
84116- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
84117+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
84118 pr_err("Warning: unable to open an initial console.\n");
84119
84120 (void) sys_dup(0);
84121@@ -926,11 +1005,13 @@ static noinline void __init kernel_init_freeable(void)
84122 if (!ramdisk_execute_command)
84123 ramdisk_execute_command = "/init";
84124
84125- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
84126+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
84127 ramdisk_execute_command = NULL;
84128 prepare_namespace();
84129 }
84130
84131+ grsecurity_init();
84132+
84133 /*
84134 * Ok, we have completed the initial bootup, and
84135 * we're essentially up and running. Get rid of the
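
The init/main.c hunk above registers its new switches (grsec_proc_gid=, pax_nouderef, pax_weakuderef, pax_softmode=) through the kernel's stock boot-parameter machinery. A minimal sketch of that pattern, with a made-up "demo=" parameter and demo_gid variable standing in for the real ones:

    #include <linux/init.h>
    #include <linux/kernel.h>

    static int demo_gid;

    static int __init setup_demo(char *str)
    {
            /* str points just past "demo=" on the command line;
             * returning 1 marks the parameter as consumed
             * (early_param() handlers return 0 instead) */
            demo_gid = simple_strtol(str, NULL, 0);
            return 1;
    }
    __setup("demo=", setup_demo);

early_param(), used for pax_nouderef above, differs in that its handlers run much earlier in boot, from parse_early_param(), which is why that handler can still safely rewrite every CPU's GDT.
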
84136diff --git a/ipc/compat.c b/ipc/compat.c
84137index 892f658..e7c6320 100644
84138--- a/ipc/compat.c
84139+++ b/ipc/compat.c
84140@@ -399,7 +399,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
84141 COMPAT_SHMLBA);
84142 if (err < 0)
84143 return err;
84144- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
84145+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
84146 }
84147 case SHMDT:
84148 return sys_shmdt(compat_ptr(ptr));
84149diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
84150index b0e99de..09f385c 100644
84151--- a/ipc/ipc_sysctl.c
84152+++ b/ipc/ipc_sysctl.c
84153@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
84154 static int proc_ipc_dointvec(ctl_table *table, int write,
84155 void __user *buffer, size_t *lenp, loff_t *ppos)
84156 {
84157- struct ctl_table ipc_table;
84158+ ctl_table_no_const ipc_table;
84159
84160 memcpy(&ipc_table, table, sizeof(ipc_table));
84161 ipc_table.data = get_ipc(table);
84162@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
84163 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
84164 void __user *buffer, size_t *lenp, loff_t *ppos)
84165 {
84166- struct ctl_table ipc_table;
84167+ ctl_table_no_const ipc_table;
84168
84169 memcpy(&ipc_table, table, sizeof(ipc_table));
84170 ipc_table.data = get_ipc(table);
84171@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
84172 static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
84173 void __user *buffer, size_t *lenp, loff_t *ppos)
84174 {
84175- struct ctl_table ipc_table;
84176+ ctl_table_no_const ipc_table;
84177 size_t lenp_bef = *lenp;
84178 int rc;
84179
84180@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
84181 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
84182 void __user *buffer, size_t *lenp, loff_t *ppos)
84183 {
84184- struct ctl_table ipc_table;
84185+ ctl_table_no_const ipc_table;
84186 memcpy(&ipc_table, table, sizeof(ipc_table));
84187 ipc_table.data = get_ipc(table);
84188
84189@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
84190 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
84191 void __user *buffer, size_t *lenp, loff_t *ppos)
84192 {
84193- struct ctl_table ipc_table;
84194+ ctl_table_no_const ipc_table;
84195 size_t lenp_bef = *lenp;
84196 int oldval;
84197 int rc;
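
The ipc/ipc_sysctl.c substitutions (and the identical ones in ipc/mq_sysctl.c just below) exist because the PaX constify plugin marks struct ctl_table, via a __do_const typedef elsewhere in this patch, as read-only after initialization. A handler that wants a throwaway copy with a rewritten .data pointer therefore needs the explicitly writable variant. A minimal sketch of the idiom, assuming the ctl_table_no_const typedef this patch introduces (get_demo_data() is a hypothetical stand-in for the per-namespace lookup):

    #include <linux/sysctl.h>
    #include <linux/string.h>

    static int demo_dointvec(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            ctl_table_no_const tmp;    /* writable on-stack copy */

            memcpy(&tmp, table, sizeof(tmp));
            tmp.data = get_demo_data(table);    /* hypothetical lookup */
            return proc_dointvec(&tmp, write, buffer, lenp, ppos);
    }
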
84198diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
84199index 5bb8bfe..a38ec05 100644
84200--- a/ipc/mq_sysctl.c
84201+++ b/ipc/mq_sysctl.c
84202@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
84203 static int proc_mq_dointvec(ctl_table *table, int write,
84204 void __user *buffer, size_t *lenp, loff_t *ppos)
84205 {
84206- struct ctl_table mq_table;
84207+ ctl_table_no_const mq_table;
84208 memcpy(&mq_table, table, sizeof(mq_table));
84209 mq_table.data = get_mq(table);
84210
84211@@ -35,7 +35,7 @@ static int proc_mq_dointvec(ctl_table *table, int write,
84212 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
84213 void __user *buffer, size_t *lenp, loff_t *ppos)
84214 {
84215- struct ctl_table mq_table;
84216+ ctl_table_no_const mq_table;
84217 memcpy(&mq_table, table, sizeof(mq_table));
84218 mq_table.data = get_mq(table);
84219
84220diff --git a/ipc/mqueue.c b/ipc/mqueue.c
84221index b8d4aed..96a4fe8 100644
84222--- a/ipc/mqueue.c
84223+++ b/ipc/mqueue.c
84224@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
84225 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
84226 info->attr.mq_msgsize);
84227
84228+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
84229 spin_lock(&mq_lock);
84230 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
84231 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
84232diff --git a/ipc/msg.c b/ipc/msg.c
84233index 52770bf..1c60a6f 100644
84234--- a/ipc/msg.c
84235+++ b/ipc/msg.c
84236@@ -297,18 +297,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
84237 return security_msg_queue_associate(msq, msgflg);
84238 }
84239
84240+static struct ipc_ops msg_ops = {
84241+ .getnew = newque,
84242+ .associate = msg_security,
84243+ .more_checks = NULL
84244+};
84245+
84246 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
84247 {
84248 struct ipc_namespace *ns;
84249- struct ipc_ops msg_ops;
84250 struct ipc_params msg_params;
84251
84252 ns = current->nsproxy->ipc_ns;
84253
84254- msg_ops.getnew = newque;
84255- msg_ops.associate = msg_security;
84256- msg_ops.more_checks = NULL;
84257-
84258 msg_params.key = key;
84259 msg_params.flg = msgflg;
84260
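
ipc/msg.c, and ipc/sem.c and ipc/shm.c after it, all get the same treatment: an ipc_ops function-pointer table that used to be rebuilt on the stack for every syscall becomes a single file-scope static with designated initializers. That drops a few stores from the hot path and, more to the point for this patch, puts the function pointers somewhere the constify plugin can make read-only. Schematically:

    /* before: filled in on every msgget() call */
    struct ipc_ops ops;
    ops.getnew      = newque;
    ops.associate   = msg_security;
    ops.more_checks = NULL;

    /* after: one compile-time instance, eligible for constification */
    static struct ipc_ops msg_ops = {
            .getnew      = newque,
            .associate   = msg_security,
            .more_checks = NULL,
    };
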
84261diff --git a/ipc/sem.c b/ipc/sem.c
84262index db9d241..bc8427c 100644
84263--- a/ipc/sem.c
84264+++ b/ipc/sem.c
84265@@ -562,10 +562,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
84266 return 0;
84267 }
84268
84269+static struct ipc_ops sem_ops = {
84270+ .getnew = newary,
84271+ .associate = sem_security,
84272+ .more_checks = sem_more_checks
84273+};
84274+
84275 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
84276 {
84277 struct ipc_namespace *ns;
84278- struct ipc_ops sem_ops;
84279 struct ipc_params sem_params;
84280
84281 ns = current->nsproxy->ipc_ns;
84282@@ -573,10 +578,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
84283 if (nsems < 0 || nsems > ns->sc_semmsl)
84284 return -EINVAL;
84285
84286- sem_ops.getnew = newary;
84287- sem_ops.associate = sem_security;
84288- sem_ops.more_checks = sem_more_checks;
84289-
84290 sem_params.key = key;
84291 sem_params.flg = semflg;
84292 sem_params.u.nsems = nsems;
84293diff --git a/ipc/shm.c b/ipc/shm.c
84294index 7a51443..3a257d8 100644
84295--- a/ipc/shm.c
84296+++ b/ipc/shm.c
84297@@ -72,6 +72,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
84298 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
84299 #endif
84300
84301+#ifdef CONFIG_GRKERNSEC
84302+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84303+ const time_t shm_createtime, const kuid_t cuid,
84304+ const int shmid);
84305+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84306+ const time_t shm_createtime);
84307+#endif
84308+
84309 void shm_init_ns(struct ipc_namespace *ns)
84310 {
84311 ns->shm_ctlmax = SHMMAX;
84312@@ -554,6 +562,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
84313 shp->shm_lprid = 0;
84314 shp->shm_atim = shp->shm_dtim = 0;
84315 shp->shm_ctim = get_seconds();
84316+#ifdef CONFIG_GRKERNSEC
84317+ {
84318+ struct timespec timeval;
84319+ do_posix_clock_monotonic_gettime(&timeval);
84320+
84321+ shp->shm_createtime = timeval.tv_sec;
84322+ }
84323+#endif
84324 shp->shm_segsz = size;
84325 shp->shm_nattch = 0;
84326 shp->shm_file = file;
84327@@ -607,18 +623,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
84328 return 0;
84329 }
84330
84331+static struct ipc_ops shm_ops = {
84332+ .getnew = newseg,
84333+ .associate = shm_security,
84334+ .more_checks = shm_more_checks
84335+};
84336+
84337 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
84338 {
84339 struct ipc_namespace *ns;
84340- struct ipc_ops shm_ops;
84341 struct ipc_params shm_params;
84342
84343 ns = current->nsproxy->ipc_ns;
84344
84345- shm_ops.getnew = newseg;
84346- shm_ops.associate = shm_security;
84347- shm_ops.more_checks = shm_more_checks;
84348-
84349 shm_params.key = key;
84350 shm_params.flg = shmflg;
84351 shm_params.u.size = size;
84352@@ -1089,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
84353 f_mode = FMODE_READ | FMODE_WRITE;
84354 }
84355 if (shmflg & SHM_EXEC) {
84356+
84357+#ifdef CONFIG_PAX_MPROTECT
84358+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
84359+ goto out;
84360+#endif
84361+
84362 prot |= PROT_EXEC;
84363 acc_mode |= S_IXUGO;
84364 }
84365@@ -1113,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
84366 if (err)
84367 goto out_unlock;
84368
84369+#ifdef CONFIG_GRKERNSEC
84370+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
84371+ shp->shm_perm.cuid, shmid) ||
84372+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
84373+ err = -EACCES;
84374+ goto out_unlock;
84375+ }
84376+#endif
84377+
84378 ipc_lock_object(&shp->shm_perm);
84379
84380 /* check if shm_destroy() is tearing down shp */
84381@@ -1125,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
84382 path = shp->shm_file->f_path;
84383 path_get(&path);
84384 shp->shm_nattch++;
84385+#ifdef CONFIG_GRKERNSEC
84386+ shp->shm_lapid = current->pid;
84387+#endif
84388 size = i_size_read(path.dentry->d_inode);
84389 ipc_unlock_object(&shp->shm_perm);
84390 rcu_read_unlock();
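
Two user-visible behaviours fall out of the ipc/shm.c hunk: an MPROTECT-restricted task can no longer attach a SysV segment with SHM_EXEC, and the gr_handle_shmat()/gr_chroot_shmat() hooks can veto an attach with -EACCES based on the creator pid and the segment's creation time. A small userspace probe for the first effect (illustrative only; whether the attach fails depends on the task's PaX flags):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    #ifndef SHM_EXEC
    #define SHM_EXEC 0100000    /* value from include/uapi/linux/shm.h */
    #endif

    int main(void)
    {
            int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
            if (id < 0) { perror("shmget"); return 1; }

            /* SHM_EXEC requests PROT_EXEC; under MF_PAX_MPROTECT the
             * patched do_shmat() bails out before any attach happens */
            void *p = shmat(id, NULL, SHM_EXEC);
            if (p == (void *)-1)
                    printf("shmat(SHM_EXEC) refused: %s\n", strerror(errno));
            else
                    printf("executable attach succeeded at %p\n", p);

            shmctl(id, IPC_RMID, NULL);
            return 0;
    }
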
84391diff --git a/ipc/util.c b/ipc/util.c
84392index 3ae17a4..d67c32f 100644
84393--- a/ipc/util.c
84394+++ b/ipc/util.c
84395@@ -71,6 +71,8 @@ struct ipc_proc_iface {
84396 int (*show)(struct seq_file *, void *);
84397 };
84398
84399+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
84400+
84401 static void ipc_memory_notifier(struct work_struct *work)
84402 {
84403 ipcns_notify(IPCNS_MEMCHANGED);
84404@@ -558,6 +560,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
84405 granted_mode >>= 6;
84406 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
84407 granted_mode >>= 3;
84408+
84409+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
84410+ return -1;
84411+
84412 /* is there some bit set in requested_mode but not in granted_mode? */
84413 if ((requested_mode & ~granted_mode & 0007) &&
84414 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
84415diff --git a/kernel/acct.c b/kernel/acct.c
84416index 8d6e145..33e0b1e 100644
84417--- a/kernel/acct.c
84418+++ b/kernel/acct.c
84419@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
84420 */
84421 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
84422 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
84423- file->f_op->write(file, (char *)&ac,
84424+ file->f_op->write(file, (char __force_user *)&ac,
84425 sizeof(acct_t), &file->f_pos);
84426 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
84427 set_fs(fs);
84428diff --git a/kernel/audit.c b/kernel/audit.c
84429index 15ec13a..986322e 100644
84430--- a/kernel/audit.c
84431+++ b/kernel/audit.c
84432@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
84433 3) suppressed due to audit_rate_limit
84434 4) suppressed due to audit_backlog_limit
84435 */
84436-static atomic_t audit_lost = ATOMIC_INIT(0);
84437+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
84438
84439 /* The netlink socket. */
84440 static struct sock *audit_sock;
84441@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
84442 unsigned long now;
84443 int print;
84444
84445- atomic_inc(&audit_lost);
84446+ atomic_inc_unchecked(&audit_lost);
84447
84448 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
84449
84450@@ -270,7 +270,7 @@ void audit_log_lost(const char *message)
84451 printk(KERN_WARNING
84452 "audit: audit_lost=%d audit_rate_limit=%d "
84453 "audit_backlog_limit=%d\n",
84454- atomic_read(&audit_lost),
84455+ atomic_read_unchecked(&audit_lost),
84456 audit_rate_limit,
84457 audit_backlog_limit);
84458 audit_panic(message);
84459@@ -766,7 +766,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
84460 status_set.pid = audit_pid;
84461 status_set.rate_limit = audit_rate_limit;
84462 status_set.backlog_limit = audit_backlog_limit;
84463- status_set.lost = atomic_read(&audit_lost);
84464+ status_set.lost = atomic_read_unchecked(&audit_lost);
84465 status_set.backlog = skb_queue_len(&audit_skb_queue);
84466 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
84467 &status_set, sizeof(status_set));
84468@@ -1359,7 +1359,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
84469 int i, avail, new_len;
84470 unsigned char *ptr;
84471 struct sk_buff *skb;
84472- static const unsigned char *hex = "0123456789ABCDEF";
84473+ static const unsigned char hex[] = "0123456789ABCDEF";
84474
84475 if (!ab)
84476 return;
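
The audit_lost conversion is the standard PaX REFCOUNT split that recurs all through this patch: plain atomic_t acquires overflow detection (a wrap, the classic first step in refcount use-after-free exploits, traps instead of silently wrapping), so counters that are pure statistics and are allowed to wrap move to atomic_unchecked_t and its _unchecked accessors. A minimal sketch, assuming the PaX-provided type:

    #include <linux/atomic.h>

    /* a real reference count stays atomic_t, so an overflow traps */
    static atomic_t demo_refcnt = ATOMIC_INIT(1);

    /* a statistic may wrap harmlessly, so it opts out of the check */
    static atomic_unchecked_t demo_lost = ATOMIC_INIT(0);

    static void demo_drop_event(void)
    {
            atomic_inc_unchecked(&demo_lost);       /* never traps */
    }

    static int demo_lost_count(void)
    {
            return atomic_read_unchecked(&demo_lost);
    }

The hex[] change in the same file is separate tidying: turning the string pointer into an array removes a needless writable pointer from kernel data.
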
84477diff --git a/kernel/auditsc.c b/kernel/auditsc.c
84478index ff32843..27fc708 100644
84479--- a/kernel/auditsc.c
84480+++ b/kernel/auditsc.c
84481@@ -1945,7 +1945,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
84482 }
84483
84484 /* global counter which is incremented every time something logs in */
84485-static atomic_t session_id = ATOMIC_INIT(0);
84486+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
84487
84488 static int audit_set_loginuid_perm(kuid_t loginuid)
84489 {
84490@@ -2011,7 +2011,7 @@ int audit_set_loginuid(kuid_t loginuid)
84491
84492 /* are we setting or clearing? */
84493 if (uid_valid(loginuid))
84494- sessionid = atomic_inc_return(&session_id);
84495+ sessionid = atomic_inc_return_unchecked(&session_id);
84496
84497 task->sessionid = sessionid;
84498 task->loginuid = loginuid;
84499diff --git a/kernel/capability.c b/kernel/capability.c
84500index 4e66bf9..cdccecf 100644
84501--- a/kernel/capability.c
84502+++ b/kernel/capability.c
84503@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
84504 * before modification is attempted and the application
84505 * fails.
84506 */
84507+ if (tocopy > ARRAY_SIZE(kdata))
84508+ return -EFAULT;
84509+
84510 if (copy_to_user(dataptr, kdata, tocopy
84511 * sizeof(struct __user_cap_data_struct))) {
84512 return -EFAULT;
84513@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
84514 int ret;
84515
84516 rcu_read_lock();
84517- ret = security_capable(__task_cred(t), ns, cap);
84518+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
84519+ gr_task_is_capable(t, __task_cred(t), cap);
84520 rcu_read_unlock();
84521
84522- return (ret == 0);
84523+ return ret;
84524 }
84525
84526 /**
84527@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
84528 int ret;
84529
84530 rcu_read_lock();
84531- ret = security_capable_noaudit(__task_cred(t), ns, cap);
84532+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
84533 rcu_read_unlock();
84534
84535- return (ret == 0);
84536+ return ret;
84537 }
84538
84539 /**
84540@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
84541 BUG();
84542 }
84543
84544- if (security_capable(current_cred(), ns, cap) == 0) {
84545+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
84546 current->flags |= PF_SUPERPRIV;
84547 return true;
84548 }
84549@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
84550 }
84551 EXPORT_SYMBOL(ns_capable);
84552
84553+bool ns_capable_nolog(struct user_namespace *ns, int cap)
84554+{
84555+ if (unlikely(!cap_valid(cap))) {
84556+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
84557+ BUG();
84558+ }
84559+
84560+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
84561+ current->flags |= PF_SUPERPRIV;
84562+ return true;
84563+ }
84564+ return false;
84565+}
84566+EXPORT_SYMBOL(ns_capable_nolog);
84567+
84568 /**
84569 * file_ns_capable - Determine if the file's opener had a capability in effect
84570 * @file: The file we want to check
84571@@ -432,6 +451,12 @@ bool capable(int cap)
84572 }
84573 EXPORT_SYMBOL(capable);
84574
84575+bool capable_nolog(int cap)
84576+{
84577+ return ns_capable_nolog(&init_user_ns, cap);
84578+}
84579+EXPORT_SYMBOL(capable_nolog);
84580+
84581 /**
84582 * inode_capable - Check superior capability over inode
84583 * @inode: The inode in question
84584@@ -453,3 +478,11 @@ bool inode_capable(const struct inode *inode, int cap)
84585 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
84586 }
84587 EXPORT_SYMBOL(inode_capable);
84588+
84589+bool inode_capable_nolog(const struct inode *inode, int cap)
84590+{
84591+ struct user_namespace *ns = current_user_ns();
84592+
84593+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
84594+}
84595+EXPORT_SYMBOL(inode_capable_nolog);
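
The _nolog capability family added here mirrors the audited one: ns_capable_nolog()/capable_nolog() go through security_capable_noaudit() plus the grsecurity gr_is_capable_nolog() hook, still set PF_SUPERPRIV, but generate no audit or RBAC log entry. They are meant for hot or chatty internal checks where a denial is normal and logging it would be noise. A sketch of the call-site split (demo_may_tune() is invented for illustration):

    #include <linux/capability.h>

    static bool demo_may_tune(void)
    {
            /* audited: a denial here may be logged */
            if (capable(CAP_SYS_ADMIN))
                    return true;

            /* silent variant from this patch: same semantics, no log */
            return capable_nolog(CAP_SYS_NICE);
    }
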
84596diff --git a/kernel/cgroup.c b/kernel/cgroup.c
84597index 271acd8..54b70fe 100644
84598--- a/kernel/cgroup.c
84599+++ b/kernel/cgroup.c
84600@@ -5609,7 +5609,7 @@ static int cgroup_css_links_read(struct cgroup_subsys_state *css,
84601 struct css_set *cset = link->cset;
84602 struct task_struct *task;
84603 int count = 0;
84604- seq_printf(seq, "css_set %p\n", cset);
84605+ seq_printf(seq, "css_set %pK\n", cset);
84606 list_for_each_entry(task, &cset->tasks, cg_list) {
84607 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
84608 seq_puts(seq, " ...\n");
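
The one-character change in kernel/cgroup.c swaps %p for %pK, the kernel's restricted pointer format: %pK consults the kernel.kptr_restrict sysctl and prints zeros to readers who lack the privilege to learn kernel addresses, which is what GRKERNSEC_HIDESYM cares about. Usage is a drop-in substitution:

    #include <linux/seq_file.h>

    static void demo_show(struct seq_file *m, const void *obj)
    {
            /* %p would hand the raw address to any reader;
             * %pK is censored according to kptr_restrict */
            seq_printf(m, "object %pK\n", obj);
    }
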
84609diff --git a/kernel/compat.c b/kernel/compat.c
84610index 0a09e48..b46b3d78 100644
84611--- a/kernel/compat.c
84612+++ b/kernel/compat.c
84613@@ -13,6 +13,7 @@
84614
84615 #include <linux/linkage.h>
84616 #include <linux/compat.h>
84617+#include <linux/module.h>
84618 #include <linux/errno.h>
84619 #include <linux/time.h>
84620 #include <linux/signal.h>
84621@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
84622 mm_segment_t oldfs;
84623 long ret;
84624
84625- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
84626+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
84627 oldfs = get_fs();
84628 set_fs(KERNEL_DS);
84629 ret = hrtimer_nanosleep_restart(restart);
84630@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
84631 oldfs = get_fs();
84632 set_fs(KERNEL_DS);
84633 ret = hrtimer_nanosleep(&tu,
84634- rmtp ? (struct timespec __user *)&rmt : NULL,
84635+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
84636 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
84637 set_fs(oldfs);
84638
84639@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
84640 mm_segment_t old_fs = get_fs();
84641
84642 set_fs(KERNEL_DS);
84643- ret = sys_sigpending((old_sigset_t __user *) &s);
84644+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
84645 set_fs(old_fs);
84646 if (ret == 0)
84647 ret = put_user(s, set);
84648@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
84649 mm_segment_t old_fs = get_fs();
84650
84651 set_fs(KERNEL_DS);
84652- ret = sys_old_getrlimit(resource, &r);
84653+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
84654 set_fs(old_fs);
84655
84656 if (!ret) {
84657@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
84658 set_fs (KERNEL_DS);
84659 ret = sys_wait4(pid,
84660 (stat_addr ?
84661- (unsigned int __user *) &status : NULL),
84662- options, (struct rusage __user *) &r);
84663+ (unsigned int __force_user *) &status : NULL),
84664+ options, (struct rusage __force_user *) &r);
84665 set_fs (old_fs);
84666
84667 if (ret > 0) {
84668@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
84669 memset(&info, 0, sizeof(info));
84670
84671 set_fs(KERNEL_DS);
84672- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
84673- uru ? (struct rusage __user *)&ru : NULL);
84674+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
84675+ uru ? (struct rusage __force_user *)&ru : NULL);
84676 set_fs(old_fs);
84677
84678 if ((ret < 0) || (info.si_signo == 0))
84679@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
84680 oldfs = get_fs();
84681 set_fs(KERNEL_DS);
84682 err = sys_timer_settime(timer_id, flags,
84683- (struct itimerspec __user *) &newts,
84684- (struct itimerspec __user *) &oldts);
84685+ (struct itimerspec __force_user *) &newts,
84686+ (struct itimerspec __force_user *) &oldts);
84687 set_fs(oldfs);
84688 if (!err && old && put_compat_itimerspec(old, &oldts))
84689 return -EFAULT;
84690@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
84691 oldfs = get_fs();
84692 set_fs(KERNEL_DS);
84693 err = sys_timer_gettime(timer_id,
84694- (struct itimerspec __user *) &ts);
84695+ (struct itimerspec __force_user *) &ts);
84696 set_fs(oldfs);
84697 if (!err && put_compat_itimerspec(setting, &ts))
84698 return -EFAULT;
84699@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
84700 oldfs = get_fs();
84701 set_fs(KERNEL_DS);
84702 err = sys_clock_settime(which_clock,
84703- (struct timespec __user *) &ts);
84704+ (struct timespec __force_user *) &ts);
84705 set_fs(oldfs);
84706 return err;
84707 }
84708@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
84709 oldfs = get_fs();
84710 set_fs(KERNEL_DS);
84711 err = sys_clock_gettime(which_clock,
84712- (struct timespec __user *) &ts);
84713+ (struct timespec __force_user *) &ts);
84714 set_fs(oldfs);
84715 if (!err && put_compat_timespec(&ts, tp))
84716 return -EFAULT;
84717@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
84718
84719 oldfs = get_fs();
84720 set_fs(KERNEL_DS);
84721- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
84722+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
84723 set_fs(oldfs);
84724
84725 err = compat_put_timex(utp, &txc);
84726@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
84727 oldfs = get_fs();
84728 set_fs(KERNEL_DS);
84729 err = sys_clock_getres(which_clock,
84730- (struct timespec __user *) &ts);
84731+ (struct timespec __force_user *) &ts);
84732 set_fs(oldfs);
84733 if (!err && tp && put_compat_timespec(&ts, tp))
84734 return -EFAULT;
84735@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
84736 long err;
84737 mm_segment_t oldfs;
84738 struct timespec tu;
84739- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
84740+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
84741
84742- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
84743+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
84744 oldfs = get_fs();
84745 set_fs(KERNEL_DS);
84746 err = clock_nanosleep_restart(restart);
84747@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
84748 oldfs = get_fs();
84749 set_fs(KERNEL_DS);
84750 err = sys_clock_nanosleep(which_clock, flags,
84751- (struct timespec __user *) &in,
84752- (struct timespec __user *) &out);
84753+ (struct timespec __force_user *) &in,
84754+ (struct timespec __force_user *) &out);
84755 set_fs(oldfs);
84756
84757 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
84758@@ -1128,7 +1129,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
84759 mm_segment_t old_fs = get_fs();
84760
84761 set_fs(KERNEL_DS);
84762- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
84763+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
84764 set_fs(old_fs);
84765 if (put_compat_timespec(&t, interval))
84766 return -EFAULT;
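
Almost all of the kernel/compat.c churn is one annotation repeated: a compat wrapper widens the address limit with set_fs(KERNEL_DS), calls a syscall that expects __user pointers, and hands it a kernel-stack buffer. Under this patch's stricter sparse typing (and UDEREF), that cast must be spelled __force_user to mark it as deliberate. The full idiom, condensed into one sketch that mirrors compat_sys_clock_gettime above:

    #include <linux/uaccess.h>
    #include <linux/syscalls.h>
    #include <linux/compat.h>

    static long demo_compat_gettime(clockid_t which,
                                    struct compat_timespec __user *utp)
    {
            struct timespec ts;
            mm_segment_t oldfs = get_fs();
            long err;

            set_fs(KERNEL_DS);      /* uaccess now accepts kernel addresses */
            err = sys_clock_gettime(which,
                                    (struct timespec __force_user *)&ts);
            set_fs(oldfs);          /* always restore the saved limit */

            if (!err && put_compat_timespec(&ts, utp))
                    return -EFAULT;
            return err;
    }
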
84767diff --git a/kernel/configs.c b/kernel/configs.c
84768index c18b1f1..b9a0132 100644
84769--- a/kernel/configs.c
84770+++ b/kernel/configs.c
84771@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
84772 struct proc_dir_entry *entry;
84773
84774 /* create the current config file */
84775+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
84776+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
84777+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
84778+ &ikconfig_file_ops);
84779+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84780+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
84781+ &ikconfig_file_ops);
84782+#endif
84783+#else
84784 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
84785 &ikconfig_file_ops);
84786+#endif
84787+
84788 if (!entry)
84789 return -ENOMEM;
84790
84791diff --git a/kernel/cred.c b/kernel/cred.c
84792index e0573a4..3874e41 100644
84793--- a/kernel/cred.c
84794+++ b/kernel/cred.c
84795@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
84796 validate_creds(cred);
84797 alter_cred_subscribers(cred, -1);
84798 put_cred(cred);
84799+
84800+#ifdef CONFIG_GRKERNSEC_SETXID
84801+ cred = (struct cred *) tsk->delayed_cred;
84802+ if (cred != NULL) {
84803+ tsk->delayed_cred = NULL;
84804+ validate_creds(cred);
84805+ alter_cred_subscribers(cred, -1);
84806+ put_cred(cred);
84807+ }
84808+#endif
84809 }
84810
84811 /**
84812@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
84813 * Always returns 0 thus allowing this function to be tail-called at the end
84814 * of, say, sys_setgid().
84815 */
84816-int commit_creds(struct cred *new)
84817+static int __commit_creds(struct cred *new)
84818 {
84819 struct task_struct *task = current;
84820 const struct cred *old = task->real_cred;
84821@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
84822
84823 get_cred(new); /* we will require a ref for the subj creds too */
84824
84825+ gr_set_role_label(task, new->uid, new->gid);
84826+
84827 /* dumpability changes */
84828 if (!uid_eq(old->euid, new->euid) ||
84829 !gid_eq(old->egid, new->egid) ||
84830@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
84831 put_cred(old);
84832 return 0;
84833 }
84834+#ifdef CONFIG_GRKERNSEC_SETXID
84835+extern int set_user(struct cred *new);
84836+
84837+void gr_delayed_cred_worker(void)
84838+{
84839+ const struct cred *new = current->delayed_cred;
84840+ struct cred *ncred;
84841+
84842+ current->delayed_cred = NULL;
84843+
84844+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
84845+ // from doing get_cred on it when queueing this
84846+ put_cred(new);
84847+ return;
84848+ } else if (new == NULL)
84849+ return;
84850+
84851+ ncred = prepare_creds();
84852+ if (!ncred)
84853+ goto die;
84854+ // uids
84855+ ncred->uid = new->uid;
84856+ ncred->euid = new->euid;
84857+ ncred->suid = new->suid;
84858+ ncred->fsuid = new->fsuid;
84859+ // gids
84860+ ncred->gid = new->gid;
84861+ ncred->egid = new->egid;
84862+ ncred->sgid = new->sgid;
84863+ ncred->fsgid = new->fsgid;
84864+ // groups
84865+ if (set_groups(ncred, new->group_info) < 0) {
84866+ abort_creds(ncred);
84867+ goto die;
84868+ }
84869+ // caps
84870+ ncred->securebits = new->securebits;
84871+ ncred->cap_inheritable = new->cap_inheritable;
84872+ ncred->cap_permitted = new->cap_permitted;
84873+ ncred->cap_effective = new->cap_effective;
84874+ ncred->cap_bset = new->cap_bset;
84875+
84876+ if (set_user(ncred)) {
84877+ abort_creds(ncred);
84878+ goto die;
84879+ }
84880+
84881+ // from doing get_cred on it when queueing this
84882+ put_cred(new);
84883+
84884+ __commit_creds(ncred);
84885+ return;
84886+die:
84887+ // from doing get_cred on it when queueing this
84888+ put_cred(new);
84889+ do_group_exit(SIGKILL);
84890+}
84891+#endif
84892+
84893+int commit_creds(struct cred *new)
84894+{
84895+#ifdef CONFIG_GRKERNSEC_SETXID
84896+ int ret;
84897+ int schedule_it = 0;
84898+ struct task_struct *t;
84899+
84900+ /* we won't get called with tasklist_lock held for writing
84901+ and interrupts disabled as the cred struct in that case is
84902+ init_cred
84903+ */
84904+ if (grsec_enable_setxid && !current_is_single_threaded() &&
84905+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
84906+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
84907+ schedule_it = 1;
84908+ }
84909+ ret = __commit_creds(new);
84910+ if (schedule_it) {
84911+ rcu_read_lock();
84912+ read_lock(&tasklist_lock);
84913+ for (t = next_thread(current); t != current;
84914+ t = next_thread(t)) {
84915+ if (t->delayed_cred == NULL) {
84916+ t->delayed_cred = get_cred(new);
84917+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
84918+ set_tsk_need_resched(t);
84919+ }
84920+ }
84921+ read_unlock(&tasklist_lock);
84922+ rcu_read_unlock();
84923+ }
84924+ return ret;
84925+#else
84926+ return __commit_creds(new);
84927+#endif
84928+}
84929+
84930 EXPORT_SYMBOL(commit_creds);
84931
84932 /**
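
The commit_creds() rework is the heart of GRKERNSEC_SETXID: when a multithreaded root process drops to a non-root uid, the caller cannot safely rewrite its siblings' credentials from its own context, so it parks the new cred on each thread, raises a thread flag, and forces a reschedule; each thread then applies the change itself at a safe point. Reduced to a skeleton (TIF_GRSEC_SETXID and gr_delayed_cred_worker() are this patch's additions; the flag is actually checked on the syscall/resume path elsewhere in the patch):

    /* caller: tag every other thread in the group */
    for (t = next_thread(current); t != current; t = next_thread(t)) {
            if (t->delayed_cred == NULL) {
                    t->delayed_cred = get_cred(new);  /* one ref per thread */
                    set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
                    set_tsk_need_resched(t);          /* visit the flag soon */
            }
    }

    /* each tagged thread, at a safe point: */
    if (test_and_clear_tsk_thread_flag(current, TIF_GRSEC_SETXID))
            gr_delayed_cred_worker();   /* rebuilds and commits the cred */
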
84933diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
84934index 7d2f35e..1bafcd0 100644
84935--- a/kernel/debug/debug_core.c
84936+++ b/kernel/debug/debug_core.c
84937@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
84938 */
84939 static atomic_t masters_in_kgdb;
84940 static atomic_t slaves_in_kgdb;
84941-static atomic_t kgdb_break_tasklet_var;
84942+static atomic_unchecked_t kgdb_break_tasklet_var;
84943 atomic_t kgdb_setting_breakpoint;
84944
84945 struct task_struct *kgdb_usethread;
84946@@ -133,7 +133,7 @@ int kgdb_single_step;
84947 static pid_t kgdb_sstep_pid;
84948
84949 /* to keep track of the CPU which is doing the single stepping*/
84950-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
84951+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
84952
84953 /*
84954 * If you are debugging a problem where roundup (the collection of
84955@@ -541,7 +541,7 @@ return_normal:
84956 * kernel will only try for the value of sstep_tries before
84957 * giving up and continuing on.
84958 */
84959- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
84960+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
84961 (kgdb_info[cpu].task &&
84962 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
84963 atomic_set(&kgdb_active, -1);
84964@@ -639,8 +639,8 @@ cpu_master_loop:
84965 }
84966
84967 kgdb_restore:
84968- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
84969- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
84970+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
84971+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
84972 if (kgdb_info[sstep_cpu].task)
84973 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
84974 else
84975@@ -916,18 +916,18 @@ static void kgdb_unregister_callbacks(void)
84976 static void kgdb_tasklet_bpt(unsigned long ing)
84977 {
84978 kgdb_breakpoint();
84979- atomic_set(&kgdb_break_tasklet_var, 0);
84980+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
84981 }
84982
84983 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
84984
84985 void kgdb_schedule_breakpoint(void)
84986 {
84987- if (atomic_read(&kgdb_break_tasklet_var) ||
84988+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
84989 atomic_read(&kgdb_active) != -1 ||
84990 atomic_read(&kgdb_setting_breakpoint))
84991 return;
84992- atomic_inc(&kgdb_break_tasklet_var);
84993+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
84994 tasklet_schedule(&kgdb_tasklet_breakpoint);
84995 }
84996 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
84997diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
84998index 0b097c8..11dd5c5 100644
84999--- a/kernel/debug/kdb/kdb_main.c
85000+++ b/kernel/debug/kdb/kdb_main.c
85001@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
85002 continue;
85003
85004 kdb_printf("%-20s%8u 0x%p ", mod->name,
85005- mod->core_size, (void *)mod);
85006+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
85007 #ifdef CONFIG_MODULE_UNLOAD
85008 kdb_printf("%4ld ", module_refcount(mod));
85009 #endif
85010@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
85011 kdb_printf(" (Loading)");
85012 else
85013 kdb_printf(" (Live)");
85014- kdb_printf(" 0x%p", mod->module_core);
85015+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
85016
85017 #ifdef CONFIG_MODULE_UNLOAD
85018 {
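
Both the kdb hunk above and the gcov hunk further down are fallout from PaX KERNEXEC splitting each module's single core region (module_core / core_size) into separate RX (code) and RW (data) halves; anything that printed or range-checked the old fields now has to pick a half or sum them. The likely shape of the helper gcov switches to, inferred from its use here (an assumption; the real definition lives in the patched include/linux/module.h):

    /* assumed sketch, not quoted from the patch */
    static inline int within_module_core_rw(unsigned long addr,
                                            const struct module *mod)
    {
            return addr >= (unsigned long)mod->module_core_rw &&
                   addr <  (unsigned long)mod->module_core_rw
                                          + mod->core_size_rw;
    }
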
85019diff --git a/kernel/events/core.c b/kernel/events/core.c
85020index 6ed1163..f36346e 100644
85021--- a/kernel/events/core.c
85022+++ b/kernel/events/core.c
85023@@ -157,8 +157,15 @@ static struct srcu_struct pmus_srcu;
85024 * 0 - disallow raw tracepoint access for unpriv
85025 * 1 - disallow cpu events for unpriv
85026 * 2 - disallow kernel profiling for unpriv
85027+ * 3 - disallow all unpriv perf event use
85028 */
85029-int sysctl_perf_event_paranoid __read_mostly = 1;
85030+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
85031+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
85032+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
85033+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
85034+#else
85035+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
85036+#endif
85037
85038 /* Minimum for 512 kiB + 1 user control page */
85039 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
85040@@ -184,7 +191,7 @@ void update_perf_cpu_limits(void)
85041
85042 tmp *= sysctl_perf_cpu_time_max_percent;
85043 do_div(tmp, 100);
85044- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
85045+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
85046 }
85047
85048 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
85049@@ -271,7 +278,7 @@ void perf_sample_event_took(u64 sample_len_ns)
85050 update_perf_cpu_limits();
85051 }
85052
85053-static atomic64_t perf_event_id;
85054+static atomic64_unchecked_t perf_event_id;
85055
85056 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
85057 enum event_type_t event_type);
85058@@ -2985,7 +2992,7 @@ static void __perf_event_read(void *info)
85059
85060 static inline u64 perf_event_count(struct perf_event *event)
85061 {
85062- return local64_read(&event->count) + atomic64_read(&event->child_count);
85063+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
85064 }
85065
85066 static u64 perf_event_read(struct perf_event *event)
85067@@ -3353,9 +3360,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
85068 mutex_lock(&event->child_mutex);
85069 total += perf_event_read(event);
85070 *enabled += event->total_time_enabled +
85071- atomic64_read(&event->child_total_time_enabled);
85072+ atomic64_read_unchecked(&event->child_total_time_enabled);
85073 *running += event->total_time_running +
85074- atomic64_read(&event->child_total_time_running);
85075+ atomic64_read_unchecked(&event->child_total_time_running);
85076
85077 list_for_each_entry(child, &event->child_list, child_list) {
85078 total += perf_event_read(child);
85079@@ -3770,10 +3777,10 @@ void perf_event_update_userpage(struct perf_event *event)
85080 userpg->offset -= local64_read(&event->hw.prev_count);
85081
85082 userpg->time_enabled = enabled +
85083- atomic64_read(&event->child_total_time_enabled);
85084+ atomic64_read_unchecked(&event->child_total_time_enabled);
85085
85086 userpg->time_running = running +
85087- atomic64_read(&event->child_total_time_running);
85088+ atomic64_read_unchecked(&event->child_total_time_running);
85089
85090 arch_perf_update_userpage(userpg, now);
85091
85092@@ -4324,7 +4331,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
85093
85094 /* Data. */
85095 sp = perf_user_stack_pointer(regs);
85096- rem = __output_copy_user(handle, (void *) sp, dump_size);
85097+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
85098 dyn_size = dump_size - rem;
85099
85100 perf_output_skip(handle, rem);
85101@@ -4415,11 +4422,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
85102 values[n++] = perf_event_count(event);
85103 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
85104 values[n++] = enabled +
85105- atomic64_read(&event->child_total_time_enabled);
85106+ atomic64_read_unchecked(&event->child_total_time_enabled);
85107 }
85108 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
85109 values[n++] = running +
85110- atomic64_read(&event->child_total_time_running);
85111+ atomic64_read_unchecked(&event->child_total_time_running);
85112 }
85113 if (read_format & PERF_FORMAT_ID)
85114 values[n++] = primary_event_id(event);
85115@@ -6686,7 +6693,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
85116 event->parent = parent_event;
85117
85118 event->ns = get_pid_ns(task_active_pid_ns(current));
85119- event->id = atomic64_inc_return(&perf_event_id);
85120+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
85121
85122 event->state = PERF_EVENT_STATE_INACTIVE;
85123
85124@@ -6985,6 +6992,11 @@ SYSCALL_DEFINE5(perf_event_open,
85125 if (flags & ~PERF_FLAG_ALL)
85126 return -EINVAL;
85127
85128+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
85129+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
85130+ return -EACCES;
85131+#endif
85132+
85133 err = perf_copy_attr(attr_uptr, &attr);
85134 if (err)
85135 return err;
85136@@ -7316,10 +7328,10 @@ static void sync_child_event(struct perf_event *child_event,
85137 /*
85138 * Add back the child's count to the parent's count:
85139 */
85140- atomic64_add(child_val, &parent_event->child_count);
85141- atomic64_add(child_event->total_time_enabled,
85142+ atomic64_add_unchecked(child_val, &parent_event->child_count);
85143+ atomic64_add_unchecked(child_event->total_time_enabled,
85144 &parent_event->child_total_time_enabled);
85145- atomic64_add(child_event->total_time_running,
85146+ atomic64_add_unchecked(child_event->total_time_running,
85147 &parent_event->child_total_time_running);
85148
85149 /*
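
perf paranoia level 3 is a grsecurity extension: mainline stops at 2 (unprivileged users may not profile the kernel), level 3 refuses perf_event_open() entirely without CAP_SYS_ADMIN, and GRKERNSEC_PERF_HARDEN makes 3 the default. The perf_paranoid_any() helper used in the hunk is defined in this patch's include/linux/perf_event.h changes; its likely shape, inferred from the usage above:

    /* assumed sketch of the patched helper */
    static inline bool perf_paranoid_any(void)
    {
            /* the patch renames sysctl_perf_event_paranoid to
             * sysctl_perf_event_legitimately_concerned; > 2 means
             * "no unprivileged perf at all" */
            return sysctl_perf_event_legitimately_concerned > 2;
    }
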
85150diff --git a/kernel/events/internal.h b/kernel/events/internal.h
85151index 569b2187..19940d9 100644
85152--- a/kernel/events/internal.h
85153+++ b/kernel/events/internal.h
85154@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
85155 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
85156 }
85157
85158-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
85159+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
85160 static inline unsigned long \
85161 func_name(struct perf_output_handle *handle, \
85162- const void *buf, unsigned long len) \
85163+ const void user *buf, unsigned long len) \
85164 { \
85165 unsigned long size, written; \
85166 \
85167@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
85168 return 0;
85169 }
85170
85171-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
85172+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
85173
85174 static inline unsigned long
85175 memcpy_skip(void *dst, const void *src, unsigned long n)
85176@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
85177 return 0;
85178 }
85179
85180-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
85181+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
85182
85183 #ifndef arch_perf_out_copy_user
85184 #define arch_perf_out_copy_user arch_perf_out_copy_user
85185@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
85186 }
85187 #endif
85188
85189-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
85190+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
85191
85192 /* Callchain handling */
85193 extern struct perf_callchain_entry *
85194diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
85195index 24b7d6c..40cf797 100644
85196--- a/kernel/events/uprobes.c
85197+++ b/kernel/events/uprobes.c
85198@@ -1640,7 +1640,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
85199 {
85200 struct page *page;
85201 uprobe_opcode_t opcode;
85202- int result;
85203+ long result;
85204
85205 pagefault_disable();
85206 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
85207diff --git a/kernel/exit.c b/kernel/exit.c
85208index a949819..a5f127d 100644
85209--- a/kernel/exit.c
85210+++ b/kernel/exit.c
85211@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
85212 struct task_struct *leader;
85213 int zap_leader;
85214 repeat:
85215+#ifdef CONFIG_NET
85216+ gr_del_task_from_ip_table(p);
85217+#endif
85218+
85219 /* don't need to get the RCU readlock here - the process is dead and
85220 * can't be modifying its own credentials. But shut RCU-lockdep up */
85221 rcu_read_lock();
85222@@ -329,7 +333,7 @@ int allow_signal(int sig)
85223 * know it'll be handled, so that they don't get converted to
85224 * SIGKILL or just silently dropped.
85225 */
85226- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
85227+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
85228 recalc_sigpending();
85229 spin_unlock_irq(&current->sighand->siglock);
85230 return 0;
85231@@ -698,6 +702,8 @@ void do_exit(long code)
85232 struct task_struct *tsk = current;
85233 int group_dead;
85234
85235+ set_fs(USER_DS);
85236+
85237 profile_task_exit(tsk);
85238
85239 WARN_ON(blk_needs_flush_plug(tsk));
85240@@ -714,7 +720,6 @@ void do_exit(long code)
85241 * mm_release()->clear_child_tid() from writing to a user-controlled
85242 * kernel address.
85243 */
85244- set_fs(USER_DS);
85245
85246 ptrace_event(PTRACE_EVENT_EXIT, code);
85247
85248@@ -773,6 +778,9 @@ void do_exit(long code)
85249 tsk->exit_code = code;
85250 taskstats_exit(tsk, group_dead);
85251
85252+ gr_acl_handle_psacct(tsk, code);
85253+ gr_acl_handle_exit();
85254+
85255 exit_mm(tsk);
85256
85257 if (group_dead)
85258@@ -894,7 +902,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
85259 * Take down every thread in the group. This is called by fatal signals
85260 * as well as by sys_exit_group (below).
85261 */
85262-void
85263+__noreturn void
85264 do_group_exit(int exit_code)
85265 {
85266 struct signal_struct *sig = current->signal;
85267diff --git a/kernel/fork.c b/kernel/fork.c
85268index dfa736c..d170f9b 100644
85269--- a/kernel/fork.c
85270+++ b/kernel/fork.c
85271@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
85272 *stackend = STACK_END_MAGIC; /* for overflow detection */
85273
85274 #ifdef CONFIG_CC_STACKPROTECTOR
85275- tsk->stack_canary = get_random_int();
85276+ tsk->stack_canary = pax_get_random_long();
85277 #endif
85278
85279 /*
85280@@ -345,12 +345,80 @@ free_tsk:
85281 }
85282
85283 #ifdef CONFIG_MMU
85284-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
85285+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
85286+{
85287+ struct vm_area_struct *tmp;
85288+ unsigned long charge;
85289+ struct file *file;
85290+ int retval;
85291+
85292+ charge = 0;
85293+ if (mpnt->vm_flags & VM_ACCOUNT) {
85294+ unsigned long len = vma_pages(mpnt);
85295+
85296+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
85297+ goto fail_nomem;
85298+ charge = len;
85299+ }
85300+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
85301+ if (!tmp)
85302+ goto fail_nomem;
85303+ *tmp = *mpnt;
85304+ tmp->vm_mm = mm;
85305+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
85306+ retval = vma_dup_policy(mpnt, tmp);
85307+ if (retval)
85308+ goto fail_nomem_policy;
85309+ if (anon_vma_fork(tmp, mpnt))
85310+ goto fail_nomem_anon_vma_fork;
85311+ tmp->vm_flags &= ~VM_LOCKED;
85312+ tmp->vm_next = tmp->vm_prev = NULL;
85313+ tmp->vm_mirror = NULL;
85314+ file = tmp->vm_file;
85315+ if (file) {
85316+ struct inode *inode = file_inode(file);
85317+ struct address_space *mapping = file->f_mapping;
85318+
85319+ get_file(file);
85320+ if (tmp->vm_flags & VM_DENYWRITE)
85321+ atomic_dec(&inode->i_writecount);
85322+ mutex_lock(&mapping->i_mmap_mutex);
85323+ if (tmp->vm_flags & VM_SHARED)
85324+ mapping->i_mmap_writable++;
85325+ flush_dcache_mmap_lock(mapping);
85326+ /* insert tmp into the share list, just after mpnt */
85327+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
85328+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
85329+ else
85330+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
85331+ flush_dcache_mmap_unlock(mapping);
85332+ mutex_unlock(&mapping->i_mmap_mutex);
85333+ }
85334+
85335+ /*
85336+ * Clear hugetlb-related page reserves for children. This only
85337+ * affects MAP_PRIVATE mappings. Faults generated by the child
85338+ * are not guaranteed to succeed, even if read-only
85339+ */
85340+ if (is_vm_hugetlb_page(tmp))
85341+ reset_vma_resv_huge_pages(tmp);
85342+
85343+ return tmp;
85344+
85345+fail_nomem_anon_vma_fork:
85346+ mpol_put(vma_policy(tmp));
85347+fail_nomem_policy:
85348+ kmem_cache_free(vm_area_cachep, tmp);
85349+fail_nomem:
85350+ vm_unacct_memory(charge);
85351+ return NULL;
85352+}
85353+
85354+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
85355 {
85356 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
85357 struct rb_node **rb_link, *rb_parent;
85358 int retval;
85359- unsigned long charge;
85360
85361 uprobe_start_dup_mmap();
85362 down_write(&oldmm->mmap_sem);
85363@@ -379,55 +447,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
85364
85365 prev = NULL;
85366 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
85367- struct file *file;
85368-
85369 if (mpnt->vm_flags & VM_DONTCOPY) {
85370 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
85371 -vma_pages(mpnt));
85372 continue;
85373 }
85374- charge = 0;
85375- if (mpnt->vm_flags & VM_ACCOUNT) {
85376- unsigned long len = vma_pages(mpnt);
85377-
85378- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
85379- goto fail_nomem;
85380- charge = len;
85381- }
85382- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
85383- if (!tmp)
85384- goto fail_nomem;
85385- *tmp = *mpnt;
85386- INIT_LIST_HEAD(&tmp->anon_vma_chain);
85387- retval = vma_dup_policy(mpnt, tmp);
85388- if (retval)
85389- goto fail_nomem_policy;
85390- tmp->vm_mm = mm;
85391- if (anon_vma_fork(tmp, mpnt))
85392- goto fail_nomem_anon_vma_fork;
85393- tmp->vm_flags &= ~VM_LOCKED;
85394- tmp->vm_next = tmp->vm_prev = NULL;
85395- file = tmp->vm_file;
85396- if (file) {
85397- struct inode *inode = file_inode(file);
85398- struct address_space *mapping = file->f_mapping;
85399-
85400- get_file(file);
85401- if (tmp->vm_flags & VM_DENYWRITE)
85402- atomic_dec(&inode->i_writecount);
85403- mutex_lock(&mapping->i_mmap_mutex);
85404- if (tmp->vm_flags & VM_SHARED)
85405- mapping->i_mmap_writable++;
85406- flush_dcache_mmap_lock(mapping);
85407- /* insert tmp into the share list, just after mpnt */
85408- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
85409- vma_nonlinear_insert(tmp,
85410- &mapping->i_mmap_nonlinear);
85411- else
85412- vma_interval_tree_insert_after(tmp, mpnt,
85413- &mapping->i_mmap);
85414- flush_dcache_mmap_unlock(mapping);
85415- mutex_unlock(&mapping->i_mmap_mutex);
85416+ tmp = dup_vma(mm, oldmm, mpnt);
85417+ if (!tmp) {
85418+ retval = -ENOMEM;
85419+ goto out;
85420 }
85421
85422 /*
85423@@ -459,6 +487,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
85424 if (retval)
85425 goto out;
85426 }
85427+
85428+#ifdef CONFIG_PAX_SEGMEXEC
85429+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
85430+ struct vm_area_struct *mpnt_m;
85431+
85432+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
85433+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
85434+
85435+ if (!mpnt->vm_mirror)
85436+ continue;
85437+
85438+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
85439+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
85440+ mpnt->vm_mirror = mpnt_m;
85441+ } else {
85442+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
85443+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
85444+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
85445+ mpnt->vm_mirror->vm_mirror = mpnt;
85446+ }
85447+ }
85448+ BUG_ON(mpnt_m);
85449+ }
85450+#endif
85451+
85452 /* a new mm has just been created */
85453 arch_dup_mmap(oldmm, mm);
85454 retval = 0;
85455@@ -468,14 +521,6 @@ out:
85456 up_write(&oldmm->mmap_sem);
85457 uprobe_end_dup_mmap();
85458 return retval;
85459-fail_nomem_anon_vma_fork:
85460- mpol_put(vma_policy(tmp));
85461-fail_nomem_policy:
85462- kmem_cache_free(vm_area_cachep, tmp);
85463-fail_nomem:
85464- retval = -ENOMEM;
85465- vm_unacct_memory(charge);
85466- goto out;
85467 }
85468
85469 static inline int mm_alloc_pgd(struct mm_struct *mm)
85470@@ -689,8 +734,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
85471 return ERR_PTR(err);
85472
85473 mm = get_task_mm(task);
85474- if (mm && mm != current->mm &&
85475- !ptrace_may_access(task, mode)) {
85476+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
85477+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
85478 mmput(mm);
85479 mm = ERR_PTR(-EACCES);
85480 }
85481@@ -909,13 +954,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
85482 spin_unlock(&fs->lock);
85483 return -EAGAIN;
85484 }
85485- fs->users++;
85486+ atomic_inc(&fs->users);
85487 spin_unlock(&fs->lock);
85488 return 0;
85489 }
85490 tsk->fs = copy_fs_struct(fs);
85491 if (!tsk->fs)
85492 return -ENOMEM;
85493+ /* Carry through gr_chroot_dentry and is_chrooted instead
85494+ of recomputing it here. Already copied when the task struct
85495+ is duplicated. This allows pivot_root to not be treated as
85496+ a chroot
85497+ */
85498+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
85499+
85500 return 0;
85501 }
85502
85503@@ -1126,7 +1178,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
85504 * parts of the process environment (as per the clone
85505 * flags). The actual kick-off is left to the caller.
85506 */
85507-static struct task_struct *copy_process(unsigned long clone_flags,
85508+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
85509 unsigned long stack_start,
85510 unsigned long stack_size,
85511 int __user *child_tidptr,
85512@@ -1198,6 +1250,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
85513 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
85514 #endif
85515 retval = -EAGAIN;
85516+
85517+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
85518+
85519 if (atomic_read(&p->real_cred->user->processes) >=
85520 task_rlimit(p, RLIMIT_NPROC)) {
85521 if (p->real_cred->user != INIT_USER &&
85522@@ -1446,6 +1501,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
85523 goto bad_fork_free_pid;
85524 }
85525
85526+ /* synchronizes with gr_set_acls()
85527+ we need to call this past the point of no return for fork()
85528+ */
85529+ gr_copy_label(p);
85530+
85531 if (likely(p->pid)) {
85532 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
85533
85534@@ -1532,6 +1592,8 @@ bad_fork_cleanup_count:
85535 bad_fork_free:
85536 free_task(p);
85537 fork_out:
85538+ gr_log_forkfail(retval);
85539+
85540 return ERR_PTR(retval);
85541 }
85542
85543@@ -1593,6 +1655,7 @@ long do_fork(unsigned long clone_flags,
85544
85545 p = copy_process(clone_flags, stack_start, stack_size,
85546 child_tidptr, NULL, trace);
85547+ add_latent_entropy();
85548 /*
85549 * Do this prior waking up the new thread - the thread pointer
85550 * might get invalid after that point, if the thread exits quickly.
85551@@ -1607,6 +1670,8 @@ long do_fork(unsigned long clone_flags,
85552 if (clone_flags & CLONE_PARENT_SETTID)
85553 put_user(nr, parent_tidptr);
85554
85555+ gr_handle_brute_check();
85556+
85557 if (clone_flags & CLONE_VFORK) {
85558 p->vfork_done = &vfork;
85559 init_completion(&vfork);
85560@@ -1723,7 +1788,7 @@ void __init proc_caches_init(void)
85561 mm_cachep = kmem_cache_create("mm_struct",
85562 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
85563 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
85564- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
85565+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
85566 mmap_init();
85567 nsproxy_cache_init();
85568 }
85569@@ -1763,7 +1828,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
85570 return 0;
85571
85572 /* don't need lock here; in the worst case we'll do useless copy */
85573- if (fs->users == 1)
85574+ if (atomic_read(&fs->users) == 1)
85575 return 0;
85576
85577 *new_fsp = copy_fs_struct(fs);
85578@@ -1870,7 +1935,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
85579 fs = current->fs;
85580 spin_lock(&fs->lock);
85581 current->fs = new_fs;
85582- if (--fs->users)
85583+ gr_set_chroot_entries(current, &current->fs->root);
85584+ if (atomic_dec_return(&fs->users))
85585 new_fs = NULL;
85586 else
85587 new_fs = fs;
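
Two independent things happen to dup_mmap(): the body of the VMA-copy loop moves into a new dup_vma() helper (which also gives the SEGMEXEC mirror fixup a finished VMA list to walk), and both dup_mmap() and copy_process() are tagged __latent_entropy. That attribute comes from the latent_entropy gcc plugin: the compiler threads a pseudo-random accumulator through the function's control flow, and add_latent_entropy(), called after do_one_initcall() and do_fork() in this patch, mixes the accumulated value into the kernel's entropy input. Usage amounts to this (a sketch; the attribute is a no-op when the plugin is not enabled):

    #include <linux/random.h>

    /* the plugin instruments this function to stir a per-CPU
     * latent_entropy accumulator as it executes */
    static __latent_entropy int demo_early_work(void)
    {
            /* ... hard-to-predict boot-time work ... */
            return 0;
    }

    static void demo(void)
    {
            demo_early_work();
            add_latent_entropy();   /* fold the accumulator into the pool */
    }
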
85588diff --git a/kernel/futex.c b/kernel/futex.c
85589index f6ff019..ac53307 100644
85590--- a/kernel/futex.c
85591+++ b/kernel/futex.c
85592@@ -54,6 +54,7 @@
85593 #include <linux/mount.h>
85594 #include <linux/pagemap.h>
85595 #include <linux/syscalls.h>
85596+#include <linux/ptrace.h>
85597 #include <linux/signal.h>
85598 #include <linux/export.h>
85599 #include <linux/magic.h>
85600@@ -243,6 +244,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
85601 struct page *page, *page_head;
85602 int err, ro = 0;
85603
85604+#ifdef CONFIG_PAX_SEGMEXEC
85605+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
85606+ return -EFAULT;
85607+#endif
85608+
85609 /*
85610 * The futex address must be "naturally" aligned.
85611 */
85612@@ -442,7 +448,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
85613
85614 static int get_futex_value_locked(u32 *dest, u32 __user *from)
85615 {
85616- int ret;
85617+ unsigned long ret;
85618
85619 pagefault_disable();
85620 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
85621@@ -2735,6 +2741,7 @@ static int __init futex_init(void)
85622 {
85623 u32 curval;
85624 int i;
85625+ mm_segment_t oldfs;
85626
85627 /*
85628 * This will fail and we want it. Some arch implementations do
85629@@ -2746,8 +2753,11 @@ static int __init futex_init(void)
85630 * implementation, the non-functional ones will return
85631 * -ENOSYS.
85632 */
85633+ oldfs = get_fs();
85634+ set_fs(USER_DS);
85635 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
85636 futex_cmpxchg_enabled = 1;
85637+ set_fs(oldfs);
85638
85639 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
85640 plist_head_init(&futex_queues[i].chain);
85641diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
85642index f9f44fd..29885e4 100644
85643--- a/kernel/futex_compat.c
85644+++ b/kernel/futex_compat.c
85645@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
85646 return 0;
85647 }
85648
85649-static void __user *futex_uaddr(struct robust_list __user *entry,
85650+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
85651 compat_long_t futex_offset)
85652 {
85653 compat_uptr_t base = ptr_to_compat(entry);
85654diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
85655index f45b75b..bfac6d5 100644
85656--- a/kernel/gcov/base.c
85657+++ b/kernel/gcov/base.c
85658@@ -108,11 +108,6 @@ void gcov_enable_events(void)
85659 }
85660
85661 #ifdef CONFIG_MODULES
85662-static inline int within(void *addr, void *start, unsigned long size)
85663-{
85664- return ((addr >= start) && (addr < start + size));
85665-}
85666-
85667 /* Update list and generate events when modules are unloaded. */
85668 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
85669 void *data)
85670@@ -127,7 +122,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
85671
85672 /* Remove entries located in module from linked list. */
85673 while ((info = gcov_info_next(info))) {
85674- if (within(info, mod->module_core, mod->core_size)) {
85675+ if (within_module_core_rw((unsigned long)info, mod)) {
85676 gcov_info_unlink(prev, info);
85677 if (gcov_events_enabled)
85678 gcov_event(GCOV_REMOVE, info);
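
[Editor's sketch, not part of the patch] The gcov hunk drops the local within() helper in favour of within_module_core_rw(), since this patch splits each module's core into separate RW and RX regions and gcov_info objects live in the writable half. For reference, the removed helper was just a half-open range check; a standalone version:

/* Illustrative only: the open-coded range check the hunk removes in
 * favour of the shared within_module_core_rw() helper. */
#include <stdbool.h>
#include <stdio.h>

static bool within(const void *addr, const void *start, unsigned long size)
{
	const char *a = addr, *s = start;

	return a >= s && a < s + size;	/* half-open: [start, start+size) */
}

int main(void)
{
	char region[64];

	printf("%d\n", within(&region[10], region, sizeof(region))); /* 1 */
	printf("%d\n", within(region + 64, region, sizeof(region))); /* 0 */
	return 0;
}
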
85679diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
85680index 383319b..56ebb13 100644
85681--- a/kernel/hrtimer.c
85682+++ b/kernel/hrtimer.c
85683@@ -1438,7 +1438,7 @@ void hrtimer_peek_ahead_timers(void)
85684 local_irq_restore(flags);
85685 }
85686
85687-static void run_hrtimer_softirq(struct softirq_action *h)
85688+static __latent_entropy void run_hrtimer_softirq(void)
85689 {
85690 hrtimer_peek_ahead_timers();
85691 }
85692diff --git a/kernel/irq_work.c b/kernel/irq_work.c
85693index 55fcce6..0e4cf34 100644
85694--- a/kernel/irq_work.c
85695+++ b/kernel/irq_work.c
85696@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
85697 return NOTIFY_OK;
85698 }
85699
85700-static struct notifier_block cpu_notify;
85701+static struct notifier_block cpu_notify = {
85702+ .notifier_call = irq_work_cpu_notify,
85703+ .priority = 0,
85704+};
85705
85706 static __init int irq_work_init_cpu_notifier(void)
85707 {
85708- cpu_notify.notifier_call = irq_work_cpu_notify;
85709- cpu_notify.priority = 0;
85710 register_cpu_notifier(&cpu_notify);
85711 return 0;
85712 }
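
[Editor's sketch, not part of the patch] The irq_work hunk replaces runtime assignment of cpu_notify's fields with a static initializer; that is what allows such structures to be constified and placed in read-only memory, since nothing needs to write them after load. A small sketch of the difference, with invented names:

/* Illustrative only: a designated initializer lets the object be const
 * and live in .rodata; the removed runtime assignments forced it to
 * stay writable. */
#include <stdio.h>

struct notifier_like {
	int (*call)(int);
	int priority;
};

static int demo_call(int ev) { return ev; }

/* const is now possible: fully initialized at compile time */
static const struct notifier_like cpu_notify = {
	.call     = demo_call,
	.priority = 0,
};

int main(void)
{
	printf("event -> %d\n", cpu_notify.call(42));
	return 0;
}
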
85713diff --git a/kernel/jump_label.c b/kernel/jump_label.c
85714index 9019f15..9a3c42e 100644
85715--- a/kernel/jump_label.c
85716+++ b/kernel/jump_label.c
85717@@ -14,6 +14,7 @@
85718 #include <linux/err.h>
85719 #include <linux/static_key.h>
85720 #include <linux/jump_label_ratelimit.h>
85721+#include <linux/mm.h>
85722
85723 #ifdef HAVE_JUMP_LABEL
85724
85725@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
85726
85727 size = (((unsigned long)stop - (unsigned long)start)
85728 / sizeof(struct jump_entry));
85729+ pax_open_kernel();
85730 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
85731+ pax_close_kernel();
85732 }
85733
85734 static void jump_label_update(struct static_key *key, int enable);
85735@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
85736 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
85737 struct jump_entry *iter;
85738
85739+ pax_open_kernel();
85740 for (iter = iter_start; iter < iter_stop; iter++) {
85741 if (within_module_init(iter->code, mod))
85742 iter->code = 0;
85743 }
85744+ pax_close_kernel();
85745 }
85746
85747 static int
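
[Editor's sketch, not part of the patch] jump_label's entry tables live in read-only memory under KERNEXEC, so the hunks above bracket writes with pax_open_kernel()/pax_close_kernel(). The same open-write-close discipline, approximated from userspace with mprotect():

/* Illustrative only: briefly make a read-only region writable, mutate
 * it, then re-protect it, roughly the pax_open_kernel() pattern used
 * throughout this patch. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t sz = 4096;
	char *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "initial");
	mprotect(p, sz, PROT_READ);		 /* normally read-only */

	mprotect(p, sz, PROT_READ | PROT_WRITE); /* "pax_open_kernel()" */
	strcpy(p, "patched");			 /* e.g. sort(), iter->code = 0 */
	mprotect(p, sz, PROT_READ);		 /* "pax_close_kernel()" */

	puts(p);
	munmap(p, sz);
	return 0;
}
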
85748diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
85749index 3127ad5..159d880 100644
85750--- a/kernel/kallsyms.c
85751+++ b/kernel/kallsyms.c
85752@@ -11,6 +11,9 @@
85753 * Changed the compression method from stem compression to "table lookup"
85754 * compression (see scripts/kallsyms.c for a more complete description)
85755 */
85756+#ifdef CONFIG_GRKERNSEC_HIDESYM
85757+#define __INCLUDED_BY_HIDESYM 1
85758+#endif
85759 #include <linux/kallsyms.h>
85760 #include <linux/module.h>
85761 #include <linux/init.h>
85762@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
85763
85764 static inline int is_kernel_inittext(unsigned long addr)
85765 {
85766+ if (system_state != SYSTEM_BOOTING)
85767+ return 0;
85768+
85769 if (addr >= (unsigned long)_sinittext
85770 && addr <= (unsigned long)_einittext)
85771 return 1;
85772 return 0;
85773 }
85774
85775+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85776+#ifdef CONFIG_MODULES
85777+static inline int is_module_text(unsigned long addr)
85778+{
85779+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
85780+ return 1;
85781+
85782+ addr = ktla_ktva(addr);
85783+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
85784+}
85785+#else
85786+static inline int is_module_text(unsigned long addr)
85787+{
85788+ return 0;
85789+}
85790+#endif
85791+#endif
85792+
85793 static inline int is_kernel_text(unsigned long addr)
85794 {
85795 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
85796@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
85797
85798 static inline int is_kernel(unsigned long addr)
85799 {
85800+
85801+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85802+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
85803+ return 1;
85804+
85805+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
85806+#else
85807 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
85808+#endif
85809+
85810 return 1;
85811 return in_gate_area_no_mm(addr);
85812 }
85813
85814 static int is_ksym_addr(unsigned long addr)
85815 {
85816+
85817+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85818+ if (is_module_text(addr))
85819+ return 0;
85820+#endif
85821+
85822 if (all_var)
85823 return is_kernel(addr);
85824
85825@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
85826
85827 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
85828 {
85829- iter->name[0] = '\0';
85830 iter->nameoff = get_symbol_offset(new_pos);
85831 iter->pos = new_pos;
85832 }
85833@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
85834 {
85835 struct kallsym_iter *iter = m->private;
85836
85837+#ifdef CONFIG_GRKERNSEC_HIDESYM
85838+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
85839+ return 0;
85840+#endif
85841+
85842 /* Some debugging symbols have no name. Ignore them. */
85843 if (!iter->name[0])
85844 return 0;
85845@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
85846 */
85847 type = iter->exported ? toupper(iter->type) :
85848 tolower(iter->type);
85849+
85850 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
85851 type, iter->name, iter->module_name);
85852 } else
85853@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
85854 struct kallsym_iter *iter;
85855 int ret;
85856
85857- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
85858+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
85859 if (!iter)
85860 return -ENOMEM;
85861 reset_iter(iter, 0);
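
[Editor's sketch, not part of the patch] Two details in the kallsyms hunks work together: reset_iter() no longer clears iter->name, and the iterator is now allocated with kzalloc() instead of kmalloc(), so s_show()'s !iter->name[0] test still sees a NUL byte on a fresh iterator. The calloc()/malloc() analogue from userspace:

/* Illustrative only: why the hunk swaps kmalloc for kzalloc once the
 * explicit name[0] = '\0' is gone.  calloc() plays the kzalloc() role. */
#include <stdio.h>
#include <stdlib.h>

struct iter_like {
	char name[128];
};

int main(void)
{
	/* malloc() (kmalloc analogue) leaves name[0] indeterminate;
	 * calloc() (kzalloc analogue) guarantees it is '\0'. */
	struct iter_like *it = calloc(1, sizeof(*it));

	if (!it)
		return 1;
	if (!it->name[0])
		puts("empty name correctly skipped");
	free(it);
	return 0;
}
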
85862diff --git a/kernel/kcmp.c b/kernel/kcmp.c
85863index e30ac0f..3528cac 100644
85864--- a/kernel/kcmp.c
85865+++ b/kernel/kcmp.c
85866@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
85867 struct task_struct *task1, *task2;
85868 int ret;
85869
85870+#ifdef CONFIG_GRKERNSEC
85871+ return -ENOSYS;
85872+#endif
85873+
85874 rcu_read_lock();
85875
85876 /*
85877diff --git a/kernel/kexec.c b/kernel/kexec.c
85878index 9c97016..df438f8 100644
85879--- a/kernel/kexec.c
85880+++ b/kernel/kexec.c
85881@@ -1044,7 +1044,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
85882 unsigned long flags)
85883 {
85884 struct compat_kexec_segment in;
85885- struct kexec_segment out, __user *ksegments;
85886+ struct kexec_segment out;
85887+ struct kexec_segment __user *ksegments;
85888 unsigned long i, result;
85889
85890 /* Don't allow clients that don't understand the native
85891diff --git a/kernel/kmod.c b/kernel/kmod.c
85892index b086006..b66f630 100644
85893--- a/kernel/kmod.c
85894+++ b/kernel/kmod.c
85895@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
85896 kfree(info->argv);
85897 }
85898
85899-static int call_modprobe(char *module_name, int wait)
85900+static int call_modprobe(char *module_name, char *module_param, int wait)
85901 {
85902 struct subprocess_info *info;
85903 static char *envp[] = {
85904@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
85905 NULL
85906 };
85907
85908- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
85909+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
85910 if (!argv)
85911 goto out;
85912
85913@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
85914 argv[1] = "-q";
85915 argv[2] = "--";
85916 argv[3] = module_name; /* check free_modprobe_argv() */
85917- argv[4] = NULL;
85918+ argv[4] = module_param;
85919+ argv[5] = NULL;
85920
85921 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
85922 NULL, free_modprobe_argv, NULL);
85923@@ -129,9 +130,8 @@ out:
85924 * If module auto-loading support is disabled then this function
85925 * becomes a no-operation.
85926 */
85927-int __request_module(bool wait, const char *fmt, ...)
85928+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
85929 {
85930- va_list args;
85931 char module_name[MODULE_NAME_LEN];
85932 unsigned int max_modprobes;
85933 int ret;
85934@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
85935 if (!modprobe_path[0])
85936 return 0;
85937
85938- va_start(args, fmt);
85939- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
85940- va_end(args);
85941+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
85942 if (ret >= MODULE_NAME_LEN)
85943 return -ENAMETOOLONG;
85944
85945@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
85946 if (ret)
85947 return ret;
85948
85949+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85950+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
85951+		/* hack to work around consolekit/udisks stupidity */
85952+ read_lock(&tasklist_lock);
85953+ if (!strcmp(current->comm, "mount") &&
85954+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
85955+ read_unlock(&tasklist_lock);
85956+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
85957+ return -EPERM;
85958+ }
85959+ read_unlock(&tasklist_lock);
85960+ }
85961+#endif
85962+
85963 /* If modprobe needs a service that is in a module, we get a recursive
85964 * loop. Limit the number of running kmod threads to max_threads/2 or
85965 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
85966@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
85967
85968 trace_module_request(module_name, wait, _RET_IP_);
85969
85970- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
85971+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
85972
85973 atomic_dec(&kmod_concurrent);
85974 return ret;
85975 }
85976+
85977+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
85978+{
85979+ va_list args;
85980+ int ret;
85981+
85982+ va_start(args, fmt);
85983+ ret = ____request_module(wait, module_param, fmt, args);
85984+ va_end(args);
85985+
85986+ return ret;
85987+}
85988+
85989+int __request_module(bool wait, const char *fmt, ...)
85990+{
85991+ va_list args;
85992+ int ret;
85993+
85994+#ifdef CONFIG_GRKERNSEC_MODHARDEN
85995+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
85996+ char module_param[MODULE_NAME_LEN];
85997+
85998+ memset(module_param, 0, sizeof(module_param));
85999+
86000+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
86001+
86002+ va_start(args, fmt);
86003+ ret = ____request_module(wait, module_param, fmt, args);
86004+ va_end(args);
86005+
86006+ return ret;
86007+ }
86008+#endif
86009+
86010+ va_start(args, fmt);
86011+ ret = ____request_module(wait, NULL, fmt, args);
86012+ va_end(args);
86013+
86014+ return ret;
86015+}
86016+
86017 EXPORT_SYMBOL(__request_module);
86018 #endif /* CONFIG_MODULES */
86019
86020@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
86021 */
86022 set_user_nice(current, 0);
86023
86024+#ifdef CONFIG_GRKERNSEC
86025+ /* this is race-free as far as userland is concerned as we copied
86026+ out the path to be used prior to this point and are now operating
86027+ on that copy
86028+ */
86029+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
86030+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
86031+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
86032+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
86033+ retval = -EPERM;
86034+ goto fail;
86035+ }
86036+#endif
86037+
86038 retval = -ENOMEM;
86039 new = prepare_kernel_cred(current);
86040 if (!new)
86041@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
86042 commit_creds(new);
86043
86044 retval = do_execve(sub_info->path,
86045- (const char __user *const __user *)sub_info->argv,
86046- (const char __user *const __user *)sub_info->envp);
86047+ (const char __user *const __force_user *)sub_info->argv,
86048+ (const char __user *const __force_user *)sub_info->envp);
86049 if (!retval)
86050 return 0;
86051
86052@@ -260,6 +327,10 @@ static int call_helper(void *data)
86053
86054 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
86055 {
86056+#ifdef CONFIG_GRKERNSEC
86057+ kfree(info->path);
86058+ info->path = info->origpath;
86059+#endif
86060 if (info->cleanup)
86061 (*info->cleanup)(info);
86062 kfree(info);
86063@@ -303,7 +374,7 @@ static int wait_for_helper(void *data)
86064 *
86065 * Thus the __user pointer cast is valid here.
86066 */
86067- sys_wait4(pid, (int __user *)&ret, 0, NULL);
86068+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
86069
86070 /*
86071 * If ret is 0, either ____call_usermodehelper failed and the
86072@@ -542,7 +613,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
86073 goto out;
86074
86075 INIT_WORK(&sub_info->work, __call_usermodehelper);
86076+#ifdef CONFIG_GRKERNSEC
86077+ sub_info->origpath = path;
86078+ sub_info->path = kstrdup(path, gfp_mask);
86079+#else
86080 sub_info->path = path;
86081+#endif
86082 sub_info->argv = argv;
86083 sub_info->envp = envp;
86084
86085@@ -650,7 +726,7 @@ EXPORT_SYMBOL(call_usermodehelper);
86086 static int proc_cap_handler(struct ctl_table *table, int write,
86087 void __user *buffer, size_t *lenp, loff_t *ppos)
86088 {
86089- struct ctl_table t;
86090+ ctl_table_no_const t;
86091 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
86092 kernel_cap_t new_cap;
86093 int err, i;
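
[Editor's sketch, not part of the patch] The kmod.c rework turns __request_module() into a thin variadic front end over ____request_module(), which takes an explicit va_list plus the extra module_param string that MODHARDEN appends to modprobe's argv. The va_list-plumbing shape, with invented names:

/* Illustrative only: one worker taking a va_list so several variadic
 * front ends can share it, the structure the hunk introduces. */
#include <stdarg.h>
#include <stdio.h>

static int request_worker(const char *extra, const char *fmt, va_list ap)
{
	char name[64];

	vsnprintf(name, sizeof(name), fmt, ap);
	printf("load '%s' extra-arg '%s'\n", name, extra ? extra : "(none)");
	return 0;
}

static int request_with_extra(const char *extra, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = request_worker(extra, fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	/* marker string modelled on the one MODHARDEN passes along */
	return request_with_extra("grsec_modharden_normal1000_",
				  "net-pf-%d", 10);
}
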
86094diff --git a/kernel/kprobes.c b/kernel/kprobes.c
86095index ceeadfc..11c18b6 100644
86096--- a/kernel/kprobes.c
86097+++ b/kernel/kprobes.c
86098@@ -31,6 +31,9 @@
86099 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
86100 * <prasanna@in.ibm.com> added function-return probes.
86101 */
86102+#ifdef CONFIG_GRKERNSEC_HIDESYM
86103+#define __INCLUDED_BY_HIDESYM 1
86104+#endif
86105 #include <linux/kprobes.h>
86106 #include <linux/hash.h>
86107 #include <linux/init.h>
86108@@ -135,12 +138,12 @@ enum kprobe_slot_state {
86109
86110 static void *alloc_insn_page(void)
86111 {
86112- return module_alloc(PAGE_SIZE);
86113+ return module_alloc_exec(PAGE_SIZE);
86114 }
86115
86116 static void free_insn_page(void *page)
86117 {
86118- module_free(NULL, page);
86119+ module_free_exec(NULL, page);
86120 }
86121
86122 struct kprobe_insn_cache kprobe_insn_slots = {
86123@@ -2151,11 +2154,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
86124 kprobe_type = "k";
86125
86126 if (sym)
86127- seq_printf(pi, "%p %s %s+0x%x %s ",
86128+ seq_printf(pi, "%pK %s %s+0x%x %s ",
86129 p->addr, kprobe_type, sym, offset,
86130 (modname ? modname : " "));
86131 else
86132- seq_printf(pi, "%p %s %p ",
86133+ seq_printf(pi, "%pK %s %pK ",
86134 p->addr, kprobe_type, p->addr);
86135
86136 if (!pp)
86137diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
86138index 9659d38..bffd520 100644
86139--- a/kernel/ksysfs.c
86140+++ b/kernel/ksysfs.c
86141@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
86142 {
86143 if (count+1 > UEVENT_HELPER_PATH_LEN)
86144 return -ENOENT;
86145+ if (!capable(CAP_SYS_ADMIN))
86146+ return -EPERM;
86147 memcpy(uevent_helper, buf, count);
86148 uevent_helper[count] = '\0';
86149 if (count && uevent_helper[count-1] == '\n')
86150@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
86151 return count;
86152 }
86153
86154-static struct bin_attribute notes_attr = {
86155+static bin_attribute_no_const notes_attr __read_only = {
86156 .attr = {
86157 .name = "notes",
86158 .mode = S_IRUGO,
86159diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
86160index 576ba75..7c256e4 100644
86161--- a/kernel/locking/lockdep.c
86162+++ b/kernel/locking/lockdep.c
86163@@ -596,6 +596,10 @@ static int static_obj(void *obj)
86164 end = (unsigned long) &_end,
86165 addr = (unsigned long) obj;
86166
86167+#ifdef CONFIG_PAX_KERNEXEC
86168+ start = ktla_ktva(start);
86169+#endif
86170+
86171 /*
86172 * static variable?
86173 */
86174@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
86175 if (!static_obj(lock->key)) {
86176 debug_locks_off();
86177 printk("INFO: trying to register non-static key.\n");
86178+ printk("lock:%pS key:%pS.\n", lock, lock->key);
86179 printk("the code is fine but needs lockdep annotation.\n");
86180 printk("turning off the locking correctness validator.\n");
86181 dump_stack();
86182@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
86183 if (!class)
86184 return 0;
86185 }
86186- atomic_inc((atomic_t *)&class->ops);
86187+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
86188 if (very_verbose(class)) {
86189 printk("\nacquire class [%p] %s", class->key, class->name);
86190 if (class->name_version > 1)
86191diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
86192index ef43ac4..2720dfa 100644
86193--- a/kernel/locking/lockdep_proc.c
86194+++ b/kernel/locking/lockdep_proc.c
86195@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
86196 return 0;
86197 }
86198
86199- seq_printf(m, "%p", class->key);
86200+ seq_printf(m, "%pK", class->key);
86201 #ifdef CONFIG_DEBUG_LOCKDEP
86202 seq_printf(m, " OPS:%8ld", class->ops);
86203 #endif
86204@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
86205
86206 list_for_each_entry(entry, &class->locks_after, entry) {
86207 if (entry->distance == 1) {
86208- seq_printf(m, " -> [%p] ", entry->class->key);
86209+ seq_printf(m, " -> [%pK] ", entry->class->key);
86210 print_name(m, entry->class);
86211 seq_puts(m, "\n");
86212 }
86213@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
86214 if (!class->key)
86215 continue;
86216
86217- seq_printf(m, "[%p] ", class->key);
86218+ seq_printf(m, "[%pK] ", class->key);
86219 print_name(m, class);
86220 seq_puts(m, "\n");
86221 }
86222@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
86223 if (!i)
86224 seq_line(m, '-', 40-namelen, namelen);
86225
86226- snprintf(ip, sizeof(ip), "[<%p>]",
86227+ snprintf(ip, sizeof(ip), "[<%pK>]",
86228 (void *)class->contention_point[i]);
86229 seq_printf(m, "%40s %14lu %29s %pS\n",
86230 name, stats->contention_point[i],
86231@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
86232 if (!i)
86233 seq_line(m, '-', 40-namelen, namelen);
86234
86235- snprintf(ip, sizeof(ip), "[<%p>]",
86236+ snprintf(ip, sizeof(ip), "[<%pK>]",
86237 (void *)class->contending_point[i]);
86238 seq_printf(m, "%40s %14lu %29s %pS\n",
86239 name, stats->contending_point[i],
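
[Editor's sketch, not part of the patch] The lockdep_proc hunks switch %p to %pK so lock-class key addresses are censored for readers without the required privilege (subject to kptr_restrict). Roughly, %pK behaves like this userspace approximation:

/* Illustrative only: a coarse userspace stand-in for %pK, show the
 * real pointer to privileged readers, a zeroed value otherwise. */
#include <stdio.h>
#include <unistd.h>

static void print_ptr(const void *p)
{
	if (geteuid() == 0)
		printf("%p\n", p);		/* privileged: real address */
	else
		printf("0000000000000000\n");	/* censored, like %pK */
}

int main(void)
{
	int x;

	print_ptr(&x);
	return 0;
}
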
86240diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
86241index 7e3443f..b2a1e6b 100644
86242--- a/kernel/locking/mutex-debug.c
86243+++ b/kernel/locking/mutex-debug.c
86244@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
86245 }
86246
86247 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
86248- struct thread_info *ti)
86249+ struct task_struct *task)
86250 {
86251 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
86252
86253 /* Mark the current thread as blocked on the lock: */
86254- ti->task->blocked_on = waiter;
86255+ task->blocked_on = waiter;
86256 }
86257
86258 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
86259- struct thread_info *ti)
86260+ struct task_struct *task)
86261 {
86262 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
86263- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
86264- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
86265- ti->task->blocked_on = NULL;
86266+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
86267+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
86268+ task->blocked_on = NULL;
86269
86270 list_del_init(&waiter->list);
86271 waiter->task = NULL;
86272diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
86273index 0799fd3..d06ae3b 100644
86274--- a/kernel/locking/mutex-debug.h
86275+++ b/kernel/locking/mutex-debug.h
86276@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
86277 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
86278 extern void debug_mutex_add_waiter(struct mutex *lock,
86279 struct mutex_waiter *waiter,
86280- struct thread_info *ti);
86281+ struct task_struct *task);
86282 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
86283- struct thread_info *ti);
86284+ struct task_struct *task);
86285 extern void debug_mutex_unlock(struct mutex *lock);
86286 extern void debug_mutex_init(struct mutex *lock, const char *name,
86287 struct lock_class_key *key);
86288diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
86289index 4dd6e4c..df52693 100644
86290--- a/kernel/locking/mutex.c
86291+++ b/kernel/locking/mutex.c
86292@@ -135,7 +135,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
86293 node->locked = 1;
86294 return;
86295 }
86296- ACCESS_ONCE(prev->next) = node;
86297+ ACCESS_ONCE_RW(prev->next) = node;
86298 smp_wmb();
86299 /* Wait until the lock holder passes the lock down */
86300 while (!ACCESS_ONCE(node->locked))
86301@@ -156,7 +156,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
86302 while (!(next = ACCESS_ONCE(node->next)))
86303 arch_mutex_cpu_relax();
86304 }
86305- ACCESS_ONCE(next->locked) = 1;
86306+ ACCESS_ONCE_RW(next->locked) = 1;
86307 smp_wmb();
86308 }
86309
86310@@ -520,7 +520,7 @@ slowpath:
86311 goto skip_wait;
86312
86313 debug_mutex_lock_common(lock, &waiter);
86314- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
86315+ debug_mutex_add_waiter(lock, &waiter, task);
86316
86317 /* add waiting tasks to the end of the waitqueue (FIFO): */
86318 list_add_tail(&waiter.list, &lock->wait_list);
86319@@ -564,7 +564,7 @@ slowpath:
86320 schedule_preempt_disabled();
86321 spin_lock_mutex(&lock->wait_lock, flags);
86322 }
86323- mutex_remove_waiter(lock, &waiter, current_thread_info());
86324+ mutex_remove_waiter(lock, &waiter, task);
86325 /* set it to 0 if there are no waiters left: */
86326 if (likely(list_empty(&lock->wait_list)))
86327 atomic_set(&lock->count, 0);
86328@@ -601,7 +601,7 @@ skip_wait:
86329 return 0;
86330
86331 err:
86332- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
86333+ mutex_remove_waiter(lock, &waiter, task);
86334 spin_unlock_mutex(&lock->wait_lock, flags);
86335 debug_mutex_free_waiter(&waiter);
86336 mutex_release(&lock->dep_map, 1, ip);
86337diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
86338index 1d96dd0..994ff19 100644
86339--- a/kernel/locking/rtmutex-tester.c
86340+++ b/kernel/locking/rtmutex-tester.c
86341@@ -22,7 +22,7 @@
86342 #define MAX_RT_TEST_MUTEXES 8
86343
86344 static spinlock_t rttest_lock;
86345-static atomic_t rttest_event;
86346+static atomic_unchecked_t rttest_event;
86347
86348 struct test_thread_data {
86349 int opcode;
86350@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86351
86352 case RTTEST_LOCKCONT:
86353 td->mutexes[td->opdata] = 1;
86354- td->event = atomic_add_return(1, &rttest_event);
86355+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86356 return 0;
86357
86358 case RTTEST_RESET:
86359@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86360 return 0;
86361
86362 case RTTEST_RESETEVENT:
86363- atomic_set(&rttest_event, 0);
86364+ atomic_set_unchecked(&rttest_event, 0);
86365 return 0;
86366
86367 default:
86368@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86369 return ret;
86370
86371 td->mutexes[id] = 1;
86372- td->event = atomic_add_return(1, &rttest_event);
86373+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86374 rt_mutex_lock(&mutexes[id]);
86375- td->event = atomic_add_return(1, &rttest_event);
86376+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86377 td->mutexes[id] = 4;
86378 return 0;
86379
86380@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86381 return ret;
86382
86383 td->mutexes[id] = 1;
86384- td->event = atomic_add_return(1, &rttest_event);
86385+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86386 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
86387- td->event = atomic_add_return(1, &rttest_event);
86388+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86389 td->mutexes[id] = ret ? 0 : 4;
86390 return ret ? -EINTR : 0;
86391
86392@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
86393 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
86394 return ret;
86395
86396- td->event = atomic_add_return(1, &rttest_event);
86397+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86398 rt_mutex_unlock(&mutexes[id]);
86399- td->event = atomic_add_return(1, &rttest_event);
86400+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86401 td->mutexes[id] = 0;
86402 return 0;
86403
86404@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
86405 break;
86406
86407 td->mutexes[dat] = 2;
86408- td->event = atomic_add_return(1, &rttest_event);
86409+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86410 break;
86411
86412 default:
86413@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
86414 return;
86415
86416 td->mutexes[dat] = 3;
86417- td->event = atomic_add_return(1, &rttest_event);
86418+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86419 break;
86420
86421 case RTTEST_LOCKNOWAIT:
86422@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
86423 return;
86424
86425 td->mutexes[dat] = 1;
86426- td->event = atomic_add_return(1, &rttest_event);
86427+ td->event = atomic_add_return_unchecked(1, &rttest_event);
86428 return;
86429
86430 default:
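
[Editor's sketch, not part of the patch] rttest_event is a pure event counter that is allowed to wrap, so under PaX's REFCOUNT hardening it moves to atomic_unchecked_t while plain atomic_t gains overflow detection. A userspace sketch of the checked/unchecked split (GCC-style builtins, invented names):

/* Illustrative only: a checked add that traps on overflow versus an
 * unchecked one that is free to wrap, the distinction REFCOUNT draws. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_add(int *v, int inc)
{
	int res;

	if (__builtin_add_overflow(*v, inc, &res)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* REFCOUNT-style hard stop */
	}
	return *v = res;
}

static int unchecked_add(int *v, int inc)
{
	return *v += inc;	/* like atomic_add_return_unchecked() */
}

int main(void)
{
	int counter = INT_MAX - 1;

	printf("%d\n", unchecked_add(&counter, 1));	/* INT_MAX */
	checked_add(&counter, 1);			/* aborts here */
	return 0;
}
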
86431diff --git a/kernel/module.c b/kernel/module.c
86432index f5a3b1e..97ebb15 100644
86433--- a/kernel/module.c
86434+++ b/kernel/module.c
86435@@ -61,6 +61,7 @@
86436 #include <linux/pfn.h>
86437 #include <linux/bsearch.h>
86438 #include <linux/fips.h>
86439+#include <linux/grsecurity.h>
86440 #include <uapi/linux/module.h>
86441 #include "module-internal.h"
86442
86443@@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
86444
86445 /* Bounds of module allocation, for speeding __module_address.
86446 * Protected by module_mutex. */
86447-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
86448+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
86449+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
86450
86451 int register_module_notifier(struct notifier_block * nb)
86452 {
86453@@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
86454 return true;
86455
86456 list_for_each_entry_rcu(mod, &modules, list) {
86457- struct symsearch arr[] = {
86458+ struct symsearch modarr[] = {
86459 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
86460 NOT_GPL_ONLY, false },
86461 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
86462@@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
86463 if (mod->state == MODULE_STATE_UNFORMED)
86464 continue;
86465
86466- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
86467+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
86468 return true;
86469 }
86470 return false;
86471@@ -489,7 +491,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
86472 if (!pcpusec->sh_size)
86473 return 0;
86474
86475- if (align > PAGE_SIZE) {
86476+ if (align-1 >= PAGE_SIZE) {
86477 pr_warn("%s: per-cpu alignment %li > %li\n",
86478 mod->name, align, PAGE_SIZE);
86479 align = PAGE_SIZE;
86480@@ -1064,7 +1066,7 @@ struct module_attribute module_uevent =
86481 static ssize_t show_coresize(struct module_attribute *mattr,
86482 struct module_kobject *mk, char *buffer)
86483 {
86484- return sprintf(buffer, "%u\n", mk->mod->core_size);
86485+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
86486 }
86487
86488 static struct module_attribute modinfo_coresize =
86489@@ -1073,7 +1075,7 @@ static struct module_attribute modinfo_coresize =
86490 static ssize_t show_initsize(struct module_attribute *mattr,
86491 struct module_kobject *mk, char *buffer)
86492 {
86493- return sprintf(buffer, "%u\n", mk->mod->init_size);
86494+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
86495 }
86496
86497 static struct module_attribute modinfo_initsize =
86498@@ -1165,12 +1167,29 @@ static int check_version(Elf_Shdr *sechdrs,
86499 goto bad_version;
86500 }
86501
86502+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
86503+ /*
86504+	 * avoid potentially printing gibberish on attempted load
86505+ * of a module randomized with a different seed
86506+ */
86507+ pr_warn("no symbol version for %s\n", symname);
86508+#else
86509 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
86510+#endif
86511 return 0;
86512
86513 bad_version:
86514+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
86515+ /*
86516+	 * avoid potentially printing gibberish on attempted load
86517+ * of a module randomized with a different seed
86518+ */
86519+ printk("attempted module disagrees about version of symbol %s\n",
86520+ symname);
86521+#else
86522 printk("%s: disagrees about version of symbol %s\n",
86523 mod->name, symname);
86524+#endif
86525 return 0;
86526 }
86527
86528@@ -1286,7 +1305,7 @@ resolve_symbol_wait(struct module *mod,
86529 */
86530 #ifdef CONFIG_SYSFS
86531
86532-#ifdef CONFIG_KALLSYMS
86533+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
86534 static inline bool sect_empty(const Elf_Shdr *sect)
86535 {
86536 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
86537@@ -1426,7 +1445,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
86538 {
86539 unsigned int notes, loaded, i;
86540 struct module_notes_attrs *notes_attrs;
86541- struct bin_attribute *nattr;
86542+ bin_attribute_no_const *nattr;
86543
86544 /* failed to create section attributes, so can't create notes */
86545 if (!mod->sect_attrs)
86546@@ -1538,7 +1557,7 @@ static void del_usage_links(struct module *mod)
86547 static int module_add_modinfo_attrs(struct module *mod)
86548 {
86549 struct module_attribute *attr;
86550- struct module_attribute *temp_attr;
86551+ module_attribute_no_const *temp_attr;
86552 int error = 0;
86553 int i;
86554
86555@@ -1759,21 +1778,21 @@ static void set_section_ro_nx(void *base,
86556
86557 static void unset_module_core_ro_nx(struct module *mod)
86558 {
86559- set_page_attributes(mod->module_core + mod->core_text_size,
86560- mod->module_core + mod->core_size,
86561+ set_page_attributes(mod->module_core_rw,
86562+ mod->module_core_rw + mod->core_size_rw,
86563 set_memory_x);
86564- set_page_attributes(mod->module_core,
86565- mod->module_core + mod->core_ro_size,
86566+ set_page_attributes(mod->module_core_rx,
86567+ mod->module_core_rx + mod->core_size_rx,
86568 set_memory_rw);
86569 }
86570
86571 static void unset_module_init_ro_nx(struct module *mod)
86572 {
86573- set_page_attributes(mod->module_init + mod->init_text_size,
86574- mod->module_init + mod->init_size,
86575+ set_page_attributes(mod->module_init_rw,
86576+ mod->module_init_rw + mod->init_size_rw,
86577 set_memory_x);
86578- set_page_attributes(mod->module_init,
86579- mod->module_init + mod->init_ro_size,
86580+ set_page_attributes(mod->module_init_rx,
86581+ mod->module_init_rx + mod->init_size_rx,
86582 set_memory_rw);
86583 }
86584
86585@@ -1786,14 +1805,14 @@ void set_all_modules_text_rw(void)
86586 list_for_each_entry_rcu(mod, &modules, list) {
86587 if (mod->state == MODULE_STATE_UNFORMED)
86588 continue;
86589- if ((mod->module_core) && (mod->core_text_size)) {
86590- set_page_attributes(mod->module_core,
86591- mod->module_core + mod->core_text_size,
86592+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
86593+ set_page_attributes(mod->module_core_rx,
86594+ mod->module_core_rx + mod->core_size_rx,
86595 set_memory_rw);
86596 }
86597- if ((mod->module_init) && (mod->init_text_size)) {
86598- set_page_attributes(mod->module_init,
86599- mod->module_init + mod->init_text_size,
86600+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
86601+ set_page_attributes(mod->module_init_rx,
86602+ mod->module_init_rx + mod->init_size_rx,
86603 set_memory_rw);
86604 }
86605 }
86606@@ -1809,14 +1828,14 @@ void set_all_modules_text_ro(void)
86607 list_for_each_entry_rcu(mod, &modules, list) {
86608 if (mod->state == MODULE_STATE_UNFORMED)
86609 continue;
86610- if ((mod->module_core) && (mod->core_text_size)) {
86611- set_page_attributes(mod->module_core,
86612- mod->module_core + mod->core_text_size,
86613+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
86614+ set_page_attributes(mod->module_core_rx,
86615+ mod->module_core_rx + mod->core_size_rx,
86616 set_memory_ro);
86617 }
86618- if ((mod->module_init) && (mod->init_text_size)) {
86619- set_page_attributes(mod->module_init,
86620- mod->module_init + mod->init_text_size,
86621+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
86622+ set_page_attributes(mod->module_init_rx,
86623+ mod->module_init_rx + mod->init_size_rx,
86624 set_memory_ro);
86625 }
86626 }
86627@@ -1867,16 +1886,19 @@ static void free_module(struct module *mod)
86628
86629 /* This may be NULL, but that's OK */
86630 unset_module_init_ro_nx(mod);
86631- module_free(mod, mod->module_init);
86632+ module_free(mod, mod->module_init_rw);
86633+ module_free_exec(mod, mod->module_init_rx);
86634 kfree(mod->args);
86635 percpu_modfree(mod);
86636
86637 /* Free lock-classes: */
86638- lockdep_free_key_range(mod->module_core, mod->core_size);
86639+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
86640+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
86641
86642 /* Finally, free the core (containing the module structure) */
86643 unset_module_core_ro_nx(mod);
86644- module_free(mod, mod->module_core);
86645+ module_free_exec(mod, mod->module_core_rx);
86646+ module_free(mod, mod->module_core_rw);
86647
86648 #ifdef CONFIG_MPU
86649 update_protections(current->mm);
86650@@ -1945,9 +1967,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
86651 int ret = 0;
86652 const struct kernel_symbol *ksym;
86653
86654+#ifdef CONFIG_GRKERNSEC_MODHARDEN
86655+ int is_fs_load = 0;
86656+ int register_filesystem_found = 0;
86657+ char *p;
86658+
86659+ p = strstr(mod->args, "grsec_modharden_fs");
86660+ if (p) {
86661+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
86662+ /* copy \0 as well */
86663+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
86664+ is_fs_load = 1;
86665+ }
86666+#endif
86667+
86668 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
86669 const char *name = info->strtab + sym[i].st_name;
86670
86671+#ifdef CONFIG_GRKERNSEC_MODHARDEN
86672+ /* it's a real shame this will never get ripped and copied
86673+ upstream! ;(
86674+ */
86675+ if (is_fs_load && !strcmp(name, "register_filesystem"))
86676+ register_filesystem_found = 1;
86677+#endif
86678+
86679 switch (sym[i].st_shndx) {
86680 case SHN_COMMON:
86681 /* We compiled with -fno-common. These are not
86682@@ -1968,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
86683 ksym = resolve_symbol_wait(mod, info, name);
86684 /* Ok if resolved. */
86685 if (ksym && !IS_ERR(ksym)) {
86686+ pax_open_kernel();
86687 sym[i].st_value = ksym->value;
86688+ pax_close_kernel();
86689 break;
86690 }
86691
86692@@ -1987,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
86693 secbase = (unsigned long)mod_percpu(mod);
86694 else
86695 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
86696+ pax_open_kernel();
86697 sym[i].st_value += secbase;
86698+ pax_close_kernel();
86699 break;
86700 }
86701 }
86702
86703+#ifdef CONFIG_GRKERNSEC_MODHARDEN
86704+ if (is_fs_load && !register_filesystem_found) {
86705+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
86706+ ret = -EPERM;
86707+ }
86708+#endif
86709+
86710 return ret;
86711 }
86712
86713@@ -2075,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
86714 || s->sh_entsize != ~0UL
86715 || strstarts(sname, ".init"))
86716 continue;
86717- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
86718+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
86719+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
86720+ else
86721+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
86722 pr_debug("\t%s\n", sname);
86723 }
86724- switch (m) {
86725- case 0: /* executable */
86726- mod->core_size = debug_align(mod->core_size);
86727- mod->core_text_size = mod->core_size;
86728- break;
86729- case 1: /* RO: text and ro-data */
86730- mod->core_size = debug_align(mod->core_size);
86731- mod->core_ro_size = mod->core_size;
86732- break;
86733- case 3: /* whole core */
86734- mod->core_size = debug_align(mod->core_size);
86735- break;
86736- }
86737 }
86738
86739 pr_debug("Init section allocation order:\n");
86740@@ -2104,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
86741 || s->sh_entsize != ~0UL
86742 || !strstarts(sname, ".init"))
86743 continue;
86744- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
86745- | INIT_OFFSET_MASK);
86746+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
86747+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
86748+ else
86749+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
86750+ s->sh_entsize |= INIT_OFFSET_MASK;
86751 pr_debug("\t%s\n", sname);
86752 }
86753- switch (m) {
86754- case 0: /* executable */
86755- mod->init_size = debug_align(mod->init_size);
86756- mod->init_text_size = mod->init_size;
86757- break;
86758- case 1: /* RO: text and ro-data */
86759- mod->init_size = debug_align(mod->init_size);
86760- mod->init_ro_size = mod->init_size;
86761- break;
86762- case 3: /* whole init */
86763- mod->init_size = debug_align(mod->init_size);
86764- break;
86765- }
86766 }
86767 }
86768
86769@@ -2293,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
86770
86771 /* Put symbol section at end of init part of module. */
86772 symsect->sh_flags |= SHF_ALLOC;
86773- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
86774+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
86775 info->index.sym) | INIT_OFFSET_MASK;
86776 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
86777
86778@@ -2310,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
86779 }
86780
86781 /* Append room for core symbols at end of core part. */
86782- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
86783- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
86784- mod->core_size += strtab_size;
86785+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
86786+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
86787+ mod->core_size_rx += strtab_size;
86788
86789 /* Put string table section at end of init part of module. */
86790 strsect->sh_flags |= SHF_ALLOC;
86791- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
86792+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
86793 info->index.str) | INIT_OFFSET_MASK;
86794 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
86795 }
86796@@ -2334,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
86797 /* Make sure we get permanent strtab: don't use info->strtab. */
86798 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
86799
86800+ pax_open_kernel();
86801+
86802 /* Set types up while we still have access to sections. */
86803 for (i = 0; i < mod->num_symtab; i++)
86804 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
86805
86806- mod->core_symtab = dst = mod->module_core + info->symoffs;
86807- mod->core_strtab = s = mod->module_core + info->stroffs;
86808+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
86809+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
86810 src = mod->symtab;
86811 for (ndst = i = 0; i < mod->num_symtab; i++) {
86812 if (i == 0 ||
86813@@ -2351,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
86814 }
86815 }
86816 mod->core_num_syms = ndst;
86817+
86818+ pax_close_kernel();
86819 }
86820 #else
86821 static inline void layout_symtab(struct module *mod, struct load_info *info)
86822@@ -2384,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
86823 return vmalloc_exec(size);
86824 }
86825
86826-static void *module_alloc_update_bounds(unsigned long size)
86827+static void *module_alloc_update_bounds_rw(unsigned long size)
86828 {
86829 void *ret = module_alloc(size);
86830
86831 if (ret) {
86832 mutex_lock(&module_mutex);
86833 /* Update module bounds. */
86834- if ((unsigned long)ret < module_addr_min)
86835- module_addr_min = (unsigned long)ret;
86836- if ((unsigned long)ret + size > module_addr_max)
86837- module_addr_max = (unsigned long)ret + size;
86838+ if ((unsigned long)ret < module_addr_min_rw)
86839+ module_addr_min_rw = (unsigned long)ret;
86840+ if ((unsigned long)ret + size > module_addr_max_rw)
86841+ module_addr_max_rw = (unsigned long)ret + size;
86842+ mutex_unlock(&module_mutex);
86843+ }
86844+ return ret;
86845+}
86846+
86847+static void *module_alloc_update_bounds_rx(unsigned long size)
86848+{
86849+ void *ret = module_alloc_exec(size);
86850+
86851+ if (ret) {
86852+ mutex_lock(&module_mutex);
86853+ /* Update module bounds. */
86854+ if ((unsigned long)ret < module_addr_min_rx)
86855+ module_addr_min_rx = (unsigned long)ret;
86856+ if ((unsigned long)ret + size > module_addr_max_rx)
86857+ module_addr_max_rx = (unsigned long)ret + size;
86858 mutex_unlock(&module_mutex);
86859 }
86860 return ret;
86861@@ -2651,7 +2706,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
86862 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
86863
86864 if (info->index.sym == 0) {
86865+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
86866+ /*
86867+	 * avoid potentially printing gibberish on attempted load
86868+ * of a module randomized with a different seed
86869+ */
86870+ pr_warn("module has no symbols (stripped?)\n");
86871+#else
86872 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
86873+#endif
86874 return ERR_PTR(-ENOEXEC);
86875 }
86876
86877@@ -2667,8 +2730,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
86878 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
86879 {
86880 const char *modmagic = get_modinfo(info, "vermagic");
86881+ const char *license = get_modinfo(info, "license");
86882 int err;
86883
86884+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
86885+ if (!license || !license_is_gpl_compatible(license))
86886+ return -ENOEXEC;
86887+#endif
86888+
86889 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
86890 modmagic = NULL;
86891
86892@@ -2693,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
86893 }
86894
86895 /* Set up license info based on the info section */
86896- set_license(mod, get_modinfo(info, "license"));
86897+ set_license(mod, license);
86898
86899 return 0;
86900 }
86901@@ -2787,7 +2856,7 @@ static int move_module(struct module *mod, struct load_info *info)
86902 void *ptr;
86903
86904 /* Do the allocs. */
86905- ptr = module_alloc_update_bounds(mod->core_size);
86906+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
86907 /*
86908 * The pointer to this block is stored in the module structure
86909 * which is inside the block. Just mark it as not being a
86910@@ -2797,11 +2866,11 @@ static int move_module(struct module *mod, struct load_info *info)
86911 if (!ptr)
86912 return -ENOMEM;
86913
86914- memset(ptr, 0, mod->core_size);
86915- mod->module_core = ptr;
86916+ memset(ptr, 0, mod->core_size_rw);
86917+ mod->module_core_rw = ptr;
86918
86919- if (mod->init_size) {
86920- ptr = module_alloc_update_bounds(mod->init_size);
86921+ if (mod->init_size_rw) {
86922+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
86923 /*
86924 * The pointer to this block is stored in the module structure
86925 * which is inside the block. This block doesn't need to be
86926@@ -2810,13 +2879,45 @@ static int move_module(struct module *mod, struct load_info *info)
86927 */
86928 kmemleak_ignore(ptr);
86929 if (!ptr) {
86930- module_free(mod, mod->module_core);
86931+ module_free(mod, mod->module_core_rw);
86932 return -ENOMEM;
86933 }
86934- memset(ptr, 0, mod->init_size);
86935- mod->module_init = ptr;
86936+ memset(ptr, 0, mod->init_size_rw);
86937+ mod->module_init_rw = ptr;
86938 } else
86939- mod->module_init = NULL;
86940+ mod->module_init_rw = NULL;
86941+
86942+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
86943+ kmemleak_not_leak(ptr);
86944+ if (!ptr) {
86945+ if (mod->module_init_rw)
86946+ module_free(mod, mod->module_init_rw);
86947+ module_free(mod, mod->module_core_rw);
86948+ return -ENOMEM;
86949+ }
86950+
86951+ pax_open_kernel();
86952+ memset(ptr, 0, mod->core_size_rx);
86953+ pax_close_kernel();
86954+ mod->module_core_rx = ptr;
86955+
86956+ if (mod->init_size_rx) {
86957+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
86958+ kmemleak_ignore(ptr);
86959+ if (!ptr && mod->init_size_rx) {
86960+ module_free_exec(mod, mod->module_core_rx);
86961+ if (mod->module_init_rw)
86962+ module_free(mod, mod->module_init_rw);
86963+ module_free(mod, mod->module_core_rw);
86964+ return -ENOMEM;
86965+ }
86966+
86967+ pax_open_kernel();
86968+ memset(ptr, 0, mod->init_size_rx);
86969+ pax_close_kernel();
86970+ mod->module_init_rx = ptr;
86971+ } else
86972+ mod->module_init_rx = NULL;
86973
86974 /* Transfer each section which specifies SHF_ALLOC */
86975 pr_debug("final section addresses:\n");
86976@@ -2827,16 +2928,45 @@ static int move_module(struct module *mod, struct load_info *info)
86977 if (!(shdr->sh_flags & SHF_ALLOC))
86978 continue;
86979
86980- if (shdr->sh_entsize & INIT_OFFSET_MASK)
86981- dest = mod->module_init
86982- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
86983- else
86984- dest = mod->module_core + shdr->sh_entsize;
86985+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
86986+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
86987+ dest = mod->module_init_rw
86988+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
86989+ else
86990+ dest = mod->module_init_rx
86991+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
86992+ } else {
86993+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
86994+ dest = mod->module_core_rw + shdr->sh_entsize;
86995+ else
86996+ dest = mod->module_core_rx + shdr->sh_entsize;
86997+ }
86998+
86999+ if (shdr->sh_type != SHT_NOBITS) {
87000+
87001+#ifdef CONFIG_PAX_KERNEXEC
87002+#ifdef CONFIG_X86_64
87003+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
87004+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
87005+#endif
87006+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
87007+ pax_open_kernel();
87008+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
87009+ pax_close_kernel();
87010+ } else
87011+#endif
87012
87013- if (shdr->sh_type != SHT_NOBITS)
87014 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
87015+ }
87016 /* Update sh_addr to point to copy in image. */
87017- shdr->sh_addr = (unsigned long)dest;
87018+
87019+#ifdef CONFIG_PAX_KERNEXEC
87020+ if (shdr->sh_flags & SHF_EXECINSTR)
87021+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
87022+ else
87023+#endif
87024+
87025+ shdr->sh_addr = (unsigned long)dest;
87026 pr_debug("\t0x%lx %s\n",
87027 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
87028 }
87029@@ -2893,12 +3023,12 @@ static void flush_module_icache(const struct module *mod)
87030 * Do it before processing of module parameters, so the module
87031 * can provide parameter accessor functions of its own.
87032 */
87033- if (mod->module_init)
87034- flush_icache_range((unsigned long)mod->module_init,
87035- (unsigned long)mod->module_init
87036- + mod->init_size);
87037- flush_icache_range((unsigned long)mod->module_core,
87038- (unsigned long)mod->module_core + mod->core_size);
87039+ if (mod->module_init_rx)
87040+ flush_icache_range((unsigned long)mod->module_init_rx,
87041+ (unsigned long)mod->module_init_rx
87042+ + mod->init_size_rx);
87043+ flush_icache_range((unsigned long)mod->module_core_rx,
87044+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
87045
87046 set_fs(old_fs);
87047 }
87048@@ -2955,8 +3085,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
87049 static void module_deallocate(struct module *mod, struct load_info *info)
87050 {
87051 percpu_modfree(mod);
87052- module_free(mod, mod->module_init);
87053- module_free(mod, mod->module_core);
87054+ module_free_exec(mod, mod->module_init_rx);
87055+ module_free_exec(mod, mod->module_core_rx);
87056+ module_free(mod, mod->module_init_rw);
87057+ module_free(mod, mod->module_core_rw);
87058 }
87059
87060 int __weak module_finalize(const Elf_Ehdr *hdr,
87061@@ -2969,7 +3101,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
87062 static int post_relocation(struct module *mod, const struct load_info *info)
87063 {
87064 /* Sort exception table now relocations are done. */
87065+ pax_open_kernel();
87066 sort_extable(mod->extable, mod->extable + mod->num_exentries);
87067+ pax_close_kernel();
87068
87069 /* Copy relocated percpu area over. */
87070 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
87071@@ -3023,16 +3157,16 @@ static int do_init_module(struct module *mod)
87072 MODULE_STATE_COMING, mod);
87073
87074 /* Set RO and NX regions for core */
87075- set_section_ro_nx(mod->module_core,
87076- mod->core_text_size,
87077- mod->core_ro_size,
87078- mod->core_size);
87079+ set_section_ro_nx(mod->module_core_rx,
87080+ mod->core_size_rx,
87081+ mod->core_size_rx,
87082+ mod->core_size_rx);
87083
87084 /* Set RO and NX regions for init */
87085- set_section_ro_nx(mod->module_init,
87086- mod->init_text_size,
87087- mod->init_ro_size,
87088- mod->init_size);
87089+ set_section_ro_nx(mod->module_init_rx,
87090+ mod->init_size_rx,
87091+ mod->init_size_rx,
87092+ mod->init_size_rx);
87093
87094 do_mod_ctors(mod);
87095 /* Start the module */
87096@@ -3093,11 +3227,12 @@ static int do_init_module(struct module *mod)
87097 mod->strtab = mod->core_strtab;
87098 #endif
87099 unset_module_init_ro_nx(mod);
87100- module_free(mod, mod->module_init);
87101- mod->module_init = NULL;
87102- mod->init_size = 0;
87103- mod->init_ro_size = 0;
87104- mod->init_text_size = 0;
87105+ module_free(mod, mod->module_init_rw);
87106+ module_free_exec(mod, mod->module_init_rx);
87107+ mod->module_init_rw = NULL;
87108+ mod->module_init_rx = NULL;
87109+ mod->init_size_rw = 0;
87110+ mod->init_size_rx = 0;
87111 mutex_unlock(&module_mutex);
87112 wake_up_all(&module_wq);
87113
87114@@ -3240,9 +3375,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
87115 if (err)
87116 goto free_unload;
87117
87118+ /* Now copy in args */
87119+ mod->args = strndup_user(uargs, ~0UL >> 1);
87120+ if (IS_ERR(mod->args)) {
87121+ err = PTR_ERR(mod->args);
87122+ goto free_unload;
87123+ }
87124+
87125 /* Set up MODINFO_ATTR fields */
87126 setup_modinfo(mod, info);
87127
87128+#ifdef CONFIG_GRKERNSEC_MODHARDEN
87129+ {
87130+ char *p, *p2;
87131+
87132+ if (strstr(mod->args, "grsec_modharden_netdev")) {
87133+			printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
87134+ err = -EPERM;
87135+ goto free_modinfo;
87136+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
87137+ p += sizeof("grsec_modharden_normal") - 1;
87138+ p2 = strstr(p, "_");
87139+ if (p2) {
87140+ *p2 = '\0';
87141+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
87142+ *p2 = '_';
87143+ }
87144+ err = -EPERM;
87145+ goto free_modinfo;
87146+ }
87147+ }
87148+#endif
87149+
87150 /* Fix up syms, so that st_value is a pointer to location. */
87151 err = simplify_symbols(mod, info);
87152 if (err < 0)
87153@@ -3258,13 +3422,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
87154
87155 flush_module_icache(mod);
87156
87157- /* Now copy in args */
87158- mod->args = strndup_user(uargs, ~0UL >> 1);
87159- if (IS_ERR(mod->args)) {
87160- err = PTR_ERR(mod->args);
87161- goto free_arch_cleanup;
87162- }
87163-
87164 dynamic_debug_setup(info->debug, info->num_debug);
87165
87166 /* Finally it's fully formed, ready to start executing. */
87167@@ -3299,11 +3456,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
87168 ddebug_cleanup:
87169 dynamic_debug_remove(info->debug);
87170 synchronize_sched();
87171- kfree(mod->args);
87172- free_arch_cleanup:
87173 module_arch_cleanup(mod);
87174 free_modinfo:
87175 free_modinfo(mod);
87176+ kfree(mod->args);
87177 free_unload:
87178 module_unload_free(mod);
87179 unlink_mod:
87180@@ -3386,10 +3542,16 @@ static const char *get_ksymbol(struct module *mod,
87181 unsigned long nextval;
87182
87183 /* At worse, next value is at end of module */
87184- if (within_module_init(addr, mod))
87185- nextval = (unsigned long)mod->module_init+mod->init_text_size;
87186+ if (within_module_init_rx(addr, mod))
87187+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
87188+ else if (within_module_init_rw(addr, mod))
87189+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
87190+ else if (within_module_core_rx(addr, mod))
87191+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
87192+ else if (within_module_core_rw(addr, mod))
87193+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
87194 else
87195- nextval = (unsigned long)mod->module_core+mod->core_text_size;
87196+ return NULL;
87197
87198 /* Scan for closest preceding symbol, and next symbol. (ELF
87199 starts real symbols at 1). */
87200@@ -3640,7 +3802,7 @@ static int m_show(struct seq_file *m, void *p)
87201 return 0;
87202
87203 seq_printf(m, "%s %u",
87204- mod->name, mod->init_size + mod->core_size);
87205+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
87206 print_unload_info(m, mod);
87207
87208 /* Informative for users. */
87209@@ -3649,7 +3811,7 @@ static int m_show(struct seq_file *m, void *p)
87210 mod->state == MODULE_STATE_COMING ? "Loading":
87211 "Live");
87212 /* Used by oprofile and other similar tools. */
87213- seq_printf(m, " 0x%pK", mod->module_core);
87214+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
87215
87216 /* Taints info */
87217 if (mod->taints)
87218@@ -3685,7 +3847,17 @@ static const struct file_operations proc_modules_operations = {
87219
87220 static int __init proc_modules_init(void)
87221 {
87222+#ifndef CONFIG_GRKERNSEC_HIDESYM
87223+#ifdef CONFIG_GRKERNSEC_PROC_USER
87224+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
87225+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
87226+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
87227+#else
87228 proc_create("modules", 0, NULL, &proc_modules_operations);
87229+#endif
87230+#else
87231+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
87232+#endif
87233 return 0;
87234 }
87235 module_init(proc_modules_init);
87236@@ -3746,14 +3918,14 @@ struct module *__module_address(unsigned long addr)
87237 {
87238 struct module *mod;
87239
87240- if (addr < module_addr_min || addr > module_addr_max)
87241+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
87242+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
87243 return NULL;
87244
87245 list_for_each_entry_rcu(mod, &modules, list) {
87246 if (mod->state == MODULE_STATE_UNFORMED)
87247 continue;
87248- if (within_module_core(addr, mod)
87249- || within_module_init(addr, mod))
87250+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
87251 return mod;
87252 }
87253 return NULL;
87254@@ -3788,11 +3960,20 @@ bool is_module_text_address(unsigned long addr)
87255 */
87256 struct module *__module_text_address(unsigned long addr)
87257 {
87258- struct module *mod = __module_address(addr);
87259+ struct module *mod;
87260+
87261+#ifdef CONFIG_X86_32
87262+ addr = ktla_ktva(addr);
87263+#endif
87264+
87265+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
87266+ return NULL;
87267+
87268+ mod = __module_address(addr);
87269+
87270 if (mod) {
87271 /* Make sure it's within the text section. */
87272- if (!within(addr, mod->module_init, mod->init_text_size)
87273- && !within(addr, mod->module_core, mod->core_text_size))
87274+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
87275 mod = NULL;
87276 }
87277 return mod;
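The MODHARDEN hunks above rely on the userland side of grsecurity appending a sentinel such as "grsec_modharden_normal<uid>_" to the module argument string, which load_module() then detects and rejects; that is also why the strndup_user() of the args is moved earlier in the function. A minimal userspace sketch of the same extraction logic (the sentinel names come from the patch; the surrounding program is illustrative only):

#include <stdio.h>
#include <string.h>

/* Sketch of the sentinel scan performed in load_module() above. */
static int check_modharden(const char *args, const char *modname)
{
    char buf[256];
    char *p, *p2;

    /* Work on a private copy; the kernel code patches mod->args in place. */
    snprintf(buf, sizeof(buf), "%s", args);

    if (strstr(buf, "grsec_modharden_netdev")) {
        fprintf(stderr, "denied netdev auto-load of %.64s\n", modname);
        return -1;
    }
    p = strstr(buf, "grsec_modharden_normal");
    if (p) {
        p += sizeof("grsec_modharden_normal") - 1; /* skip past the tag */
        p2 = strchr(p, '_');                       /* uid is '_'-delimited */
        if (p2) {
            *p2 = '\0';
            fprintf(stderr, "denied auto-load of %.64s by uid %.9s\n",
                    modname, p);
        }
        return -1;
    }
    return 0;
}

int main(void)
{
    return check_modharden("grsec_modharden_normal1000_ foo=1", "dummy") ? 0 : 1;
}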
87278diff --git a/kernel/notifier.c b/kernel/notifier.c
87279index 2d5cc4c..d9ea600 100644
87280--- a/kernel/notifier.c
87281+++ b/kernel/notifier.c
87282@@ -5,6 +5,7 @@
87283 #include <linux/rcupdate.h>
87284 #include <linux/vmalloc.h>
87285 #include <linux/reboot.h>
87286+#include <linux/mm.h>
87287
87288 /*
87289 * Notifier list for kernel code which wants to be called
87290@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
87291 while ((*nl) != NULL) {
87292 if (n->priority > (*nl)->priority)
87293 break;
87294- nl = &((*nl)->next);
87295+ nl = (struct notifier_block **)&((*nl)->next);
87296 }
87297- n->next = *nl;
87298+ pax_open_kernel();
87299+ *(const void **)&n->next = *nl;
87300 rcu_assign_pointer(*nl, n);
87301+ pax_close_kernel();
87302 return 0;
87303 }
87304
87305@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
87306 return 0;
87307 if (n->priority > (*nl)->priority)
87308 break;
87309- nl = &((*nl)->next);
87310+ nl = (struct notifier_block **)&((*nl)->next);
87311 }
87312- n->next = *nl;
87313+ pax_open_kernel();
87314+ *(const void **)&n->next = *nl;
87315 rcu_assign_pointer(*nl, n);
87316+ pax_close_kernel();
87317 return 0;
87318 }
87319
87320@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
87321 {
87322 while ((*nl) != NULL) {
87323 if ((*nl) == n) {
87324+ pax_open_kernel();
87325 rcu_assign_pointer(*nl, n->next);
87326+ pax_close_kernel();
87327 return 0;
87328 }
87329- nl = &((*nl)->next);
87330+ nl = (struct notifier_block **)&((*nl)->next);
87331 }
87332 return -ENOENT;
87333 }
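For reference, notifier_chain_register() keeps the chain sorted by descending priority; the pax_open_kernel()/pax_close_kernel() pair added above exists only because, with PaX constification, the chain heads and next pointers may sit in read-only memory and the store needs a temporary write window. A stripped-down userspace sketch of just the ordering logic (no RCU, no write protection):

#include <stdio.h>

struct notifier_block {
    int priority;
    struct notifier_block *next;
};

/* Insert n before the first node of lower priority, as the kernel does. */
static void chain_register(struct notifier_block **nl, struct notifier_block *n)
{
    while (*nl != NULL) {
        if (n->priority > (*nl)->priority)
            break;
        nl = &(*nl)->next;
    }
    n->next = *nl;
    *nl = n;    /* the kernel uses rcu_assign_pointer() here */
}

int main(void)
{
    struct notifier_block a = { 10, NULL }, b = { 30, NULL }, c = { 20, NULL };
    struct notifier_block *head = NULL, *p;

    chain_register(&head, &a);
    chain_register(&head, &b);
    chain_register(&head, &c);
    for (p = head; p; p = p->next)
        printf("%d\n", p->priority);    /* prints 30 20 10 */
    return 0;
}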
87334diff --git a/kernel/padata.c b/kernel/padata.c
87335index 2abd25d..02c4faa 100644
87336--- a/kernel/padata.c
87337+++ b/kernel/padata.c
87338@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
87339 * seq_nr mod. number of cpus in use.
87340 */
87341
87342- seq_nr = atomic_inc_return(&pd->seq_nr);
87343+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
87344 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
87345
87346 return padata_index_to_cpu(pd, cpu_index);
87347@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
87348 padata_init_pqueues(pd);
87349 padata_init_squeues(pd);
87350 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
87351- atomic_set(&pd->seq_nr, -1);
87352+ atomic_set_unchecked(&pd->seq_nr, -1);
87353 atomic_set(&pd->reorder_objects, 0);
87354 atomic_set(&pd->refcnt, 0);
87355 pd->pinst = pinst;
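atomic_inc_return_unchecked() and the other _unchecked variants, used here and throughout the rest of this patch, are the PaX REFCOUNT opt-out: plain wrapping atomics for values such as sequence numbers and statistics counters, where the hardened atomic_t would otherwise saturate or raise an overflow report on wrap. A rough single-threaded model of the distinction (the saturation behaviour is simplified; real REFCOUNT traps via an overflow handler):

#include <limits.h>
#include <stdio.h>

/* Hardened counter: refuses to wrap past INT_MAX (simplified model). */
static int checked_inc(int *v)
{
    if (*v == INT_MAX)
        return *v;          /* saturate instead of wrapping */
    return ++*v;
}

/* Unchecked counter: ordinary wrapping arithmetic, fine for seq numbers. */
static unsigned int unchecked_inc(unsigned int *v)
{
    return ++*v;            /* unsigned wrap-around is well defined */
}

int main(void)
{
    int refcnt = INT_MAX;
    unsigned int seq = UINT_MAX;

    printf("checked:   %d\n", checked_inc(&refcnt));    /* stays INT_MAX */
    printf("unchecked: %u\n", unchecked_inc(&seq));     /* wraps to 0 */
    return 0;
}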
87356diff --git a/kernel/panic.c b/kernel/panic.c
87357index c00b4ce..98c7d1a 100644
87358--- a/kernel/panic.c
87359+++ b/kernel/panic.c
87360@@ -52,7 +52,7 @@ EXPORT_SYMBOL(panic_blink);
87361 /*
87362 * Stop ourself in panic -- architecture code may override this
87363 */
87364-void __weak panic_smp_self_stop(void)
87365+void __weak __noreturn panic_smp_self_stop(void)
87366 {
87367 while (1)
87368 cpu_relax();
87369@@ -407,7 +407,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
87370 disable_trace_on_warning();
87371
87372 pr_warn("------------[ cut here ]------------\n");
87373- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
87374+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
87375 raw_smp_processor_id(), current->pid, file, line, caller);
87376
87377 if (args)
87378@@ -461,7 +461,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
87379 */
87380 void __stack_chk_fail(void)
87381 {
87382- panic("stack-protector: Kernel stack is corrupted in: %p\n",
87383+ dump_stack();
87384+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
87385 __builtin_return_address(0));
87386 }
87387 EXPORT_SYMBOL(__stack_chk_fail);
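The dump_stack() added to __stack_chk_fail() fires after a stack canary mismatch, just before the panic. A deliberately broken userspace program exercises the equivalent code path when built with -fstack-protector-all; glibc then aborts much as the kernel panics here (illustrative only, with an intentional bug):

/* Build with: gcc -fstack-protector-all -O0 overflow.c */
#include <string.h>

static void smash(const char *src)
{
    char buf[8];
    /* Overruns buf and clobbers the canary placed beyond it. */
    strcpy(buf, src);       /* intentional bug for demonstration */
}

int main(void)
{
    smash("this string is longer than eight bytes");
    return 0;               /* never reached: *** stack smashing detected *** */
}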
87388diff --git a/kernel/pid.c b/kernel/pid.c
87389index 9b9a266..c20ef80 100644
87390--- a/kernel/pid.c
87391+++ b/kernel/pid.c
87392@@ -33,6 +33,7 @@
87393 #include <linux/rculist.h>
87394 #include <linux/bootmem.h>
87395 #include <linux/hash.h>
87396+#include <linux/security.h>
87397 #include <linux/pid_namespace.h>
87398 #include <linux/init_task.h>
87399 #include <linux/syscalls.h>
87400@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
87401
87402 int pid_max = PID_MAX_DEFAULT;
87403
87404-#define RESERVED_PIDS 300
87405+#define RESERVED_PIDS 500
87406
87407 int pid_max_min = RESERVED_PIDS + 1;
87408 int pid_max_max = PID_MAX_LIMIT;
87409@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
87410 */
87411 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
87412 {
87413+ struct task_struct *task;
87414+
87415 rcu_lockdep_assert(rcu_read_lock_held(),
87416 "find_task_by_pid_ns() needs rcu_read_lock()"
87417 " protection");
87418- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
87419+
87420+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
87421+
87422+ if (gr_pid_is_chrooted(task))
87423+ return NULL;
87424+
87425+ return task;
87426 }
87427
87428 struct task_struct *find_task_by_vpid(pid_t vnr)
87429@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
87430 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
87431 }
87432
87433+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
87434+{
87435+ rcu_lockdep_assert(rcu_read_lock_held(),
87436+ "find_task_by_vpid_unrestricted() needs rcu_read_lock()"
87437+ " protection");
87438+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
87439+}
87440+
87441 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
87442 {
87443 struct pid *pid;
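The pid.c change splits task lookup into a policy-filtered default path and an explicit _unrestricted variant for the few callers that must still see chrooted tasks. The shape of that pattern, with the grsecurity predicate stubbed out (gr_pid_is_chrooted() is real; everything else here is an illustrative stand-in):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int pid; bool chrooted; };

/* Stand-in for gr_pid_is_chrooted(): hide tasks outside the caller's chroot. */
static bool pid_is_chrooted(const struct task *t)
{
    return t && t->chrooted;
}

static struct task *raw_lookup(struct task *table, size_t n, int pid)
{
    for (size_t i = 0; i < n; i++)
        if (table[i].pid == pid)
            return &table[i];
    return NULL;
}

/* Default lookup: policy applied, as in find_task_by_pid_ns(). */
static struct task *lookup(struct task *table, size_t n, int pid)
{
    struct task *t = raw_lookup(table, n, pid);

    return pid_is_chrooted(t) ? NULL : t;
}

int main(void)
{
    struct task tasks[] = { { 1, false }, { 42, true } };

    printf("pid 42 visible: %s\n", lookup(tasks, 2, 42) ? "yes" : "no");
    printf("pid 42 raw:     %s\n", raw_lookup(tasks, 2, 42) ? "yes" : "no");
    return 0;
}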
87444diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
87445index 06c62de..b08cc6c 100644
87446--- a/kernel/pid_namespace.c
87447+++ b/kernel/pid_namespace.c
87448@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
87449 void __user *buffer, size_t *lenp, loff_t *ppos)
87450 {
87451 struct pid_namespace *pid_ns = task_active_pid_ns(current);
87452- struct ctl_table tmp = *table;
87453+ ctl_table_no_const tmp = *table;
87454
87455 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
87456 return -EPERM;
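ctl_table_no_const is the escape hatch the patch uses wherever a sysctl handler needs a writable scratch copy of a table that constification has made read-only; the copy lives on the stack, so the global table itself is never written. Roughly (types simplified here; ctl_table_no_const itself is defined elsewhere in this patch):

#include <stdio.h>

struct ctl_table { const char *procname; int maxval; };

/* With constification the real table is immutable... */
static const struct ctl_table pid_max_table = { "pid_max", 32768 };

/* ...so handlers copy it and tweak the copy, never the original. */
static void handler(const struct ctl_table *table, int clamp)
{
    struct ctl_table tmp = *table;  /* ctl_table_no_const tmp = *table; */

    if (tmp.maxval > clamp)
        tmp.maxval = clamp;
    printf("%s clamped to %d\n", tmp.procname, tmp.maxval);
}

int main(void)
{
    handler(&pid_max_table, 4096);
    return 0;
}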
87457diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
87458index c7f31aa..2b44977 100644
87459--- a/kernel/posix-cpu-timers.c
87460+++ b/kernel/posix-cpu-timers.c
87461@@ -1521,14 +1521,14 @@ struct k_clock clock_posix_cpu = {
87462
87463 static __init int init_posix_cpu_timers(void)
87464 {
87465- struct k_clock process = {
87466+ static struct k_clock process = {
87467 .clock_getres = process_cpu_clock_getres,
87468 .clock_get = process_cpu_clock_get,
87469 .timer_create = process_cpu_timer_create,
87470 .nsleep = process_cpu_nsleep,
87471 .nsleep_restart = process_cpu_nsleep_restart,
87472 };
87473- struct k_clock thread = {
87474+ static struct k_clock thread = {
87475 .clock_getres = thread_cpu_clock_getres,
87476 .clock_get = thread_cpu_clock_get,
87477 .timer_create = thread_cpu_timer_create,
87478diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
87479index 424c2d4..679242f 100644
87480--- a/kernel/posix-timers.c
87481+++ b/kernel/posix-timers.c
87482@@ -43,6 +43,7 @@
87483 #include <linux/hash.h>
87484 #include <linux/posix-clock.h>
87485 #include <linux/posix-timers.h>
87486+#include <linux/grsecurity.h>
87487 #include <linux/syscalls.h>
87488 #include <linux/wait.h>
87489 #include <linux/workqueue.h>
87490@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
87491 * which we beg off on and pass to do_sys_settimeofday().
87492 */
87493
87494-static struct k_clock posix_clocks[MAX_CLOCKS];
87495+static struct k_clock *posix_clocks[MAX_CLOCKS];
87496
87497 /*
87498 * These ones are defined below.
87499@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
87500 */
87501 static __init int init_posix_timers(void)
87502 {
87503- struct k_clock clock_realtime = {
87504+ static struct k_clock clock_realtime = {
87505 .clock_getres = hrtimer_get_res,
87506 .clock_get = posix_clock_realtime_get,
87507 .clock_set = posix_clock_realtime_set,
87508@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
87509 .timer_get = common_timer_get,
87510 .timer_del = common_timer_del,
87511 };
87512- struct k_clock clock_monotonic = {
87513+ static struct k_clock clock_monotonic = {
87514 .clock_getres = hrtimer_get_res,
87515 .clock_get = posix_ktime_get_ts,
87516 .nsleep = common_nsleep,
87517@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
87518 .timer_get = common_timer_get,
87519 .timer_del = common_timer_del,
87520 };
87521- struct k_clock clock_monotonic_raw = {
87522+ static struct k_clock clock_monotonic_raw = {
87523 .clock_getres = hrtimer_get_res,
87524 .clock_get = posix_get_monotonic_raw,
87525 };
87526- struct k_clock clock_realtime_coarse = {
87527+ static struct k_clock clock_realtime_coarse = {
87528 .clock_getres = posix_get_coarse_res,
87529 .clock_get = posix_get_realtime_coarse,
87530 };
87531- struct k_clock clock_monotonic_coarse = {
87532+ static struct k_clock clock_monotonic_coarse = {
87533 .clock_getres = posix_get_coarse_res,
87534 .clock_get = posix_get_monotonic_coarse,
87535 };
87536- struct k_clock clock_tai = {
87537+ static struct k_clock clock_tai = {
87538 .clock_getres = hrtimer_get_res,
87539 .clock_get = posix_get_tai,
87540 .nsleep = common_nsleep,
87541@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
87542 .timer_get = common_timer_get,
87543 .timer_del = common_timer_del,
87544 };
87545- struct k_clock clock_boottime = {
87546+ static struct k_clock clock_boottime = {
87547 .clock_getres = hrtimer_get_res,
87548 .clock_get = posix_get_boottime,
87549 .nsleep = common_nsleep,
87550@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
87551 return;
87552 }
87553
87554- posix_clocks[clock_id] = *new_clock;
87555+ posix_clocks[clock_id] = new_clock;
87556 }
87557 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
87558
87559@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
87560 return (id & CLOCKFD_MASK) == CLOCKFD ?
87561 &clock_posix_dynamic : &clock_posix_cpu;
87562
87563- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
87564+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
87565 return NULL;
87566- return &posix_clocks[id];
87567+ return posix_clocks[id];
87568 }
87569
87570 static int common_timer_create(struct k_itimer *new_timer)
87571@@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
87572 struct k_clock *kc = clockid_to_kclock(which_clock);
87573 struct k_itimer *new_timer;
87574 int error, new_timer_id;
87575- sigevent_t event;
87576+ sigevent_t event = { };
87577 int it_id_set = IT_ID_NOT_SET;
87578
87579 if (!kc)
87580@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
87581 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
87582 return -EFAULT;
87583
87584+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
87585+ have their clock_set fptr set to a nosettime dummy function.
87586+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
87587+ call common_clock_set, which calls do_sys_settimeofday, which
87588+ we hook.
87589+ */
87590+
87591 return kc->clock_set(which_clock, &new_tp);
87592 }
87593
87594diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
87595index 2fac9cc..56fef29 100644
87596--- a/kernel/power/Kconfig
87597+++ b/kernel/power/Kconfig
87598@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
87599 config HIBERNATION
87600 bool "Hibernation (aka 'suspend to disk')"
87601 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
87602+ depends on !GRKERNSEC_KMEM
87603+ depends on !PAX_MEMORY_SANITIZE
87604 select HIBERNATE_CALLBACKS
87605 select LZO_COMPRESS
87606 select LZO_DECOMPRESS
87607diff --git a/kernel/power/process.c b/kernel/power/process.c
87608index 06ec886..9dba35e 100644
87609--- a/kernel/power/process.c
87610+++ b/kernel/power/process.c
87611@@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
87612 unsigned int elapsed_msecs;
87613 bool wakeup = false;
87614 int sleep_usecs = USEC_PER_MSEC;
87615+ bool timedout = false;
87616
87617 do_gettimeofday(&start);
87618
87619@@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user_only)
87620
87621 while (true) {
87622 todo = 0;
87623+ if (time_after(jiffies, end_time))
87624+ timedout = true;
87625 read_lock(&tasklist_lock);
87626 do_each_thread(g, p) {
87627 if (p == current || !freeze_task(p))
87628 continue;
87629
87630- if (!freezer_should_skip(p))
87631+ if (!freezer_should_skip(p)) {
87632 todo++;
87633+ if (timedout) {
87634+ printk(KERN_ERR "Task refusing to freeze:\n");
87635+ sched_show_task(p);
87636+ }
87637+ }
87638 } while_each_thread(g, p);
87639 read_unlock(&tasklist_lock);
87640
87641@@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user_only)
87642 todo += wq_busy;
87643 }
87644
87645- if (!todo || time_after(jiffies, end_time))
87646+ if (!todo || timedout)
87647 break;
87648
87649 if (pm_wakeup_pending()) {
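The process.c change latches the timeout into a flag at the top of the scan so that the same pass which detects expiry can also name every task still refusing to freeze, instead of bailing out at the loop bottom with no diagnostics. The control-flow shape, abstracted away from the freezer (the fake clock and task count are illustrative):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    int jiffies = 0, end_time = 3;  /* fake clock, illustrative budget */
    bool timedout = false;
    int todo;

    while (1) {
        todo = 0;
        if (jiffies++ > end_time)
            timedout = true;        /* latch before scanning */

        for (int task = 0; task < 3; task++) {
            todo++;                 /* pretend every task still resists */
            if (timedout)
                printf("task %d refusing to stop\n", task);
        }

        if (!todo || timedout)      /* single exit checks the latched flag */
            break;
    }
    return timedout ? 1 : 0;
}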
87650diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
87651index be7c86b..b972b27 100644
87652--- a/kernel/printk/printk.c
87653+++ b/kernel/printk/printk.c
87654@@ -385,6 +385,11 @@ static int check_syslog_permissions(int type, bool from_file)
87655 if (from_file && type != SYSLOG_ACTION_OPEN)
87656 return 0;
87657
87658+#ifdef CONFIG_GRKERNSEC_DMESG
87659+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
87660+ return -EPERM;
87661+#endif
87662+
87663 if (syslog_action_restricted(type)) {
87664 if (capable(CAP_SYSLOG))
87665 return 0;
87666@@ -1080,7 +1085,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
87667 next_seq = log_next_seq;
87668
87669 len = 0;
87670- prev = 0;
87671 while (len >= 0 && seq < next_seq) {
87672 struct printk_log *msg = log_from_idx(idx);
87673 int textlen;
87674@@ -2789,7 +2793,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
87675 next_idx = idx;
87676
87677 l = 0;
87678- prev = 0;
87679 while (seq < dumper->next_seq) {
87680 struct printk_log *msg = log_from_idx(idx);
87681
87682diff --git a/kernel/profile.c b/kernel/profile.c
87683index 6631e1e..310c266 100644
87684--- a/kernel/profile.c
87685+++ b/kernel/profile.c
87686@@ -37,7 +37,7 @@ struct profile_hit {
87687 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
87688 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
87689
87690-static atomic_t *prof_buffer;
87691+static atomic_unchecked_t *prof_buffer;
87692 static unsigned long prof_len, prof_shift;
87693
87694 int prof_on __read_mostly;
87695@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
87696 hits[i].pc = 0;
87697 continue;
87698 }
87699- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
87700+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
87701 hits[i].hits = hits[i].pc = 0;
87702 }
87703 }
87704@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
87705 * Add the current hit(s) and flush the write-queue out
87706 * to the global buffer:
87707 */
87708- atomic_add(nr_hits, &prof_buffer[pc]);
87709+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
87710 for (i = 0; i < NR_PROFILE_HIT; ++i) {
87711- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
87712+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
87713 hits[i].pc = hits[i].hits = 0;
87714 }
87715 out:
87716@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
87717 {
87718 unsigned long pc;
87719 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
87720- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
87721+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
87722 }
87723 #endif /* !CONFIG_SMP */
87724
87725@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
87726 return -EFAULT;
87727 buf++; p++; count--; read++;
87728 }
87729- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
87730+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
87731 if (copy_to_user(buf, (void *)pnt, count))
87732 return -EFAULT;
87733 read += count;
87734@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
87735 }
87736 #endif
87737 profile_discard_flip_buffers();
87738- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
87739+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
87740 return count;
87741 }
87742
87743diff --git a/kernel/ptrace.c b/kernel/ptrace.c
87744index 1f4bcb3..99cf7ab 100644
87745--- a/kernel/ptrace.c
87746+++ b/kernel/ptrace.c
87747@@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
87748 if (seize)
87749 flags |= PT_SEIZED;
87750 rcu_read_lock();
87751- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
87752+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
87753 flags |= PT_PTRACE_CAP;
87754 rcu_read_unlock();
87755 task->ptrace = flags;
87756@@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
87757 break;
87758 return -EIO;
87759 }
87760- if (copy_to_user(dst, buf, retval))
87761+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
87762 return -EFAULT;
87763 copied += retval;
87764 src += retval;
87765@@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
87766 bool seized = child->ptrace & PT_SEIZED;
87767 int ret = -EIO;
87768 siginfo_t siginfo, *si;
87769- void __user *datavp = (void __user *) data;
87770+ void __user *datavp = (__force void __user *) data;
87771 unsigned long __user *datalp = datavp;
87772 unsigned long flags;
87773
87774@@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
87775 goto out;
87776 }
87777
87778+ if (gr_handle_ptrace(child, request)) {
87779+ ret = -EPERM;
87780+ goto out_put_task_struct;
87781+ }
87782+
87783 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
87784 ret = ptrace_attach(child, request, addr, data);
87785 /*
87786 * Some architectures need to do book-keeping after
87787 * a ptrace attach.
87788 */
87789- if (!ret)
87790+ if (!ret) {
87791 arch_ptrace_attach(child);
87792+ gr_audit_ptrace(child);
87793+ }
87794 goto out_put_task_struct;
87795 }
87796
87797@@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
87798 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
87799 if (copied != sizeof(tmp))
87800 return -EIO;
87801- return put_user(tmp, (unsigned long __user *)data);
87802+ return put_user(tmp, (__force unsigned long __user *)data);
87803 }
87804
87805 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
87806@@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
87807 }
87808
87809 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
87810- compat_long_t addr, compat_long_t data)
87811+ compat_ulong_t addr, compat_ulong_t data)
87812 {
87813 struct task_struct *child;
87814 long ret;
87815@@ -1197,14 +1204,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
87816 goto out;
87817 }
87818
87819+ if (gr_handle_ptrace(child, request)) {
87820+ ret = -EPERM;
87821+ goto out_put_task_struct;
87822+ }
87823+
87824 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
87825 ret = ptrace_attach(child, request, addr, data);
87826 /*
87827 * Some architectures need to do book-keeping after
87828 * a ptrace attach.
87829 */
87830- if (!ret)
87831+ if (!ret) {
87832 arch_ptrace_attach(child);
87833+ gr_audit_ptrace(child);
87834+ }
87835 goto out_put_task_struct;
87836 }
87837
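The ptrace_readdata() hunk puts `retval > sizeof(buf)` in front of copy_to_user() as a belt-and-braces clamp: access_process_vm() should never report more bytes than the chunk it was asked for, but if it ever did, the extra test turns a kernel stack over-read into a clean -EFAULT. The defensive shape, in plain C:

#include <stdio.h>
#include <string.h>

/* Drain from a fixed staging buffer; distrust the producer's byte count. */
static int drain_chunk(char *dst, const char *staging, size_t claimed)
{
    if (claimed > 16)       /* would over-read the 16-byte staging buffer */
        return -1;          /* the kernel returns -EFAULT here */
    memcpy(dst, staging, claimed);
    return 0;
}

int main(void)
{
    char staging[16] = "payload";
    char out[64];

    printf("sane:  %d\n", drain_chunk(out, staging, 7));
    printf("bogus: %d\n", drain_chunk(out, staging, 32));
    return 0;
}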
87838diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
87839index 01d5ccb..cdcbee6 100644
87840--- a/kernel/rcu/srcu.c
87841+++ b/kernel/rcu/srcu.c
87842@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
87843
87844 idx = ACCESS_ONCE(sp->completed) & 0x1;
87845 preempt_disable();
87846- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
87847+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
87848 smp_mb(); /* B */ /* Avoid leaking the critical section. */
87849- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
87850+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
87851 preempt_enable();
87852 return idx;
87853 }
87854diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
87855index 1254f31..16258dc 100644
87856--- a/kernel/rcu/tiny.c
87857+++ b/kernel/rcu/tiny.c
87858@@ -46,7 +46,7 @@
87859 /* Forward declarations for tiny_plugin.h. */
87860 struct rcu_ctrlblk;
87861 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
87862-static void rcu_process_callbacks(struct softirq_action *unused);
87863+static void rcu_process_callbacks(void);
87864 static void __call_rcu(struct rcu_head *head,
87865 void (*func)(struct rcu_head *rcu),
87866 struct rcu_ctrlblk *rcp);
87867@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
87868 false));
87869 }
87870
87871-static void rcu_process_callbacks(struct softirq_action *unused)
87872+static __latent_entropy void rcu_process_callbacks(void)
87873 {
87874 __rcu_process_callbacks(&rcu_sched_ctrlblk);
87875 __rcu_process_callbacks(&rcu_bh_ctrlblk);
87876diff --git a/kernel/rcu/torture.c b/kernel/rcu/torture.c
87877index 3929cd4..421624d 100644
87878--- a/kernel/rcu/torture.c
87879+++ b/kernel/rcu/torture.c
87880@@ -176,12 +176,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
87881 { 0 };
87882 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
87883 { 0 };
87884-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
87885-static atomic_t n_rcu_torture_alloc;
87886-static atomic_t n_rcu_torture_alloc_fail;
87887-static atomic_t n_rcu_torture_free;
87888-static atomic_t n_rcu_torture_mberror;
87889-static atomic_t n_rcu_torture_error;
87890+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
87891+static atomic_unchecked_t n_rcu_torture_alloc;
87892+static atomic_unchecked_t n_rcu_torture_alloc_fail;
87893+static atomic_unchecked_t n_rcu_torture_free;
87894+static atomic_unchecked_t n_rcu_torture_mberror;
87895+static atomic_unchecked_t n_rcu_torture_error;
87896 static long n_rcu_torture_barrier_error;
87897 static long n_rcu_torture_boost_ktrerror;
87898 static long n_rcu_torture_boost_rterror;
87899@@ -299,11 +299,11 @@ rcu_torture_alloc(void)
87900
87901 spin_lock_bh(&rcu_torture_lock);
87902 if (list_empty(&rcu_torture_freelist)) {
87903- atomic_inc(&n_rcu_torture_alloc_fail);
87904+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
87905 spin_unlock_bh(&rcu_torture_lock);
87906 return NULL;
87907 }
87908- atomic_inc(&n_rcu_torture_alloc);
87909+ atomic_inc_unchecked(&n_rcu_torture_alloc);
87910 p = rcu_torture_freelist.next;
87911 list_del_init(p);
87912 spin_unlock_bh(&rcu_torture_lock);
87913@@ -316,7 +316,7 @@ rcu_torture_alloc(void)
87914 static void
87915 rcu_torture_free(struct rcu_torture *p)
87916 {
87917- atomic_inc(&n_rcu_torture_free);
87918+ atomic_inc_unchecked(&n_rcu_torture_free);
87919 spin_lock_bh(&rcu_torture_lock);
87920 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
87921 spin_unlock_bh(&rcu_torture_lock);
87922@@ -437,7 +437,7 @@ rcu_torture_cb(struct rcu_head *p)
87923 i = rp->rtort_pipe_count;
87924 if (i > RCU_TORTURE_PIPE_LEN)
87925 i = RCU_TORTURE_PIPE_LEN;
87926- atomic_inc(&rcu_torture_wcount[i]);
87927+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
87928 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
87929 rp->rtort_mbtest = 0;
87930 rcu_torture_free(rp);
87931@@ -827,7 +827,7 @@ rcu_torture_writer(void *arg)
87932 i = old_rp->rtort_pipe_count;
87933 if (i > RCU_TORTURE_PIPE_LEN)
87934 i = RCU_TORTURE_PIPE_LEN;
87935- atomic_inc(&rcu_torture_wcount[i]);
87936+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
87937 old_rp->rtort_pipe_count++;
87938 if (gp_normal == gp_exp)
87939 exp = !!(rcu_random(&rand) & 0x80);
87940@@ -845,7 +845,7 @@ rcu_torture_writer(void *arg)
87941 i = rp->rtort_pipe_count;
87942 if (i > RCU_TORTURE_PIPE_LEN)
87943 i = RCU_TORTURE_PIPE_LEN;
87944- atomic_inc(&rcu_torture_wcount[i]);
87945+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
87946 if (++rp->rtort_pipe_count >=
87947 RCU_TORTURE_PIPE_LEN) {
87948 rp->rtort_mbtest = 0;
87949@@ -944,7 +944,7 @@ static void rcu_torture_timer(unsigned long unused)
87950 return;
87951 }
87952 if (p->rtort_mbtest == 0)
87953- atomic_inc(&n_rcu_torture_mberror);
87954+ atomic_inc_unchecked(&n_rcu_torture_mberror);
87955 spin_lock(&rand_lock);
87956 cur_ops->read_delay(&rand);
87957 n_rcu_torture_timers++;
87958@@ -1014,7 +1014,7 @@ rcu_torture_reader(void *arg)
87959 continue;
87960 }
87961 if (p->rtort_mbtest == 0)
87962- atomic_inc(&n_rcu_torture_mberror);
87963+ atomic_inc_unchecked(&n_rcu_torture_mberror);
87964 cur_ops->read_delay(&rand);
87965 preempt_disable();
87966 pipe_count = p->rtort_pipe_count;
87967@@ -1077,11 +1077,11 @@ rcu_torture_printk(char *page)
87968 rcu_torture_current,
87969 rcu_torture_current_version,
87970 list_empty(&rcu_torture_freelist),
87971- atomic_read(&n_rcu_torture_alloc),
87972- atomic_read(&n_rcu_torture_alloc_fail),
87973- atomic_read(&n_rcu_torture_free));
87974+ atomic_read_unchecked(&n_rcu_torture_alloc),
87975+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
87976+ atomic_read_unchecked(&n_rcu_torture_free));
87977 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
87978- atomic_read(&n_rcu_torture_mberror),
87979+ atomic_read_unchecked(&n_rcu_torture_mberror),
87980 n_rcu_torture_boost_ktrerror,
87981 n_rcu_torture_boost_rterror);
87982 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
87983@@ -1100,14 +1100,14 @@ rcu_torture_printk(char *page)
87984 n_barrier_attempts,
87985 n_rcu_torture_barrier_error);
87986 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
87987- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
87988+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
87989 n_rcu_torture_barrier_error != 0 ||
87990 n_rcu_torture_boost_ktrerror != 0 ||
87991 n_rcu_torture_boost_rterror != 0 ||
87992 n_rcu_torture_boost_failure != 0 ||
87993 i > 1) {
87994 cnt += sprintf(&page[cnt], "!!! ");
87995- atomic_inc(&n_rcu_torture_error);
87996+ atomic_inc_unchecked(&n_rcu_torture_error);
87997 WARN_ON_ONCE(1);
87998 }
87999 cnt += sprintf(&page[cnt], "Reader Pipe: ");
88000@@ -1121,7 +1121,7 @@ rcu_torture_printk(char *page)
88001 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
88002 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
88003 cnt += sprintf(&page[cnt], " %d",
88004- atomic_read(&rcu_torture_wcount[i]));
88005+ atomic_read_unchecked(&rcu_torture_wcount[i]));
88006 }
88007 cnt += sprintf(&page[cnt], "\n");
88008 if (cur_ops->stats)
88009@@ -1836,7 +1836,7 @@ rcu_torture_cleanup(void)
88010
88011 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
88012
88013- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
88014+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
88015 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
88016 else if (n_online_successes != n_online_attempts ||
88017 n_offline_successes != n_offline_attempts)
88018@@ -1958,18 +1958,18 @@ rcu_torture_init(void)
88019
88020 rcu_torture_current = NULL;
88021 rcu_torture_current_version = 0;
88022- atomic_set(&n_rcu_torture_alloc, 0);
88023- atomic_set(&n_rcu_torture_alloc_fail, 0);
88024- atomic_set(&n_rcu_torture_free, 0);
88025- atomic_set(&n_rcu_torture_mberror, 0);
88026- atomic_set(&n_rcu_torture_error, 0);
88027+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
88028+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
88029+ atomic_set_unchecked(&n_rcu_torture_free, 0);
88030+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
88031+ atomic_set_unchecked(&n_rcu_torture_error, 0);
88032 n_rcu_torture_barrier_error = 0;
88033 n_rcu_torture_boost_ktrerror = 0;
88034 n_rcu_torture_boost_rterror = 0;
88035 n_rcu_torture_boost_failure = 0;
88036 n_rcu_torture_boosts = 0;
88037 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
88038- atomic_set(&rcu_torture_wcount[i], 0);
88039+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
88040 for_each_possible_cpu(cpu) {
88041 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
88042 per_cpu(rcu_torture_count, cpu)[i] = 0;
88043diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
88044index dd08198..5ccccbe 100644
88045--- a/kernel/rcu/tree.c
88046+++ b/kernel/rcu/tree.c
88047@@ -383,9 +383,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
88048 rcu_prepare_for_idle(smp_processor_id());
88049 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
88050 smp_mb__before_atomic_inc(); /* See above. */
88051- atomic_inc(&rdtp->dynticks);
88052+ atomic_inc_unchecked(&rdtp->dynticks);
88053 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
88054- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
88055+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
88056
88057 /*
88058 * It is illegal to enter an extended quiescent state while
88059@@ -502,10 +502,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
88060 int user)
88061 {
88062 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
88063- atomic_inc(&rdtp->dynticks);
88064+ atomic_inc_unchecked(&rdtp->dynticks);
88065 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
88066 smp_mb__after_atomic_inc(); /* See above. */
88067- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
88068+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
88069 rcu_cleanup_after_idle(smp_processor_id());
88070 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
88071 if (!user && !is_idle_task(current)) {
88072@@ -625,14 +625,14 @@ void rcu_nmi_enter(void)
88073 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
88074
88075 if (rdtp->dynticks_nmi_nesting == 0 &&
88076- (atomic_read(&rdtp->dynticks) & 0x1))
88077+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
88078 return;
88079 rdtp->dynticks_nmi_nesting++;
88080 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
88081- atomic_inc(&rdtp->dynticks);
88082+ atomic_inc_unchecked(&rdtp->dynticks);
88083 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
88084 smp_mb__after_atomic_inc(); /* See above. */
88085- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
88086+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
88087 }
88088
88089 /**
88090@@ -651,9 +651,9 @@ void rcu_nmi_exit(void)
88091 return;
88092 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
88093 smp_mb__before_atomic_inc(); /* See above. */
88094- atomic_inc(&rdtp->dynticks);
88095+ atomic_inc_unchecked(&rdtp->dynticks);
88096 smp_mb__after_atomic_inc(); /* Force delay to next write. */
88097- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
88098+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
88099 }
88100
88101 /**
88102@@ -666,7 +666,7 @@ void rcu_nmi_exit(void)
88103 */
88104 bool notrace __rcu_is_watching(void)
88105 {
88106- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
88107+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
88108 }
88109
88110 /**
88111@@ -749,7 +749,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
88112 static int dyntick_save_progress_counter(struct rcu_data *rdp,
88113 bool *isidle, unsigned long *maxj)
88114 {
88115- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
88116+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
88117 rcu_sysidle_check_cpu(rdp, isidle, maxj);
88118 return (rdp->dynticks_snap & 0x1) == 0;
88119 }
88120@@ -766,7 +766,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
88121 unsigned int curr;
88122 unsigned int snap;
88123
88124- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
88125+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
88126 snap = (unsigned int)rdp->dynticks_snap;
88127
88128 /*
88129@@ -1412,9 +1412,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
88130 rdp = this_cpu_ptr(rsp->rda);
88131 rcu_preempt_check_blocked_tasks(rnp);
88132 rnp->qsmask = rnp->qsmaskinit;
88133- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
88134+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
88135 WARN_ON_ONCE(rnp->completed != rsp->completed);
88136- ACCESS_ONCE(rnp->completed) = rsp->completed;
88137+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
88138 if (rnp == rdp->mynode)
88139 __note_gp_changes(rsp, rnp, rdp);
88140 rcu_preempt_boost_start_gp(rnp);
88141@@ -1505,7 +1505,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
88142 */
88143 rcu_for_each_node_breadth_first(rsp, rnp) {
88144 raw_spin_lock_irq(&rnp->lock);
88145- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
88146+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
88147 rdp = this_cpu_ptr(rsp->rda);
88148 if (rnp == rdp->mynode)
88149 __note_gp_changes(rsp, rnp, rdp);
88150@@ -1865,7 +1865,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
88151 rsp->qlen += rdp->qlen;
88152 rdp->n_cbs_orphaned += rdp->qlen;
88153 rdp->qlen_lazy = 0;
88154- ACCESS_ONCE(rdp->qlen) = 0;
88155+ ACCESS_ONCE_RW(rdp->qlen) = 0;
88156 }
88157
88158 /*
88159@@ -2111,7 +2111,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
88160 }
88161 smp_mb(); /* List handling before counting for rcu_barrier(). */
88162 rdp->qlen_lazy -= count_lazy;
88163- ACCESS_ONCE(rdp->qlen) -= count;
88164+ ACCESS_ONCE_RW(rdp->qlen) -= count;
88165 rdp->n_cbs_invoked += count;
88166
88167 /* Reinstate batch limit if we have worked down the excess. */
88168@@ -2308,7 +2308,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
88169 /*
88170 * Do RCU core processing for the current CPU.
88171 */
88172-static void rcu_process_callbacks(struct softirq_action *unused)
88173+static void rcu_process_callbacks(void)
88174 {
88175 struct rcu_state *rsp;
88176
88177@@ -2415,7 +2415,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
88178 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
88179 if (debug_rcu_head_queue(head)) {
88180 /* Probable double call_rcu(), so leak the callback. */
88181- ACCESS_ONCE(head->func) = rcu_leak_callback;
88182+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
88183 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
88184 return;
88185 }
88186@@ -2443,7 +2443,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
88187 local_irq_restore(flags);
88188 return;
88189 }
88190- ACCESS_ONCE(rdp->qlen)++;
88191+ ACCESS_ONCE_RW(rdp->qlen)++;
88192 if (lazy)
88193 rdp->qlen_lazy++;
88194 else
88195@@ -2652,11 +2652,11 @@ void synchronize_sched_expedited(void)
88196 * counter wrap on a 32-bit system. Quite a few more CPUs would of
88197 * course be required on a 64-bit system.
88198 */
88199- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
88200+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
88201 (ulong)atomic_long_read(&rsp->expedited_done) +
88202 ULONG_MAX / 8)) {
88203 synchronize_sched();
88204- atomic_long_inc(&rsp->expedited_wrap);
88205+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
88206 return;
88207 }
88208
88209@@ -2664,7 +2664,7 @@ void synchronize_sched_expedited(void)
88210 * Take a ticket. Note that atomic_inc_return() implies a
88211 * full memory barrier.
88212 */
88213- snap = atomic_long_inc_return(&rsp->expedited_start);
88214+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
88215 firstsnap = snap;
88216 get_online_cpus();
88217 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
88218@@ -2677,14 +2677,14 @@ void synchronize_sched_expedited(void)
88219 synchronize_sched_expedited_cpu_stop,
88220 NULL) == -EAGAIN) {
88221 put_online_cpus();
88222- atomic_long_inc(&rsp->expedited_tryfail);
88223+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
88224
88225 /* Check to see if someone else did our work for us. */
88226 s = atomic_long_read(&rsp->expedited_done);
88227 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
88228 /* ensure test happens before caller kfree */
88229 smp_mb__before_atomic_inc(); /* ^^^ */
88230- atomic_long_inc(&rsp->expedited_workdone1);
88231+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
88232 return;
88233 }
88234
88235@@ -2693,7 +2693,7 @@ void synchronize_sched_expedited(void)
88236 udelay(trycount * num_online_cpus());
88237 } else {
88238 wait_rcu_gp(call_rcu_sched);
88239- atomic_long_inc(&rsp->expedited_normal);
88240+ atomic_long_inc_unchecked(&rsp->expedited_normal);
88241 return;
88242 }
88243
88244@@ -2702,7 +2702,7 @@ void synchronize_sched_expedited(void)
88245 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
88246 /* ensure test happens before caller kfree */
88247 smp_mb__before_atomic_inc(); /* ^^^ */
88248- atomic_long_inc(&rsp->expedited_workdone2);
88249+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
88250 return;
88251 }
88252
88253@@ -2714,10 +2714,10 @@ void synchronize_sched_expedited(void)
88254 * period works for us.
88255 */
88256 get_online_cpus();
88257- snap = atomic_long_read(&rsp->expedited_start);
88258+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
88259 smp_mb(); /* ensure read is before try_stop_cpus(). */
88260 }
88261- atomic_long_inc(&rsp->expedited_stoppedcpus);
88262+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
88263
88264 /*
88265 * Everyone up to our most recent fetch is covered by our grace
88266@@ -2726,16 +2726,16 @@ void synchronize_sched_expedited(void)
88267 * than we did already did their update.
88268 */
88269 do {
88270- atomic_long_inc(&rsp->expedited_done_tries);
88271+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
88272 s = atomic_long_read(&rsp->expedited_done);
88273 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
88274 /* ensure test happens before caller kfree */
88275 smp_mb__before_atomic_inc(); /* ^^^ */
88276- atomic_long_inc(&rsp->expedited_done_lost);
88277+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
88278 break;
88279 }
88280 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
88281- atomic_long_inc(&rsp->expedited_done_exit);
88282+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
88283
88284 put_online_cpus();
88285 }
88286@@ -2931,7 +2931,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
88287 * ACCESS_ONCE() to prevent the compiler from speculating
88288 * the increment to precede the early-exit check.
88289 */
88290- ACCESS_ONCE(rsp->n_barrier_done)++;
88291+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
88292 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
88293 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
88294 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
88295@@ -2981,7 +2981,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
88296
88297 /* Increment ->n_barrier_done to prevent duplicate work. */
88298 smp_mb(); /* Keep increment after above mechanism. */
88299- ACCESS_ONCE(rsp->n_barrier_done)++;
88300+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
88301 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
88302 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
88303 smp_mb(); /* Keep increment before caller's subsequent code. */
88304@@ -3026,10 +3026,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
88305 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
88306 init_callback_list(rdp);
88307 rdp->qlen_lazy = 0;
88308- ACCESS_ONCE(rdp->qlen) = 0;
88309+ ACCESS_ONCE_RW(rdp->qlen) = 0;
88310 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
88311 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
88312- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
88313+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
88314 rdp->cpu = cpu;
88315 rdp->rsp = rsp;
88316 rcu_boot_init_nocb_percpu_data(rdp);
88317@@ -3063,8 +3063,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
88318 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
88319 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
88320 rcu_sysidle_init_percpu_data(rdp->dynticks);
88321- atomic_set(&rdp->dynticks->dynticks,
88322- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
88323+ atomic_set_unchecked(&rdp->dynticks->dynticks,
88324+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
88325 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
88326
88327 /* Add CPU to rcu_node bitmasks. */
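All the dynticks churn in tree.c preserves one invariant worth spelling out: the counter is incremented on every idle/non-idle transition, so its low bit encodes the current state (even = idle, odd = active), and a remote CPU that snapshots it can later prove a quiescent state passed by seeing either an idle snapshot or any change at all. A toy single-CPU model of that parity protocol (function names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static unsigned int dynticks;   /* even = idle, odd = non-idle */

static void eqs_exit(void)  { dynticks++; }     /* leave idle: becomes odd */
static void eqs_enter(void) { dynticks++; }     /* enter idle: becomes even */

static bool cpu_is_active(void) { return dynticks & 0x1; }

/* A remote CPU took `snap`; an idle snapshot or any change since then
 * implies the CPU passed through a quiescent state. */
static bool quiescent_since(unsigned int snap, unsigned int curr)
{
    return (snap & 0x1) == 0 || curr != snap;
}

int main(void)
{
    unsigned int snap;

    eqs_exit();                     /* CPU starts running */
    snap = dynticks;
    printf("active: %d, qs: %d\n", cpu_is_active(),
           quiescent_since(snap, dynticks));    /* active: 1, qs: 0 */

    eqs_enter();                    /* CPU idles: counter goes even */
    printf("active: %d, qs: %d\n", cpu_is_active(),
           quiescent_since(snap, dynticks));    /* active: 0, qs: 1 */
    return 0;
}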
88328diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
88329index 52be957..365ded3 100644
88330--- a/kernel/rcu/tree.h
88331+++ b/kernel/rcu/tree.h
88332@@ -87,11 +87,11 @@ struct rcu_dynticks {
88333 long long dynticks_nesting; /* Track irq/process nesting level. */
88334 /* Process level is worth LLONG_MAX/2. */
88335 int dynticks_nmi_nesting; /* Track NMI nesting level. */
88336- atomic_t dynticks; /* Even value for idle, else odd. */
88337+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
88338 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
88339 long long dynticks_idle_nesting;
88340 /* irq/process nesting level from idle. */
88341- atomic_t dynticks_idle; /* Even value for idle, else odd. */
88342+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
88343 /* "Idle" excludes userspace execution. */
88344 unsigned long dynticks_idle_jiffies;
88345 /* End of last non-NMI non-idle period. */
88346@@ -429,17 +429,17 @@ struct rcu_state {
88347 /* _rcu_barrier(). */
88348 /* End of fields guarded by barrier_mutex. */
88349
88350- atomic_long_t expedited_start; /* Starting ticket. */
88351- atomic_long_t expedited_done; /* Done ticket. */
88352- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
88353- atomic_long_t expedited_tryfail; /* # acquisition failures. */
88354- atomic_long_t expedited_workdone1; /* # done by others #1. */
88355- atomic_long_t expedited_workdone2; /* # done by others #2. */
88356- atomic_long_t expedited_normal; /* # fallbacks to normal. */
88357- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
88358- atomic_long_t expedited_done_tries; /* # tries to update _done. */
88359- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
88360- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
88361+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
88362+ atomic_long_t expedited_done; /* Done ticket. */
88363+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
88364+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
88365+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
88366+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
88367+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
88368+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
88369+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
88370+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
88371+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
88372
88373 unsigned long jiffies_force_qs; /* Time at which to invoke */
88374 /* force_quiescent_state(). */
88375diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
88376index 08a7652..3598c7e 100644
88377--- a/kernel/rcu/tree_plugin.h
88378+++ b/kernel/rcu/tree_plugin.h
88379@@ -749,7 +749,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
88380 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
88381 {
88382 return !rcu_preempted_readers_exp(rnp) &&
88383- ACCESS_ONCE(rnp->expmask) == 0;
88384+ ACCESS_ONCE_RW(rnp->expmask) == 0;
88385 }
88386
88387 /*
88388@@ -905,7 +905,7 @@ void synchronize_rcu_expedited(void)
88389
88390 /* Clean up and exit. */
88391 smp_mb(); /* ensure expedited GP seen before counter increment. */
88392- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
88393+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
88394 unlock_mb_ret:
88395 mutex_unlock(&sync_rcu_preempt_exp_mutex);
88396 mb_ret:
88397@@ -1479,7 +1479,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
88398 free_cpumask_var(cm);
88399 }
88400
88401-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
88402+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
88403 .store = &rcu_cpu_kthread_task,
88404 .thread_should_run = rcu_cpu_kthread_should_run,
88405 .thread_fn = rcu_cpu_kthread,
88406@@ -1946,7 +1946,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
88407 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
88408 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
88409 cpu, ticks_value, ticks_title,
88410- atomic_read(&rdtp->dynticks) & 0xfff,
88411+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
88412 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
88413 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
88414 fast_no_hz);
88415@@ -2109,7 +2109,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
88416
88417 /* Enqueue the callback on the nocb list and update counts. */
88418 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
88419- ACCESS_ONCE(*old_rhpp) = rhp;
88420+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
88421 atomic_long_add(rhcount, &rdp->nocb_q_count);
88422 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
88423
88424@@ -2272,12 +2272,12 @@ static int rcu_nocb_kthread(void *arg)
88425 * Extract queued callbacks, update counts, and wait
88426 * for a grace period to elapse.
88427 */
88428- ACCESS_ONCE(rdp->nocb_head) = NULL;
88429+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
88430 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
88431 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
88432 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
88433- ACCESS_ONCE(rdp->nocb_p_count) += c;
88434- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
88435+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
88436+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
88437 rcu_nocb_wait_gp(rdp);
88438
88439 /* Each pass through the following loop invokes a callback. */
88440@@ -2303,8 +2303,8 @@ static int rcu_nocb_kthread(void *arg)
88441 list = next;
88442 }
88443 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
88444- ACCESS_ONCE(rdp->nocb_p_count) -= c;
88445- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
88446+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
88447+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
88448 rdp->n_nocbs_invoked += c;
88449 }
88450 return 0;
88451@@ -2331,7 +2331,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
88452 t = kthread_run(rcu_nocb_kthread, rdp,
88453 "rcuo%c/%d", rsp->abbr, cpu);
88454 BUG_ON(IS_ERR(t));
88455- ACCESS_ONCE(rdp->nocb_kthread) = t;
88456+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
88457 }
88458 }
88459
88460@@ -2457,11 +2457,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
88461
88462 /* Record start of fully idle period. */
88463 j = jiffies;
88464- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
88465+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
88466 smp_mb__before_atomic_inc();
88467- atomic_inc(&rdtp->dynticks_idle);
88468+ atomic_inc_unchecked(&rdtp->dynticks_idle);
88469 smp_mb__after_atomic_inc();
88470- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
88471+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
88472 }
88473
88474 /*
88475@@ -2526,9 +2526,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
88476
88477 /* Record end of idle period. */
88478 smp_mb__before_atomic_inc();
88479- atomic_inc(&rdtp->dynticks_idle);
88480+ atomic_inc_unchecked(&rdtp->dynticks_idle);
88481 smp_mb__after_atomic_inc();
88482- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
88483+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
88484
88485 /*
88486 * If we are the timekeeping CPU, we are permitted to be non-idle
88487@@ -2569,7 +2569,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
88488 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
88489
88490 /* Pick up current idle and NMI-nesting counter and check. */
88491- cur = atomic_read(&rdtp->dynticks_idle);
88492+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
88493 if (cur & 0x1) {
88494 *isidle = false; /* We are not idle! */
88495 return;
88496@@ -2632,7 +2632,7 @@ static void rcu_sysidle(unsigned long j)
88497 case RCU_SYSIDLE_NOT:
88498
88499 /* First time all are idle, so note a short idle period. */
88500- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
88501+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
88502 break;
88503
88504 case RCU_SYSIDLE_SHORT:
88505@@ -2669,7 +2669,7 @@ static void rcu_sysidle(unsigned long j)
88506 static void rcu_sysidle_cancel(void)
88507 {
88508 smp_mb();
88509- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
88510+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
88511 }
88512
88513 /*
88514@@ -2717,7 +2717,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
88515 smp_mb(); /* grace period precedes setting inuse. */
88516
88517 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
88518- ACCESS_ONCE(rshp->inuse) = 0;
88519+ ACCESS_ONCE_RW(rshp->inuse) = 0;
88520 }
88521
88522 /*
88523diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
88524index 3596797..f78391c 100644
88525--- a/kernel/rcu/tree_trace.c
88526+++ b/kernel/rcu/tree_trace.c
88527@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
88528 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
88529 rdp->passed_quiesce, rdp->qs_pending);
88530 seq_printf(m, " dt=%d/%llx/%d df=%lu",
88531- atomic_read(&rdp->dynticks->dynticks),
88532+ atomic_read_unchecked(&rdp->dynticks->dynticks),
88533 rdp->dynticks->dynticks_nesting,
88534 rdp->dynticks->dynticks_nmi_nesting,
88535 rdp->dynticks_fqs);
88536@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
88537 struct rcu_state *rsp = (struct rcu_state *)m->private;
88538
88539 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
88540- atomic_long_read(&rsp->expedited_start),
88541+ atomic_long_read_unchecked(&rsp->expedited_start),
88542 atomic_long_read(&rsp->expedited_done),
88543- atomic_long_read(&rsp->expedited_wrap),
88544- atomic_long_read(&rsp->expedited_tryfail),
88545- atomic_long_read(&rsp->expedited_workdone1),
88546- atomic_long_read(&rsp->expedited_workdone2),
88547- atomic_long_read(&rsp->expedited_normal),
88548- atomic_long_read(&rsp->expedited_stoppedcpus),
88549- atomic_long_read(&rsp->expedited_done_tries),
88550- atomic_long_read(&rsp->expedited_done_lost),
88551- atomic_long_read(&rsp->expedited_done_exit));
88552+ atomic_long_read_unchecked(&rsp->expedited_wrap),
88553+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
88554+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
88555+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
88556+ atomic_long_read_unchecked(&rsp->expedited_normal),
88557+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
88558+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
88559+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
88560+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
88561 return 0;
88562 }
88563
88564diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
88565index 6cb3dff..dc5710f 100644
88566--- a/kernel/rcu/update.c
88567+++ b/kernel/rcu/update.c
88568@@ -318,10 +318,10 @@ int rcu_jiffies_till_stall_check(void)
88569 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
88570 */
88571 if (till_stall_check < 3) {
88572- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
88573+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
88574 till_stall_check = 3;
88575 } else if (till_stall_check > 300) {
88576- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
88577+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
88578 till_stall_check = 300;
88579 }
88580 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
88581diff --git a/kernel/resource.c b/kernel/resource.c
88582index 3f285dc..5755f62 100644
88583--- a/kernel/resource.c
88584+++ b/kernel/resource.c
88585@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
88586
88587 static int __init ioresources_init(void)
88588 {
88589+#ifdef CONFIG_GRKERNSEC_PROC_ADD
88590+#ifdef CONFIG_GRKERNSEC_PROC_USER
88591+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
88592+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
88593+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88594+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
88595+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
88596+#endif
88597+#else
88598 proc_create("ioports", 0, NULL, &proc_ioports_operations);
88599 proc_create("iomem", 0, NULL, &proc_iomem_operations);
88600+#endif
88601 return 0;
88602 }
88603 __initcall(ioresources_init);
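resource.c gets essentially the same mode ladder as /proc/modules earlier in this patch: owner-only under GRKERNSEC_PROC_USER, owner-plus-group under GRKERNSEC_PROC_USERGROUP, world-readable in the stock case. Factored out, the selection reduces to the following sketch (the real patch inlines the #ifdefs at each call site, and under GRKERNSEC_PROC_ADD with neither sub-option it skips creating the entries entirely):

#include <stdio.h>
#include <sys/stat.h>

/* Set one to 1 to mimic the kernel config; both 0 = stock behaviour. */
#define GRKERNSEC_PROC_USER      1
#define GRKERNSEC_PROC_USERGROUP 0

static mode_t grsec_proc_mode(void)
{
#if GRKERNSEC_PROC_USER
    return S_IRUSR;                 /* 0400: owner only */
#elif GRKERNSEC_PROC_USERGROUP
    return S_IRUSR | S_IRGRP;       /* 0440: owner and group */
#else
    return 0444;                    /* stock: world readable */
#endif
}

int main(void)
{
    printf("proc entries created with mode %04o\n",
           (unsigned int)grsec_proc_mode());
    return 0;
}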
88604diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
88605index 4a07353..66b5291 100644
88606--- a/kernel/sched/auto_group.c
88607+++ b/kernel/sched/auto_group.c
88608@@ -11,7 +11,7 @@
88609
88610 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
88611 static struct autogroup autogroup_default;
88612-static atomic_t autogroup_seq_nr;
88613+static atomic_unchecked_t autogroup_seq_nr;
88614
88615 void __init autogroup_init(struct task_struct *init_task)
88616 {
88617@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
88618
88619 kref_init(&ag->kref);
88620 init_rwsem(&ag->lock);
88621- ag->id = atomic_inc_return(&autogroup_seq_nr);
88622+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
88623 ag->tg = tg;
88624 #ifdef CONFIG_RT_GROUP_SCHED
88625 /*
88626diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
88627index a63f4dc..349bbb0 100644
88628--- a/kernel/sched/completion.c
88629+++ b/kernel/sched/completion.c
88630@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
88631 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
88632 * or number of jiffies left till timeout) if completed.
88633 */
88634-long __sched
88635+long __sched __intentional_overflow(-1)
88636 wait_for_completion_interruptible_timeout(struct completion *x,
88637 unsigned long timeout)
88638 {
88639@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
88640 *
88641 * Return: -ERESTARTSYS if interrupted, 0 if completed.
88642 */
88643-int __sched wait_for_completion_killable(struct completion *x)
88644+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
88645 {
88646 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
88647 if (t == -ERESTARTSYS)
88648@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
88649 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
88650 * or number of jiffies left till timeout) if completed.
88651 */
88652-long __sched
88653+long __sched __intentional_overflow(-1)
88654 wait_for_completion_killable_timeout(struct completion *x,
88655 unsigned long timeout)
88656 {
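
__intentional_overflow(-1) tells the size_overflow gcc plugin that the value can legitimately go negative or wrap and must not be instrumented; the -1 designates the return value, while non-negative arguments would designate parameters by position. When the plugin is not built, the marker has to compile away, e.g. (a sketch mirroring the compiler header changes outside this excerpt):

	#ifdef SIZE_OVERFLOW_PLUGIN
	# define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	# define __intentional_overflow(...)
	#endif
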
88657diff --git a/kernel/sched/core.c b/kernel/sched/core.c
88658index c677510..132bb14 100644
88659--- a/kernel/sched/core.c
88660+++ b/kernel/sched/core.c
88661@@ -1768,7 +1768,7 @@ void set_numabalancing_state(bool enabled)
88662 int sysctl_numa_balancing(struct ctl_table *table, int write,
88663 void __user *buffer, size_t *lenp, loff_t *ppos)
88664 {
88665- struct ctl_table t;
88666+ ctl_table_no_const t;
88667 int err;
88668 int state = numabalancing_enabled;
88669
88670@@ -2893,6 +2893,8 @@ int can_nice(const struct task_struct *p, const int nice)
88671 /* convert nice value [19,-20] to rlimit style value [1,40] */
88672 int nice_rlim = 20 - nice;
88673
88674+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
88675+
88676 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
88677 capable(CAP_SYS_NICE));
88678 }
88679@@ -2926,7 +2928,8 @@ SYSCALL_DEFINE1(nice, int, increment)
88680 if (nice > 19)
88681 nice = 19;
88682
88683- if (increment < 0 && !can_nice(current, nice))
88684+ if (increment < 0 && (!can_nice(current, nice) ||
88685+ gr_handle_chroot_nice()))
88686 return -EPERM;
88687
88688 retval = security_task_setnice(current, nice);
88689@@ -3088,6 +3091,7 @@ recheck:
88690 unsigned long rlim_rtprio =
88691 task_rlimit(p, RLIMIT_RTPRIO);
88692
88693+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
88694 /* can't set/change the rt policy */
88695 if (policy != p->policy && !rlim_rtprio)
88696 return -EPERM;
88697@@ -4254,7 +4258,7 @@ static void migrate_tasks(unsigned int dead_cpu)
88698
88699 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
88700
88701-static struct ctl_table sd_ctl_dir[] = {
88702+static ctl_table_no_const sd_ctl_dir[] __read_only = {
88703 {
88704 .procname = "sched_domain",
88705 .mode = 0555,
88706@@ -4271,17 +4275,17 @@ static struct ctl_table sd_ctl_root[] = {
88707 {}
88708 };
88709
88710-static struct ctl_table *sd_alloc_ctl_entry(int n)
88711+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
88712 {
88713- struct ctl_table *entry =
88714+ ctl_table_no_const *entry =
88715 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
88716
88717 return entry;
88718 }
88719
88720-static void sd_free_ctl_entry(struct ctl_table **tablep)
88721+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
88722 {
88723- struct ctl_table *entry;
88724+ ctl_table_no_const *entry;
88725
88726 /*
88727 * In the intermediate directories, both the child directory and
88728@@ -4289,22 +4293,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
88729 * will always be set. In the lowest directory the names are
88730 * static strings and all have proc handlers.
88731 */
88732- for (entry = *tablep; entry->mode; entry++) {
88733- if (entry->child)
88734- sd_free_ctl_entry(&entry->child);
88735+ for (entry = tablep; entry->mode; entry++) {
88736+ if (entry->child) {
88737+ sd_free_ctl_entry(entry->child);
88738+ pax_open_kernel();
88739+ entry->child = NULL;
88740+ pax_close_kernel();
88741+ }
88742 if (entry->proc_handler == NULL)
88743 kfree(entry->procname);
88744 }
88745
88746- kfree(*tablep);
88747- *tablep = NULL;
88748+ kfree(tablep);
88749 }
88750
88751 static int min_load_idx = 0;
88752 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
88753
88754 static void
88755-set_table_entry(struct ctl_table *entry,
88756+set_table_entry(ctl_table_no_const *entry,
88757 const char *procname, void *data, int maxlen,
88758 umode_t mode, proc_handler *proc_handler,
88759 bool load_idx)
88760@@ -4324,7 +4331,7 @@ set_table_entry(struct ctl_table *entry,
88761 static struct ctl_table *
88762 sd_alloc_ctl_domain_table(struct sched_domain *sd)
88763 {
88764- struct ctl_table *table = sd_alloc_ctl_entry(13);
88765+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
88766
88767 if (table == NULL)
88768 return NULL;
88769@@ -4359,9 +4366,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
88770 return table;
88771 }
88772
88773-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
88774+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
88775 {
88776- struct ctl_table *entry, *table;
88777+ ctl_table_no_const *entry, *table;
88778 struct sched_domain *sd;
88779 int domain_num = 0, i;
88780 char buf[32];
88781@@ -4388,11 +4395,13 @@ static struct ctl_table_header *sd_sysctl_header;
88782 static void register_sched_domain_sysctl(void)
88783 {
88784 int i, cpu_num = num_possible_cpus();
88785- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
88786+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
88787 char buf[32];
88788
88789 WARN_ON(sd_ctl_dir[0].child);
88790+ pax_open_kernel();
88791 sd_ctl_dir[0].child = entry;
88792+ pax_close_kernel();
88793
88794 if (entry == NULL)
88795 return;
88796@@ -4415,8 +4424,12 @@ static void unregister_sched_domain_sysctl(void)
88797 if (sd_sysctl_header)
88798 unregister_sysctl_table(sd_sysctl_header);
88799 sd_sysctl_header = NULL;
88800- if (sd_ctl_dir[0].child)
88801- sd_free_ctl_entry(&sd_ctl_dir[0].child);
88802+ if (sd_ctl_dir[0].child) {
88803+ sd_free_ctl_entry(sd_ctl_dir[0].child);
88804+ pax_open_kernel();
88805+ sd_ctl_dir[0].child = NULL;
88806+ pax_close_kernel();
88807+ }
88808 }
88809 #else
88810 static void register_sched_domain_sysctl(void)
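
Two cooperating changes in this file: ctl_table_no_const is a typedef that the constify plugin leaves writable, used for sysctl tables assembled at runtime, while sd_ctl_dir itself stays in read-only memory (__read_only) and its rare writers bracket each store with pax_open_kernel()/pax_close_kernel(). Conceptually the pair just lifts write protection for the duration (x86-flavored sketch; the real per-arch versions add barriers and handle nesting):

	static inline unsigned long pax_open_kernel(void)
	{
		preempt_disable();
		write_cr0(read_cr0() & ~X86_CR0_WP);	/* allow kernel writes to RO pages */
		return 0;
	}

	static inline unsigned long pax_close_kernel(void)
	{
		write_cr0(read_cr0() | X86_CR0_WP);	/* re-arm write protection */
		preempt_enable();
		return 0;
	}
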
88811diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
88812index ce501de..1805320 100644
88813--- a/kernel/sched/fair.c
88814+++ b/kernel/sched/fair.c
88815@@ -1652,7 +1652,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
88816
88817 static void reset_ptenuma_scan(struct task_struct *p)
88818 {
88819- ACCESS_ONCE(p->mm->numa_scan_seq)++;
88820+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
88821 p->mm->numa_scan_offset = 0;
88822 }
88823
88824@@ -6863,7 +6863,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
88825 * run_rebalance_domains is triggered when needed from the scheduler tick.
88826 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
88827 */
88828-static void run_rebalance_domains(struct softirq_action *h)
88829+static __latent_entropy void run_rebalance_domains(void)
88830 {
88831 int this_cpu = smp_processor_id();
88832 struct rq *this_rq = cpu_rq(this_cpu);
88833diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
88834index 88c85b2..a1dec86 100644
88835--- a/kernel/sched/sched.h
88836+++ b/kernel/sched/sched.h
88837@@ -1035,7 +1035,7 @@ struct sched_class {
88838 #ifdef CONFIG_FAIR_GROUP_SCHED
88839 void (*task_move_group) (struct task_struct *p, int on_rq);
88840 #endif
88841-};
88842+} __do_const;
88843
88844 #define sched_class_highest (&stop_sched_class)
88845 #define for_each_class(class) \
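
__do_const is a constify-plugin annotation: a structure that consists almost entirely of function pointers, like sched_class here, is forced const at every instantiation and thus into read-only data, so a kernel write primitive cannot redirect its callbacks. The plumbing is presumably along these lines (sketch; the compiler header hunk is outside this excerpt):

	#ifdef CONSTIFY_PLUGIN
	# define __do_const	__attribute__((do_const))
	#else
	# define __do_const
	#endif
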
88846diff --git a/kernel/signal.c b/kernel/signal.c
88847index 940b30e..7fd6041 100644
88848--- a/kernel/signal.c
88849+++ b/kernel/signal.c
88850@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
88851
88852 int print_fatal_signals __read_mostly;
88853
88854-static void __user *sig_handler(struct task_struct *t, int sig)
88855+static __sighandler_t sig_handler(struct task_struct *t, int sig)
88856 {
88857 return t->sighand->action[sig - 1].sa.sa_handler;
88858 }
88859
88860-static int sig_handler_ignored(void __user *handler, int sig)
88861+static int sig_handler_ignored(__sighandler_t handler, int sig)
88862 {
88863 /* Is it explicitly or implicitly ignored? */
88864 return handler == SIG_IGN ||
88865@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
88866
88867 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
88868 {
88869- void __user *handler;
88870+ __sighandler_t handler;
88871
88872 handler = sig_handler(t, sig);
88873
88874@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
88875 atomic_inc(&user->sigpending);
88876 rcu_read_unlock();
88877
88878+ if (!override_rlimit)
88879+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
88880+
88881 if (override_rlimit ||
88882 atomic_read(&user->sigpending) <=
88883 task_rlimit(t, RLIMIT_SIGPENDING)) {
88884@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
88885
88886 int unhandled_signal(struct task_struct *tsk, int sig)
88887 {
88888- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
88889+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
88890 if (is_global_init(tsk))
88891 return 1;
88892 if (handler != SIG_IGN && handler != SIG_DFL)
88893@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
88894 }
88895 }
88896
88897+ /* allow glibc communication via tgkill to other threads in our
88898+ thread group */
88899+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
88900+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
88901+ && gr_handle_signal(t, sig))
88902+ return -EPERM;
88903+
88904 return security_task_kill(t, info, sig, 0);
88905 }
88906
88907@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
88908 return send_signal(sig, info, p, 1);
88909 }
88910
88911-static int
88912+int
88913 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
88914 {
88915 return send_signal(sig, info, t, 0);
88916@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
88917 unsigned long int flags;
88918 int ret, blocked, ignored;
88919 struct k_sigaction *action;
88920+ int is_unhandled = 0;
88921
88922 spin_lock_irqsave(&t->sighand->siglock, flags);
88923 action = &t->sighand->action[sig-1];
88924@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
88925 }
88926 if (action->sa.sa_handler == SIG_DFL)
88927 t->signal->flags &= ~SIGNAL_UNKILLABLE;
88928+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
88929+ is_unhandled = 1;
88930 ret = specific_send_sig_info(sig, info, t);
88931 spin_unlock_irqrestore(&t->sighand->siglock, flags);
88932
88933+	/* only deal with unhandled signals; Java etc. trigger SIGSEGV during
88934+	   normal operation */
88935+ if (is_unhandled) {
88936+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
88937+ gr_handle_crash(t, sig);
88938+ }
88939+
88940 return ret;
88941 }
88942
88943@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
88944 ret = check_kill_permission(sig, info, p);
88945 rcu_read_unlock();
88946
88947- if (!ret && sig)
88948+ if (!ret && sig) {
88949 ret = do_send_sig_info(sig, info, p, true);
88950+ if (!ret)
88951+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
88952+ }
88953
88954 return ret;
88955 }
88956@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
88957 int error = -ESRCH;
88958
88959 rcu_read_lock();
88960- p = find_task_by_vpid(pid);
88961+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
88962+ /* allow glibc communication via tgkill to other threads in our
88963+ thread group */
88964+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
88965+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
88966+ p = find_task_by_vpid_unrestricted(pid);
88967+ else
88968+#endif
88969+ p = find_task_by_vpid(pid);
88970 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
88971 error = check_kill_permission(sig, info, p);
88972 /*
88973@@ -3240,8 +3271,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
88974 }
88975 seg = get_fs();
88976 set_fs(KERNEL_DS);
88977- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
88978- (stack_t __force __user *) &uoss,
88979+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
88980+ (stack_t __force_user *) &uoss,
88981 compat_user_stack_pointer());
88982 set_fs(seg);
88983 if (ret >= 0 && uoss_ptr) {
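
The signal.c hunks also show the patch's sparse vocabulary: sig_handler() and friends now traffic in a properly typed __sighandler_t rather than a generic void __user *, and __force_user abbreviates the double annotation needed for the deliberate kernel-as-user pointer casts under set_fs(KERNEL_DS). The shorthand is presumably defined as (sketch; the definition sits outside this excerpt):

	#define __force_user	__force __user
	#define __force_kernel	__force __kernel
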
88984diff --git a/kernel/smpboot.c b/kernel/smpboot.c
88985index eb89e18..a4e6792 100644
88986--- a/kernel/smpboot.c
88987+++ b/kernel/smpboot.c
88988@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
88989 }
88990 smpboot_unpark_thread(plug_thread, cpu);
88991 }
88992- list_add(&plug_thread->list, &hotplug_threads);
88993+ pax_list_add(&plug_thread->list, &hotplug_threads);
88994 out:
88995 mutex_unlock(&smpboot_threads_lock);
88996 return ret;
88997@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
88998 {
88999 get_online_cpus();
89000 mutex_lock(&smpboot_threads_lock);
89001- list_del(&plug_thread->list);
89002+ pax_list_del(&plug_thread->list);
89003 smpboot_destroy_threads(plug_thread);
89004 mutex_unlock(&smpboot_threads_lock);
89005 put_online_cpus();
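
The hotplug-thread descriptors this list links together are marked __read_only later in the patch (see watchdog_threads and softirq_threads below), so a plain list_add() into them would fault; the pax_ variants perform the identical splice inside an open/close window. A minimal sketch, assuming the stock __list_add()/__list_del() internals:

	void pax_list_add(struct list_head *new, struct list_head *head)
	{
		pax_open_kernel();
		__list_add(new, head, head->next);
		pax_close_kernel();
	}

	void pax_list_del(struct list_head *entry)
	{
		pax_open_kernel();
		__list_del(entry->prev, entry->next);
		entry->next = LIST_POISON1;
		entry->prev = LIST_POISON2;
		pax_close_kernel();
	}
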
89006diff --git a/kernel/softirq.c b/kernel/softirq.c
89007index 11025cc..bc0e4dc 100644
89008--- a/kernel/softirq.c
89009+++ b/kernel/softirq.c
89010@@ -50,11 +50,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
89011 EXPORT_SYMBOL(irq_stat);
89012 #endif
89013
89014-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
89015+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
89016
89017 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
89018
89019-char *softirq_to_name[NR_SOFTIRQS] = {
89020+const char * const softirq_to_name[NR_SOFTIRQS] = {
89021 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
89022 "TASKLET", "SCHED", "HRTIMER", "RCU"
89023 };
89024@@ -250,7 +250,7 @@ restart:
89025 kstat_incr_softirqs_this_cpu(vec_nr);
89026
89027 trace_softirq_entry(vec_nr);
89028- h->action(h);
89029+ h->action();
89030 trace_softirq_exit(vec_nr);
89031 if (unlikely(prev_count != preempt_count())) {
89032 printk(KERN_ERR "huh, entered softirq %u %s %p"
89033@@ -419,7 +419,7 @@ void __raise_softirq_irqoff(unsigned int nr)
89034 or_softirq_pending(1UL << nr);
89035 }
89036
89037-void open_softirq(int nr, void (*action)(struct softirq_action *))
89038+void __init open_softirq(int nr, void (*action)(void))
89039 {
89040 softirq_vec[nr].action = action;
89041 }
89042@@ -475,7 +475,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
89043
89044 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
89045
89046-static void tasklet_action(struct softirq_action *a)
89047+static __latent_entropy void tasklet_action(void)
89048 {
89049 struct tasklet_struct *list;
89050
89051@@ -510,7 +510,7 @@ static void tasklet_action(struct softirq_action *a)
89052 }
89053 }
89054
89055-static void tasklet_hi_action(struct softirq_action *a)
89056+static __latent_entropy void tasklet_hi_action(void)
89057 {
89058 struct tasklet_struct *list;
89059
89060@@ -740,7 +740,7 @@ static struct notifier_block cpu_nfb = {
89061 .notifier_call = cpu_callback
89062 };
89063
89064-static struct smp_hotplug_thread softirq_threads = {
89065+static struct smp_hotplug_thread softirq_threads __read_only = {
89066 .store = &ksoftirqd,
89067 .thread_should_run = ksoftirqd_should_run,
89068 .thread_fn = run_ksoftirqd,
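
Three related hardenings meet in softirq.c: the softirq vector becomes read-only and page-aligned, open_softirq() is demoted to __init so the table can only be populated during boot, and, since handlers only ever received a pointer back into that fixed table, the callback type shrinks to void (*)(void). __latent_entropy additionally makes a gcc plugin fold per-invocation randomness into the entropy pool at these hot functions. Registration keeps its shape (illustrative; the handler name is made up):

	static __latent_entropy void example_tasklet_action(void)
	{
		/* drain this CPU's tasklet list */
	}

	static int __init example_softirq_init(void)
	{
		open_softirq(TASKLET_SOFTIRQ, example_tasklet_action);	/* boot-time only now */
		return 0;
	}
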
89069diff --git a/kernel/sys.c b/kernel/sys.c
89070index c723113..46bf922 100644
89071--- a/kernel/sys.c
89072+++ b/kernel/sys.c
89073@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
89074 error = -EACCES;
89075 goto out;
89076 }
89077+
89078+ if (gr_handle_chroot_setpriority(p, niceval)) {
89079+ error = -EACCES;
89080+ goto out;
89081+ }
89082+
89083 no_nice = security_task_setnice(p, niceval);
89084 if (no_nice) {
89085 error = no_nice;
89086@@ -351,6 +357,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
89087 goto error;
89088 }
89089
89090+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
89091+ goto error;
89092+
89093 if (rgid != (gid_t) -1 ||
89094 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
89095 new->sgid = new->egid;
89096@@ -386,6 +395,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
89097 old = current_cred();
89098
89099 retval = -EPERM;
89100+
89101+ if (gr_check_group_change(kgid, kgid, kgid))
89102+ goto error;
89103+
89104 if (ns_capable(old->user_ns, CAP_SETGID))
89105 new->gid = new->egid = new->sgid = new->fsgid = kgid;
89106 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
89107@@ -403,7 +416,7 @@ error:
89108 /*
89109 * change the user struct in a credentials set to match the new UID
89110 */
89111-static int set_user(struct cred *new)
89112+int set_user(struct cred *new)
89113 {
89114 struct user_struct *new_user;
89115
89116@@ -483,6 +496,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
89117 goto error;
89118 }
89119
89120+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
89121+ goto error;
89122+
89123 if (!uid_eq(new->uid, old->uid)) {
89124 retval = set_user(new);
89125 if (retval < 0)
89126@@ -533,6 +549,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
89127 old = current_cred();
89128
89129 retval = -EPERM;
89130+
89131+ if (gr_check_crash_uid(kuid))
89132+ goto error;
89133+ if (gr_check_user_change(kuid, kuid, kuid))
89134+ goto error;
89135+
89136 if (ns_capable(old->user_ns, CAP_SETUID)) {
89137 new->suid = new->uid = kuid;
89138 if (!uid_eq(kuid, old->uid)) {
89139@@ -602,6 +624,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
89140 goto error;
89141 }
89142
89143+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
89144+ goto error;
89145+
89146 if (ruid != (uid_t) -1) {
89147 new->uid = kruid;
89148 if (!uid_eq(kruid, old->uid)) {
89149@@ -684,6 +709,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
89150 goto error;
89151 }
89152
89153+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
89154+ goto error;
89155+
89156 if (rgid != (gid_t) -1)
89157 new->gid = krgid;
89158 if (egid != (gid_t) -1)
89159@@ -745,12 +773,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
89160 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
89161 ns_capable(old->user_ns, CAP_SETUID)) {
89162 if (!uid_eq(kuid, old->fsuid)) {
89163+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
89164+ goto error;
89165+
89166 new->fsuid = kuid;
89167 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
89168 goto change_okay;
89169 }
89170 }
89171
89172+error:
89173 abort_creds(new);
89174 return old_fsuid;
89175
89176@@ -783,12 +815,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
89177 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
89178 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
89179 ns_capable(old->user_ns, CAP_SETGID)) {
89180+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
89181+ goto error;
89182+
89183 if (!gid_eq(kgid, old->fsgid)) {
89184 new->fsgid = kgid;
89185 goto change_okay;
89186 }
89187 }
89188
89189+error:
89190 abort_creds(new);
89191 return old_fsgid;
89192
89193@@ -1168,19 +1204,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
89194 return -EFAULT;
89195
89196 down_read(&uts_sem);
89197- error = __copy_to_user(&name->sysname, &utsname()->sysname,
89198+ error = __copy_to_user(name->sysname, &utsname()->sysname,
89199 __OLD_UTS_LEN);
89200 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
89201- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
89202+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
89203 __OLD_UTS_LEN);
89204 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
89205- error |= __copy_to_user(&name->release, &utsname()->release,
89206+ error |= __copy_to_user(name->release, &utsname()->release,
89207 __OLD_UTS_LEN);
89208 error |= __put_user(0, name->release + __OLD_UTS_LEN);
89209- error |= __copy_to_user(&name->version, &utsname()->version,
89210+ error |= __copy_to_user(name->version, &utsname()->version,
89211 __OLD_UTS_LEN);
89212 error |= __put_user(0, name->version + __OLD_UTS_LEN);
89213- error |= __copy_to_user(&name->machine, &utsname()->machine,
89214+ error |= __copy_to_user(name->machine, &utsname()->machine,
89215 __OLD_UTS_LEN);
89216 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
89217 up_read(&uts_sem);
89218@@ -1382,6 +1418,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
89219 */
89220 new_rlim->rlim_cur = 1;
89221 }
89222+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
89223+ is changed to a lower value. Since tasks can be created by the same
89224+ user in between this limit change and an execve by this task, force
89225+ a recheck only for this task by setting PF_NPROC_EXCEEDED.
89226+ */
89227+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
89228+ tsk->flags |= PF_NPROC_EXCEEDED;
89229 }
89230 if (!retval) {
89231 if (old_rlim)
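
Every credential-changing path in sys.c gains the same gate: gr_check_user_change()/gr_check_group_change() consult the RBAC policy before the new credentials are committed, setuid() additionally checks the crashed-UID ban list via gr_check_crash_uid(), and setfsuid()/setfsgid() grow an error: label so a denial unwinds through abort_creds(). With grsecurity compiled out the hooks reduce to stubs (sketch of the disabled variants, which live under grsecurity/ elsewhere in the patch):

	int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
	{
		return 0;	/* never deny */
	}

	int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
	{
		return 0;
	}
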
89232diff --git a/kernel/sysctl.c b/kernel/sysctl.c
89233index 06962ba..a54d45e 100644
89234--- a/kernel/sysctl.c
89235+++ b/kernel/sysctl.c
89236@@ -93,7 +93,6 @@
89237
89238
89239 #if defined(CONFIG_SYSCTL)
89240-
89241 /* External variables not in a header file. */
89242 extern int sysctl_overcommit_memory;
89243 extern int sysctl_overcommit_ratio;
89244@@ -119,17 +118,18 @@ extern int blk_iopoll_enabled;
89245
89246 /* Constants used for minimum and maximum */
89247 #ifdef CONFIG_LOCKUP_DETECTOR
89248-static int sixty = 60;
89249+static int sixty __read_only = 60;
89250 #endif
89251
89252-static int zero;
89253-static int __maybe_unused one = 1;
89254-static int __maybe_unused two = 2;
89255-static int __maybe_unused three = 3;
89256-static unsigned long one_ul = 1;
89257-static int one_hundred = 100;
89258+static int neg_one __read_only = -1;
89259+static int zero __read_only = 0;
89260+static int __maybe_unused one __read_only = 1;
89261+static int __maybe_unused two __read_only = 2;
89262+static int __maybe_unused three __read_only = 3;
89263+static unsigned long one_ul __read_only = 1;
89264+static int one_hundred __read_only = 100;
89265 #ifdef CONFIG_PRINTK
89266-static int ten_thousand = 10000;
89267+static int ten_thousand __read_only = 10000;
89268 #endif
89269
89270 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
89271@@ -176,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
89272 void __user *buffer, size_t *lenp, loff_t *ppos);
89273 #endif
89274
89275-#ifdef CONFIG_PRINTK
89276 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
89277 void __user *buffer, size_t *lenp, loff_t *ppos);
89278-#endif
89279
89280 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
89281 void __user *buffer, size_t *lenp, loff_t *ppos);
89282@@ -210,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
89283
89284 #endif
89285
89286+extern struct ctl_table grsecurity_table[];
89287+
89288 static struct ctl_table kern_table[];
89289 static struct ctl_table vm_table[];
89290 static struct ctl_table fs_table[];
89291@@ -224,6 +224,20 @@ extern struct ctl_table epoll_table[];
89292 int sysctl_legacy_va_layout;
89293 #endif
89294
89295+#ifdef CONFIG_PAX_SOFTMODE
89296+static ctl_table pax_table[] = {
89297+ {
89298+ .procname = "softmode",
89299+ .data = &pax_softmode,
89300+ .maxlen = sizeof(unsigned int),
89301+ .mode = 0600,
89302+ .proc_handler = &proc_dointvec,
89303+ },
89304+
89305+ { }
89306+};
89307+#endif
89308+
89309 /* The default sysctl tables: */
89310
89311 static struct ctl_table sysctl_base_table[] = {
89312@@ -272,6 +286,22 @@ static int max_extfrag_threshold = 1000;
89313 #endif
89314
89315 static struct ctl_table kern_table[] = {
89316+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
89317+ {
89318+ .procname = "grsecurity",
89319+ .mode = 0500,
89320+ .child = grsecurity_table,
89321+ },
89322+#endif
89323+
89324+#ifdef CONFIG_PAX_SOFTMODE
89325+ {
89326+ .procname = "pax",
89327+ .mode = 0500,
89328+ .child = pax_table,
89329+ },
89330+#endif
89331+
89332 {
89333 .procname = "sched_child_runs_first",
89334 .data = &sysctl_sched_child_runs_first,
89335@@ -629,7 +659,7 @@ static struct ctl_table kern_table[] = {
89336 .data = &modprobe_path,
89337 .maxlen = KMOD_PATH_LEN,
89338 .mode = 0644,
89339- .proc_handler = proc_dostring,
89340+ .proc_handler = proc_dostring_modpriv,
89341 },
89342 {
89343 .procname = "modules_disabled",
89344@@ -796,16 +826,20 @@ static struct ctl_table kern_table[] = {
89345 .extra1 = &zero,
89346 .extra2 = &one,
89347 },
89348+#endif
89349 {
89350 .procname = "kptr_restrict",
89351 .data = &kptr_restrict,
89352 .maxlen = sizeof(int),
89353 .mode = 0644,
89354 .proc_handler = proc_dointvec_minmax_sysadmin,
89355+#ifdef CONFIG_GRKERNSEC_HIDESYM
89356+ .extra1 = &two,
89357+#else
89358 .extra1 = &zero,
89359+#endif
89360 .extra2 = &two,
89361 },
89362-#endif
89363 {
89364 .procname = "ngroups_max",
89365 .data = &ngroups_max,
89366@@ -1048,10 +1082,17 @@ static struct ctl_table kern_table[] = {
89367 */
89368 {
89369 .procname = "perf_event_paranoid",
89370- .data = &sysctl_perf_event_paranoid,
89371- .maxlen = sizeof(sysctl_perf_event_paranoid),
89372+ .data = &sysctl_perf_event_legitimately_concerned,
89373+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
89374 .mode = 0644,
89375- .proc_handler = proc_dointvec,
89376+ /* go ahead, be a hero */
89377+ .proc_handler = proc_dointvec_minmax_sysadmin,
89378+ .extra1 = &neg_one,
89379+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89380+ .extra2 = &three,
89381+#else
89382+ .extra2 = &two,
89383+#endif
89384 },
89385 {
89386 .procname = "perf_event_mlock_kb",
89387@@ -1315,6 +1356,13 @@ static struct ctl_table vm_table[] = {
89388 .proc_handler = proc_dointvec_minmax,
89389 .extra1 = &zero,
89390 },
89391+ {
89392+ .procname = "heap_stack_gap",
89393+ .data = &sysctl_heap_stack_gap,
89394+ .maxlen = sizeof(sysctl_heap_stack_gap),
89395+ .mode = 0644,
89396+ .proc_handler = proc_doulongvec_minmax,
89397+ },
89398 #else
89399 {
89400 .procname = "nr_trim_pages",
89401@@ -1779,6 +1827,16 @@ int proc_dostring(struct ctl_table *table, int write,
89402 buffer, lenp, ppos);
89403 }
89404
89405+int proc_dostring_modpriv(struct ctl_table *table, int write,
89406+ void __user *buffer, size_t *lenp, loff_t *ppos)
89407+{
89408+ if (write && !capable(CAP_SYS_MODULE))
89409+ return -EPERM;
89410+
89411+ return _proc_do_string(table->data, table->maxlen, write,
89412+ buffer, lenp, ppos);
89413+}
89414+
89415 static size_t proc_skip_spaces(char **buf)
89416 {
89417 size_t ret;
89418@@ -1884,6 +1942,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
89419 len = strlen(tmp);
89420 if (len > *size)
89421 len = *size;
89422+ if (len > sizeof(tmp))
89423+ len = sizeof(tmp);
89424 if (copy_to_user(*buf, tmp, len))
89425 return -EFAULT;
89426 *size -= len;
89427@@ -2048,7 +2108,7 @@ int proc_dointvec(struct ctl_table *table, int write,
89428 static int proc_taint(struct ctl_table *table, int write,
89429 void __user *buffer, size_t *lenp, loff_t *ppos)
89430 {
89431- struct ctl_table t;
89432+ ctl_table_no_const t;
89433 unsigned long tmptaint = get_taint();
89434 int err;
89435
89436@@ -2076,7 +2136,6 @@ static int proc_taint(struct ctl_table *table, int write,
89437 return err;
89438 }
89439
89440-#ifdef CONFIG_PRINTK
89441 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
89442 void __user *buffer, size_t *lenp, loff_t *ppos)
89443 {
89444@@ -2085,7 +2144,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
89445
89446 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
89447 }
89448-#endif
89449
89450 struct do_proc_dointvec_minmax_conv_param {
89451 int *min;
89452@@ -2632,6 +2690,12 @@ int proc_dostring(struct ctl_table *table, int write,
89453 return -ENOSYS;
89454 }
89455
89456+int proc_dostring_modpriv(struct ctl_table *table, int write,
89457+ void __user *buffer, size_t *lenp, loff_t *ppos)
89458+{
89459+ return -ENOSYS;
89460+}
89461+
89462 int proc_dointvec(struct ctl_table *table, int write,
89463 void __user *buffer, size_t *lenp, loff_t *ppos)
89464 {
89465@@ -2688,5 +2752,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
89466 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
89467 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
89468 EXPORT_SYMBOL(proc_dostring);
89469+EXPORT_SYMBOL(proc_dostring_modpriv);
89470 EXPORT_SYMBOL(proc_doulongvec_minmax);
89471 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
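
proc_dostring_modpriv makes /proc/sys/kernel/modprobe writable only with CAP_SYS_MODULE while leaving it readable, and the -ENOSYS twin keeps !CONFIG_SYSCTL builds linking. The pattern generalizes to gating any string sysctl behind a chosen capability (illustrative helper, not from the patch):

	static int proc_dostring_capable(struct ctl_table *table, int write,
					 void __user *buffer, size_t *lenp,
					 loff_t *ppos, int cap)
	{
		if (write && !capable(cap))
			return -EPERM;
		return proc_dostring(table, write, buffer, lenp, ppos);
	}
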
89472diff --git a/kernel/taskstats.c b/kernel/taskstats.c
89473index 13d2f7c..c93d0b0 100644
89474--- a/kernel/taskstats.c
89475+++ b/kernel/taskstats.c
89476@@ -28,9 +28,12 @@
89477 #include <linux/fs.h>
89478 #include <linux/file.h>
89479 #include <linux/pid_namespace.h>
89480+#include <linux/grsecurity.h>
89481 #include <net/genetlink.h>
89482 #include <linux/atomic.h>
89483
89484+extern int gr_is_taskstats_denied(int pid);
89485+
89486 /*
89487 * Maximum length of a cpumask that can be specified in
89488 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
89489@@ -576,6 +579,9 @@ err:
89490
89491 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
89492 {
89493+ if (gr_is_taskstats_denied(current->pid))
89494+ return -EACCES;
89495+
89496 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
89497 return cmd_attr_register_cpumask(info);
89498 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
89499diff --git a/kernel/time.c b/kernel/time.c
89500index 7c7964c..2a0d412 100644
89501--- a/kernel/time.c
89502+++ b/kernel/time.c
89503@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
89504 return error;
89505
89506 if (tz) {
89507+ /* we log in do_settimeofday(), called below, so don't log twice
89508+ */
89509+ if (!tv)
89510+ gr_log_timechange();
89511+
89512 sys_tz = *tz;
89513 update_vsyscall_tz();
89514 if (firsttime) {
89515diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
89516index 88c9c65..7497ebc 100644
89517--- a/kernel/time/alarmtimer.c
89518+++ b/kernel/time/alarmtimer.c
89519@@ -795,7 +795,7 @@ static int __init alarmtimer_init(void)
89520 struct platform_device *pdev;
89521 int error = 0;
89522 int i;
89523- struct k_clock alarm_clock = {
89524+ static struct k_clock alarm_clock = {
89525 .clock_getres = alarm_clock_getres,
89526 .clock_get = alarm_clock_get,
89527 .timer_create = alarm_timer_create,
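
With struct k_clock constified, a stack-local instance can no longer be filled in at runtime, so alarm_clock becomes a static, compile-time image that posix_timers_register_clock() copies as before. The general rule under the constify plugin, as a sketch (callback names are made up):

	static int example_getres(const clockid_t which_clock, struct timespec *tp);
	static int example_get(const clockid_t which_clock, struct timespec *tp);

	static struct k_clock example_clock = {	/* static, not on the stack */
		.clock_getres	= example_getres,
		.clock_get	= example_get,
	};
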
89528diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
89529index b415457..c26876d 100644
89530--- a/kernel/time/timekeeping.c
89531+++ b/kernel/time/timekeeping.c
89532@@ -15,6 +15,7 @@
89533 #include <linux/init.h>
89534 #include <linux/mm.h>
89535 #include <linux/sched.h>
89536+#include <linux/grsecurity.h>
89537 #include <linux/syscore_ops.h>
89538 #include <linux/clocksource.h>
89539 #include <linux/jiffies.h>
89540@@ -500,6 +501,8 @@ int do_settimeofday(const struct timespec *tv)
89541 if (!timespec_valid_strict(tv))
89542 return -EINVAL;
89543
89544+ gr_log_timechange();
89545+
89546 raw_spin_lock_irqsave(&timekeeper_lock, flags);
89547 write_seqcount_begin(&timekeeper_seq);
89548
89549diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
89550index 61ed862..3b52c65 100644
89551--- a/kernel/time/timer_list.c
89552+++ b/kernel/time/timer_list.c
89553@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
89554
89555 static void print_name_offset(struct seq_file *m, void *sym)
89556 {
89557+#ifdef CONFIG_GRKERNSEC_HIDESYM
89558+ SEQ_printf(m, "<%p>", NULL);
89559+#else
89560 char symname[KSYM_NAME_LEN];
89561
89562 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
89563 SEQ_printf(m, "<%pK>", sym);
89564 else
89565 SEQ_printf(m, "%s", symname);
89566+#endif
89567 }
89568
89569 static void
89570@@ -119,7 +123,11 @@ next_one:
89571 static void
89572 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
89573 {
89574+#ifdef CONFIG_GRKERNSEC_HIDESYM
89575+ SEQ_printf(m, " .base: %p\n", NULL);
89576+#else
89577 SEQ_printf(m, " .base: %pK\n", base);
89578+#endif
89579 SEQ_printf(m, " .index: %d\n",
89580 base->index);
89581 SEQ_printf(m, " .resolution: %Lu nsecs\n",
89582@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
89583 {
89584 struct proc_dir_entry *pe;
89585
89586+#ifdef CONFIG_GRKERNSEC_PROC_ADD
89587+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
89588+#else
89589 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
89590+#endif
89591 if (!pe)
89592 return -ENOMEM;
89593 return 0;
89594diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
89595index 1fb08f2..ca4bb1e 100644
89596--- a/kernel/time/timer_stats.c
89597+++ b/kernel/time/timer_stats.c
89598@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
89599 static unsigned long nr_entries;
89600 static struct entry entries[MAX_ENTRIES];
89601
89602-static atomic_t overflow_count;
89603+static atomic_unchecked_t overflow_count;
89604
89605 /*
89606 * The entries are in a hash-table, for fast lookup:
89607@@ -140,7 +140,7 @@ static void reset_entries(void)
89608 nr_entries = 0;
89609 memset(entries, 0, sizeof(entries));
89610 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
89611- atomic_set(&overflow_count, 0);
89612+ atomic_set_unchecked(&overflow_count, 0);
89613 }
89614
89615 static struct entry *alloc_entry(void)
89616@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
89617 if (likely(entry))
89618 entry->count++;
89619 else
89620- atomic_inc(&overflow_count);
89621+ atomic_inc_unchecked(&overflow_count);
89622
89623 out_unlock:
89624 raw_spin_unlock_irqrestore(lock, flags);
89625@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
89626
89627 static void print_name_offset(struct seq_file *m, unsigned long addr)
89628 {
89629+#ifdef CONFIG_GRKERNSEC_HIDESYM
89630+ seq_printf(m, "<%p>", NULL);
89631+#else
89632 char symname[KSYM_NAME_LEN];
89633
89634 if (lookup_symbol_name(addr, symname) < 0)
89635- seq_printf(m, "<%p>", (void *)addr);
89636+ seq_printf(m, "<%pK>", (void *)addr);
89637 else
89638 seq_printf(m, "%s", symname);
89639+#endif
89640 }
89641
89642 static int tstats_show(struct seq_file *m, void *v)
89643@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
89644
89645 seq_puts(m, "Timer Stats Version: v0.3\n");
89646 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
89647- if (atomic_read(&overflow_count))
89648- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
89649+ if (atomic_read_unchecked(&overflow_count))
89650+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
89651 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
89652
89653 for (i = 0; i < nr_entries; i++) {
89654@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
89655 {
89656 struct proc_dir_entry *pe;
89657
89658+#ifdef CONFIG_GRKERNSEC_PROC_ADD
89659+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
89660+#else
89661 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
89662+#endif
89663 if (!pe)
89664 return -ENOMEM;
89665 return 0;
89666diff --git a/kernel/timer.c b/kernel/timer.c
89667index accfd24..e00f0c0 100644
89668--- a/kernel/timer.c
89669+++ b/kernel/timer.c
89670@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
89671 /*
89672 * This function runs timers and the timer-tq in bottom half context.
89673 */
89674-static void run_timer_softirq(struct softirq_action *h)
89675+static __latent_entropy void run_timer_softirq(void)
89676 {
89677 struct tvec_base *base = __this_cpu_read(tvec_bases);
89678
89679@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
89680 *
89681 * In all cases the return value is guaranteed to be non-negative.
89682 */
89683-signed long __sched schedule_timeout(signed long timeout)
89684+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
89685 {
89686 struct timer_list timer;
89687 unsigned long expire;
89688diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
89689index f785aef..59f1b18 100644
89690--- a/kernel/trace/blktrace.c
89691+++ b/kernel/trace/blktrace.c
89692@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
89693 struct blk_trace *bt = filp->private_data;
89694 char buf[16];
89695
89696- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
89697+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
89698
89699 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
89700 }
89701@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
89702 return 1;
89703
89704 bt = buf->chan->private_data;
89705- atomic_inc(&bt->dropped);
89706+ atomic_inc_unchecked(&bt->dropped);
89707 return 0;
89708 }
89709
89710@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
89711
89712 bt->dir = dir;
89713 bt->dev = dev;
89714- atomic_set(&bt->dropped, 0);
89715+ atomic_set_unchecked(&bt->dropped, 0);
89716 INIT_LIST_HEAD(&bt->running_list);
89717
89718 ret = -EIO;
89719diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
89720index 38463d2..68abe92 100644
89721--- a/kernel/trace/ftrace.c
89722+++ b/kernel/trace/ftrace.c
89723@@ -1978,12 +1978,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
89724 if (unlikely(ftrace_disabled))
89725 return 0;
89726
89727+ ret = ftrace_arch_code_modify_prepare();
89728+ FTRACE_WARN_ON(ret);
89729+ if (ret)
89730+ return 0;
89731+
89732 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
89733+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
89734 if (ret) {
89735 ftrace_bug(ret, ip);
89736- return 0;
89737 }
89738- return 1;
89739+ return ret ? 0 : 1;
89740 }
89741
89742 /*
89743@@ -4190,8 +4195,10 @@ static int ftrace_process_locs(struct module *mod,
89744 if (!count)
89745 return 0;
89746
89747+ pax_open_kernel();
89748 sort(start, count, sizeof(*start),
89749 ftrace_cmp_ips, ftrace_swap_ips);
89750+ pax_close_kernel();
89751
89752 start_pg = ftrace_allocate_pages(count);
89753 if (!start_pg)
89754@@ -4922,8 +4929,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
89755 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
89756
89757 static int ftrace_graph_active;
89758-static struct notifier_block ftrace_suspend_notifier;
89759-
89760 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
89761 {
89762 return 0;
89763@@ -5099,6 +5104,10 @@ static void update_function_graph_func(void)
89764 ftrace_graph_entry = ftrace_graph_entry_test;
89765 }
89766
89767+static struct notifier_block ftrace_suspend_notifier = {
89768+ .notifier_call = ftrace_suspend_notifier_call
89769+};
89770+
89771 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
89772 trace_func_graph_ent_t entryfunc)
89773 {
89774@@ -5112,7 +5121,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
89775 goto out;
89776 }
89777
89778- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
89779 register_pm_notifier(&ftrace_suspend_notifier);
89780
89781 ftrace_graph_active++;
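
Two fixes travel together in the ftrace hunks: ftrace_code_disable() now brackets ftrace_make_nop() with the arch prepare/post_process hooks, since under KERNEXEC the kernel text stays read-only unless those hooks open it, and the suspend notifier gains a compile-time initializer so the structure no longer needs a runtime write before registration. For reference, the x86 hooks look roughly like this (from the mainline implementation; details such as the modifying_code flag omitted):

	int ftrace_arch_code_modify_prepare(void)
	{
		set_kernel_text_rw();
		set_all_modules_text_rw();
		return 0;
	}

	int ftrace_arch_code_modify_post_process(void)
	{
		set_all_modules_text_ro();
		set_kernel_text_ro();
		return 0;
	}
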
89782diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
89783index 0e337ee..3370631 100644
89784--- a/kernel/trace/ring_buffer.c
89785+++ b/kernel/trace/ring_buffer.c
89786@@ -352,9 +352,9 @@ struct buffer_data_page {
89787 */
89788 struct buffer_page {
89789 struct list_head list; /* list of buffer pages */
89790- local_t write; /* index for next write */
89791+ local_unchecked_t write; /* index for next write */
89792 unsigned read; /* index for next read */
89793- local_t entries; /* entries on this page */
89794+ local_unchecked_t entries; /* entries on this page */
89795 unsigned long real_end; /* real end of data */
89796 struct buffer_data_page *page; /* Actual data page */
89797 };
89798@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
89799 unsigned long last_overrun;
89800 local_t entries_bytes;
89801 local_t entries;
89802- local_t overrun;
89803- local_t commit_overrun;
89804+ local_unchecked_t overrun;
89805+ local_unchecked_t commit_overrun;
89806 local_t dropped_events;
89807 local_t committing;
89808 local_t commits;
89809@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
89810 *
89811 * We add a counter to the write field to denote this.
89812 */
89813- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
89814- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
89815+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
89816+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
89817
89818 /*
89819 * Just make sure we have seen our old_write and synchronize
89820@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
89821 * cmpxchg to only update if an interrupt did not already
89822 * do it for us. If the cmpxchg fails, we don't care.
89823 */
89824- (void)local_cmpxchg(&next_page->write, old_write, val);
89825- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
89826+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
89827+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
89828
89829 /*
89830 * No need to worry about races with clearing out the commit.
89831@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
89832
89833 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
89834 {
89835- return local_read(&bpage->entries) & RB_WRITE_MASK;
89836+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
89837 }
89838
89839 static inline unsigned long rb_page_write(struct buffer_page *bpage)
89840 {
89841- return local_read(&bpage->write) & RB_WRITE_MASK;
89842+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
89843 }
89844
89845 static int
89846@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
89847 * bytes consumed in ring buffer from here.
89848 * Increment overrun to account for the lost events.
89849 */
89850- local_add(page_entries, &cpu_buffer->overrun);
89851+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
89852 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
89853 }
89854
89855@@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
89856 * it is our responsibility to update
89857 * the counters.
89858 */
89859- local_add(entries, &cpu_buffer->overrun);
89860+ local_add_unchecked(entries, &cpu_buffer->overrun);
89861 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
89862
89863 /*
89864@@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
89865 if (tail == BUF_PAGE_SIZE)
89866 tail_page->real_end = 0;
89867
89868- local_sub(length, &tail_page->write);
89869+ local_sub_unchecked(length, &tail_page->write);
89870 return;
89871 }
89872
89873@@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
89874 rb_event_set_padding(event);
89875
89876 /* Set the write back to the previous setting */
89877- local_sub(length, &tail_page->write);
89878+ local_sub_unchecked(length, &tail_page->write);
89879 return;
89880 }
89881
89882@@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
89883
89884 /* Set write to end of buffer */
89885 length = (tail + length) - BUF_PAGE_SIZE;
89886- local_sub(length, &tail_page->write);
89887+ local_sub_unchecked(length, &tail_page->write);
89888 }
89889
89890 /*
89891@@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
89892 * about it.
89893 */
89894 if (unlikely(next_page == commit_page)) {
89895- local_inc(&cpu_buffer->commit_overrun);
89896+ local_inc_unchecked(&cpu_buffer->commit_overrun);
89897 goto out_reset;
89898 }
89899
89900@@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
89901 cpu_buffer->tail_page) &&
89902 (cpu_buffer->commit_page ==
89903 cpu_buffer->reader_page))) {
89904- local_inc(&cpu_buffer->commit_overrun);
89905+ local_inc_unchecked(&cpu_buffer->commit_overrun);
89906 goto out_reset;
89907 }
89908 }
89909@@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
89910 length += RB_LEN_TIME_EXTEND;
89911
89912 tail_page = cpu_buffer->tail_page;
89913- write = local_add_return(length, &tail_page->write);
89914+ write = local_add_return_unchecked(length, &tail_page->write);
89915
89916 /* set write to only the index of the write */
89917 write &= RB_WRITE_MASK;
89918@@ -2415,7 +2415,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
89919 kmemcheck_annotate_bitfield(event, bitfield);
89920 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
89921
89922- local_inc(&tail_page->entries);
89923+ local_inc_unchecked(&tail_page->entries);
89924
89925 /*
89926 * If this is the first commit on the page, then update
89927@@ -2448,7 +2448,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
89928
89929 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
89930 unsigned long write_mask =
89931- local_read(&bpage->write) & ~RB_WRITE_MASK;
89932+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
89933 unsigned long event_length = rb_event_length(event);
89934 /*
89935 * This is on the tail page. It is possible that
89936@@ -2458,7 +2458,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
89937 */
89938 old_index += write_mask;
89939 new_index += write_mask;
89940- index = local_cmpxchg(&bpage->write, old_index, new_index);
89941+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
89942 if (index == old_index) {
89943 /* update counters */
89944 local_sub(event_length, &cpu_buffer->entries_bytes);
89945@@ -2850,7 +2850,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
89946
89947 /* Do the likely case first */
89948 if (likely(bpage->page == (void *)addr)) {
89949- local_dec(&bpage->entries);
89950+ local_dec_unchecked(&bpage->entries);
89951 return;
89952 }
89953
89954@@ -2862,7 +2862,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
89955 start = bpage;
89956 do {
89957 if (bpage->page == (void *)addr) {
89958- local_dec(&bpage->entries);
89959+ local_dec_unchecked(&bpage->entries);
89960 return;
89961 }
89962 rb_inc_page(cpu_buffer, &bpage);
89963@@ -3146,7 +3146,7 @@ static inline unsigned long
89964 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
89965 {
89966 return local_read(&cpu_buffer->entries) -
89967- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
89968+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
89969 }
89970
89971 /**
89972@@ -3235,7 +3235,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
89973 return 0;
89974
89975 cpu_buffer = buffer->buffers[cpu];
89976- ret = local_read(&cpu_buffer->overrun);
89977+ ret = local_read_unchecked(&cpu_buffer->overrun);
89978
89979 return ret;
89980 }
89981@@ -3258,7 +3258,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
89982 return 0;
89983
89984 cpu_buffer = buffer->buffers[cpu];
89985- ret = local_read(&cpu_buffer->commit_overrun);
89986+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
89987
89988 return ret;
89989 }
89990@@ -3343,7 +3343,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
89991 /* if you care about this being correct, lock the buffer */
89992 for_each_buffer_cpu(buffer, cpu) {
89993 cpu_buffer = buffer->buffers[cpu];
89994- overruns += local_read(&cpu_buffer->overrun);
89995+ overruns += local_read_unchecked(&cpu_buffer->overrun);
89996 }
89997
89998 return overruns;
89999@@ -3519,8 +3519,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
90000 /*
90001 * Reset the reader page to size zero.
90002 */
90003- local_set(&cpu_buffer->reader_page->write, 0);
90004- local_set(&cpu_buffer->reader_page->entries, 0);
90005+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
90006+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
90007 local_set(&cpu_buffer->reader_page->page->commit, 0);
90008 cpu_buffer->reader_page->real_end = 0;
90009
90010@@ -3554,7 +3554,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
90011 * want to compare with the last_overrun.
90012 */
90013 smp_mb();
90014- overwrite = local_read(&(cpu_buffer->overrun));
90015+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
90016
90017 /*
90018 * Here's the tricky part.
90019@@ -4124,8 +4124,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
90020
90021 cpu_buffer->head_page
90022 = list_entry(cpu_buffer->pages, struct buffer_page, list);
90023- local_set(&cpu_buffer->head_page->write, 0);
90024- local_set(&cpu_buffer->head_page->entries, 0);
90025+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
90026+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
90027 local_set(&cpu_buffer->head_page->page->commit, 0);
90028
90029 cpu_buffer->head_page->read = 0;
90030@@ -4135,14 +4135,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
90031
90032 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
90033 INIT_LIST_HEAD(&cpu_buffer->new_pages);
90034- local_set(&cpu_buffer->reader_page->write, 0);
90035- local_set(&cpu_buffer->reader_page->entries, 0);
90036+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
90037+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
90038 local_set(&cpu_buffer->reader_page->page->commit, 0);
90039 cpu_buffer->reader_page->read = 0;
90040
90041 local_set(&cpu_buffer->entries_bytes, 0);
90042- local_set(&cpu_buffer->overrun, 0);
90043- local_set(&cpu_buffer->commit_overrun, 0);
90044+ local_set_unchecked(&cpu_buffer->overrun, 0);
90045+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
90046 local_set(&cpu_buffer->dropped_events, 0);
90047 local_set(&cpu_buffer->entries, 0);
90048 local_set(&cpu_buffer->committing, 0);
90049@@ -4547,8 +4547,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
90050 rb_init_page(bpage);
90051 bpage = reader->page;
90052 reader->page = *data_page;
90053- local_set(&reader->write, 0);
90054- local_set(&reader->entries, 0);
90055+ local_set_unchecked(&reader->write, 0);
90056+ local_set_unchecked(&reader->entries, 0);
90057 reader->read = 0;
90058 *data_page = bpage;
90059
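
The ring buffer keeps its hot per-cpu bookkeeping in local_t, and the write/entries/overrun counters may legitimately wrap (readers mask them with RB_WRITE_MASK), so they receive the same unchecked twin treatment as atomic_t above. A sketch of how the twin type is layered, following the shape of asm-generic/local.h:

	typedef struct {
		atomic_long_unchecked_t a;
	} local_unchecked_t;

	#define local_read_unchecked(l)		atomic_long_read_unchecked(&(l)->a)
	#define local_set_unchecked(l, i)	atomic_long_set_unchecked(&(l)->a, (i))
	#define local_inc_unchecked(l)		atomic_long_inc_unchecked(&(l)->a)
	#define local_add_unchecked(i, l)	atomic_long_add_unchecked((i), &(l)->a)
	#define local_cmpxchg_unchecked(l, o, n) \
		atomic_long_cmpxchg_unchecked(&(l)->a, (o), (n))
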
90060diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
90061index 0a360ce..7bd800e 100644
90062--- a/kernel/trace/trace.c
90063+++ b/kernel/trace/trace.c
90064@@ -3352,7 +3352,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
90065 return 0;
90066 }
90067
90068-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
90069+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
90070 {
90071 /* do nothing if flag is already set */
90072 if (!!(trace_flags & mask) == !!enabled)
90073diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
90074index ea189e0..a5b48c4 100644
90075--- a/kernel/trace/trace.h
90076+++ b/kernel/trace/trace.h
90077@@ -1040,7 +1040,7 @@ extern const char *__stop___tracepoint_str[];
90078 void trace_printk_init_buffers(void);
90079 void trace_printk_start_comm(void);
90080 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
90081-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
90082+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
90083
90084 /*
90085 * Normal trace_printk() and friends allocates special buffers
90086diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
90087index 26dc348..8708ca7 100644
90088--- a/kernel/trace/trace_clock.c
90089+++ b/kernel/trace/trace_clock.c
90090@@ -123,7 +123,7 @@ u64 notrace trace_clock_global(void)
90091 return now;
90092 }
90093
90094-static atomic64_t trace_counter;
90095+static atomic64_unchecked_t trace_counter;
90096
90097 /*
90098 * trace_clock_counter(): simply an atomic counter.
90099@@ -132,5 +132,5 @@ static atomic64_t trace_counter;
90100 */
90101 u64 notrace trace_clock_counter(void)
90102 {
90103- return atomic64_add_return(1, &trace_counter);
90104+ return atomic64_inc_return_unchecked(&trace_counter);
90105 }
90106diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
90107index 2e58196..fdd3d61 100644
90108--- a/kernel/trace/trace_events.c
90109+++ b/kernel/trace/trace_events.c
90110@@ -1681,7 +1681,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
90111 return 0;
90112 }
90113
90114-struct ftrace_module_file_ops;
90115 static void __add_event_to_tracers(struct ftrace_event_call *call);
90116
90117 /* Add an additional event_call dynamically */
90118diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
90119index 0abd9b8..6a663a2 100644
90120--- a/kernel/trace/trace_mmiotrace.c
90121+++ b/kernel/trace/trace_mmiotrace.c
90122@@ -24,7 +24,7 @@ struct header_iter {
90123 static struct trace_array *mmio_trace_array;
90124 static bool overrun_detected;
90125 static unsigned long prev_overruns;
90126-static atomic_t dropped_count;
90127+static atomic_unchecked_t dropped_count;
90128
90129 static void mmio_reset_data(struct trace_array *tr)
90130 {
90131@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
90132
90133 static unsigned long count_overruns(struct trace_iterator *iter)
90134 {
90135- unsigned long cnt = atomic_xchg(&dropped_count, 0);
90136+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
90137 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
90138
90139 if (over > prev_overruns)
90140@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
90141 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
90142 sizeof(*entry), 0, pc);
90143 if (!event) {
90144- atomic_inc(&dropped_count);
90145+ atomic_inc_unchecked(&dropped_count);
90146 return;
90147 }
90148 entry = ring_buffer_event_data(event);
90149@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
90150 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
90151 sizeof(*entry), 0, pc);
90152 if (!event) {
90153- atomic_inc(&dropped_count);
90154+ atomic_inc_unchecked(&dropped_count);
90155 return;
90156 }
90157 entry = ring_buffer_event_data(event);
90158diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
90159index ed32284..884d6c3 100644
90160--- a/kernel/trace/trace_output.c
90161+++ b/kernel/trace/trace_output.c
90162@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
90163
90164 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
90165 if (!IS_ERR(p)) {
90166- p = mangle_path(s->buffer + s->len, p, "\n");
90167+ p = mangle_path(s->buffer + s->len, p, "\n\\");
90168 if (p) {
90169 s->len = p - s->buffer;
90170 return 1;
90171@@ -908,14 +908,16 @@ int register_ftrace_event(struct trace_event *event)
90172 goto out;
90173 }
90174
90175+ pax_open_kernel();
90176 if (event->funcs->trace == NULL)
90177- event->funcs->trace = trace_nop_print;
90178+ *(void **)&event->funcs->trace = trace_nop_print;
90179 if (event->funcs->raw == NULL)
90180- event->funcs->raw = trace_nop_print;
90181+ *(void **)&event->funcs->raw = trace_nop_print;
90182 if (event->funcs->hex == NULL)
90183- event->funcs->hex = trace_nop_print;
90184+ *(void **)&event->funcs->hex = trace_nop_print;
90185 if (event->funcs->binary == NULL)
90186- event->funcs->binary = trace_nop_print;
90187+ *(void **)&event->funcs->binary = trace_nop_print;
90188+ pax_close_kernel();
90189
90190 key = event->type & (EVENT_HASHSIZE - 1);
90191
90192diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
90193index b20428c..4845a10 100644
90194--- a/kernel/trace/trace_stack.c
90195+++ b/kernel/trace/trace_stack.c
90196@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
90197 return;
90198
90199 /* we do not handle interrupt stacks yet */
90200- if (!object_is_on_stack(stack))
90201+ if (!object_starts_on_stack(stack))
90202 return;
90203
90204 local_irq_save(flags);
90205diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
90206index 240fb62..583473e 100644
90207--- a/kernel/user_namespace.c
90208+++ b/kernel/user_namespace.c
90209@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
90210 !kgid_has_mapping(parent_ns, group))
90211 return -EPERM;
90212
90213+#ifdef CONFIG_GRKERNSEC
90214+ /*
90215+ * This doesn't really inspire confidence:
90216+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
90217+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
90218+ * Increases kernel attack surface in areas developers
90219+ * previously cared little about ("low importance due
90220+ * to requiring 'root' capability")
90221+ * To be removed when this code receives *proper* review
90222+ */
90223+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
90224+ !capable(CAP_SETGID))
90225+ return -EPERM;
90226+#endif
90227+
90228 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
90229 if (!ns)
90230 return -ENOMEM;
90231@@ -866,7 +881,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
90232 if (atomic_read(&current->mm->mm_users) > 1)
90233 return -EINVAL;
90234
90235- if (current->fs->users != 1)
90236+ if (atomic_read(&current->fs->users) != 1)
90237 return -EINVAL;
90238
90239 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
90240diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
90241index 4f69f9a..7c6f8f8 100644
90242--- a/kernel/utsname_sysctl.c
90243+++ b/kernel/utsname_sysctl.c
90244@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
90245 static int proc_do_uts_string(ctl_table *table, int write,
90246 void __user *buffer, size_t *lenp, loff_t *ppos)
90247 {
90248- struct ctl_table uts_table;
90249+ ctl_table_no_const uts_table;
90250 int r;
90251 memcpy(&uts_table, table, sizeof(uts_table));
90252 uts_table.data = get_uts(table, write);
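
[Editor's note] ctl_table_no_const is the patch's escape hatch for sysctl handlers: registered ctl_table instances are constified, so a handler that must repoint .data first takes a writable stack copy and hands that to the generic proc helper (the mm/hugetlb.c hunks further down follow the same pattern). A minimal sketch, assuming a handler for one unsigned long value (my_sysctl_handler, read_val and write_val are hypothetical):

    static int my_sysctl_handler(ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            ctl_table_no_const tmp = *table;     /* writable stack copy of the constified table */
            unsigned long val = read_val();
            int ret;

            tmp.data = &val;
            tmp.maxlen = sizeof(val);
            ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
            if (!ret && write)
                    write_val(val);
            return ret;
    }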
90253diff --git a/kernel/watchdog.c b/kernel/watchdog.c
90254index 4431610..4265616 100644
90255--- a/kernel/watchdog.c
90256+++ b/kernel/watchdog.c
90257@@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
90258 static void watchdog_nmi_disable(unsigned int cpu) { return; }
90259 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
90260
90261-static struct smp_hotplug_thread watchdog_threads = {
90262+static struct smp_hotplug_thread watchdog_threads __read_only = {
90263 .store = &softlockup_watchdog,
90264 .thread_should_run = watchdog_should_run,
90265 .thread_fn = watchdog,
90266diff --git a/kernel/workqueue.c b/kernel/workqueue.c
90267index a8381cf..1ce1331 100644
90268--- a/kernel/workqueue.c
90269+++ b/kernel/workqueue.c
90270@@ -4678,7 +4678,7 @@ static void rebind_workers(struct worker_pool *pool)
90271 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
90272 worker_flags |= WORKER_REBOUND;
90273 worker_flags &= ~WORKER_UNBOUND;
90274- ACCESS_ONCE(worker->flags) = worker_flags;
90275+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
90276 }
90277
90278 spin_unlock_irq(&pool->lock);
90279diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
90280index db25707..8b16430 100644
90281--- a/lib/Kconfig.debug
90282+++ b/lib/Kconfig.debug
90283@@ -845,7 +845,7 @@ config DEBUG_MUTEXES
90284
90285 config DEBUG_WW_MUTEX_SLOWPATH
90286 bool "Wait/wound mutex debugging: Slowpath testing"
90287- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
90288+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
90289 select DEBUG_LOCK_ALLOC
90290 select DEBUG_SPINLOCK
90291 select DEBUG_MUTEXES
90292@@ -858,7 +858,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
90293
90294 config DEBUG_LOCK_ALLOC
90295 bool "Lock debugging: detect incorrect freeing of live locks"
90296- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
90297+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
90298 select DEBUG_SPINLOCK
90299 select DEBUG_MUTEXES
90300 select LOCKDEP
90301@@ -872,7 +872,7 @@ config DEBUG_LOCK_ALLOC
90302
90303 config PROVE_LOCKING
90304 bool "Lock debugging: prove locking correctness"
90305- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
90306+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
90307 select LOCKDEP
90308 select DEBUG_SPINLOCK
90309 select DEBUG_MUTEXES
90310@@ -923,7 +923,7 @@ config LOCKDEP
90311
90312 config LOCK_STAT
90313 bool "Lock usage statistics"
90314- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
90315+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
90316 select LOCKDEP
90317 select DEBUG_SPINLOCK
90318 select DEBUG_MUTEXES
90319@@ -1385,6 +1385,7 @@ config LATENCYTOP
90320 depends on DEBUG_KERNEL
90321 depends on STACKTRACE_SUPPORT
90322 depends on PROC_FS
90323+ depends on !GRKERNSEC_HIDESYM
90324 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
90325 select KALLSYMS
90326 select KALLSYMS_ALL
90327@@ -1401,7 +1402,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
90328 config DEBUG_STRICT_USER_COPY_CHECKS
90329 bool "Strict user copy size checks"
90330 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
90331- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
90332+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
90333 help
90334 Enabling this option turns a certain set of sanity checks for user
90335 copy operations into compile time failures.
90336@@ -1520,7 +1521,7 @@ endmenu # runtime tests
90337
90338 config PROVIDE_OHCI1394_DMA_INIT
90339 bool "Remote debugging over FireWire early on boot"
90340- depends on PCI && X86
90341+ depends on PCI && X86 && !GRKERNSEC
90342 help
90343 If you want to debug problems which hang or crash the kernel early
90344 on boot and the crashing machine has a FireWire port, you can use
90345@@ -1549,7 +1550,7 @@ config PROVIDE_OHCI1394_DMA_INIT
90346
90347 config FIREWIRE_OHCI_REMOTE_DMA
90348 bool "Remote debugging over FireWire with firewire-ohci"
90349- depends on FIREWIRE_OHCI
90350+ depends on FIREWIRE_OHCI && !GRKERNSEC
90351 help
90352 This option lets you use the FireWire bus for remote debugging
90353 with help of the firewire-ohci driver. It enables unfiltered
90354diff --git a/lib/Makefile b/lib/Makefile
90355index 04944e9..f43eabe 100644
90356--- a/lib/Makefile
90357+++ b/lib/Makefile
90358@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
90359 obj-$(CONFIG_BTREE) += btree.o
90360 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
90361 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
90362-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
90363+obj-y += list_debug.o
90364 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
90365
90366 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
90367diff --git a/lib/bitmap.c b/lib/bitmap.c
90368index 06f7e4f..f3cf2b0 100644
90369--- a/lib/bitmap.c
90370+++ b/lib/bitmap.c
90371@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
90372 {
90373 int c, old_c, totaldigits, ndigits, nchunks, nbits;
90374 u32 chunk;
90375- const char __user __force *ubuf = (const char __user __force *)buf;
90376+ const char __user *ubuf = (const char __force_user *)buf;
90377
90378 bitmap_zero(maskp, nmaskbits);
90379
90380@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
90381 {
90382 if (!access_ok(VERIFY_READ, ubuf, ulen))
90383 return -EFAULT;
90384- return __bitmap_parse((const char __force *)ubuf,
90385+ return __bitmap_parse((const char __force_kernel *)ubuf,
90386 ulen, 1, maskp, nmaskbits);
90387
90388 }
90389@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
90390 {
90391 unsigned a, b;
90392 int c, old_c, totaldigits;
90393- const char __user __force *ubuf = (const char __user __force *)buf;
90394+ const char __user *ubuf = (const char __force_user *)buf;
90395 int exp_digit, in_range;
90396
90397 totaldigits = c = 0;
90398@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
90399 {
90400 if (!access_ok(VERIFY_READ, ubuf, ulen))
90401 return -EFAULT;
90402- return __bitmap_parselist((const char __force *)ubuf,
90403+ return __bitmap_parselist((const char __force_kernel *)ubuf,
90404 ulen, 1, maskp, nmaskbits);
90405 }
90406 EXPORT_SYMBOL(bitmap_parselist_user);
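
[Editor's note] The __force_user/__force_kernel annotations used above replace bare __force casts so the direction of each address-space conversion is spelled out where sparse checks it. A minimal sketch, assuming the macro definitions this patch supplies (kbuf is hypothetical):

    const char *kbuf = "0-3,8";
    const char __user *uview = (const char __force_user *)kbuf;   /* kernel pointer presented as __user */
    const char *kview = (const char __force_kernel *)uview;       /* and cast back to the kernel space */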
90407diff --git a/lib/bug.c b/lib/bug.c
90408index 1686034..a9c00c8 100644
90409--- a/lib/bug.c
90410+++ b/lib/bug.c
90411@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
90412 return BUG_TRAP_TYPE_NONE;
90413
90414 bug = find_bug(bugaddr);
90415+ if (!bug)
90416+ return BUG_TRAP_TYPE_NONE;
90417
90418 file = NULL;
90419 line = 0;
90420diff --git a/lib/debugobjects.c b/lib/debugobjects.c
90421index e0731c3..ad66444 100644
90422--- a/lib/debugobjects.c
90423+++ b/lib/debugobjects.c
90424@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
90425 if (limit > 4)
90426 return;
90427
90428- is_on_stack = object_is_on_stack(addr);
90429+ is_on_stack = object_starts_on_stack(addr);
90430 if (is_on_stack == onstack)
90431 return;
90432
90433diff --git a/lib/devres.c b/lib/devres.c
90434index 8235331..5881053 100644
90435--- a/lib/devres.c
90436+++ b/lib/devres.c
90437@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
90438 void devm_iounmap(struct device *dev, void __iomem *addr)
90439 {
90440 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
90441- (void *)addr));
90442+ (void __force *)addr));
90443 iounmap(addr);
90444 }
90445 EXPORT_SYMBOL(devm_iounmap);
90446@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
90447 {
90448 ioport_unmap(addr);
90449 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
90450- devm_ioport_map_match, (void *)addr));
90451+ devm_ioport_map_match, (void __force *)addr));
90452 }
90453 EXPORT_SYMBOL(devm_ioport_unmap);
90454 #endif /* CONFIG_HAS_IOPORT */
90455diff --git a/lib/div64.c b/lib/div64.c
90456index 4382ad7..08aa558 100644
90457--- a/lib/div64.c
90458+++ b/lib/div64.c
90459@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
90460 EXPORT_SYMBOL(__div64_32);
90461
90462 #ifndef div_s64_rem
90463-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
90464+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
90465 {
90466 u64 quotient;
90467
90468@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
90469 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
90470 */
90471 #ifndef div64_u64
90472-u64 div64_u64(u64 dividend, u64 divisor)
90473+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
90474 {
90475 u32 high = divisor >> 32;
90476 u64 quot;
90477diff --git a/lib/dma-debug.c b/lib/dma-debug.c
90478index d87a17a..ac0d79a 100644
90479--- a/lib/dma-debug.c
90480+++ b/lib/dma-debug.c
90481@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
90482
90483 void dma_debug_add_bus(struct bus_type *bus)
90484 {
90485- struct notifier_block *nb;
90486+ notifier_block_no_const *nb;
90487
90488 if (global_disable)
90489 return;
90490@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
90491
90492 static void check_for_stack(struct device *dev, void *addr)
90493 {
90494- if (object_is_on_stack(addr))
90495+ if (object_starts_on_stack(addr))
90496 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
90497 "stack [addr=%p]\n", addr);
90498 }
90499diff --git a/lib/inflate.c b/lib/inflate.c
90500index 013a761..c28f3fc 100644
90501--- a/lib/inflate.c
90502+++ b/lib/inflate.c
90503@@ -269,7 +269,7 @@ static void free(void *where)
90504 malloc_ptr = free_mem_ptr;
90505 }
90506 #else
90507-#define malloc(a) kmalloc(a, GFP_KERNEL)
90508+#define malloc(a) kmalloc((a), GFP_KERNEL)
90509 #define free(a) kfree(a)
90510 #endif
90511
90512diff --git a/lib/ioremap.c b/lib/ioremap.c
90513index 0c9216c..863bd89 100644
90514--- a/lib/ioremap.c
90515+++ b/lib/ioremap.c
90516@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
90517 unsigned long next;
90518
90519 phys_addr -= addr;
90520- pmd = pmd_alloc(&init_mm, pud, addr);
90521+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
90522 if (!pmd)
90523 return -ENOMEM;
90524 do {
90525@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
90526 unsigned long next;
90527
90528 phys_addr -= addr;
90529- pud = pud_alloc(&init_mm, pgd, addr);
90530+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
90531 if (!pud)
90532 return -ENOMEM;
90533 do {
90534diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
90535index bd2bea9..6b3c95e 100644
90536--- a/lib/is_single_threaded.c
90537+++ b/lib/is_single_threaded.c
90538@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
90539 struct task_struct *p, *t;
90540 bool ret;
90541
90542+ if (!mm)
90543+ return true;
90544+
90545 if (atomic_read(&task->signal->live) != 1)
90546 return false;
90547
90548diff --git a/lib/kobject.c b/lib/kobject.c
90549index 5b4b888..c2950f7 100644
90550--- a/lib/kobject.c
90551+++ b/lib/kobject.c
90552@@ -957,9 +957,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
90553
90554
90555 static DEFINE_SPINLOCK(kobj_ns_type_lock);
90556-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
90557+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
90558
90559-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
90560+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
90561 {
90562 enum kobj_ns_type type = ops->type;
90563 int error;
90564diff --git a/lib/list_debug.c b/lib/list_debug.c
90565index c24c2f7..f0296f4 100644
90566--- a/lib/list_debug.c
90567+++ b/lib/list_debug.c
90568@@ -11,7 +11,9 @@
90569 #include <linux/bug.h>
90570 #include <linux/kernel.h>
90571 #include <linux/rculist.h>
90572+#include <linux/mm.h>
90573
90574+#ifdef CONFIG_DEBUG_LIST
90575 /*
90576 * Insert a new entry between two known consecutive entries.
90577 *
90578@@ -19,21 +21,40 @@
90579 * the prev/next entries already!
90580 */
90581
90582+static bool __list_add_debug(struct list_head *new,
90583+ struct list_head *prev,
90584+ struct list_head *next)
90585+{
90586+ if (unlikely(next->prev != prev)) {
90587+ printk(KERN_ERR "list_add corruption. next->prev should be "
90588+ "prev (%p), but was %p. (next=%p).\n",
90589+ prev, next->prev, next);
90590+ BUG();
90591+ return false;
90592+ }
90593+ if (unlikely(prev->next != next)) {
90594+ printk(KERN_ERR "list_add corruption. prev->next should be "
90595+ "next (%p), but was %p. (prev=%p).\n",
90596+ next, prev->next, prev);
90597+ BUG();
90598+ return false;
90599+ }
90600+ if (unlikely(new == prev || new == next)) {
90601+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
90602+ new, prev, next);
90603+ BUG();
90604+ return false;
90605+ }
90606+ return true;
90607+}
90608+
90609 void __list_add(struct list_head *new,
90610- struct list_head *prev,
90611- struct list_head *next)
90612+ struct list_head *prev,
90613+ struct list_head *next)
90614 {
90615- WARN(next->prev != prev,
90616- "list_add corruption. next->prev should be "
90617- "prev (%p), but was %p. (next=%p).\n",
90618- prev, next->prev, next);
90619- WARN(prev->next != next,
90620- "list_add corruption. prev->next should be "
90621- "next (%p), but was %p. (prev=%p).\n",
90622- next, prev->next, prev);
90623- WARN(new == prev || new == next,
90624- "list_add double add: new=%p, prev=%p, next=%p.\n",
90625- new, prev, next);
90626+ if (!__list_add_debug(new, prev, next))
90627+ return;
90628+
90629 next->prev = new;
90630 new->next = next;
90631 new->prev = prev;
90632@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
90633 }
90634 EXPORT_SYMBOL(__list_add);
90635
90636-void __list_del_entry(struct list_head *entry)
90637+static bool __list_del_entry_debug(struct list_head *entry)
90638 {
90639 struct list_head *prev, *next;
90640
90641 prev = entry->prev;
90642 next = entry->next;
90643
90644- if (WARN(next == LIST_POISON1,
90645- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
90646- entry, LIST_POISON1) ||
90647- WARN(prev == LIST_POISON2,
90648- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
90649- entry, LIST_POISON2) ||
90650- WARN(prev->next != entry,
90651- "list_del corruption. prev->next should be %p, "
90652- "but was %p\n", entry, prev->next) ||
90653- WARN(next->prev != entry,
90654- "list_del corruption. next->prev should be %p, "
90655- "but was %p\n", entry, next->prev))
90656+ if (unlikely(next == LIST_POISON1)) {
90657+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
90658+ entry, LIST_POISON1);
90659+ BUG();
90660+ return false;
90661+ }
90662+ if (unlikely(prev == LIST_POISON2)) {
90663+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
90664+ entry, LIST_POISON2);
90665+ BUG();
90666+ return false;
90667+ }
90668+ if (unlikely(entry->prev->next != entry)) {
90669+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
90670+ "but was %p\n", entry, prev->next);
90671+ BUG();
90672+ return false;
90673+ }
90674+ if (unlikely(entry->next->prev != entry)) {
90675+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
90676+ "but was %p\n", entry, next->prev);
90677+ BUG();
90678+ return false;
90679+ }
90680+ return true;
90681+}
90682+
90683+void __list_del_entry(struct list_head *entry)
90684+{
90685+ if (!__list_del_entry_debug(entry))
90686 return;
90687
90688- __list_del(prev, next);
90689+ __list_del(entry->prev, entry->next);
90690 }
90691 EXPORT_SYMBOL(__list_del_entry);
90692
90693@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
90694 void __list_add_rcu(struct list_head *new,
90695 struct list_head *prev, struct list_head *next)
90696 {
90697- WARN(next->prev != prev,
90698- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
90699- prev, next->prev, next);
90700- WARN(prev->next != next,
90701- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
90702- next, prev->next, prev);
90703+ if (!__list_add_debug(new, prev, next))
90704+ return;
90705+
90706 new->next = next;
90707 new->prev = prev;
90708 rcu_assign_pointer(list_next_rcu(prev), new);
90709 next->prev = new;
90710 }
90711 EXPORT_SYMBOL(__list_add_rcu);
90712+#endif
90713+
90714+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
90715+{
90716+#ifdef CONFIG_DEBUG_LIST
90717+ if (!__list_add_debug(new, prev, next))
90718+ return;
90719+#endif
90720+
90721+ pax_open_kernel();
90722+ next->prev = new;
90723+ new->next = next;
90724+ new->prev = prev;
90725+ prev->next = new;
90726+ pax_close_kernel();
90727+}
90728+EXPORT_SYMBOL(__pax_list_add);
90729+
90730+void pax_list_del(struct list_head *entry)
90731+{
90732+#ifdef CONFIG_DEBUG_LIST
90733+ if (!__list_del_entry_debug(entry))
90734+ return;
90735+#endif
90736+
90737+ pax_open_kernel();
90738+ __list_del(entry->prev, entry->next);
90739+ entry->next = LIST_POISON1;
90740+ entry->prev = LIST_POISON2;
90741+ pax_close_kernel();
90742+}
90743+EXPORT_SYMBOL(pax_list_del);
90744+
90745+void pax_list_del_init(struct list_head *entry)
90746+{
90747+ pax_open_kernel();
90748+ __list_del(entry->prev, entry->next);
90749+ INIT_LIST_HEAD(entry);
90750+ pax_close_kernel();
90751+}
90752+EXPORT_SYMBOL(pax_list_del_init);
90753+
90754+void __pax_list_add_rcu(struct list_head *new,
90755+ struct list_head *prev, struct list_head *next)
90756+{
90757+#ifdef CONFIG_DEBUG_LIST
90758+ if (!__list_add_debug(new, prev, next))
90759+ return;
90760+#endif
90761+
90762+ pax_open_kernel();
90763+ new->next = next;
90764+ new->prev = prev;
90765+ rcu_assign_pointer(list_next_rcu(prev), new);
90766+ next->prev = new;
90767+ pax_close_kernel();
90768+}
90769+EXPORT_SYMBOL(__pax_list_add_rcu);
90770+
90771+void pax_list_del_rcu(struct list_head *entry)
90772+{
90773+#ifdef CONFIG_DEBUG_LIST
90774+ if (!__list_del_entry_debug(entry))
90775+ return;
90776+#endif
90777+
90778+ pax_open_kernel();
90779+ __list_del(entry->prev, entry->next);
90780+ entry->next = LIST_POISON1;
90781+ entry->prev = LIST_POISON2;
90782+ pax_close_kernel();
90783+}
90784+EXPORT_SYMBOL(pax_list_del_rcu);
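
[Editor's note] The new pax_list helpers let code keep mutating lists whose nodes the constify plugin has placed in read-only memory: each one reuses the DEBUG_LIST sanity checks, then performs the actual pointer writes inside a pax_open_kernel()/pax_close_kernel() window. A minimal usage sketch (struct item and the handlers list are hypothetical):

    static LIST_HEAD(handlers);

    static void register_item(struct item *item)
    {
            /* list_add() equivalent when item->node may live in read-only memory */
            __pax_list_add(&item->node, &handlers, handlers.next);
    }

    static void unregister_item(struct item *item)
    {
            pax_list_del(&item->node);   /* unlink and poison through the same window */
    }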
90785diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
90786index 1a53d49..ace934c 100644
90787--- a/lib/percpu-refcount.c
90788+++ b/lib/percpu-refcount.c
90789@@ -29,7 +29,7 @@
90790 * can't hit 0 before we've added up all the percpu refs.
90791 */
90792
90793-#define PCPU_COUNT_BIAS (1U << 31)
90794+#define PCPU_COUNT_BIAS (1U << 30)
90795
90796 /**
90797 * percpu_ref_init - initialize a percpu refcount
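
[Editor's note] The bias drops from 1U << 31 to 1U << 30 presumably because the larger value is INT_MIN when the refcount is read as a signed 32-bit integer, which the PaX REFCOUNT overflow detection would treat as an already-overflowed counter; the smaller bias stays positive while still dwarfing any realistic reference total. A small stand-alone C illustration of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int old_bias = 1U << 31;
            unsigned int new_bias = 1U << 30;

            /* on the usual two's-complement targets: -2147483648 1073741824 */
            printf("%d %d\n", (int)old_bias, (int)new_bias);
            return 0;
    }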
90798diff --git a/lib/radix-tree.c b/lib/radix-tree.c
90799index 7811ed3..f80ca19 100644
90800--- a/lib/radix-tree.c
90801+++ b/lib/radix-tree.c
90802@@ -93,7 +93,7 @@ struct radix_tree_preload {
90803 int nr;
90804 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
90805 };
90806-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
90807+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
90808
90809 static inline void *ptr_to_indirect(void *ptr)
90810 {
90811diff --git a/lib/random32.c b/lib/random32.c
90812index 1e5b2df..fb616c7 100644
90813--- a/lib/random32.c
90814+++ b/lib/random32.c
90815@@ -44,7 +44,7 @@
90816 static void __init prandom_state_selftest(void);
90817 #endif
90818
90819-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
90820+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
90821
90822 /**
90823 * prandom_u32_state - seeded pseudo-random number generator.
90824diff --git a/lib/rbtree.c b/lib/rbtree.c
90825index 65f4eff..2cfa167 100644
90826--- a/lib/rbtree.c
90827+++ b/lib/rbtree.c
90828@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
90829 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
90830
90831 static const struct rb_augment_callbacks dummy_callbacks = {
90832- dummy_propagate, dummy_copy, dummy_rotate
90833+ .propagate = dummy_propagate,
90834+ .copy = dummy_copy,
90835+ .rotate = dummy_rotate
90836 };
90837
90838 void rb_insert_color(struct rb_node *node, struct rb_root *root)
90839diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
90840index bb2b201..46abaf9 100644
90841--- a/lib/strncpy_from_user.c
90842+++ b/lib/strncpy_from_user.c
90843@@ -21,7 +21,7 @@
90844 */
90845 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
90846 {
90847- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90848+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90849 long res = 0;
90850
90851 /*
90852diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
90853index a28df52..3d55877 100644
90854--- a/lib/strnlen_user.c
90855+++ b/lib/strnlen_user.c
90856@@ -26,7 +26,7 @@
90857 */
90858 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
90859 {
90860- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90861+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
90862 long align, res = 0;
90863 unsigned long c;
90864
90865diff --git a/lib/swiotlb.c b/lib/swiotlb.c
90866index e4399fa..5e8b214 100644
90867--- a/lib/swiotlb.c
90868+++ b/lib/swiotlb.c
90869@@ -668,7 +668,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
90870
90871 void
90872 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
90873- dma_addr_t dev_addr)
90874+ dma_addr_t dev_addr, struct dma_attrs *attrs)
90875 {
90876 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
90877
90878diff --git a/lib/usercopy.c b/lib/usercopy.c
90879index 4f5b1dd..7cab418 100644
90880--- a/lib/usercopy.c
90881+++ b/lib/usercopy.c
90882@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
90883 WARN(1, "Buffer overflow detected!\n");
90884 }
90885 EXPORT_SYMBOL(copy_from_user_overflow);
90886+
90887+void copy_to_user_overflow(void)
90888+{
90889+ WARN(1, "Buffer overflow detected!\n");
90890+}
90891+EXPORT_SYMBOL(copy_to_user_overflow);
90892diff --git a/lib/vsprintf.c b/lib/vsprintf.c
90893index 10909c5..653e1b8 100644
90894--- a/lib/vsprintf.c
90895+++ b/lib/vsprintf.c
90896@@ -16,6 +16,9 @@
90897 * - scnprintf and vscnprintf
90898 */
90899
90900+#ifdef CONFIG_GRKERNSEC_HIDESYM
90901+#define __INCLUDED_BY_HIDESYM 1
90902+#endif
90903 #include <stdarg.h>
90904 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
90905 #include <linux/types.h>
90906@@ -1155,7 +1158,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
90907 return number(buf, end, *(const netdev_features_t *)addr, spec);
90908 }
90909
90910+#ifdef CONFIG_GRKERNSEC_HIDESYM
90911+int kptr_restrict __read_mostly = 2;
90912+#else
90913 int kptr_restrict __read_mostly;
90914+#endif
90915
90916 /*
90917 * Show a '%p' thing. A kernel extension is that the '%p' is followed
90918@@ -1168,6 +1175,7 @@ int kptr_restrict __read_mostly;
90919 * - 'f' For simple symbolic function names without offset
90920 * - 'S' For symbolic direct pointers with offset
90921 * - 's' For symbolic direct pointers without offset
90922+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
90923 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
90924 * - 'B' For backtraced symbolic direct pointers with offset
90925 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
90926@@ -1234,12 +1242,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90927
90928 if (!ptr && *fmt != 'K') {
90929 /*
90930- * Print (null) with the same width as a pointer so it makes
90931+ * Print (nil) with the same width as a pointer so it makes
90932 * tabular output look nice.
90933 */
90934 if (spec.field_width == -1)
90935 spec.field_width = default_width;
90936- return string(buf, end, "(null)", spec);
90937+ return string(buf, end, "(nil)", spec);
90938 }
90939
90940 switch (*fmt) {
90941@@ -1249,6 +1257,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90942 /* Fallthrough */
90943 case 'S':
90944 case 's':
90945+#ifdef CONFIG_GRKERNSEC_HIDESYM
90946+ break;
90947+#else
90948+ return symbol_string(buf, end, ptr, spec, fmt);
90949+#endif
90950+ case 'A':
90951 case 'B':
90952 return symbol_string(buf, end, ptr, spec, fmt);
90953 case 'R':
90954@@ -1304,6 +1318,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90955 va_end(va);
90956 return buf;
90957 }
90958+ case 'P':
90959+ break;
90960 case 'K':
90961 /*
90962 * %pK cannot be used in IRQ context because its test
90963@@ -1365,6 +1381,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
90964 ((const struct file *)ptr)->f_path.dentry,
90965 spec, fmt);
90966 }
90967+
90968+#ifdef CONFIG_GRKERNSEC_HIDESYM
90969+	/* 'P' = approved pointers to copy to userland,
90970+	   as in the /proc/kallsyms case: we make it display nothing
90971+	   for non-root users, and the real contents for root users.
90972+	   Also ignore 'K' pointers, since we force their NULLing
90973+	   for non-root users above.
90974+ */
90975+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
90976+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
90977+ dump_stack();
90978+ ptr = NULL;
90979+ }
90980+#endif
90981+
90982 spec.flags |= SMALL;
90983 if (spec.field_width == -1) {
90984 spec.field_width = default_width;
90985@@ -2086,11 +2117,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
90986 typeof(type) value; \
90987 if (sizeof(type) == 8) { \
90988 args = PTR_ALIGN(args, sizeof(u32)); \
90989- *(u32 *)&value = *(u32 *)args; \
90990- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
90991+ *(u32 *)&value = *(const u32 *)args; \
90992+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
90993 } else { \
90994 args = PTR_ALIGN(args, sizeof(type)); \
90995- value = *(typeof(type) *)args; \
90996+ value = *(const typeof(type) *)args; \
90997 } \
90998 args += sizeof(type); \
90999 value; \
91000@@ -2153,7 +2184,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
91001 case FORMAT_TYPE_STR: {
91002 const char *str_arg = args;
91003 args += strlen(str_arg) + 1;
91004- str = string(str, end, (char *)str_arg, spec);
91005+ str = string(str, end, str_arg, spec);
91006 break;
91007 }
91008
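
[Editor's note] Taken together, the vsprintf changes make GRKERNSEC_HIDESYM default kptr_restrict to 2, censor %s/%S symbol output, and add two vetted escapes: %pA for symbols that are still allowed to resolve and %pP for raw pointer values approved for userland (the mm/kmemleak.c hunk below converts its backtrace printout to exactly this pair). A minimal sketch, assuming a HIDESYM kernel (ptr and obj are hypothetical):

    pr_info("callback at [<%pP>] %pA\n", ptr, ptr);  /* approved raw value plus symbol */
    pr_info("object at %pK\n", obj);                 /* zeroed for unprivileged readers at kptr_restrict=2 */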
91009diff --git a/localversion-grsec b/localversion-grsec
91010new file mode 100644
91011index 0000000..7cd6065
91012--- /dev/null
91013+++ b/localversion-grsec
91014@@ -0,0 +1 @@
91015+-grsec
91016diff --git a/mm/Kconfig b/mm/Kconfig
91017index 723bbe0..ea624b1 100644
91018--- a/mm/Kconfig
91019+++ b/mm/Kconfig
91020@@ -326,10 +326,11 @@ config KSM
91021 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
91022
91023 config DEFAULT_MMAP_MIN_ADDR
91024- int "Low address space to protect from user allocation"
91025+ int "Low address space to protect from user allocation"
91026 depends on MMU
91027- default 4096
91028- help
91029+ default 32768 if ALPHA || ARM || PARISC || SPARC32
91030+ default 65536
91031+ help
91032 This is the portion of low virtual memory which should be protected
91033 from userspace allocation. Keeping a user from writing to low pages
91034 can help reduce the impact of kernel NULL pointer bugs.
91035@@ -360,7 +361,7 @@ config MEMORY_FAILURE
91036
91037 config HWPOISON_INJECT
91038 tristate "HWPoison pages injector"
91039- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
91040+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
91041 select PROC_PAGE_MONITOR
91042
91043 config NOMMU_INITIAL_TRIM_EXCESS
91044diff --git a/mm/backing-dev.c b/mm/backing-dev.c
91045index ce682f7..1fb54f9 100644
91046--- a/mm/backing-dev.c
91047+++ b/mm/backing-dev.c
91048@@ -12,7 +12,7 @@
91049 #include <linux/device.h>
91050 #include <trace/events/writeback.h>
91051
91052-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
91053+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
91054
91055 struct backing_dev_info default_backing_dev_info = {
91056 .name = "default",
91057@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
91058 return err;
91059
91060 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
91061- atomic_long_inc_return(&bdi_seq));
91062+ atomic_long_inc_return_unchecked(&bdi_seq));
91063 if (err) {
91064 bdi_destroy(bdi);
91065 return err;
91066diff --git a/mm/filemap.c b/mm/filemap.c
91067index b7749a9..50d1123 100644
91068--- a/mm/filemap.c
91069+++ b/mm/filemap.c
91070@@ -1768,7 +1768,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
91071 struct address_space *mapping = file->f_mapping;
91072
91073 if (!mapping->a_ops->readpage)
91074- return -ENOEXEC;
91075+ return -ENODEV;
91076 file_accessed(file);
91077 vma->vm_ops = &generic_file_vm_ops;
91078 return 0;
91079@@ -1950,7 +1950,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
91080
91081 while (bytes) {
91082 char __user *buf = iov->iov_base + base;
91083- int copy = min(bytes, iov->iov_len - base);
91084+ size_t copy = min(bytes, iov->iov_len - base);
91085
91086 base = 0;
91087 left = __copy_from_user_inatomic(vaddr, buf, copy);
91088@@ -1979,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
91089 BUG_ON(!in_atomic());
91090 kaddr = kmap_atomic(page);
91091 if (likely(i->nr_segs == 1)) {
91092- int left;
91093+ size_t left;
91094 char __user *buf = i->iov->iov_base + i->iov_offset;
91095 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
91096 copied = bytes - left;
91097@@ -2007,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct page *page,
91098
91099 kaddr = kmap(page);
91100 if (likely(i->nr_segs == 1)) {
91101- int left;
91102+ size_t left;
91103 char __user *buf = i->iov->iov_base + i->iov_offset;
91104 left = __copy_from_user(kaddr + offset, buf, bytes);
91105 copied = bytes - left;
91106@@ -2037,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
91107 * zero-length segments (without overruning the iovec).
91108 */
91109 while (bytes || unlikely(i->count && !iov->iov_len)) {
91110- int copy;
91111+ size_t copy;
91112
91113 copy = min(bytes, iov->iov_len - base);
91114 BUG_ON(!i->count || i->count < copy);
91115@@ -2108,6 +2108,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
91116 *pos = i_size_read(inode);
91117
91118 if (limit != RLIM_INFINITY) {
91119+	gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
91120 if (*pos >= limit) {
91121 send_sig(SIGXFSZ, current, 0);
91122 return -EFBIG;
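
[Editor's note] The int-to-size_t conversions above close a truncation hole: min() of two size_t operands assigned to an int silently drops the high bits once an iovec segment exceeds INT_MAX, so the copy length no longer matches what was validated. A small stand-alone C demonstration of the bug class, assuming a 64-bit build:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            size_t len = ((size_t)1 << 32) + 8;   /* 4 GiB + 8 bytes */
            int truncated = (int)len;             /* implementation-defined; typically 8 */
            size_t kept = len;                    /* full value preserved */

            printf("int: %d  size_t: %zu\n", truncated, kept);
            return 0;
    }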
91123diff --git a/mm/fremap.c b/mm/fremap.c
91124index bbc4d66..117b798 100644
91125--- a/mm/fremap.c
91126+++ b/mm/fremap.c
91127@@ -163,6 +163,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
91128 retry:
91129 vma = find_vma(mm, start);
91130
91131+#ifdef CONFIG_PAX_SEGMEXEC
91132+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
91133+ goto out;
91134+#endif
91135+
91136 /*
91137 * Make sure the vma is shared, that it supports prefaulting,
91138 * and that the remapped range is valid and fully within
91139diff --git a/mm/highmem.c b/mm/highmem.c
91140index b32b70c..e512eb0 100644
91141--- a/mm/highmem.c
91142+++ b/mm/highmem.c
91143@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
91144 * So no dangers, even with speculative execution.
91145 */
91146 page = pte_page(pkmap_page_table[i]);
91147+ pax_open_kernel();
91148 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
91149-
91150+ pax_close_kernel();
91151 set_page_address(page, NULL);
91152 need_flush = 1;
91153 }
91154@@ -198,9 +199,11 @@ start:
91155 }
91156 }
91157 vaddr = PKMAP_ADDR(last_pkmap_nr);
91158+
91159+ pax_open_kernel();
91160 set_pte_at(&init_mm, vaddr,
91161 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
91162-
91163+ pax_close_kernel();
91164 pkmap_count[last_pkmap_nr] = 1;
91165 set_page_address(page, (void *)vaddr);
91166
91167diff --git a/mm/hugetlb.c b/mm/hugetlb.c
91168index dee6cf4..52b94f7 100644
91169--- a/mm/hugetlb.c
91170+++ b/mm/hugetlb.c
91171@@ -2077,15 +2077,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
91172 struct hstate *h = &default_hstate;
91173 unsigned long tmp;
91174 int ret;
91175+ ctl_table_no_const hugetlb_table;
91176
91177 tmp = h->max_huge_pages;
91178
91179 if (write && h->order >= MAX_ORDER)
91180 return -EINVAL;
91181
91182- table->data = &tmp;
91183- table->maxlen = sizeof(unsigned long);
91184- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
91185+ hugetlb_table = *table;
91186+ hugetlb_table.data = &tmp;
91187+ hugetlb_table.maxlen = sizeof(unsigned long);
91188+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
91189 if (ret)
91190 goto out;
91191
91192@@ -2130,15 +2132,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
91193 struct hstate *h = &default_hstate;
91194 unsigned long tmp;
91195 int ret;
91196+ ctl_table_no_const hugetlb_table;
91197
91198 tmp = h->nr_overcommit_huge_pages;
91199
91200 if (write && h->order >= MAX_ORDER)
91201 return -EINVAL;
91202
91203- table->data = &tmp;
91204- table->maxlen = sizeof(unsigned long);
91205- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
91206+ hugetlb_table = *table;
91207+ hugetlb_table.data = &tmp;
91208+ hugetlb_table.maxlen = sizeof(unsigned long);
91209+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
91210 if (ret)
91211 goto out;
91212
91213@@ -2596,6 +2600,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
91214 return 1;
91215 }
91216
91217+#ifdef CONFIG_PAX_SEGMEXEC
91218+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
91219+{
91220+ struct mm_struct *mm = vma->vm_mm;
91221+ struct vm_area_struct *vma_m;
91222+ unsigned long address_m;
91223+ pte_t *ptep_m;
91224+
91225+ vma_m = pax_find_mirror_vma(vma);
91226+ if (!vma_m)
91227+ return;
91228+
91229+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
91230+ address_m = address + SEGMEXEC_TASK_SIZE;
91231+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
91232+ get_page(page_m);
91233+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
91234+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
91235+}
91236+#endif
91237+
91238 /*
91239 * Hugetlb_cow() should be called with page lock of the original hugepage held.
91240 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
91241@@ -2712,6 +2737,11 @@ retry_avoidcopy:
91242 make_huge_pte(vma, new_page, 1));
91243 page_remove_rmap(old_page);
91244 hugepage_add_new_anon_rmap(new_page, vma, address);
91245+
91246+#ifdef CONFIG_PAX_SEGMEXEC
91247+ pax_mirror_huge_pte(vma, address, new_page);
91248+#endif
91249+
91250 /* Make the old page be freed below */
91251 new_page = old_page;
91252 }
91253@@ -2876,6 +2906,10 @@ retry:
91254 && (vma->vm_flags & VM_SHARED)));
91255 set_huge_pte_at(mm, address, ptep, new_pte);
91256
91257+#ifdef CONFIG_PAX_SEGMEXEC
91258+ pax_mirror_huge_pte(vma, address, page);
91259+#endif
91260+
91261 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
91262 /* Optimization, do the COW without a second fault */
91263 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
91264@@ -2906,6 +2940,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91265 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
91266 struct hstate *h = hstate_vma(vma);
91267
91268+#ifdef CONFIG_PAX_SEGMEXEC
91269+ struct vm_area_struct *vma_m;
91270+#endif
91271+
91272 address &= huge_page_mask(h);
91273
91274 ptep = huge_pte_offset(mm, address);
91275@@ -2919,6 +2957,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
91276 VM_FAULT_SET_HINDEX(hstate_index(h));
91277 }
91278
91279+#ifdef CONFIG_PAX_SEGMEXEC
91280+ vma_m = pax_find_mirror_vma(vma);
91281+ if (vma_m) {
91282+ unsigned long address_m;
91283+
91284+ if (vma->vm_start > vma_m->vm_start) {
91285+ address_m = address;
91286+ address -= SEGMEXEC_TASK_SIZE;
91287+ vma = vma_m;
91288+ h = hstate_vma(vma);
91289+ } else
91290+ address_m = address + SEGMEXEC_TASK_SIZE;
91291+
91292+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
91293+ return VM_FAULT_OOM;
91294+ address_m &= HPAGE_MASK;
91295+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
91296+ }
91297+#endif
91298+
91299 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
91300 if (!ptep)
91301 return VM_FAULT_OOM;
91302diff --git a/mm/internal.h b/mm/internal.h
91303index 8b6cfd6..ec809a6 100644
91304--- a/mm/internal.h
91305+++ b/mm/internal.h
91306@@ -96,6 +96,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
91307 * in mm/page_alloc.c
91308 */
91309 extern void __free_pages_bootmem(struct page *page, unsigned int order);
91310+extern void free_compound_page(struct page *page);
91311 extern void prep_compound_page(struct page *page, unsigned long order);
91312 #ifdef CONFIG_MEMORY_FAILURE
91313 extern bool is_free_buddy_page(struct page *page);
91314@@ -351,7 +352,7 @@ extern u32 hwpoison_filter_enable;
91315
91316 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
91317 unsigned long, unsigned long,
91318- unsigned long, unsigned long);
91319+ unsigned long, unsigned long) __intentional_overflow(-1);
91320
91321 extern void set_pageblock_order(void);
91322 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
91323diff --git a/mm/kmemleak.c b/mm/kmemleak.c
91324index 31f01c5..7015178 100644
91325--- a/mm/kmemleak.c
91326+++ b/mm/kmemleak.c
91327@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
91328
91329 for (i = 0; i < object->trace_len; i++) {
91330 void *ptr = (void *)object->trace[i];
91331- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
91332+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
91333 }
91334 }
91335
91336@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
91337 return -ENOMEM;
91338 }
91339
91340- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
91341+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
91342 &kmemleak_fops);
91343 if (!dentry)
91344 pr_warning("Failed to create the debugfs kmemleak file\n");
91345diff --git a/mm/maccess.c b/mm/maccess.c
91346index d53adf9..03a24bf 100644
91347--- a/mm/maccess.c
91348+++ b/mm/maccess.c
91349@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
91350 set_fs(KERNEL_DS);
91351 pagefault_disable();
91352 ret = __copy_from_user_inatomic(dst,
91353- (__force const void __user *)src, size);
91354+ (const void __force_user *)src, size);
91355 pagefault_enable();
91356 set_fs(old_fs);
91357
91358@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
91359
91360 set_fs(KERNEL_DS);
91361 pagefault_disable();
91362- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
91363+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
91364 pagefault_enable();
91365 set_fs(old_fs);
91366
91367diff --git a/mm/madvise.c b/mm/madvise.c
91368index 539eeb9..e24a987 100644
91369--- a/mm/madvise.c
91370+++ b/mm/madvise.c
91371@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
91372 pgoff_t pgoff;
91373 unsigned long new_flags = vma->vm_flags;
91374
91375+#ifdef CONFIG_PAX_SEGMEXEC
91376+ struct vm_area_struct *vma_m;
91377+#endif
91378+
91379 switch (behavior) {
91380 case MADV_NORMAL:
91381 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
91382@@ -126,6 +130,13 @@ success:
91383 /*
91384 * vm_flags is protected by the mmap_sem held in write mode.
91385 */
91386+
91387+#ifdef CONFIG_PAX_SEGMEXEC
91388+ vma_m = pax_find_mirror_vma(vma);
91389+ if (vma_m)
91390+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
91391+#endif
91392+
91393 vma->vm_flags = new_flags;
91394
91395 out:
91396@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
91397 struct vm_area_struct **prev,
91398 unsigned long start, unsigned long end)
91399 {
91400+
91401+#ifdef CONFIG_PAX_SEGMEXEC
91402+ struct vm_area_struct *vma_m;
91403+#endif
91404+
91405 *prev = vma;
91406 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
91407 return -EINVAL;
91408@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
91409 zap_page_range(vma, start, end - start, &details);
91410 } else
91411 zap_page_range(vma, start, end - start, NULL);
91412+
91413+#ifdef CONFIG_PAX_SEGMEXEC
91414+ vma_m = pax_find_mirror_vma(vma);
91415+ if (vma_m) {
91416+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
91417+ struct zap_details details = {
91418+ .nonlinear_vma = vma_m,
91419+ .last_index = ULONG_MAX,
91420+ };
91421+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
91422+ } else
91423+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
91424+ }
91425+#endif
91426+
91427 return 0;
91428 }
91429
91430@@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
91431 if (end < start)
91432 return error;
91433
91434+#ifdef CONFIG_PAX_SEGMEXEC
91435+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
91436+ if (end > SEGMEXEC_TASK_SIZE)
91437+ return error;
91438+ } else
91439+#endif
91440+
91441+ if (end > TASK_SIZE)
91442+ return error;
91443+
91444 error = 0;
91445 if (end == start)
91446 return error;
91447diff --git a/mm/memory-failure.c b/mm/memory-failure.c
91448index 90977ac..487ab84 100644
91449--- a/mm/memory-failure.c
91450+++ b/mm/memory-failure.c
91451@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
91452
91453 int sysctl_memory_failure_recovery __read_mostly = 1;
91454
91455-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
91456+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
91457
91458 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
91459
91460@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
91461 pfn, t->comm, t->pid);
91462 si.si_signo = SIGBUS;
91463 si.si_errno = 0;
91464- si.si_addr = (void *)addr;
91465+ si.si_addr = (void __user *)addr;
91466 #ifdef __ARCH_SI_TRAPNO
91467 si.si_trapno = trapno;
91468 #endif
91469@@ -762,7 +762,7 @@ static struct page_state {
91470 unsigned long res;
91471 char *msg;
91472 int (*action)(struct page *p, unsigned long pfn);
91473-} error_states[] = {
91474+} __do_const error_states[] = {
91475 { reserved, reserved, "reserved kernel", me_kernel },
91476 /*
91477 * free pages are specially detected outside this table:
91478@@ -1062,7 +1062,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
91479 nr_pages = 1 << compound_order(hpage);
91480 else /* normal page or thp */
91481 nr_pages = 1;
91482- atomic_long_add(nr_pages, &num_poisoned_pages);
91483+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
91484
91485 /*
91486 * We need/can do nothing about count=0 pages.
91487@@ -1092,7 +1092,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
91488 if (!PageHWPoison(hpage)
91489 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
91490 || (p != hpage && TestSetPageHWPoison(hpage))) {
91491- atomic_long_sub(nr_pages, &num_poisoned_pages);
91492+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
91493 return 0;
91494 }
91495 set_page_hwpoison_huge_page(hpage);
91496@@ -1161,7 +1161,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
91497 }
91498 if (hwpoison_filter(p)) {
91499 if (TestClearPageHWPoison(p))
91500- atomic_long_sub(nr_pages, &num_poisoned_pages);
91501+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
91502 unlock_page(hpage);
91503 put_page(hpage);
91504 return 0;
91505@@ -1383,7 +1383,7 @@ int unpoison_memory(unsigned long pfn)
91506 return 0;
91507 }
91508 if (TestClearPageHWPoison(p))
91509- atomic_long_dec(&num_poisoned_pages);
91510+ atomic_long_dec_unchecked(&num_poisoned_pages);
91511 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
91512 return 0;
91513 }
91514@@ -1397,7 +1397,7 @@ int unpoison_memory(unsigned long pfn)
91515 */
91516 if (TestClearPageHWPoison(page)) {
91517 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
91518- atomic_long_sub(nr_pages, &num_poisoned_pages);
91519+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
91520 freeit = 1;
91521 if (PageHuge(page))
91522 clear_page_hwpoison_huge_page(page);
91523@@ -1522,11 +1522,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
91524 if (PageHuge(page)) {
91525 set_page_hwpoison_huge_page(hpage);
91526 dequeue_hwpoisoned_huge_page(hpage);
91527- atomic_long_add(1 << compound_order(hpage),
91528+ atomic_long_add_unchecked(1 << compound_order(hpage),
91529 &num_poisoned_pages);
91530 } else {
91531 SetPageHWPoison(page);
91532- atomic_long_inc(&num_poisoned_pages);
91533+ atomic_long_inc_unchecked(&num_poisoned_pages);
91534 }
91535 }
91536 return ret;
91537@@ -1565,7 +1565,7 @@ static int __soft_offline_page(struct page *page, int flags)
91538 put_page(page);
91539 pr_info("soft_offline: %#lx: invalidated\n", pfn);
91540 SetPageHWPoison(page);
91541- atomic_long_inc(&num_poisoned_pages);
91542+ atomic_long_inc_unchecked(&num_poisoned_pages);
91543 return 0;
91544 }
91545
91546@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
91547 if (!is_free_buddy_page(page))
91548 pr_info("soft offline: %#lx: page leaked\n",
91549 pfn);
91550- atomic_long_inc(&num_poisoned_pages);
91551+ atomic_long_inc_unchecked(&num_poisoned_pages);
91552 }
91553 } else {
91554 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
91555@@ -1684,11 +1684,11 @@ int soft_offline_page(struct page *page, int flags)
91556 if (PageHuge(page)) {
91557 set_page_hwpoison_huge_page(hpage);
91558 dequeue_hwpoisoned_huge_page(hpage);
91559- atomic_long_add(1 << compound_order(hpage),
91560+ atomic_long_add_unchecked(1 << compound_order(hpage),
91561 &num_poisoned_pages);
91562 } else {
91563 SetPageHWPoison(page);
91564- atomic_long_inc(&num_poisoned_pages);
91565+ atomic_long_inc_unchecked(&num_poisoned_pages);
91566 }
91567 }
91568 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
91569diff --git a/mm/memory.c b/mm/memory.c
91570index dda27b9..c56b9d6 100644
91571--- a/mm/memory.c
91572+++ b/mm/memory.c
91573@@ -402,6 +402,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
91574 free_pte_range(tlb, pmd, addr);
91575 } while (pmd++, addr = next, addr != end);
91576
91577+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
91578 start &= PUD_MASK;
91579 if (start < floor)
91580 return;
91581@@ -416,6 +417,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
91582 pmd = pmd_offset(pud, start);
91583 pud_clear(pud);
91584 pmd_free_tlb(tlb, pmd, start);
91585+#endif
91586+
91587 }
91588
91589 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
91590@@ -435,6 +438,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
91591 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
91592 } while (pud++, addr = next, addr != end);
91593
91594+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
91595 start &= PGDIR_MASK;
91596 if (start < floor)
91597 return;
91598@@ -449,6 +453,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
91599 pud = pud_offset(pgd, start);
91600 pgd_clear(pgd);
91601 pud_free_tlb(tlb, pud, start);
91602+#endif
91603+
91604 }
91605
91606 /*
91607@@ -1635,12 +1641,6 @@ no_page_table:
91608 return page;
91609 }
91610
91611-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
91612-{
91613- return stack_guard_page_start(vma, addr) ||
91614- stack_guard_page_end(vma, addr+PAGE_SIZE);
91615-}
91616-
91617 /**
91618 * __get_user_pages() - pin user pages in memory
91619 * @tsk: task_struct of target task
91620@@ -1727,10 +1727,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
91621
91622 i = 0;
91623
91624- do {
91625+ while (nr_pages) {
91626 struct vm_area_struct *vma;
91627
91628- vma = find_extend_vma(mm, start);
91629+ vma = find_vma(mm, start);
91630 if (!vma && in_gate_area(mm, start)) {
91631 unsigned long pg = start & PAGE_MASK;
91632 pgd_t *pgd;
91633@@ -1779,7 +1779,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
91634 goto next_page;
91635 }
91636
91637- if (!vma ||
91638+ if (!vma || start < vma->vm_start ||
91639 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
91640 !(vm_flags & vma->vm_flags))
91641 return i ? : -EFAULT;
91642@@ -1808,11 +1808,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
91643 int ret;
91644 unsigned int fault_flags = 0;
91645
91646- /* For mlock, just skip the stack guard page. */
91647- if (foll_flags & FOLL_MLOCK) {
91648- if (stack_guard_page(vma, start))
91649- goto next_page;
91650- }
91651 if (foll_flags & FOLL_WRITE)
91652 fault_flags |= FAULT_FLAG_WRITE;
91653 if (nonblocking)
91654@@ -1892,7 +1887,7 @@ next_page:
91655 start += page_increm * PAGE_SIZE;
91656 nr_pages -= page_increm;
91657 } while (nr_pages && start < vma->vm_end);
91658- } while (nr_pages);
91659+ }
91660 return i;
91661 }
91662 EXPORT_SYMBOL(__get_user_pages);
91663@@ -2099,6 +2094,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
91664 page_add_file_rmap(page);
91665 set_pte_at(mm, addr, pte, mk_pte(page, prot));
91666
91667+#ifdef CONFIG_PAX_SEGMEXEC
91668+ pax_mirror_file_pte(vma, addr, page, ptl);
91669+#endif
91670+
91671 retval = 0;
91672 pte_unmap_unlock(pte, ptl);
91673 return retval;
91674@@ -2143,9 +2142,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
91675 if (!page_count(page))
91676 return -EINVAL;
91677 if (!(vma->vm_flags & VM_MIXEDMAP)) {
91678+
91679+#ifdef CONFIG_PAX_SEGMEXEC
91680+ struct vm_area_struct *vma_m;
91681+#endif
91682+
91683 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
91684 BUG_ON(vma->vm_flags & VM_PFNMAP);
91685 vma->vm_flags |= VM_MIXEDMAP;
91686+
91687+#ifdef CONFIG_PAX_SEGMEXEC
91688+ vma_m = pax_find_mirror_vma(vma);
91689+ if (vma_m)
91690+ vma_m->vm_flags |= VM_MIXEDMAP;
91691+#endif
91692+
91693 }
91694 return insert_page(vma, addr, page, vma->vm_page_prot);
91695 }
91696@@ -2228,6 +2239,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
91697 unsigned long pfn)
91698 {
91699 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
91700+ BUG_ON(vma->vm_mirror);
91701
91702 if (addr < vma->vm_start || addr >= vma->vm_end)
91703 return -EFAULT;
91704@@ -2475,7 +2487,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
91705
91706 BUG_ON(pud_huge(*pud));
91707
91708- pmd = pmd_alloc(mm, pud, addr);
91709+ pmd = (mm == &init_mm) ?
91710+ pmd_alloc_kernel(mm, pud, addr) :
91711+ pmd_alloc(mm, pud, addr);
91712 if (!pmd)
91713 return -ENOMEM;
91714 do {
91715@@ -2495,7 +2509,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
91716 unsigned long next;
91717 int err;
91718
91719- pud = pud_alloc(mm, pgd, addr);
91720+ pud = (mm == &init_mm) ?
91721+ pud_alloc_kernel(mm, pgd, addr) :
91722+ pud_alloc(mm, pgd, addr);
91723 if (!pud)
91724 return -ENOMEM;
91725 do {
91726@@ -2583,6 +2599,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
91727 copy_user_highpage(dst, src, va, vma);
91728 }
91729
91730+#ifdef CONFIG_PAX_SEGMEXEC
91731+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
91732+{
91733+ struct mm_struct *mm = vma->vm_mm;
91734+ spinlock_t *ptl;
91735+ pte_t *pte, entry;
91736+
91737+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
91738+ entry = *pte;
91739+ if (!pte_present(entry)) {
91740+ if (!pte_none(entry)) {
91741+ BUG_ON(pte_file(entry));
91742+ free_swap_and_cache(pte_to_swp_entry(entry));
91743+ pte_clear_not_present_full(mm, address, pte, 0);
91744+ }
91745+ } else {
91746+ struct page *page;
91747+
91748+ flush_cache_page(vma, address, pte_pfn(entry));
91749+ entry = ptep_clear_flush(vma, address, pte);
91750+ BUG_ON(pte_dirty(entry));
91751+ page = vm_normal_page(vma, address, entry);
91752+ if (page) {
91753+ update_hiwater_rss(mm);
91754+ if (PageAnon(page))
91755+ dec_mm_counter_fast(mm, MM_ANONPAGES);
91756+ else
91757+ dec_mm_counter_fast(mm, MM_FILEPAGES);
91758+ page_remove_rmap(page);
91759+ page_cache_release(page);
91760+ }
91761+ }
91762+ pte_unmap_unlock(pte, ptl);
91763+}
91764+
91765+/* PaX: if vma is mirrored, synchronize the mirror's PTE.
91766+ *
91767+ * The ptl of the lower mapped page is held on entry and is not released on exit
91768+ * or inside, to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc.).
91769+ */
91770+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
91771+{
91772+ struct mm_struct *mm = vma->vm_mm;
91773+ unsigned long address_m;
91774+ spinlock_t *ptl_m;
91775+ struct vm_area_struct *vma_m;
91776+ pmd_t *pmd_m;
91777+ pte_t *pte_m, entry_m;
91778+
91779+ BUG_ON(!page_m || !PageAnon(page_m));
91780+
91781+ vma_m = pax_find_mirror_vma(vma);
91782+ if (!vma_m)
91783+ return;
91784+
91785+ BUG_ON(!PageLocked(page_m));
91786+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
91787+ address_m = address + SEGMEXEC_TASK_SIZE;
91788+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
91789+ pte_m = pte_offset_map(pmd_m, address_m);
91790+ ptl_m = pte_lockptr(mm, pmd_m);
91791+ if (ptl != ptl_m) {
91792+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
91793+ if (!pte_none(*pte_m))
91794+ goto out;
91795+ }
91796+
91797+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
91798+ page_cache_get(page_m);
91799+ page_add_anon_rmap(page_m, vma_m, address_m);
91800+ inc_mm_counter_fast(mm, MM_ANONPAGES);
91801+ set_pte_at(mm, address_m, pte_m, entry_m);
91802+ update_mmu_cache(vma_m, address_m, pte_m);
91803+out:
91804+ if (ptl != ptl_m)
91805+ spin_unlock(ptl_m);
91806+ pte_unmap(pte_m);
91807+ unlock_page(page_m);
91808+}
91809+
91810+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
91811+{
91812+ struct mm_struct *mm = vma->vm_mm;
91813+ unsigned long address_m;
91814+ spinlock_t *ptl_m;
91815+ struct vm_area_struct *vma_m;
91816+ pmd_t *pmd_m;
91817+ pte_t *pte_m, entry_m;
91818+
91819+ BUG_ON(!page_m || PageAnon(page_m));
91820+
91821+ vma_m = pax_find_mirror_vma(vma);
91822+ if (!vma_m)
91823+ return;
91824+
91825+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
91826+ address_m = address + SEGMEXEC_TASK_SIZE;
91827+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
91828+ pte_m = pte_offset_map(pmd_m, address_m);
91829+ ptl_m = pte_lockptr(mm, pmd_m);
91830+ if (ptl != ptl_m) {
91831+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
91832+ if (!pte_none(*pte_m))
91833+ goto out;
91834+ }
91835+
91836+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
91837+ page_cache_get(page_m);
91838+ page_add_file_rmap(page_m);
91839+ inc_mm_counter_fast(mm, MM_FILEPAGES);
91840+ set_pte_at(mm, address_m, pte_m, entry_m);
91841+ update_mmu_cache(vma_m, address_m, pte_m);
91842+out:
91843+ if (ptl != ptl_m)
91844+ spin_unlock(ptl_m);
91845+ pte_unmap(pte_m);
91846+}
91847+
91848+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
91849+{
91850+ struct mm_struct *mm = vma->vm_mm;
91851+ unsigned long address_m;
91852+ spinlock_t *ptl_m;
91853+ struct vm_area_struct *vma_m;
91854+ pmd_t *pmd_m;
91855+ pte_t *pte_m, entry_m;
91856+
91857+ vma_m = pax_find_mirror_vma(vma);
91858+ if (!vma_m)
91859+ return;
91860+
91861+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
91862+ address_m = address + SEGMEXEC_TASK_SIZE;
91863+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
91864+ pte_m = pte_offset_map(pmd_m, address_m);
91865+ ptl_m = pte_lockptr(mm, pmd_m);
91866+ if (ptl != ptl_m) {
91867+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
91868+ if (!pte_none(*pte_m))
91869+ goto out;
91870+ }
91871+
91872+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
91873+ set_pte_at(mm, address_m, pte_m, entry_m);
91874+out:
91875+ if (ptl != ptl_m)
91876+ spin_unlock(ptl_m);
91877+ pte_unmap(pte_m);
91878+}
91879+
91880+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
91881+{
91882+ struct page *page_m;
91883+ pte_t entry;
91884+
91885+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
91886+ goto out;
91887+
91888+ entry = *pte;
91889+ page_m = vm_normal_page(vma, address, entry);
91890+ if (!page_m)
91891+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
91892+ else if (PageAnon(page_m)) {
91893+ if (pax_find_mirror_vma(vma)) {
91894+ pte_unmap_unlock(pte, ptl);
91895+ lock_page(page_m);
91896+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
91897+ if (pte_same(entry, *pte))
91898+ pax_mirror_anon_pte(vma, address, page_m, ptl);
91899+ else
91900+ unlock_page(page_m);
91901+ }
91902+ } else
91903+ pax_mirror_file_pte(vma, address, page_m, ptl);
91904+
91905+out:
91906+ pte_unmap_unlock(pte, ptl);
91907+}
91908+#endif
91909+
91910 /*
91911 * This routine handles present pages, when users try to write
91912 * to a shared page. It is done by copying the page to a new address
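
Under SEGMEXEC, PaX keeps an executable mirror of every VM_EXEC mapping at vma->vm_start + SEGMEXEC_TASK_SIZE; the pax_mirror_*_pte() helpers added above copy a freshly installed PTE into that mirror, taking the second PTE lock with spin_lock_nested(..., SINGLE_DEPTH_NESTING) when the two page-table pages use different locks. A runnable sketch of the address arithmetic only — the 1.5 GiB split size is an assumption about the i386 layout, not something this hunk states:

	#include <stdio.h>

	/* assumed i386 SEGMEXEC split: lower 1.5 GiB data, upper 1.5 GiB mirror */
	#define SEGMEXEC_TASK_SIZE 0x60000000UL

	static unsigned long mirror_of(unsigned long address)
	{
		/* mirrors only exist for the lower half, cf. BUG_ON(address >= SEGMEXEC_TASK_SIZE) */
		if (address >= SEGMEXEC_TASK_SIZE)
			return 0;
		return address + SEGMEXEC_TASK_SIZE;
	}

	int main(void)
	{
		unsigned long addr = 0x08048000UL;	/* classic i386 ELF text address */

		printf("%#lx mirrors to %#lx\n", addr, mirror_of(addr));
		return 0;
	}
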
91913@@ -2807,6 +3003,12 @@ gotten:
91914 */
91915 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
91916 if (likely(pte_same(*page_table, orig_pte))) {
91917+
91918+#ifdef CONFIG_PAX_SEGMEXEC
91919+ if (pax_find_mirror_vma(vma))
91920+ BUG_ON(!trylock_page(new_page));
91921+#endif
91922+
91923 if (old_page) {
91924 if (!PageAnon(old_page)) {
91925 dec_mm_counter_fast(mm, MM_FILEPAGES);
91926@@ -2858,6 +3060,10 @@ gotten:
91927 page_remove_rmap(old_page);
91928 }
91929
91930+#ifdef CONFIG_PAX_SEGMEXEC
91931+ pax_mirror_anon_pte(vma, address, new_page, ptl);
91932+#endif
91933+
91934 /* Free the old page.. */
91935 new_page = old_page;
91936 ret |= VM_FAULT_WRITE;
91937@@ -3135,6 +3341,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
91938 swap_free(entry);
91939 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
91940 try_to_free_swap(page);
91941+
91942+#ifdef CONFIG_PAX_SEGMEXEC
91943+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
91944+#endif
91945+
91946 unlock_page(page);
91947 if (page != swapcache) {
91948 /*
91949@@ -3158,6 +3369,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
91950
91951 /* No need to invalidate - it was non-present before */
91952 update_mmu_cache(vma, address, page_table);
91953+
91954+#ifdef CONFIG_PAX_SEGMEXEC
91955+ pax_mirror_anon_pte(vma, address, page, ptl);
91956+#endif
91957+
91958 unlock:
91959 pte_unmap_unlock(page_table, ptl);
91960 out:
91961@@ -3177,40 +3393,6 @@ out_release:
91962 }
91963
91964 /*
91965- * This is like a special single-page "expand_{down|up}wards()",
91966- * except we must first make sure that 'address{-|+}PAGE_SIZE'
91967- * doesn't hit another vma.
91968- */
91969-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
91970-{
91971- address &= PAGE_MASK;
91972- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
91973- struct vm_area_struct *prev = vma->vm_prev;
91974-
91975- /*
91976- * Is there a mapping abutting this one below?
91977- *
91978- * That's only ok if it's the same stack mapping
91979- * that has gotten split..
91980- */
91981- if (prev && prev->vm_end == address)
91982- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
91983-
91984- expand_downwards(vma, address - PAGE_SIZE);
91985- }
91986- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
91987- struct vm_area_struct *next = vma->vm_next;
91988-
91989- /* As VM_GROWSDOWN but s/below/above/ */
91990- if (next && next->vm_start == address + PAGE_SIZE)
91991- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
91992-
91993- expand_upwards(vma, address + PAGE_SIZE);
91994- }
91995- return 0;
91996-}
91997-
91998-/*
91999 * We enter with non-exclusive mmap_sem (to exclude vma changes,
92000 * but allow concurrent faults), and pte mapped but not yet locked.
92001 * We return with mmap_sem still held, but pte unmapped and unlocked.
92002@@ -3219,27 +3401,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
92003 unsigned long address, pte_t *page_table, pmd_t *pmd,
92004 unsigned int flags)
92005 {
92006- struct page *page;
92007+ struct page *page = NULL;
92008 spinlock_t *ptl;
92009 pte_t entry;
92010
92011- pte_unmap(page_table);
92012-
92013- /* Check if we need to add a guard page to the stack */
92014- if (check_stack_guard_page(vma, address) < 0)
92015- return VM_FAULT_SIGBUS;
92016-
92017- /* Use the zero-page for reads */
92018 if (!(flags & FAULT_FLAG_WRITE)) {
92019 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
92020 vma->vm_page_prot));
92021- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
92022+ ptl = pte_lockptr(mm, pmd);
92023+ spin_lock(ptl);
92024 if (!pte_none(*page_table))
92025 goto unlock;
92026 goto setpte;
92027 }
92028
92029 /* Allocate our own private page. */
92030+ pte_unmap(page_table);
92031+
92032 if (unlikely(anon_vma_prepare(vma)))
92033 goto oom;
92034 page = alloc_zeroed_user_highpage_movable(vma, address);
92035@@ -3263,6 +3441,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
92036 if (!pte_none(*page_table))
92037 goto release;
92038
92039+#ifdef CONFIG_PAX_SEGMEXEC
92040+ if (pax_find_mirror_vma(vma))
92041+ BUG_ON(!trylock_page(page));
92042+#endif
92043+
92044 inc_mm_counter_fast(mm, MM_ANONPAGES);
92045 page_add_new_anon_rmap(page, vma, address);
92046 setpte:
92047@@ -3270,6 +3453,12 @@ setpte:
92048
92049 /* No need to invalidate - it was non-present before */
92050 update_mmu_cache(vma, address, page_table);
92051+
92052+#ifdef CONFIG_PAX_SEGMEXEC
92053+ if (page)
92054+ pax_mirror_anon_pte(vma, address, page, ptl);
92055+#endif
92056+
92057 unlock:
92058 pte_unmap_unlock(page_table, ptl);
92059 return 0;
92060@@ -3413,6 +3602,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
92061 */
92062 /* Only go through if we didn't race with anybody else... */
92063 if (likely(pte_same(*page_table, orig_pte))) {
92064+
92065+#ifdef CONFIG_PAX_SEGMEXEC
92066+ if (anon && pax_find_mirror_vma(vma))
92067+ BUG_ON(!trylock_page(page));
92068+#endif
92069+
92070 flush_icache_page(vma, page);
92071 entry = mk_pte(page, vma->vm_page_prot);
92072 if (flags & FAULT_FLAG_WRITE)
92073@@ -3434,6 +3629,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
92074
92075 /* no need to invalidate: a not-present page won't be cached */
92076 update_mmu_cache(vma, address, page_table);
92077+
92078+#ifdef CONFIG_PAX_SEGMEXEC
92079+ if (anon)
92080+ pax_mirror_anon_pte(vma, address, page, ptl);
92081+ else
92082+ pax_mirror_file_pte(vma, address, page, ptl);
92083+#endif
92084+
92085 } else {
92086 if (cow_page)
92087 mem_cgroup_uncharge_page(cow_page);
92088@@ -3681,6 +3884,12 @@ static int handle_pte_fault(struct mm_struct *mm,
92089 if (flags & FAULT_FLAG_WRITE)
92090 flush_tlb_fix_spurious_fault(vma, address);
92091 }
92092+
92093+#ifdef CONFIG_PAX_SEGMEXEC
92094+ pax_mirror_pte(vma, address, pte, pmd, ptl);
92095+ return 0;
92096+#endif
92097+
92098 unlock:
92099 pte_unmap_unlock(pte, ptl);
92100 return 0;
92101@@ -3697,9 +3906,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
92102 pmd_t *pmd;
92103 pte_t *pte;
92104
92105+#ifdef CONFIG_PAX_SEGMEXEC
92106+ struct vm_area_struct *vma_m;
92107+#endif
92108+
92109 if (unlikely(is_vm_hugetlb_page(vma)))
92110 return hugetlb_fault(mm, vma, address, flags);
92111
92112+#ifdef CONFIG_PAX_SEGMEXEC
92113+ vma_m = pax_find_mirror_vma(vma);
92114+ if (vma_m) {
92115+ unsigned long address_m;
92116+ pgd_t *pgd_m;
92117+ pud_t *pud_m;
92118+ pmd_t *pmd_m;
92119+
92120+ if (vma->vm_start > vma_m->vm_start) {
92121+ address_m = address;
92122+ address -= SEGMEXEC_TASK_SIZE;
92123+ vma = vma_m;
92124+ } else
92125+ address_m = address + SEGMEXEC_TASK_SIZE;
92126+
92127+ pgd_m = pgd_offset(mm, address_m);
92128+ pud_m = pud_alloc(mm, pgd_m, address_m);
92129+ if (!pud_m)
92130+ return VM_FAULT_OOM;
92131+ pmd_m = pmd_alloc(mm, pud_m, address_m);
92132+ if (!pmd_m)
92133+ return VM_FAULT_OOM;
92134+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
92135+ return VM_FAULT_OOM;
92136+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
92137+ }
92138+#endif
92139+
92140 pgd = pgd_offset(mm, address);
92141 pud = pud_alloc(mm, pgd, address);
92142 if (!pud)
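
In __handle_mm_fault() the mirrored case is normalised before the regular page-table walk: a fault that hits the upper (mirror) half is redirected to the lower vma by subtracting SEGMEXEC_TASK_SIZE, the mirror's page tables are pre-allocated so the later PTE copy cannot fail, and any stale mirror PTE is removed via pax_unmap_mirror_pte(). A small sketch of just the normalisation step (toy types, same split-size assumption as above):

	#include <stdio.h>

	#define SEGMEXEC_TASK_SIZE 0x60000000UL

	struct toy_vma { unsigned long vm_start; };

	/* returns the address to fault in the lower half, and sets the mirror address */
	static unsigned long normalise(const struct toy_vma *vma, const struct toy_vma *vma_m,
				       unsigned long address, unsigned long *address_m)
	{
		if (vma->vm_start > vma_m->vm_start) {	/* fault hit the upper half */
			*address_m = address;
			return address - SEGMEXEC_TASK_SIZE;
		}
		*address_m = address + SEGMEXEC_TASK_SIZE;	/* fault hit the lower half */
		return address;
	}

	int main(void)
	{
		struct toy_vma lower = { 0x08048000UL };
		struct toy_vma upper = { 0x08048000UL + SEGMEXEC_TASK_SIZE };
		unsigned long address_m;
		unsigned long addr = normalise(&upper, &lower, upper.vm_start + 0x10, &address_m);

		printf("fault at %#lx, mirror PTE at %#lx\n", addr, address_m);
		return 0;
	}
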
92143@@ -3830,6 +4071,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
92144 spin_unlock(&mm->page_table_lock);
92145 return 0;
92146 }
92147+
92148+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
92149+{
92150+ pud_t *new = pud_alloc_one(mm, address);
92151+ if (!new)
92152+ return -ENOMEM;
92153+
92154+ smp_wmb(); /* See comment in __pte_alloc */
92155+
92156+ spin_lock(&mm->page_table_lock);
92157+ if (pgd_present(*pgd)) /* Another has populated it */
92158+ pud_free(mm, new);
92159+ else
92160+ pgd_populate_kernel(mm, pgd, new);
92161+ spin_unlock(&mm->page_table_lock);
92162+ return 0;
92163+}
92164 #endif /* __PAGETABLE_PUD_FOLDED */
92165
92166 #ifndef __PAGETABLE_PMD_FOLDED
92167@@ -3860,6 +4118,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
92168 spin_unlock(&mm->page_table_lock);
92169 return 0;
92170 }
92171+
92172+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
92173+{
92174+ pmd_t *new = pmd_alloc_one(mm, address);
92175+ if (!new)
92176+ return -ENOMEM;
92177+
92178+ smp_wmb(); /* See comment in __pte_alloc */
92179+
92180+ spin_lock(&mm->page_table_lock);
92181+#ifndef __ARCH_HAS_4LEVEL_HACK
92182+ if (pud_present(*pud)) /* Another has populated it */
92183+ pmd_free(mm, new);
92184+ else
92185+ pud_populate_kernel(mm, pud, new);
92186+#else
92187+ if (pgd_present(*pud)) /* Another has populated it */
92188+ pmd_free(mm, new);
92189+ else
92190+ pgd_populate_kernel(mm, pud, new);
92191+#endif /* __ARCH_HAS_4LEVEL_HACK */
92192+ spin_unlock(&mm->page_table_lock);
92193+ return 0;
92194+}
92195 #endif /* __PAGETABLE_PMD_FOLDED */
92196
92197 #if !defined(__HAVE_ARCH_GATE_AREA)
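
__pud_alloc_kernel() and __pmd_alloc_kernel() repeat the upstream allocation dance — allocate outside the lock, smp_wmb(), re-check under page_table_lock, and free the new table if another thread populated the slot first — but install the table with the pgd/pud_populate_kernel() variants so the kernel half of the address space never picks up user-accessible entries (on x86 that is the USER bit; treat that rationale as an inference, the hunks only show the call switch). A runnable pthread model of the lose-the-race-and-free pattern, with stand-in names:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
	static int *slot;	/* models the pgd/pud entry */

	static void *populate(void *arg)
	{
		int *new = malloc(sizeof(*new));	/* allocate outside the lock */
		if (!new)
			return NULL;
		*new = (int)(long)arg;

		pthread_mutex_lock(&page_table_lock);
		if (slot)		/* another thread has populated it */
			free(new);
		else
			slot = new;	/* publish, cf. pud_populate_kernel() */
		pthread_mutex_unlock(&page_table_lock);
		return NULL;
	}

	int main(void)		/* build with -lpthread */
	{
		pthread_t a, b;

		pthread_create(&a, NULL, populate, (void *)1L);
		pthread_create(&b, NULL, populate, (void *)2L);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		printf("slot populated with %d, exactly once\n", *slot);
		return 0;
	}
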
92198@@ -3873,7 +4155,7 @@ static int __init gate_vma_init(void)
92199 gate_vma.vm_start = FIXADDR_USER_START;
92200 gate_vma.vm_end = FIXADDR_USER_END;
92201 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
92202- gate_vma.vm_page_prot = __P101;
92203+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
92204
92205 return 0;
92206 }
92207@@ -4007,8 +4289,8 @@ out:
92208 return ret;
92209 }
92210
92211-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
92212- void *buf, int len, int write)
92213+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
92214+ void *buf, size_t len, int write)
92215 {
92216 resource_size_t phys_addr;
92217 unsigned long prot = 0;
92218@@ -4034,8 +4316,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
92219 * Access another process' address space as given in mm. If non-NULL, use the
92220 * given task for page fault accounting.
92221 */
92222-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
92223- unsigned long addr, void *buf, int len, int write)
92224+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
92225+ unsigned long addr, void *buf, size_t len, int write)
92226 {
92227 struct vm_area_struct *vma;
92228 void *old_buf = buf;
92229@@ -4043,7 +4325,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
92230 down_read(&mm->mmap_sem);
92231 /* ignore errors, just check how much was successfully transferred */
92232 while (len) {
92233- int bytes, ret, offset;
92234+ ssize_t bytes, ret, offset;
92235 void *maddr;
92236 struct page *page = NULL;
92237
92238@@ -4102,8 +4384,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
92239 *
92240 * The caller must hold a reference on @mm.
92241 */
92242-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
92243- void *buf, int len, int write)
92244+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
92245+ void *buf, size_t len, int write)
92246 {
92247 return __access_remote_vm(NULL, mm, addr, buf, len, write);
92248 }
92249@@ -4113,11 +4395,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
92250 * Source/target buffer must be kernel space,
92251 * Do not walk the page table directly, use get_user_pages
92252 */
92253-int access_process_vm(struct task_struct *tsk, unsigned long addr,
92254- void *buf, int len, int write)
92255+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
92256+ void *buf, size_t len, int write)
92257 {
92258 struct mm_struct *mm;
92259- int ret;
92260+ ssize_t ret;
92261
92262 mm = get_task_mm(tsk);
92263 if (!mm)
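
The access_remote_vm()/access_process_vm() family is widened from int to size_t/ssize_t for lengths and return values, in the same spirit as the size-overflow hardening elsewhere in this patch: a 32-bit int silently truncates or goes negative for transfers of 2 GiB and up, while size_t/ssize_t keep the full range on 64-bit. A two-line demonstration of the truncation being defended against (on a common LP64 target):

	#include <stdio.h>
	#include <sys/types.h>

	int main(void)
	{
		size_t len = 0x100000000UL;	/* a 4 GiB request */

		/* as an int parameter this arrives as 0; as ssize_t it survives */
		printf("as int: %d, as ssize_t: %zd\n", (int)len, (ssize_t)len);
		return 0;
	}
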
92264diff --git a/mm/mempolicy.c b/mm/mempolicy.c
92265index cb2f3dd..fb80468 100644
92266--- a/mm/mempolicy.c
92267+++ b/mm/mempolicy.c
92268@@ -747,6 +747,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
92269 unsigned long vmstart;
92270 unsigned long vmend;
92271
92272+#ifdef CONFIG_PAX_SEGMEXEC
92273+ struct vm_area_struct *vma_m;
92274+#endif
92275+
92276 vma = find_vma(mm, start);
92277 if (!vma || vma->vm_start > start)
92278 return -EFAULT;
92279@@ -790,6 +794,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
92280 err = vma_replace_policy(vma, new_pol);
92281 if (err)
92282 goto out;
92283+
92284+#ifdef CONFIG_PAX_SEGMEXEC
92285+ vma_m = pax_find_mirror_vma(vma);
92286+ if (vma_m) {
92287+ err = vma_replace_policy(vma_m, new_pol);
92288+ if (err)
92289+ goto out;
92290+ }
92291+#endif
92292+
92293 }
92294
92295 out:
92296@@ -1255,6 +1269,17 @@ static long do_mbind(unsigned long start, unsigned long len,
92297
92298 if (end < start)
92299 return -EINVAL;
92300+
92301+#ifdef CONFIG_PAX_SEGMEXEC
92302+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
92303+ if (end > SEGMEXEC_TASK_SIZE)
92304+ return -EINVAL;
92305+ } else
92306+#endif
92307+
92308+ if (end > TASK_SIZE)
92309+ return -EINVAL;
92310+
92311 if (end == start)
92312 return 0;
92313
92314@@ -1483,8 +1508,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
92315 */
92316 tcred = __task_cred(task);
92317 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
92318- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
92319- !capable(CAP_SYS_NICE)) {
92320+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
92321 rcu_read_unlock();
92322 err = -EPERM;
92323 goto out_put;
92324@@ -1515,6 +1539,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
92325 goto out;
92326 }
92327
92328+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
92329+ if (mm != current->mm &&
92330+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
92331+ mmput(mm);
92332+ err = -EPERM;
92333+ goto out;
92334+ }
92335+#endif
92336+
92337 err = do_migrate_pages(mm, old, new,
92338 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
92339
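
Two mempolicy changes here: mbind_range() replays vma_replace_policy() on the SEGMEXEC mirror so both halves of an executable mapping carry the same NUMA policy, and do_mbind() rejects ranges extending past the live address-space ceiling (SEGMEXEC_TASK_SIZE under SEGMEXEC, TASK_SIZE otherwise). A minimal model of that bounds check — the two sizes are assumed i386 values, not taken from the hunk:

	#include <stdio.h>
	#include <errno.h>
	#include <stdbool.h>

	#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed i386 split */
	#define TASK_SIZE          0xc0000000UL	/* assumed i386 3 GiB user space */

	static int check_mbind_end(unsigned long end, bool segmexec)
	{
		unsigned long limit = segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE;

		return end > limit ? -EINVAL : 0;
	}

	int main(void)
	{
		printf("2 GiB end, segmexec: %d\n", check_mbind_end(0x80000000UL, true));
		printf("2 GiB end, plain:    %d\n", check_mbind_end(0x80000000UL, false));
		return 0;
	}
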
92340diff --git a/mm/migrate.c b/mm/migrate.c
92341index 9194375..75c81e2 100644
92342--- a/mm/migrate.c
92343+++ b/mm/migrate.c
92344@@ -1464,8 +1464,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
92345 */
92346 tcred = __task_cred(task);
92347 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
92348- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
92349- !capable(CAP_SYS_NICE)) {
92350+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
92351 rcu_read_unlock();
92352 err = -EPERM;
92353 goto out;
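
The same credential check is tightened in both migrate_pages() (mempolicy.c above) and move_pages() here: the real-uid-matches-real-uid clause is dropped, so acting on another process' pages now requires an effective-uid match against the target's real or saved uid, a uid/suid match, or CAP_SYS_NICE. A truth-table model of the before/after predicate, with plain integers in place of kuid_t:

	#include <stdio.h>
	#include <stdbool.h>

	struct cred { unsigned uid, euid, suid; };

	/* predicate before the patch: a real-uid match was enough */
	static bool allowed_old(struct cred c, struct cred t, bool cap)
	{
		return c.euid == t.suid || c.euid == t.uid ||
		       c.uid == t.suid || c.uid == t.uid || cap;
	}

	/* predicate after the patch: the uid==uid clause is gone */
	static bool allowed_new(struct cred c, struct cred t, bool cap)
	{
		return c.euid == t.suid || c.euid == t.uid ||
		       c.uid == t.suid || cap;
	}

	int main(void)
	{
		struct cred caller = { .uid = 1000, .euid = 500, .suid = 500 };
		struct cred target = { .uid = 1000, .euid = 0,   .suid = 0   };

		/* real uids match but nothing else does: old allows, new denies */
		printf("old=%d new=%d\n", allowed_old(caller, target, false),
		       allowed_new(caller, target, false));
		return 0;
	}
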
92354diff --git a/mm/mlock.c b/mm/mlock.c
92355index 192e6ee..b044449 100644
92356--- a/mm/mlock.c
92357+++ b/mm/mlock.c
92358@@ -14,6 +14,7 @@
92359 #include <linux/pagevec.h>
92360 #include <linux/mempolicy.h>
92361 #include <linux/syscalls.h>
92362+#include <linux/security.h>
92363 #include <linux/sched.h>
92364 #include <linux/export.h>
92365 #include <linux/rmap.h>
92366@@ -588,7 +589,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
92367 {
92368 unsigned long nstart, end, tmp;
92369 struct vm_area_struct * vma, * prev;
92370- int error;
92371+ int error = 0;
92372
92373 VM_BUG_ON(start & ~PAGE_MASK);
92374 VM_BUG_ON(len != PAGE_ALIGN(len));
92375@@ -597,6 +598,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
92376 return -EINVAL;
92377 if (end == start)
92378 return 0;
92379+ if (end > TASK_SIZE)
92380+ return -EINVAL;
92381+
92382 vma = find_vma(current->mm, start);
92383 if (!vma || vma->vm_start > start)
92384 return -ENOMEM;
92385@@ -608,6 +612,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
92386 for (nstart = start ; ; ) {
92387 vm_flags_t newflags;
92388
92389+#ifdef CONFIG_PAX_SEGMEXEC
92390+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
92391+ break;
92392+#endif
92393+
92394 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
92395
92396 newflags = vma->vm_flags & ~VM_LOCKED;
92397@@ -720,6 +729,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
92398 lock_limit >>= PAGE_SHIFT;
92399
92400 /* check against resource limits */
92401+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
92402 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
92403 error = do_mlock(start, len, 1);
92404 up_write(&current->mm->mmap_sem);
92405@@ -754,6 +764,11 @@ static int do_mlockall(int flags)
92406 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
92407 vm_flags_t newflags;
92408
92409+#ifdef CONFIG_PAX_SEGMEXEC
92410+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
92411+ break;
92412+#endif
92413+
92414 newflags = vma->vm_flags & ~VM_LOCKED;
92415 if (flags & MCL_CURRENT)
92416 newflags |= VM_LOCKED;
92417@@ -787,6 +802,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
92418 lock_limit >>= PAGE_SHIFT;
92419
92420 ret = -ENOMEM;
92421+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
92422 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
92423 capable(CAP_IPC_LOCK))
92424 ret = do_mlockall(flags);
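
Beyond the gr_learn_resource() learning-mode hooks, the end > TASK_SIZE sanity check, and the SEGMEXEC early break, the mlock arithmetic is unchanged: RLIMIT_MEMLOCK arrives in bytes and is shifted down to pages before being compared against the locked page count. A tiny worked example of that conversion, with 4 KiB pages and made-up numbers:

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

	int main(void)
	{
		unsigned long rlim_bytes = 65536;			/* typical 64 KiB RLIMIT_MEMLOCK */
		unsigned long lock_limit = rlim_bytes >> PAGE_SHIFT;	/* -> 16 pages */
		unsigned long locked = (32768 >> PAGE_SHIFT) + 10;	/* 8 new pages + 10 already locked */

		printf("limit %lu pages, request %lu pages -> %s\n", lock_limit, locked,
		       locked <= lock_limit ? "ok" : "needs CAP_IPC_LOCK");
		return 0;
	}
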
92425diff --git a/mm/mmap.c b/mm/mmap.c
92426index 546db74..650d1b9 100644
92427--- a/mm/mmap.c
92428+++ b/mm/mmap.c
92429@@ -36,6 +36,7 @@
92430 #include <linux/sched/sysctl.h>
92431 #include <linux/notifier.h>
92432 #include <linux/memory.h>
92433+#include <linux/random.h>
92434
92435 #include <asm/uaccess.h>
92436 #include <asm/cacheflush.h>
92437@@ -52,6 +53,16 @@
92438 #define arch_rebalance_pgtables(addr, len) (addr)
92439 #endif
92440
92441+static inline void verify_mm_writelocked(struct mm_struct *mm)
92442+{
92443+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
92444+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
92445+ up_read(&mm->mmap_sem);
92446+ BUG();
92447+ }
92448+#endif
92449+}
92450+
92451 static void unmap_region(struct mm_struct *mm,
92452 struct vm_area_struct *vma, struct vm_area_struct *prev,
92453 unsigned long start, unsigned long end);
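
verify_mm_writelocked() moves to the top of the file and is strengthened: it also fires under CONFIG_PAX, and it BUG()s instead of warning. The trick is that down_read_trylock() on an rwsem can only succeed while nobody holds it for write, so a successful read trylock proves the write lock the caller should hold is absent (like the kernel check, it cannot prove the caller is the writer, only that some writer exists). A runnable pthread-rwlock version of the same assertion:

	#include <pthread.h>
	#include <stdio.h>
	#include <assert.h>

	static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

	static void *verify_writelocked(void *arg)
	{
		(void)arg;
		/* a read trylock can only succeed if no writer holds the lock */
		if (pthread_rwlock_tryrdlock(&mmap_sem) == 0) {
			pthread_rwlock_unlock(&mmap_sem);
			assert(!"caller does not hold the write lock");
		}
		return NULL;
	}

	int main(void)		/* build with -lpthread */
	{
		pthread_t t;

		pthread_rwlock_wrlock(&mmap_sem);
		pthread_create(&t, NULL, verify_writelocked, NULL);	/* trylock fails: check passes */
		pthread_join(t, NULL);
		pthread_rwlock_unlock(&mmap_sem);
		printf("write-lock assertion held\n");
		return 0;
	}
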
92454@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
92455 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
92456 *
92457 */
92458-pgprot_t protection_map[16] = {
92459+pgprot_t protection_map[16] __read_only = {
92460 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
92461 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
92462 };
92463
92464-pgprot_t vm_get_page_prot(unsigned long vm_flags)
92465+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
92466 {
92467- return __pgprot(pgprot_val(protection_map[vm_flags &
92468+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
92469 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
92470 pgprot_val(arch_vm_get_page_prot(vm_flags)));
92471+
92472+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
92473+ if (!(__supported_pte_mask & _PAGE_NX) &&
92474+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
92475+ (vm_flags & (VM_READ | VM_WRITE)))
92476+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
92477+#endif
92478+
92479+ return prot;
92480 }
92481 EXPORT_SYMBOL(vm_get_page_prot);
92482
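
vm_get_page_prot() keeps the upstream 16-entry protection_map lookup, indexed by the low READ/WRITE/EXEC/SHARED flag bits and now marked __read_only; on i386 without hardware NX, readable or writable mappings tracked as VM_PAGEEXEC but not VM_EXEC additionally get the executable encoding stripped via pte_exprotect(). A standalone model of the table lookup itself — the table entries are labels standing in for real pgprot values:

	#include <stdio.h>

	#define VM_READ   0x1
	#define VM_WRITE  0x2
	#define VM_EXEC   0x4
	#define VM_SHARED 0x8

	/* one slot per READ|WRITE|EXEC|SHARED combination, cf. protection_map[16] */
	static const char *protection_map[16] = {
		"__P000", "__P001", "__P010", "__P011", "__P100", "__P101", "__P110", "__P111",
		"__S000", "__S001", "__S010", "__S011", "__S100", "__S101", "__S110", "__S111",
	};

	static const char *vm_get_page_prot(unsigned long vm_flags)
	{
		return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}

	int main(void)
	{
		printf("PROT_READ|PROT_EXEC, private -> %s\n",
		       vm_get_page_prot(VM_READ | VM_EXEC));	/* __P101 */
		return 0;
	}
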
92483@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
92484 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
92485 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
92486 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
92487+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
92488 /*
92489 * Make sure vm_committed_as in one cacheline and not cacheline shared with
92490 * other variables. It can be updated by several CPUs frequently.
92491@@ -245,6 +266,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
92492 struct vm_area_struct *next = vma->vm_next;
92493
92494 might_sleep();
92495+ BUG_ON(vma->vm_mirror);
92496 if (vma->vm_ops && vma->vm_ops->close)
92497 vma->vm_ops->close(vma);
92498 if (vma->vm_file)
92499@@ -289,6 +311,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
92500 * not page aligned -Ram Gupta
92501 */
92502 rlim = rlimit(RLIMIT_DATA);
92503+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
92504 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
92505 (mm->end_data - mm->start_data) > rlim)
92506 goto out;
92507@@ -939,6 +962,12 @@ static int
92508 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
92509 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
92510 {
92511+
92512+#ifdef CONFIG_PAX_SEGMEXEC
92513+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
92514+ return 0;
92515+#endif
92516+
92517 if (is_mergeable_vma(vma, file, vm_flags) &&
92518 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
92519 if (vma->vm_pgoff == vm_pgoff)
92520@@ -958,6 +987,12 @@ static int
92521 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
92522 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
92523 {
92524+
92525+#ifdef CONFIG_PAX_SEGMEXEC
92526+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
92527+ return 0;
92528+#endif
92529+
92530 if (is_mergeable_vma(vma, file, vm_flags) &&
92531 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
92532 pgoff_t vm_pglen;
92533@@ -1000,13 +1035,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
92534 struct vm_area_struct *vma_merge(struct mm_struct *mm,
92535 struct vm_area_struct *prev, unsigned long addr,
92536 unsigned long end, unsigned long vm_flags,
92537- struct anon_vma *anon_vma, struct file *file,
92538+ struct anon_vma *anon_vma, struct file *file,
92539 pgoff_t pgoff, struct mempolicy *policy)
92540 {
92541 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
92542 struct vm_area_struct *area, *next;
92543 int err;
92544
92545+#ifdef CONFIG_PAX_SEGMEXEC
92546+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
92547+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
92548+
92549+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
92550+#endif
92551+
92552 /*
92553 * We later require that vma->vm_flags == vm_flags,
92554 * so this tests vma->vm_flags & VM_SPECIAL, too.
92555@@ -1022,6 +1064,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
92556 if (next && next->vm_end == end) /* cases 6, 7, 8 */
92557 next = next->vm_next;
92558
92559+#ifdef CONFIG_PAX_SEGMEXEC
92560+ if (prev)
92561+ prev_m = pax_find_mirror_vma(prev);
92562+ if (area)
92563+ area_m = pax_find_mirror_vma(area);
92564+ if (next)
92565+ next_m = pax_find_mirror_vma(next);
92566+#endif
92567+
92568 /*
92569 * Can it merge with the predecessor?
92570 */
92571@@ -1041,9 +1092,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
92572 /* cases 1, 6 */
92573 err = vma_adjust(prev, prev->vm_start,
92574 next->vm_end, prev->vm_pgoff, NULL);
92575- } else /* cases 2, 5, 7 */
92576+
92577+#ifdef CONFIG_PAX_SEGMEXEC
92578+ if (!err && prev_m)
92579+ err = vma_adjust(prev_m, prev_m->vm_start,
92580+ next_m->vm_end, prev_m->vm_pgoff, NULL);
92581+#endif
92582+
92583+ } else { /* cases 2, 5, 7 */
92584 err = vma_adjust(prev, prev->vm_start,
92585 end, prev->vm_pgoff, NULL);
92586+
92587+#ifdef CONFIG_PAX_SEGMEXEC
92588+ if (!err && prev_m)
92589+ err = vma_adjust(prev_m, prev_m->vm_start,
92590+ end_m, prev_m->vm_pgoff, NULL);
92591+#endif
92592+
92593+ }
92594 if (err)
92595 return NULL;
92596 khugepaged_enter_vma_merge(prev);
92597@@ -1057,12 +1123,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
92598 mpol_equal(policy, vma_policy(next)) &&
92599 can_vma_merge_before(next, vm_flags,
92600 anon_vma, file, pgoff+pglen)) {
92601- if (prev && addr < prev->vm_end) /* case 4 */
92602+ if (prev && addr < prev->vm_end) { /* case 4 */
92603 err = vma_adjust(prev, prev->vm_start,
92604 addr, prev->vm_pgoff, NULL);
92605- else /* cases 3, 8 */
92606+
92607+#ifdef CONFIG_PAX_SEGMEXEC
92608+ if (!err && prev_m)
92609+ err = vma_adjust(prev_m, prev_m->vm_start,
92610+ addr_m, prev_m->vm_pgoff, NULL);
92611+#endif
92612+
92613+ } else { /* cases 3, 8 */
92614 err = vma_adjust(area, addr, next->vm_end,
92615 next->vm_pgoff - pglen, NULL);
92616+
92617+#ifdef CONFIG_PAX_SEGMEXEC
92618+ if (!err && area_m)
92619+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
92620+ next_m->vm_pgoff - pglen, NULL);
92621+#endif
92622+
92623+ }
92624 if (err)
92625 return NULL;
92626 khugepaged_enter_vma_merge(area);
92627@@ -1171,8 +1252,10 @@ none:
92628 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
92629 struct file *file, long pages)
92630 {
92631- const unsigned long stack_flags
92632- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
92633+
92634+#ifdef CONFIG_PAX_RANDMMAP
92635+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
92636+#endif
92637
92638 mm->total_vm += pages;
92639
92640@@ -1180,7 +1263,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
92641 mm->shared_vm += pages;
92642 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
92643 mm->exec_vm += pages;
92644- } else if (flags & stack_flags)
92645+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
92646 mm->stack_vm += pages;
92647 }
92648 #endif /* CONFIG_PROC_FS */
92649@@ -1218,7 +1301,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92650 * (the exception is when the underlying filesystem is noexec
92651 * mounted, in which case we dont add PROT_EXEC.)
92652 */
92653- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
92654+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
92655 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
92656 prot |= PROT_EXEC;
92657
92658@@ -1244,7 +1327,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92659 /* Obtain the address to map to. we verify (or select) it and ensure
92660 * that it represents a valid section of the address space.
92661 */
92662- addr = get_unmapped_area(file, addr, len, pgoff, flags);
92663+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
92664 if (addr & ~PAGE_MASK)
92665 return addr;
92666
92667@@ -1255,6 +1338,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92668 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
92669 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
92670
92671+#ifdef CONFIG_PAX_MPROTECT
92672+ if (mm->pax_flags & MF_PAX_MPROTECT) {
92673+
92674+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
92675+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
92676+ mm->binfmt->handle_mmap)
92677+ mm->binfmt->handle_mmap(file);
92678+#endif
92679+
92680+#ifndef CONFIG_PAX_MPROTECT_COMPAT
92681+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
92682+ gr_log_rwxmmap(file);
92683+
92684+#ifdef CONFIG_PAX_EMUPLT
92685+ vm_flags &= ~VM_EXEC;
92686+#else
92687+ return -EPERM;
92688+#endif
92689+
92690+ }
92691+
92692+ if (!(vm_flags & VM_EXEC))
92693+ vm_flags &= ~VM_MAYEXEC;
92694+#else
92695+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
92696+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
92697+#endif
92698+ else
92699+ vm_flags &= ~VM_MAYWRITE;
92700+ }
92701+#endif
92702+
92703+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
92704+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
92705+ vm_flags &= ~VM_PAGEEXEC;
92706+#endif
92707+
92708 if (flags & MAP_LOCKED)
92709 if (!can_do_mlock())
92710 return -EPERM;
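
The MPROTECT block in do_mmap_pgoff() enforces W^X at map time: a request for both VM_WRITE and VM_EXEC either loses VM_EXEC (with EMUPLT) or fails with -EPERM, and a mapping created non-executable also loses VM_MAYEXEC so it can never be mprotect()ed executable later; the MPROTECT_COMPAT variant instead silently strips executability from writable requests. Note the deliberate dangling else: whichever if the preprocessor keeps, the trailing "else vm_flags &= ~VM_MAYWRITE" pairs with it, removing mprotect-to-writable from exec mappings. A model of the non-COMPAT, non-EMUPLT transform, using the real kernel flag values:

	#include <stdio.h>

	#define VM_WRITE    0x2UL
	#define VM_EXEC     0x4UL
	#define VM_MAYWRITE 0x20UL
	#define VM_MAYEXEC  0x40UL

	/* returns -1 for a rejected W|X request, cf. return -EPERM */
	static long mprotect_filter(unsigned long vm_flags)
	{
		if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
			return -1;
		if (!(vm_flags & VM_EXEC))
			vm_flags &= ~VM_MAYEXEC;	/* can never become executable */
		else
			vm_flags &= ~VM_MAYWRITE;	/* can never become writable */
		return (long)vm_flags;
	}

	int main(void)
	{
		printf("rw  -> %#lx\n", mprotect_filter(VM_WRITE | VM_MAYWRITE | VM_MAYEXEC));
		printf("r-x -> %#lx\n", mprotect_filter(VM_EXEC | VM_MAYWRITE | VM_MAYEXEC));
		printf("rwx -> %ld\n",  mprotect_filter(VM_WRITE | VM_EXEC));
		return 0;
	}
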
92711@@ -1266,6 +1386,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92712 locked += mm->locked_vm;
92713 lock_limit = rlimit(RLIMIT_MEMLOCK);
92714 lock_limit >>= PAGE_SHIFT;
92715+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
92716 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
92717 return -EAGAIN;
92718 }
92719@@ -1350,6 +1471,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92720 vm_flags |= VM_NORESERVE;
92721 }
92722
92723+ if (!gr_acl_handle_mmap(file, prot))
92724+ return -EACCES;
92725+
92726 addr = mmap_region(file, addr, len, vm_flags, pgoff);
92727 if (!IS_ERR_VALUE(addr) &&
92728 ((vm_flags & VM_LOCKED) ||
92729@@ -1443,7 +1567,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
92730 vm_flags_t vm_flags = vma->vm_flags;
92731
92732 /* If it was private or non-writable, the write bit is already clear */
92733- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
92734+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
92735 return 0;
92736
92737 /* The backer wishes to know when pages are first written to? */
92738@@ -1489,7 +1613,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
92739 struct rb_node **rb_link, *rb_parent;
92740 unsigned long charged = 0;
92741
92742+#ifdef CONFIG_PAX_SEGMEXEC
92743+ struct vm_area_struct *vma_m = NULL;
92744+#endif
92745+
92746+ /*
92747+ * mm->mmap_sem is required to protect against another thread
92748+ * changing the mappings in case we sleep.
92749+ */
92750+ verify_mm_writelocked(mm);
92751+
92752 /* Check against address space limit. */
92753+
92754+#ifdef CONFIG_PAX_RANDMMAP
92755+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
92756+#endif
92757+
92758 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
92759 unsigned long nr_pages;
92760
92761@@ -1508,11 +1647,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
92762
92763 /* Clear old maps */
92764 error = -ENOMEM;
92765-munmap_back:
92766 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
92767 if (do_munmap(mm, addr, len))
92768 return -ENOMEM;
92769- goto munmap_back;
92770+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
92771 }
92772
92773 /*
92774@@ -1543,6 +1681,16 @@ munmap_back:
92775 goto unacct_error;
92776 }
92777
92778+#ifdef CONFIG_PAX_SEGMEXEC
92779+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
92780+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
92781+ if (!vma_m) {
92782+ error = -ENOMEM;
92783+ goto free_vma;
92784+ }
92785+ }
92786+#endif
92787+
92788 vma->vm_mm = mm;
92789 vma->vm_start = addr;
92790 vma->vm_end = addr + len;
92791@@ -1562,6 +1710,13 @@ munmap_back:
92792 if (error)
92793 goto unmap_and_free_vma;
92794
92795+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
92796+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
92797+ vma->vm_flags |= VM_PAGEEXEC;
92798+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
92799+ }
92800+#endif
92801+
92802 /* Can addr have changed??
92803 *
92804 * Answer: Yes, several device drivers can do it in their
92805@@ -1595,6 +1750,12 @@ munmap_back:
92806 }
92807
92808 vma_link(mm, vma, prev, rb_link, rb_parent);
92809+
92810+#ifdef CONFIG_PAX_SEGMEXEC
92811+ if (vma_m)
92812+ BUG_ON(pax_mirror_vma(vma_m, vma));
92813+#endif
92814+
92815 /* Once vma denies write, undo our temporary denial count */
92816 if (vm_flags & VM_DENYWRITE)
92817 allow_write_access(file);
92818@@ -1603,6 +1764,7 @@ out:
92819 perf_event_mmap(vma);
92820
92821 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
92822+ track_exec_limit(mm, addr, addr + len, vm_flags);
92823 if (vm_flags & VM_LOCKED) {
92824 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
92825 vma == get_gate_vma(current->mm)))
92826@@ -1635,6 +1797,12 @@ unmap_and_free_vma:
92827 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
92828 charged = 0;
92829 free_vma:
92830+
92831+#ifdef CONFIG_PAX_SEGMEXEC
92832+ if (vma_m)
92833+ kmem_cache_free(vm_area_cachep, vma_m);
92834+#endif
92835+
92836 kmem_cache_free(vm_area_cachep, vma);
92837 unacct_error:
92838 if (charged)
92839@@ -1642,7 +1810,63 @@ unacct_error:
92840 return error;
92841 }
92842
92843-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
92844+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
92845+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
92846+{
92847+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
92848+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
92849+
92850+ return 0;
92851+}
92852+#endif
92853+
92854+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
92855+{
92856+ if (!vma) {
92857+#ifdef CONFIG_STACK_GROWSUP
92858+ if (addr > sysctl_heap_stack_gap)
92859+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
92860+ else
92861+ vma = find_vma(current->mm, 0);
92862+ if (vma && (vma->vm_flags & VM_GROWSUP))
92863+ return false;
92864+#endif
92865+ return true;
92866+ }
92867+
92868+ if (addr + len > vma->vm_start)
92869+ return false;
92870+
92871+ if (vma->vm_flags & VM_GROWSDOWN)
92872+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
92873+#ifdef CONFIG_STACK_GROWSUP
92874+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
92875+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
92876+#endif
92877+ else if (offset)
92878+ return offset <= vma->vm_start - addr - len;
92879+
92880+ return true;
92881+}
92882+
92883+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
92884+{
92885+ if (vma->vm_start < len)
92886+ return -ENOMEM;
92887+
92888+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
92889+ if (offset <= vma->vm_start - len)
92890+ return vma->vm_start - len - offset;
92891+ else
92892+ return -ENOMEM;
92893+ }
92894+
92895+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
92896+ return vma->vm_start - len - sysctl_heap_stack_gap;
92897+ return -ENOMEM;
92898+}
92899+
92900+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
92901 {
92902 /*
92903 * We implement the search by looking for an rbtree node that
92904@@ -1690,11 +1914,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
92905 }
92906 }
92907
92908- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
92909+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
92910 check_current:
92911 /* Check if current node has a suitable gap */
92912 if (gap_start > high_limit)
92913 return -ENOMEM;
92914+
92915+ if (gap_end - gap_start > info->threadstack_offset)
92916+ gap_start += info->threadstack_offset;
92917+ else
92918+ gap_start = gap_end;
92919+
92920+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
92921+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92922+ gap_start += sysctl_heap_stack_gap;
92923+ else
92924+ gap_start = gap_end;
92925+ }
92926+ if (vma->vm_flags & VM_GROWSDOWN) {
92927+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92928+ gap_end -= sysctl_heap_stack_gap;
92929+ else
92930+ gap_end = gap_start;
92931+ }
92932 if (gap_end >= low_limit && gap_end - gap_start >= length)
92933 goto found;
92934
92935@@ -1744,7 +1986,7 @@ found:
92936 return gap_start;
92937 }
92938
92939-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
92940+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
92941 {
92942 struct mm_struct *mm = current->mm;
92943 struct vm_area_struct *vma;
92944@@ -1798,6 +2040,24 @@ check_current:
92945 gap_end = vma->vm_start;
92946 if (gap_end < low_limit)
92947 return -ENOMEM;
92948+
92949+ if (gap_end - gap_start > info->threadstack_offset)
92950+ gap_end -= info->threadstack_offset;
92951+ else
92952+ gap_end = gap_start;
92953+
92954+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
92955+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92956+ gap_start += sysctl_heap_stack_gap;
92957+ else
92958+ gap_start = gap_end;
92959+ }
92960+ if (vma->vm_flags & VM_GROWSDOWN) {
92961+ if (gap_end - gap_start > sysctl_heap_stack_gap)
92962+ gap_end -= sysctl_heap_stack_gap;
92963+ else
92964+ gap_end = gap_start;
92965+ }
92966 if (gap_start <= high_limit && gap_end - gap_start >= length)
92967 goto found;
92968
92969@@ -1861,6 +2121,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92970 struct mm_struct *mm = current->mm;
92971 struct vm_area_struct *vma;
92972 struct vm_unmapped_area_info info;
92973+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
92974
92975 if (len > TASK_SIZE - mmap_min_addr)
92976 return -ENOMEM;
92977@@ -1868,11 +2129,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92978 if (flags & MAP_FIXED)
92979 return addr;
92980
92981+#ifdef CONFIG_PAX_RANDMMAP
92982+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
92983+#endif
92984+
92985 if (addr) {
92986 addr = PAGE_ALIGN(addr);
92987 vma = find_vma(mm, addr);
92988 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
92989- (!vma || addr + len <= vma->vm_start))
92990+ check_heap_stack_gap(vma, addr, len, offset))
92991 return addr;
92992 }
92993
92994@@ -1881,6 +2146,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
92995 info.low_limit = mm->mmap_base;
92996 info.high_limit = TASK_SIZE;
92997 info.align_mask = 0;
92998+ info.threadstack_offset = offset;
92999 return vm_unmapped_area(&info);
93000 }
93001 #endif
93002@@ -1899,6 +2165,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
93003 struct mm_struct *mm = current->mm;
93004 unsigned long addr = addr0;
93005 struct vm_unmapped_area_info info;
93006+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
93007
93008 /* requested length too big for entire address space */
93009 if (len > TASK_SIZE - mmap_min_addr)
93010@@ -1907,12 +2174,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
93011 if (flags & MAP_FIXED)
93012 return addr;
93013
93014+#ifdef CONFIG_PAX_RANDMMAP
93015+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
93016+#endif
93017+
93018 /* requesting a specific address */
93019 if (addr) {
93020 addr = PAGE_ALIGN(addr);
93021 vma = find_vma(mm, addr);
93022 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
93023- (!vma || addr + len <= vma->vm_start))
93024+ check_heap_stack_gap(vma, addr, len, offset))
93025 return addr;
93026 }
93027
93028@@ -1921,6 +2192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
93029 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
93030 info.high_limit = mm->mmap_base;
93031 info.align_mask = 0;
93032+ info.threadstack_offset = offset;
93033 addr = vm_unmapped_area(&info);
93034
93035 /*
93036@@ -1933,6 +2205,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
93037 VM_BUG_ON(addr != -ENOMEM);
93038 info.flags = 0;
93039 info.low_limit = TASK_UNMAPPED_BASE;
93040+
93041+#ifdef CONFIG_PAX_RANDMMAP
93042+ if (mm->pax_flags & MF_PAX_RANDMMAP)
93043+ info.low_limit += mm->delta_mmap;
93044+#endif
93045+
93046 info.high_limit = TASK_SIZE;
93047 addr = vm_unmapped_area(&info);
93048 }
93049@@ -2034,6 +2312,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
93050 return vma;
93051 }
93052
93053+#ifdef CONFIG_PAX_SEGMEXEC
93054+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
93055+{
93056+ struct vm_area_struct *vma_m;
93057+
93058+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
93059+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
93060+ BUG_ON(vma->vm_mirror);
93061+ return NULL;
93062+ }
93063+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
93064+ vma_m = vma->vm_mirror;
93065+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
93066+ BUG_ON(vma->vm_file != vma_m->vm_file);
93067+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
93068+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
93069+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
93070+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
93071+ return vma_m;
93072+}
93073+#endif
93074+
93075 /*
93076 * Verify that the stack growth is acceptable and
93077 * update accounting. This is shared with both the
93078@@ -2050,6 +2350,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
93079 return -ENOMEM;
93080
93081 /* Stack limit test */
93082+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
93083 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
93084 return -ENOMEM;
93085
93086@@ -2060,6 +2361,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
93087 locked = mm->locked_vm + grow;
93088 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
93089 limit >>= PAGE_SHIFT;
93090+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
93091 if (locked > limit && !capable(CAP_IPC_LOCK))
93092 return -ENOMEM;
93093 }
93094@@ -2089,37 +2391,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
93095 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
93096 * vma is the last one with address > vma->vm_end. Have to extend vma.
93097 */
93098+#ifndef CONFIG_IA64
93099+static
93100+#endif
93101 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
93102 {
93103 int error;
93104+ bool locknext;
93105
93106 if (!(vma->vm_flags & VM_GROWSUP))
93107 return -EFAULT;
93108
93109+ /* Also guard against wrapping around to address 0. */
93110+ if (address < PAGE_ALIGN(address+1))
93111+ address = PAGE_ALIGN(address+1);
93112+ else
93113+ return -ENOMEM;
93114+
93115 /*
93116 * We must make sure the anon_vma is allocated
93117 * so that the anon_vma locking is not a noop.
93118 */
93119 if (unlikely(anon_vma_prepare(vma)))
93120 return -ENOMEM;
93121+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
93122+ if (locknext && anon_vma_prepare(vma->vm_next))
93123+ return -ENOMEM;
93124 vma_lock_anon_vma(vma);
93125+ if (locknext)
93126+ vma_lock_anon_vma(vma->vm_next);
93127
93128 /*
93129 * vma->vm_start/vm_end cannot change under us because the caller
93130 * is required to hold the mmap_sem in read mode. We need the
93131- * anon_vma lock to serialize against concurrent expand_stacks.
93132- * Also guard against wrapping around to address 0.
93133+ * anon_vma locks to serialize against concurrent expand_stacks
93134+ * and expand_upwards.
93135 */
93136- if (address < PAGE_ALIGN(address+4))
93137- address = PAGE_ALIGN(address+4);
93138- else {
93139- vma_unlock_anon_vma(vma);
93140- return -ENOMEM;
93141- }
93142 error = 0;
93143
93144 /* Somebody else might have raced and expanded it already */
93145- if (address > vma->vm_end) {
93146+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
93147+ error = -ENOMEM;
93148+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
93149 unsigned long size, grow;
93150
93151 size = address - vma->vm_start;
93152@@ -2154,6 +2467,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
93153 }
93154 }
93155 }
93156+ if (locknext)
93157+ vma_unlock_anon_vma(vma->vm_next);
93158 vma_unlock_anon_vma(vma);
93159 khugepaged_enter_vma_merge(vma);
93160 validate_mm(vma->vm_mm);
93161@@ -2168,6 +2483,8 @@ int expand_downwards(struct vm_area_struct *vma,
93162 unsigned long address)
93163 {
93164 int error;
93165+ bool lockprev = false;
93166+ struct vm_area_struct *prev;
93167
93168 /*
93169 * We must make sure the anon_vma is allocated
93170@@ -2181,6 +2498,15 @@ int expand_downwards(struct vm_area_struct *vma,
93171 if (error)
93172 return error;
93173
93174+ prev = vma->vm_prev;
93175+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
93176+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
93177+#endif
93178+ if (lockprev && anon_vma_prepare(prev))
93179+ return -ENOMEM;
93180+ if (lockprev)
93181+ vma_lock_anon_vma(prev);
93182+
93183 vma_lock_anon_vma(vma);
93184
93185 /*
93186@@ -2190,9 +2516,17 @@ int expand_downwards(struct vm_area_struct *vma,
93187 */
93188
93189 /* Somebody else might have raced and expanded it already */
93190- if (address < vma->vm_start) {
93191+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
93192+ error = -ENOMEM;
93193+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
93194 unsigned long size, grow;
93195
93196+#ifdef CONFIG_PAX_SEGMEXEC
93197+ struct vm_area_struct *vma_m;
93198+
93199+ vma_m = pax_find_mirror_vma(vma);
93200+#endif
93201+
93202 size = vma->vm_end - address;
93203 grow = (vma->vm_start - address) >> PAGE_SHIFT;
93204
93205@@ -2217,13 +2551,27 @@ int expand_downwards(struct vm_area_struct *vma,
93206 vma->vm_pgoff -= grow;
93207 anon_vma_interval_tree_post_update_vma(vma);
93208 vma_gap_update(vma);
93209+
93210+#ifdef CONFIG_PAX_SEGMEXEC
93211+ if (vma_m) {
93212+ anon_vma_interval_tree_pre_update_vma(vma_m);
93213+ vma_m->vm_start -= grow << PAGE_SHIFT;
93214+ vma_m->vm_pgoff -= grow;
93215+ anon_vma_interval_tree_post_update_vma(vma_m);
93216+ vma_gap_update(vma_m);
93217+ }
93218+#endif
93219+
93220 spin_unlock(&vma->vm_mm->page_table_lock);
93221
93222+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
93223 perf_event_mmap(vma);
93224 }
93225 }
93226 }
93227 vma_unlock_anon_vma(vma);
93228+ if (lockprev)
93229+ vma_unlock_anon_vma(prev);
93230 khugepaged_enter_vma_merge(vma);
93231 validate_mm(vma->vm_mm);
93232 return error;
93233@@ -2321,6 +2669,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
93234 do {
93235 long nrpages = vma_pages(vma);
93236
93237+#ifdef CONFIG_PAX_SEGMEXEC
93238+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
93239+ vma = remove_vma(vma);
93240+ continue;
93241+ }
93242+#endif
93243+
93244 if (vma->vm_flags & VM_ACCOUNT)
93245 nr_accounted += nrpages;
93246 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
93247@@ -2365,6 +2720,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
93248 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
93249 vma->vm_prev = NULL;
93250 do {
93251+
93252+#ifdef CONFIG_PAX_SEGMEXEC
93253+ if (vma->vm_mirror) {
93254+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
93255+ vma->vm_mirror->vm_mirror = NULL;
93256+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
93257+ vma->vm_mirror = NULL;
93258+ }
93259+#endif
93260+
93261 vma_rb_erase(vma, &mm->mm_rb);
93262 mm->map_count--;
93263 tail_vma = vma;
93264@@ -2390,14 +2755,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93265 struct vm_area_struct *new;
93266 int err = -ENOMEM;
93267
93268+#ifdef CONFIG_PAX_SEGMEXEC
93269+ struct vm_area_struct *vma_m, *new_m = NULL;
93270+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
93271+#endif
93272+
93273 if (is_vm_hugetlb_page(vma) && (addr &
93274 ~(huge_page_mask(hstate_vma(vma)))))
93275 return -EINVAL;
93276
93277+#ifdef CONFIG_PAX_SEGMEXEC
93278+ vma_m = pax_find_mirror_vma(vma);
93279+#endif
93280+
93281 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
93282 if (!new)
93283 goto out_err;
93284
93285+#ifdef CONFIG_PAX_SEGMEXEC
93286+ if (vma_m) {
93287+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
93288+ if (!new_m) {
93289+ kmem_cache_free(vm_area_cachep, new);
93290+ goto out_err;
93291+ }
93292+ }
93293+#endif
93294+
93295 /* most fields are the same, copy all, and then fixup */
93296 *new = *vma;
93297
93298@@ -2410,6 +2794,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93299 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
93300 }
93301
93302+#ifdef CONFIG_PAX_SEGMEXEC
93303+ if (vma_m) {
93304+ *new_m = *vma_m;
93305+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
93306+ new_m->vm_mirror = new;
93307+ new->vm_mirror = new_m;
93308+
93309+ if (new_below)
93310+ new_m->vm_end = addr_m;
93311+ else {
93312+ new_m->vm_start = addr_m;
93313+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
93314+ }
93315+ }
93316+#endif
93317+
93318 err = vma_dup_policy(vma, new);
93319 if (err)
93320 goto out_free_vma;
93321@@ -2429,6 +2829,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93322 else
93323 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
93324
93325+#ifdef CONFIG_PAX_SEGMEXEC
93326+ if (!err && vma_m) {
93327+ struct mempolicy *pol = vma_policy(new);
93328+
93329+ if (anon_vma_clone(new_m, vma_m))
93330+ goto out_free_mpol;
93331+
93332+ mpol_get(pol);
93333+ set_vma_policy(new_m, pol);
93334+
93335+ if (new_m->vm_file)
93336+ get_file(new_m->vm_file);
93337+
93338+ if (new_m->vm_ops && new_m->vm_ops->open)
93339+ new_m->vm_ops->open(new_m);
93340+
93341+ if (new_below)
93342+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
93343+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
93344+ else
93345+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
93346+
93347+ if (err) {
93348+ if (new_m->vm_ops && new_m->vm_ops->close)
93349+ new_m->vm_ops->close(new_m);
93350+ if (new_m->vm_file)
93351+ fput(new_m->vm_file);
93352+ mpol_put(pol);
93353+ }
93354+ }
93355+#endif
93356+
93357 /* Success. */
93358 if (!err)
93359 return 0;
93360@@ -2438,10 +2870,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93361 new->vm_ops->close(new);
93362 if (new->vm_file)
93363 fput(new->vm_file);
93364- unlink_anon_vmas(new);
93365 out_free_mpol:
93366 mpol_put(vma_policy(new));
93367 out_free_vma:
93368+
93369+#ifdef CONFIG_PAX_SEGMEXEC
93370+ if (new_m) {
93371+ unlink_anon_vmas(new_m);
93372+ kmem_cache_free(vm_area_cachep, new_m);
93373+ }
93374+#endif
93375+
93376+ unlink_anon_vmas(new);
93377 kmem_cache_free(vm_area_cachep, new);
93378 out_err:
93379 return err;
93380@@ -2454,6 +2894,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
93381 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
93382 unsigned long addr, int new_below)
93383 {
93384+
93385+#ifdef CONFIG_PAX_SEGMEXEC
93386+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
93387+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
93388+ if (mm->map_count >= sysctl_max_map_count-1)
93389+ return -ENOMEM;
93390+ } else
93391+#endif
93392+
93393 if (mm->map_count >= sysctl_max_map_count)
93394 return -ENOMEM;
93395
93396@@ -2465,11 +2914,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
93397 * work. This now handles partial unmappings.
93398 * Jeremy Fitzhardinge <jeremy@goop.org>
93399 */
93400+#ifdef CONFIG_PAX_SEGMEXEC
93401 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
93402 {
93403+ int ret = __do_munmap(mm, start, len);
93404+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
93405+ return ret;
93406+
93407+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
93408+}
93409+
93410+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
93411+#else
93412+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
93413+#endif
93414+{
93415 unsigned long end;
93416 struct vm_area_struct *vma, *prev, *last;
93417
93418+ /*
93419+ * mm->mmap_sem is required to protect against another thread
93420+ * changing the mappings in case we sleep.
93421+ */
93422+ verify_mm_writelocked(mm);
93423+
93424 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
93425 return -EINVAL;
93426
93427@@ -2544,6 +3012,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
93428 /* Fix up all other VM information */
93429 remove_vma_list(mm, vma);
93430
93431+ track_exec_limit(mm, start, end, 0UL);
93432+
93433 return 0;
93434 }
93435
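
do_munmap() is wrapped for SEGMEXEC: the real work moves into __do_munmap(), and when the mm is mirrored the same range is unmapped a second time at start + SEGMEXEC_TASK_SIZE so the executable mirror never outlives its lower half (track_exec_limit() then shrinks the exec-tracking window). The shape of the wrapper, as a standalone sketch with the assumed i386 split size:

	#include <stdio.h>

	#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed i386 split */

	static int do_unmap_one(unsigned long start, unsigned long len)
	{
		printf("unmap [%#lx, %#lx)\n", start, start + len);
		return 0;
	}

	/* cf. the patched do_munmap(): unmap the range, then its mirror */
	static int do_unmap_mirrored(unsigned long start, unsigned long len, int segmexec)
	{
		int ret = do_unmap_one(start, len);

		if (ret || !segmexec)
			return ret;
		return do_unmap_one(start + SEGMEXEC_TASK_SIZE, len);
	}

	int main(void)
	{
		return do_unmap_mirrored(0x08048000UL, 0x1000, 1);
	}
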
93436@@ -2552,6 +3022,13 @@ int vm_munmap(unsigned long start, size_t len)
93437 int ret;
93438 struct mm_struct *mm = current->mm;
93439
93440+
93441+#ifdef CONFIG_PAX_SEGMEXEC
93442+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
93443+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
93444+ return -EINVAL;
93445+#endif
93446+
93447 down_write(&mm->mmap_sem);
93448 ret = do_munmap(mm, start, len);
93449 up_write(&mm->mmap_sem);
93450@@ -2565,16 +3042,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
93451 return vm_munmap(addr, len);
93452 }
93453
93454-static inline void verify_mm_writelocked(struct mm_struct *mm)
93455-{
93456-#ifdef CONFIG_DEBUG_VM
93457- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
93458- WARN_ON(1);
93459- up_read(&mm->mmap_sem);
93460- }
93461-#endif
93462-}
93463-
93464 /*
93465 * this is really a simplified "do_mmap". it only handles
93466 * anonymous maps. eventually we may be able to do some
93467@@ -2588,6 +3055,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93468 struct rb_node ** rb_link, * rb_parent;
93469 pgoff_t pgoff = addr >> PAGE_SHIFT;
93470 int error;
93471+ unsigned long charged;
93472
93473 len = PAGE_ALIGN(len);
93474 if (!len)
93475@@ -2595,16 +3063,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93476
93477 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
93478
93479+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
93480+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
93481+ flags &= ~VM_EXEC;
93482+
93483+#ifdef CONFIG_PAX_MPROTECT
93484+ if (mm->pax_flags & MF_PAX_MPROTECT)
93485+ flags &= ~VM_MAYEXEC;
93486+#endif
93487+
93488+ }
93489+#endif
93490+
93491 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
93492 if (error & ~PAGE_MASK)
93493 return error;
93494
93495+ charged = len >> PAGE_SHIFT;
93496+
93497 /*
93498 * mlock MCL_FUTURE?
93499 */
93500 if (mm->def_flags & VM_LOCKED) {
93501 unsigned long locked, lock_limit;
93502- locked = len >> PAGE_SHIFT;
93503+ locked = charged;
93504 locked += mm->locked_vm;
93505 lock_limit = rlimit(RLIMIT_MEMLOCK);
93506 lock_limit >>= PAGE_SHIFT;
93507@@ -2621,21 +3103,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93508 /*
93509 * Clear old maps. this also does some error checking for us
93510 */
93511- munmap_back:
93512 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
93513 if (do_munmap(mm, addr, len))
93514 return -ENOMEM;
93515- goto munmap_back;
93516+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
93517 }
93518
93519 /* Check against address space limits *after* clearing old maps... */
93520- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
93521+ if (!may_expand_vm(mm, charged))
93522 return -ENOMEM;
93523
93524 if (mm->map_count > sysctl_max_map_count)
93525 return -ENOMEM;
93526
93527- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
93528+ if (security_vm_enough_memory_mm(mm, charged))
93529 return -ENOMEM;
93530
93531 /* Can we just expand an old private anonymous mapping? */
93532@@ -2649,7 +3130,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93533 */
93534 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
93535 if (!vma) {
93536- vm_unacct_memory(len >> PAGE_SHIFT);
93537+ vm_unacct_memory(charged);
93538 return -ENOMEM;
93539 }
93540
93541@@ -2663,10 +3144,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
93542 vma_link(mm, vma, prev, rb_link, rb_parent);
93543 out:
93544 perf_event_mmap(vma);
93545- mm->total_vm += len >> PAGE_SHIFT;
93546+ mm->total_vm += charged;
93547 if (flags & VM_LOCKED)
93548- mm->locked_vm += (len >> PAGE_SHIFT);
93549+ mm->locked_vm += charged;
93550 vma->vm_flags |= VM_SOFTDIRTY;
93551+ track_exec_limit(mm, addr, addr + len, flags);
93552 return addr;
93553 }
93554
93555@@ -2728,6 +3210,7 @@ void exit_mmap(struct mm_struct *mm)
93556 while (vma) {
93557 if (vma->vm_flags & VM_ACCOUNT)
93558 nr_accounted += vma_pages(vma);
93559+ vma->vm_mirror = NULL;
93560 vma = remove_vma(vma);
93561 }
93562 vm_unacct_memory(nr_accounted);
93563@@ -2745,6 +3228,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
93564 struct vm_area_struct *prev;
93565 struct rb_node **rb_link, *rb_parent;
93566
93567+#ifdef CONFIG_PAX_SEGMEXEC
93568+ struct vm_area_struct *vma_m = NULL;
93569+#endif
93570+
93571+ if (security_mmap_addr(vma->vm_start))
93572+ return -EPERM;
93573+
93574 /*
93575 * The vm_pgoff of a purely anonymous vma should be irrelevant
93576 * until its first write fault, when page's anon_vma and index
93577@@ -2768,7 +3258,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
93578 security_vm_enough_memory_mm(mm, vma_pages(vma)))
93579 return -ENOMEM;
93580
93581+#ifdef CONFIG_PAX_SEGMEXEC
93582+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
93583+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
93584+ if (!vma_m)
93585+ return -ENOMEM;
93586+ }
93587+#endif
93588+
93589 vma_link(mm, vma, prev, rb_link, rb_parent);
93590+
93591+#ifdef CONFIG_PAX_SEGMEXEC
93592+ if (vma_m)
93593+ BUG_ON(pax_mirror_vma(vma_m, vma));
93594+#endif
93595+
93596 return 0;
93597 }
93598
93599@@ -2787,6 +3291,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
93600 struct rb_node **rb_link, *rb_parent;
93601 bool faulted_in_anon_vma = true;
93602
93603+ BUG_ON(vma->vm_mirror);
93604+
93605 /*
93606 * If anonymous vma has not yet been faulted, update new pgoff
93607 * to match new location, to increase its chance of merging.
93608@@ -2851,6 +3357,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
93609 return NULL;
93610 }
93611
93612+#ifdef CONFIG_PAX_SEGMEXEC
93613+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
93614+{
93615+ struct vm_area_struct *prev_m;
93616+ struct rb_node **rb_link_m, *rb_parent_m;
93617+ struct mempolicy *pol_m;
93618+
93619+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
93620+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
93621+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
93622+ *vma_m = *vma;
93623+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
93624+ if (anon_vma_clone(vma_m, vma))
93625+ return -ENOMEM;
93626+ pol_m = vma_policy(vma_m);
93627+ mpol_get(pol_m);
93628+ set_vma_policy(vma_m, pol_m);
93629+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
93630+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
93631+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
93632+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
93633+ if (vma_m->vm_file)
93634+ get_file(vma_m->vm_file);
93635+ if (vma_m->vm_ops && vma_m->vm_ops->open)
93636+ vma_m->vm_ops->open(vma_m);
93637+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
93638+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
93639+ vma_m->vm_mirror = vma;
93640+ vma->vm_mirror = vma_m;
93641+ return 0;
93642+}
93643+#endif
93644+
93645 /*
93646 * Return true if the calling process may expand its vm space by the passed
93647 * number of pages
93648@@ -2862,6 +3401,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
93649
93650 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
93651
93652+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
93653 if (cur + npages > lim)
93654 return 0;
93655 return 1;
93656@@ -2932,6 +3472,22 @@ int install_special_mapping(struct mm_struct *mm,
93657 vma->vm_start = addr;
93658 vma->vm_end = addr + len;
93659
93660+#ifdef CONFIG_PAX_MPROTECT
93661+ if (mm->pax_flags & MF_PAX_MPROTECT) {
93662+#ifndef CONFIG_PAX_MPROTECT_COMPAT
93663+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
93664+ return -EPERM;
93665+ if (!(vm_flags & VM_EXEC))
93666+ vm_flags &= ~VM_MAYEXEC;
93667+#else
93668+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
93669+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
93670+#endif
93671+ else
93672+ vm_flags &= ~VM_MAYWRITE;
93673+ }
93674+#endif
93675+
93676 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
93677 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
93678
93679diff --git a/mm/mprotect.c b/mm/mprotect.c
93680index bb53a65..249c052 100644
93681--- a/mm/mprotect.c
93682+++ b/mm/mprotect.c
93683@@ -23,10 +23,18 @@
93684 #include <linux/mmu_notifier.h>
93685 #include <linux/migrate.h>
93686 #include <linux/perf_event.h>
93687+#include <linux/sched/sysctl.h>
93688+
93689+#ifdef CONFIG_PAX_MPROTECT
93690+#include <linux/elf.h>
93691+#include <linux/binfmts.h>
93692+#endif
93693+
93694 #include <asm/uaccess.h>
93695 #include <asm/pgtable.h>
93696 #include <asm/cacheflush.h>
93697 #include <asm/tlbflush.h>
93698+#include <asm/mmu_context.h>
93699
93700 #ifndef pgprot_modify
93701 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
93702@@ -222,6 +230,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
93703 return pages;
93704 }
93705
93706+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
93707+/* called while holding the mmap semaphore for writing, except for stack expansion */
93708+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
93709+{
93710+ unsigned long oldlimit, newlimit = 0UL;
93711+
93712+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
93713+ return;
93714+
93715+ spin_lock(&mm->page_table_lock);
93716+ oldlimit = mm->context.user_cs_limit;
93717+ if ((prot & VM_EXEC) && oldlimit < end)
93718+ /* USER_CS limit moved up */
93719+ newlimit = end;
93720+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
93721+ /* USER_CS limit moved down */
93722+ newlimit = start;
93723+
93724+ if (newlimit) {
93725+ mm->context.user_cs_limit = newlimit;
93726+
93727+#ifdef CONFIG_SMP
93728+ wmb();
93729+ cpus_clear(mm->context.cpu_user_cs_mask);
93730+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
93731+#endif
93732+
93733+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
93734+ }
93735+ spin_unlock(&mm->page_table_lock);
93736+ if (newlimit == end) {
93737+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
93738+
93739+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
93740+ if (is_vm_hugetlb_page(vma))
93741+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
93742+ else
93743+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
93744+ }
93745+}
93746+#endif
93747+
93748 int
93749 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
93750 unsigned long start, unsigned long end, unsigned long newflags)
93751@@ -234,11 +284,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
93752 int error;
93753 int dirty_accountable = 0;
93754
93755+#ifdef CONFIG_PAX_SEGMEXEC
93756+ struct vm_area_struct *vma_m = NULL;
93757+ unsigned long start_m, end_m;
93758+
93759+ start_m = start + SEGMEXEC_TASK_SIZE;
93760+ end_m = end + SEGMEXEC_TASK_SIZE;
93761+#endif
93762+
93763 if (newflags == oldflags) {
93764 *pprev = vma;
93765 return 0;
93766 }
93767
93768+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
93769+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
93770+
93771+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
93772+ return -ENOMEM;
93773+
93774+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
93775+ return -ENOMEM;
93776+ }
93777+
93778 /*
93779 * If we make a private mapping writable we increase our commit;
93780 * but (without finer accounting) cannot reduce our commit if we
93781@@ -255,6 +323,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
93782 }
93783 }
93784
93785+#ifdef CONFIG_PAX_SEGMEXEC
93786+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
93787+ if (start != vma->vm_start) {
93788+ error = split_vma(mm, vma, start, 1);
93789+ if (error)
93790+ goto fail;
93791+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
93792+ *pprev = (*pprev)->vm_next;
93793+ }
93794+
93795+ if (end != vma->vm_end) {
93796+ error = split_vma(mm, vma, end, 0);
93797+ if (error)
93798+ goto fail;
93799+ }
93800+
93801+ if (pax_find_mirror_vma(vma)) {
93802+ error = __do_munmap(mm, start_m, end_m - start_m);
93803+ if (error)
93804+ goto fail;
93805+ } else {
93806+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
93807+ if (!vma_m) {
93808+ error = -ENOMEM;
93809+ goto fail;
93810+ }
93811+ vma->vm_flags = newflags;
93812+ error = pax_mirror_vma(vma_m, vma);
93813+ if (error) {
93814+ vma->vm_flags = oldflags;
93815+ goto fail;
93816+ }
93817+ }
93818+ }
93819+#endif
93820+
93821 /*
93822 * First try to merge with previous and/or next vma.
93823 */
93824@@ -285,9 +389,21 @@ success:
93825 * vm_flags and vm_page_prot are protected by the mmap_sem
93826 * held in write mode.
93827 */
93828+
93829+#ifdef CONFIG_PAX_SEGMEXEC
93830+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
93831+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
93832+#endif
93833+
93834 vma->vm_flags = newflags;
93835+
93836+#ifdef CONFIG_PAX_MPROTECT
93837+ if (mm->binfmt && mm->binfmt->handle_mprotect)
93838+ mm->binfmt->handle_mprotect(vma, newflags);
93839+#endif
93840+
93841 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
93842- vm_get_page_prot(newflags));
93843+ vm_get_page_prot(vma->vm_flags));
93844
93845 if (vma_wants_writenotify(vma)) {
93846 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
93847@@ -326,6 +442,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93848 end = start + len;
93849 if (end <= start)
93850 return -ENOMEM;
93851+
93852+#ifdef CONFIG_PAX_SEGMEXEC
93853+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
93854+ if (end > SEGMEXEC_TASK_SIZE)
93855+ return -EINVAL;
93856+ } else
93857+#endif
93858+
93859+ if (end > TASK_SIZE)
93860+ return -EINVAL;
93861+
93862 if (!arch_validate_prot(prot))
93863 return -EINVAL;
93864
93865@@ -333,7 +460,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93866 /*
93867 * Does the application expect PROT_READ to imply PROT_EXEC:
93868 */
93869- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
93870+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
93871 prot |= PROT_EXEC;
93872
93873 vm_flags = calc_vm_prot_bits(prot);
93874@@ -365,6 +492,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93875 if (start > vma->vm_start)
93876 prev = vma;
93877
93878+#ifdef CONFIG_PAX_MPROTECT
93879+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
93880+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
93881+#endif
93882+
93883 for (nstart = start ; ; ) {
93884 unsigned long newflags;
93885
93886@@ -375,6 +507,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93887
93888 /* newflags >> 4 shift VM_MAY% in place of VM_% */
93889 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
93890+ if (prot & (PROT_WRITE | PROT_EXEC))
93891+ gr_log_rwxmprotect(vma);
93892+
93893+ error = -EACCES;
93894+ goto out;
93895+ }
93896+
93897+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
93898 error = -EACCES;
93899 goto out;
93900 }
93901@@ -389,6 +529,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
93902 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
93903 if (error)
93904 goto out;
93905+
93906+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
93907+
93908 nstart = tmp;
93909
93910 if (nstart < prev->vm_end)
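
track_exec_limit() above emulates per-page NX on CPUs without hardware NX by tracking the highest executable address as the USER_CS segment limit. The update rule reduces to a small piece of arithmetic; a hedged userspace rendition, with VM_EXEC and the sample addresses as local stand-ins:

#include <assert.h>

#define VM_EXEC 0x4UL

static unsigned long update_limit(unsigned long oldlimit,
				  unsigned long start, unsigned long end,
				  unsigned long prot)
{
	if ((prot & VM_EXEC) && oldlimit < end)
		return end;      /* limit moves up to cover a new exec range */
	if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
		return start;    /* exec removed at the top: limit drops */
	return oldlimit;         /* unchanged otherwise */
}

int main(void)
{
	/* making [0x8000, 0x9000) executable raises a 0x4000 limit */
	assert(update_limit(0x4000, 0x8000, 0x9000, VM_EXEC) == 0x9000);
	/* removing exec on the range containing the limit lowers it */
	assert(update_limit(0x9000, 0x8000, 0x9000, 0) == 0x8000);
	/* removing exec well below the limit leaves it alone */
	assert(update_limit(0x9000, 0x1000, 0x2000, 0) == 0x9000);
	return 0;
}
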
93911diff --git a/mm/mremap.c b/mm/mremap.c
93912index 0843feb..4f5b2e6 100644
93913--- a/mm/mremap.c
93914+++ b/mm/mremap.c
93915@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
93916 continue;
93917 pte = ptep_get_and_clear(mm, old_addr, old_pte);
93918 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
93919+
93920+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
93921+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
93922+ pte = pte_exprotect(pte);
93923+#endif
93924+
93925 pte = move_soft_dirty_pte(pte);
93926 set_pte_at(mm, new_addr, new_pte, pte);
93927 }
93928@@ -337,6 +343,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
93929 if (is_vm_hugetlb_page(vma))
93930 goto Einval;
93931
93932+#ifdef CONFIG_PAX_SEGMEXEC
93933+ if (pax_find_mirror_vma(vma))
93934+ goto Einval;
93935+#endif
93936+
93937 /* We can't remap across vm area boundaries */
93938 if (old_len > vma->vm_end - addr)
93939 goto Efault;
93940@@ -392,20 +403,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
93941 unsigned long ret = -EINVAL;
93942 unsigned long charged = 0;
93943 unsigned long map_flags;
93944+ unsigned long pax_task_size = TASK_SIZE;
93945
93946 if (new_addr & ~PAGE_MASK)
93947 goto out;
93948
93949- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
93950+#ifdef CONFIG_PAX_SEGMEXEC
93951+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
93952+ pax_task_size = SEGMEXEC_TASK_SIZE;
93953+#endif
93954+
93955+ pax_task_size -= PAGE_SIZE;
93956+
93957+ if (new_len > pax_task_size || new_addr > pax_task_size - new_len)
93958 goto out;
93959
93960 /* Check if the location we're moving into overlaps the
93961 * old location at all, and fail if it does.
93962 */
93963- if ((new_addr <= addr) && (new_addr+new_len) > addr)
93964- goto out;
93965-
93966- if ((addr <= new_addr) && (addr+old_len) > new_addr)
93967+ if (addr + old_len > new_addr && new_addr + new_len > addr)
93968 goto out;
93969
93970 ret = do_munmap(mm, new_addr, new_len);
93971@@ -474,6 +490,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93972 unsigned long ret = -EINVAL;
93973 unsigned long charged = 0;
93974 bool locked = false;
93975+ unsigned long pax_task_size = TASK_SIZE;
93976
93977 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
93978 return ret;
93979@@ -495,6 +512,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93980 if (!new_len)
93981 return ret;
93982
93983+#ifdef CONFIG_PAX_SEGMEXEC
93984+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
93985+ pax_task_size = SEGMEXEC_TASK_SIZE;
93986+#endif
93987+
93988+ pax_task_size -= PAGE_SIZE;
93989+
93990+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
93991+ old_len > pax_task_size || addr > pax_task_size-old_len)
93992+ return ret;
93993+
93994 down_write(&current->mm->mmap_sem);
93995
93996 if (flags & MREMAP_FIXED) {
93997@@ -545,6 +573,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
93998 new_addr = addr;
93999 }
94000 ret = addr;
94001+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
94002 goto out;
94003 }
94004 }
94005@@ -568,7 +597,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
94006 goto out;
94007 }
94008
94009+ map_flags = vma->vm_flags;
94010 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
94011+ if (!(ret & ~PAGE_MASK)) {
94012+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
94013+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
94014+ }
94015 }
94016 out:
94017 if (ret & ~PAGE_MASK)
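
The mremap_to() hunk folds the two original overlap conditionals into the canonical interval-intersection test addr + old_len > new_addr && new_addr + new_len > addr. A brute-force sketch confirming the two formulations agree for non-zero lengths (they differ only for degenerate zero-length ranges, which mremap rejects earlier; the ranges below are arbitrary test inputs):

#include <assert.h>

static int old_check(unsigned long addr, unsigned long old_len,
		     unsigned long new_addr, unsigned long new_len)
{
	if (new_addr <= addr && new_addr + new_len > addr)
		return 1;
	if (addr <= new_addr && addr + old_len > new_addr)
		return 1;
	return 0;
}

static int new_check(unsigned long addr, unsigned long old_len,
		     unsigned long new_addr, unsigned long new_len)
{
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
	for (unsigned long a = 0; a < 8; a++)
		for (unsigned long la = 1; la < 8; la++)
			for (unsigned long b = 0; b < 8; b++)
				for (unsigned long lb = 1; lb < 8; lb++)
					assert(old_check(a, la, b, lb) ==
					       new_check(a, la, b, lb));
	return 0;
}
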
94018diff --git a/mm/nommu.c b/mm/nommu.c
94019index fec093a..8162f74 100644
94020--- a/mm/nommu.c
94021+++ b/mm/nommu.c
94022@@ -64,7 +64,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
94023 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
94024 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
94025 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
94026-int heap_stack_gap = 0;
94027
94028 atomic_long_t mmap_pages_allocated;
94029
94030@@ -844,15 +843,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
94031 EXPORT_SYMBOL(find_vma);
94032
94033 /*
94034- * find a VMA
94035- * - we don't extend stack VMAs under NOMMU conditions
94036- */
94037-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
94038-{
94039- return find_vma(mm, addr);
94040-}
94041-
94042-/*
94043 * expand a stack to a given address
94044 * - not supported under NOMMU conditions
94045 */
94046@@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
94047
94048 /* most fields are the same, copy all, and then fixup */
94049 *new = *vma;
94050+ INIT_LIST_HEAD(&new->anon_vma_chain);
94051 *region = *vma->vm_region;
94052 new->vm_region = region;
94053
94054@@ -1992,8 +1983,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
94055 }
94056 EXPORT_SYMBOL(generic_file_remap_pages);
94057
94058-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
94059- unsigned long addr, void *buf, int len, int write)
94060+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
94061+ unsigned long addr, void *buf, size_t len, int write)
94062 {
94063 struct vm_area_struct *vma;
94064
94065@@ -2034,8 +2025,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
94066 *
94067 * The caller must hold a reference on @mm.
94068 */
94069-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
94070- void *buf, int len, int write)
94071+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
94072+ void *buf, size_t len, int write)
94073 {
94074 return __access_remote_vm(NULL, mm, addr, buf, len, write);
94075 }
94076@@ -2044,7 +2035,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
94077 * Access another process' address space.
94078 * - source/target buffer must be kernel space
94079 */
94080-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
94081+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
94082 {
94083 struct mm_struct *mm;
94084
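
The nommu access_*_vm() prototypes above are widened from int to size_t/ssize_t for the length. A toy illustrating the hazard the change removes: a length above INT_MAX is silently corrupted when squeezed through an int parameter. demo_copy_* are illustrative stand-ins, and the out-of-range conversion relies on the usual two's-complement wraparound:

#include <limits.h>
#include <stdio.h>

/* old prototype shape: length squeezed through an int */
static long demo_copy_int(int len)
{
	return len < 0 ? -1 : len;   /* a negative length is already garbage */
}

/* patched prototype shape: length carried as size_t end to end */
static size_t demo_copy_size(size_t len)
{
	return len;
}

int main(void)
{
	size_t big = (size_t)INT_MAX + 1;

	/* the int conversion wraps negative on two's-complement targets,
	 * corrupting the request before any bounds check can run */
	printf("int path:    %ld\n", demo_copy_int((int)big));
	printf("size_t path: %zu\n", demo_copy_size(big));
	return 0;
}
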
94085diff --git a/mm/page-writeback.c b/mm/page-writeback.c
94086index 7106cb1..0805f48 100644
94087--- a/mm/page-writeback.c
94088+++ b/mm/page-writeback.c
94089@@ -685,7 +685,7 @@ static inline long long pos_ratio_polynom(unsigned long setpoint,
94090 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
94091 * - the bdi dirty thresh drops quickly due to change of JBOD workload
94092 */
94093-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
94094+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
94095 unsigned long thresh,
94096 unsigned long bg_thresh,
94097 unsigned long dirty,
94098diff --git a/mm/page_alloc.c b/mm/page_alloc.c
94099index 56f268d..4d35ec4 100644
94100--- a/mm/page_alloc.c
94101+++ b/mm/page_alloc.c
94102@@ -61,6 +61,7 @@
94103 #include <linux/page-debug-flags.h>
94104 #include <linux/hugetlb.h>
94105 #include <linux/sched/rt.h>
94106+#include <linux/random.h>
94107
94108 #include <asm/sections.h>
94109 #include <asm/tlbflush.h>
94110@@ -354,7 +355,7 @@ out:
94111 * This usage means that zero-order pages may not be compound.
94112 */
94113
94114-static void free_compound_page(struct page *page)
94115+void free_compound_page(struct page *page)
94116 {
94117 __free_pages_ok(page, compound_order(page));
94118 }
94119@@ -712,6 +713,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
94120 int i;
94121 int bad = 0;
94122
94123+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94124+ unsigned long index = 1UL << order;
94125+#endif
94126+
94127 trace_mm_page_free(page, order);
94128 kmemcheck_free_shadow(page, order);
94129
94130@@ -728,6 +733,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
94131 debug_check_no_obj_freed(page_address(page),
94132 PAGE_SIZE << order);
94133 }
94134+
94135+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94136+ for (; index; --index)
94137+ sanitize_highpage(page + index - 1);
94138+#endif
94139+
94140 arch_free_page(page, order);
94141 kernel_map_pages(page, 1 << order, 0);
94142
94143@@ -750,6 +761,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
94144 local_irq_restore(flags);
94145 }
94146
94147+#ifdef CONFIG_PAX_LATENT_ENTROPY
94148+bool __meminitdata extra_latent_entropy;
94149+
94150+static int __init setup_pax_extra_latent_entropy(char *str)
94151+{
94152+ extra_latent_entropy = true;
94153+ return 0;
94154+}
94155+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
94156+
94157+volatile u64 latent_entropy __latent_entropy;
94158+EXPORT_SYMBOL(latent_entropy);
94159+#endif
94160+
94161 void __init __free_pages_bootmem(struct page *page, unsigned int order)
94162 {
94163 unsigned int nr_pages = 1 << order;
94164@@ -765,6 +790,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
94165 __ClearPageReserved(p);
94166 set_page_count(p, 0);
94167
94168+#ifdef CONFIG_PAX_LATENT_ENTROPY
94169+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
94170+ u64 hash = 0;
94171+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
94172+ const u64 *data = lowmem_page_address(page);
94173+
94174+ for (index = 0; index < end; index++)
94175+ hash ^= hash + data[index];
94176+ latent_entropy ^= hash;
94177+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
94178+ }
94179+#endif
94180+
94181 page_zone(page)->managed_pages += nr_pages;
94182 set_page_refcounted(page);
94183 __free_pages(page, order);
94184@@ -870,8 +908,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
94185 arch_alloc_page(page, order);
94186 kernel_map_pages(page, 1 << order, 1);
94187
94188+#ifndef CONFIG_PAX_MEMORY_SANITIZE
94189 if (gfp_flags & __GFP_ZERO)
94190 prep_zero_page(page, order, gfp_flags);
94191+#endif
94192
94193 if (order && (gfp_flags & __GFP_COMP))
94194 prep_compound_page(page, order);
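
The latent-entropy hunk above folds the contents of low boot-time pages into a 64-bit value with the step hash ^= hash + data[index] before feeding the result to add_device_randomness(). A userspace rendition of just that mixing loop; the buffer contents below are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_WORDS (4096 / sizeof(uint64_t))

static uint64_t fold_page(const uint64_t *data)
{
	uint64_t hash = 0;

	for (size_t i = 0; i < PAGE_WORDS; i++)
		hash ^= hash + data[i];   /* same step as the patch */
	return hash;
}

int main(void)
{
	uint64_t page[PAGE_WORDS];

	memset(page, 0xa5, sizeof(page));
	page[7] = 0xdeadbeef;             /* arbitrary "leftover boot data" */
	printf("folded: %#llx\n", (unsigned long long)fold_page(page));
	return 0;
}
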
94195diff --git a/mm/page_io.c b/mm/page_io.c
94196index 8c79a47..a689e0d 100644
94197--- a/mm/page_io.c
94198+++ b/mm/page_io.c
94199@@ -260,7 +260,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
94200 struct file *swap_file = sis->swap_file;
94201 struct address_space *mapping = swap_file->f_mapping;
94202 struct iovec iov = {
94203- .iov_base = kmap(page),
94204+ .iov_base = (void __force_user *)kmap(page),
94205 .iov_len = PAGE_SIZE,
94206 };
94207
94208diff --git a/mm/percpu.c b/mm/percpu.c
94209index 0d10def..6dc822d 100644
94210--- a/mm/percpu.c
94211+++ b/mm/percpu.c
94212@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
94213 static unsigned int pcpu_high_unit_cpu __read_mostly;
94214
94215 /* the address of the first chunk which starts with the kernel static area */
94216-void *pcpu_base_addr __read_mostly;
94217+void *pcpu_base_addr __read_only;
94218 EXPORT_SYMBOL_GPL(pcpu_base_addr);
94219
94220 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
94221diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
94222index fd26d04..0cea1b0 100644
94223--- a/mm/process_vm_access.c
94224+++ b/mm/process_vm_access.c
94225@@ -13,6 +13,7 @@
94226 #include <linux/uio.h>
94227 #include <linux/sched.h>
94228 #include <linux/highmem.h>
94229+#include <linux/security.h>
94230 #include <linux/ptrace.h>
94231 #include <linux/slab.h>
94232 #include <linux/syscalls.h>
94233@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
94234 size_t iov_l_curr_offset = 0;
94235 ssize_t iov_len;
94236
94237+ return -ENOSYS; // PaX: until properly audited
94238+
94239 /*
94240 * Work out how many pages of struct pages we're going to need
94241 * when eventually calling get_user_pages
94242 */
94243 for (i = 0; i < riovcnt; i++) {
94244 iov_len = rvec[i].iov_len;
94245- if (iov_len > 0) {
94246- nr_pages_iov = ((unsigned long)rvec[i].iov_base
94247- + iov_len)
94248- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
94249- / PAGE_SIZE + 1;
94250- nr_pages = max(nr_pages, nr_pages_iov);
94251- }
94252+ if (iov_len <= 0)
94253+ continue;
94254+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
94255+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
94256+ nr_pages = max(nr_pages, nr_pages_iov);
94257 }
94258
94259 if (nr_pages == 0)
94260@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
94261 goto free_proc_pages;
94262 }
94263
94264+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
94265+ rc = -EPERM;
94266+ goto put_task_struct;
94267+ }
94268+
94269 mm = mm_access(task, PTRACE_MODE_ATTACH);
94270 if (!mm || IS_ERR(mm)) {
94271 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
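
The restructured iovec loop above keeps the same worst-case page-count estimate: the pages spanned by [base, base+len) are bounded by (base+len)/PAGE_SIZE - base/PAGE_SIZE + 1, which is deliberately one page high when the end is page-aligned. A small check with arbitrary values:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	printf("%lu\n", pages_spanned(0x1000, 1));          /* 1 */
	printf("%lu\n", pages_spanned(0x1fff, 2));          /* 2: straddles a boundary */
	printf("%lu\n", pages_spanned(0x1000, PAGE_SIZE));  /* 2: the over-estimate */
	return 0;
}
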
94272diff --git a/mm/rmap.c b/mm/rmap.c
94273index 068522d..f539f21 100644
94274--- a/mm/rmap.c
94275+++ b/mm/rmap.c
94276@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
94277 struct anon_vma *anon_vma = vma->anon_vma;
94278 struct anon_vma_chain *avc;
94279
94280+#ifdef CONFIG_PAX_SEGMEXEC
94281+ struct anon_vma_chain *avc_m = NULL;
94282+#endif
94283+
94284 might_sleep();
94285 if (unlikely(!anon_vma)) {
94286 struct mm_struct *mm = vma->vm_mm;
94287@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
94288 if (!avc)
94289 goto out_enomem;
94290
94291+#ifdef CONFIG_PAX_SEGMEXEC
94292+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
94293+ if (!avc_m)
94294+ goto out_enomem_free_avc;
94295+#endif
94296+
94297 anon_vma = find_mergeable_anon_vma(vma);
94298 allocated = NULL;
94299 if (!anon_vma) {
94300@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
94301 /* page_table_lock to protect against threads */
94302 spin_lock(&mm->page_table_lock);
94303 if (likely(!vma->anon_vma)) {
94304+
94305+#ifdef CONFIG_PAX_SEGMEXEC
94306+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
94307+
94308+ if (vma_m) {
94309+ BUG_ON(vma_m->anon_vma);
94310+ vma_m->anon_vma = anon_vma;
94311+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
94312+ avc_m = NULL;
94313+ }
94314+#endif
94315+
94316 vma->anon_vma = anon_vma;
94317 anon_vma_chain_link(vma, avc, anon_vma);
94318 allocated = NULL;
94319@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
94320
94321 if (unlikely(allocated))
94322 put_anon_vma(allocated);
94323+
94324+#ifdef CONFIG_PAX_SEGMEXEC
94325+ if (unlikely(avc_m))
94326+ anon_vma_chain_free(avc_m);
94327+#endif
94328+
94329 if (unlikely(avc))
94330 anon_vma_chain_free(avc);
94331 }
94332 return 0;
94333
94334 out_enomem_free_avc:
94335+
94336+#ifdef CONFIG_PAX_SEGMEXEC
94337+ if (avc_m)
94338+ anon_vma_chain_free(avc_m);
94339+#endif
94340+
94341 anon_vma_chain_free(avc);
94342 out_enomem:
94343 return -ENOMEM;
94344@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
94345 * Attach the anon_vmas from src to dst.
94346 * Returns 0 on success, -ENOMEM on failure.
94347 */
94348-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
94349+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
94350 {
94351 struct anon_vma_chain *avc, *pavc;
94352 struct anon_vma *root = NULL;
94353@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
94354 * the corresponding VMA in the parent process is attached to.
94355 * Returns 0 on success, non-zero on failure.
94356 */
94357-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
94358+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
94359 {
94360 struct anon_vma_chain *avc;
94361 struct anon_vma *anon_vma;
94362@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
94363 void __init anon_vma_init(void)
94364 {
94365 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
94366- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
94367- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
94368+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
94369+ anon_vma_ctor);
94370+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
94371+ SLAB_PANIC|SLAB_NO_SANITIZE);
94372 }
94373
94374 /*
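
The anon_vma_prepare() hunks above follow a standard kernel pattern: the possibly-unneeded avc_m is allocated with GFP_KERNEL before the page_table_lock spinlock is taken, committed under the lock only when a mirror vma exists, and freed afterwards if it went unused. A sketch with a pthread mutex standing in for the spinlock; all structures are simplified stand-ins:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* page_table_lock stand-in */
static void *committed;  /* models the avc_m linked under the lock */
static int have_mirror;  /* models pax_find_mirror_vma() returning non-NULL */

static int prepare(void)
{
	void *avc_m = malloc(64);   /* GFP_KERNEL step: may block, so done unlocked */

	if (!avc_m)
		return -1;

	pthread_mutex_lock(&ptl);   /* spinlock region: no sleeping allocation here */
	if (have_mirror) {
		committed = avc_m;  /* consumed: anon_vma_chain_link() analogue */
		avc_m = NULL;
	}
	pthread_mutex_unlock(&ptl);

	free(avc_m);                /* free(NULL) is a no-op when it was consumed */
	return 0;
}

int main(void)
{
	prepare();                  /* no mirror: pre-allocation is freed */
	have_mirror = 1;
	prepare();                  /* mirror present: pre-allocation committed */
	free(committed);
	return 0;
}
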
94375diff --git a/mm/shmem.c b/mm/shmem.c
94376index 902a148..58f9d59 100644
94377--- a/mm/shmem.c
94378+++ b/mm/shmem.c
94379@@ -33,7 +33,7 @@
94380 #include <linux/swap.h>
94381 #include <linux/aio.h>
94382
94383-static struct vfsmount *shm_mnt;
94384+struct vfsmount *shm_mnt;
94385
94386 #ifdef CONFIG_SHMEM
94387 /*
94388@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
94389 #define BOGO_DIRENT_SIZE 20
94390
94391 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
94392-#define SHORT_SYMLINK_LEN 128
94393+#define SHORT_SYMLINK_LEN 64
94394
94395 /*
94396 * shmem_fallocate and shmem_writepage communicate via inode->i_private
94397@@ -2232,6 +2232,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
94398 static int shmem_xattr_validate(const char *name)
94399 {
94400 struct { const char *prefix; size_t len; } arr[] = {
94401+
94402+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
94403+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
94404+#endif
94405+
94406 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
94407 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
94408 };
94409@@ -2287,6 +2292,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
94410 if (err)
94411 return err;
94412
94413+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
94414+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
94415+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
94416+ return -EOPNOTSUPP;
94417+ if (size > 8)
94418+ return -EINVAL;
94419+ }
94420+#endif
94421+
94422 return simple_xattr_set(&info->xattrs, name, value, size, flags);
94423 }
94424
94425@@ -2599,8 +2613,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
94426 int err = -ENOMEM;
94427
94428 /* Round up to L1_CACHE_BYTES to resist false sharing */
94429- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
94430- L1_CACHE_BYTES), GFP_KERNEL);
94431+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
94432 if (!sbinfo)
94433 return -ENOMEM;
94434
94435diff --git a/mm/slab.c b/mm/slab.c
94436index eb043bf..d82f5a8 100644
94437--- a/mm/slab.c
94438+++ b/mm/slab.c
94439@@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
94440 if ((x)->max_freeable < i) \
94441 (x)->max_freeable = i; \
94442 } while (0)
94443-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
94444-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
94445-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
94446-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
94447+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
94448+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
94449+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
94450+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
94451+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
94452+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
94453 #else
94454 #define STATS_INC_ACTIVE(x) do { } while (0)
94455 #define STATS_DEC_ACTIVE(x) do { } while (0)
94456@@ -320,6 +322,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
94457 #define STATS_INC_ALLOCMISS(x) do { } while (0)
94458 #define STATS_INC_FREEHIT(x) do { } while (0)
94459 #define STATS_INC_FREEMISS(x) do { } while (0)
94460+#define STATS_INC_SANITIZED(x) do { } while (0)
94461+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
94462 #endif
94463
94464 #if DEBUG
94465@@ -403,7 +407,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
94466 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
94467 */
94468 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
94469- const struct page *page, void *obj)
94470+ const struct page *page, const void *obj)
94471 {
94472 u32 offset = (obj - page->s_mem);
94473 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
94474@@ -1489,12 +1493,12 @@ void __init kmem_cache_init(void)
94475 */
94476
94477 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
94478- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
94479+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
94480
94481 if (INDEX_AC != INDEX_NODE)
94482 kmalloc_caches[INDEX_NODE] =
94483 create_kmalloc_cache("kmalloc-node",
94484- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
94485+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
94486
94487 slab_early_init = 0;
94488
94489@@ -3428,6 +3432,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
94490 struct array_cache *ac = cpu_cache_get(cachep);
94491
94492 check_irq_off();
94493+
94494+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94495+ if (pax_sanitize_slab) {
94496+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
94497+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
94498+
94499+ if (cachep->ctor)
94500+ cachep->ctor(objp);
94501+
94502+ STATS_INC_SANITIZED(cachep);
94503+ } else
94504+ STATS_INC_NOT_SANITIZED(cachep);
94505+ }
94506+#endif
94507+
94508 kmemleak_free_recursive(objp, cachep->flags);
94509 objp = cache_free_debugcheck(cachep, objp, caller);
94510
94511@@ -3656,6 +3675,7 @@ void kfree(const void *objp)
94512
94513 if (unlikely(ZERO_OR_NULL_PTR(objp)))
94514 return;
94515+ VM_BUG_ON(!virt_addr_valid(objp));
94516 local_irq_save(flags);
94517 kfree_debugcheck(objp);
94518 c = virt_to_cache(objp);
94519@@ -4097,14 +4117,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
94520 }
94521 /* cpu stats */
94522 {
94523- unsigned long allochit = atomic_read(&cachep->allochit);
94524- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
94525- unsigned long freehit = atomic_read(&cachep->freehit);
94526- unsigned long freemiss = atomic_read(&cachep->freemiss);
94527+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
94528+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
94529+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
94530+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
94531
94532 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
94533 allochit, allocmiss, freehit, freemiss);
94534 }
94535+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94536+ {
94537+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
94538+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
94539+
94540+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
94541+ }
94542+#endif
94543 #endif
94544 }
94545
94546@@ -4334,13 +4362,69 @@ static const struct file_operations proc_slabstats_operations = {
94547 static int __init slab_proc_init(void)
94548 {
94549 #ifdef CONFIG_DEBUG_SLAB_LEAK
94550- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
94551+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
94552 #endif
94553 return 0;
94554 }
94555 module_init(slab_proc_init);
94556 #endif
94557
94558+bool is_usercopy_object(const void *ptr)
94559+{
94560+ struct page *page;
94561+ struct kmem_cache *cachep;
94562+
94563+ if (ZERO_OR_NULL_PTR(ptr))
94564+ return false;
94565+
94566+ if (!slab_is_available())
94567+ return false;
94568+
94569+ if (!virt_addr_valid(ptr))
94570+ return false;
94571+
94572+ page = virt_to_head_page(ptr);
94573+
94574+ if (!PageSlab(page))
94575+ return false;
94576+
94577+ cachep = page->slab_cache;
94578+ return cachep->flags & SLAB_USERCOPY;
94579+}
94580+
94581+#ifdef CONFIG_PAX_USERCOPY
94582+const char *check_heap_object(const void *ptr, unsigned long n)
94583+{
94584+ struct page *page;
94585+ struct kmem_cache *cachep;
94586+ unsigned int objnr;
94587+ unsigned long offset;
94588+
94589+ if (ZERO_OR_NULL_PTR(ptr))
94590+ return "<null>";
94591+
94592+ if (!virt_addr_valid(ptr))
94593+ return NULL;
94594+
94595+ page = virt_to_head_page(ptr);
94596+
94597+ if (!PageSlab(page))
94598+ return NULL;
94599+
94600+ cachep = page->slab_cache;
94601+ if (!(cachep->flags & SLAB_USERCOPY))
94602+ return cachep->name;
94603+
94604+ objnr = obj_to_index(cachep, page, ptr);
94605+ BUG_ON(objnr >= cachep->num);
94606+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
94607+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
94608+ return NULL;
94609+
94610+ return cachep->name;
94611+}
94612+#endif
94613+
94614 /**
94615 * ksize - get the actual amount of memory allocated for a given object
94616 * @objp: Pointer to the object
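
The SLAB check_heap_object() above tests n <= cachep->object_size - offset rather than offset + n <= object_size: with unsigned arithmetic the latter can wrap for a huge n and wrongly pass. A tiny demonstration with made-up sizes:

#include <assert.h>
#include <limits.h>

static int wrapping_check(unsigned long off, unsigned long n, unsigned long size)
{
	return off + n <= size;      /* off + n may wrap to a small value */
}

static int safe_check(unsigned long off, unsigned long n, unsigned long size)
{
	return off <= size && n <= size - off;
}

int main(void)
{
	unsigned long size = 128, off = 64;
	unsigned long huge = ULONG_MAX - 32;           /* off + huge wraps */

	assert(wrapping_check(off, huge, size) == 1);  /* false pass! */
	assert(safe_check(off, huge, size) == 0);      /* correctly rejected */
	assert(safe_check(64, 64, 128) == 1);          /* an exact fit passes */
	return 0;
}
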
94617diff --git a/mm/slab.h b/mm/slab.h
94618index 0859c42..2f7b737 100644
94619--- a/mm/slab.h
94620+++ b/mm/slab.h
94621@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
94622 /* The slab cache that manages slab cache information */
94623 extern struct kmem_cache *kmem_cache;
94624
94625+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94626+#ifdef CONFIG_X86_64
94627+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
94628+#else
94629+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
94630+#endif
94631+extern bool pax_sanitize_slab;
94632+#endif
94633+
94634 unsigned long calculate_alignment(unsigned long flags,
94635 unsigned long align, unsigned long size);
94636
94637@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
94638
94639 /* Legal flag mask for kmem_cache_create(), for various configurations */
94640 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
94641- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
94642+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
94643+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
94644
94645 #if defined(CONFIG_DEBUG_SLAB)
94646 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
94647@@ -233,6 +243,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
94648 return s;
94649
94650 page = virt_to_head_page(x);
94651+
94652+ BUG_ON(!PageSlab(page));
94653+
94654 cachep = page->slab_cache;
94655 if (slab_equal_or_root(cachep, s))
94656 return cachep;
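
PAX_MEMORY_SANITIZE_VALUE above is 0xfe on x86_64, presumably so that a pointer-sized slot filled with the poison (0xfefefefefefefefe) forms a non-canonical address and faults if a stale pointer is ever dereferenced. A quick check of the x86-64 canonical-address rule (bits 63..48 must copy bit 47; the arithmetic right shift assumes the usual sign-extending behaviour):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static int canonical(uint64_t addr)
{
	/* sign-extend from bit 47 and compare with the original */
	return (uint64_t)(((int64_t)(addr << 16)) >> 16) == addr;
}

int main(void)
{
	uint64_t poison;

	memset(&poison, 0xfe, sizeof(poison));    /* 0xfefefefefefefefe */
	assert(!canonical(poison));               /* a dereference would fault */
	assert(canonical(0x00007fffffffffffULL)); /* top of userspace: fine */
	return 0;
}
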
94657diff --git a/mm/slab_common.c b/mm/slab_common.c
94658index 0b7bb39..334c328 100644
94659--- a/mm/slab_common.c
94660+++ b/mm/slab_common.c
94661@@ -23,11 +23,22 @@
94662
94663 #include "slab.h"
94664
94665-enum slab_state slab_state;
94666+enum slab_state slab_state __read_only;
94667 LIST_HEAD(slab_caches);
94668 DEFINE_MUTEX(slab_mutex);
94669 struct kmem_cache *kmem_cache;
94670
94671+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94672+bool pax_sanitize_slab __read_only = true;
94673+static int __init pax_sanitize_slab_setup(char *str)
94674+{
94675+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
94676+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
94677+ return 1;
94678+}
94679+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
94680+#endif
94681+
94682 #ifdef CONFIG_DEBUG_VM
94683 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
94684 size_t size)
94685@@ -212,7 +223,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
94686
94687 err = __kmem_cache_create(s, flags);
94688 if (!err) {
94689- s->refcount = 1;
94690+ atomic_set(&s->refcount, 1);
94691 list_add(&s->list, &slab_caches);
94692 memcg_cache_list_add(memcg, s);
94693 } else {
94694@@ -258,8 +269,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
94695
94696 get_online_cpus();
94697 mutex_lock(&slab_mutex);
94698- s->refcount--;
94699- if (!s->refcount) {
94700+ if (atomic_dec_and_test(&s->refcount)) {
94701 list_del(&s->list);
94702
94703 if (!__kmem_cache_shutdown(s)) {
94704@@ -305,7 +315,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
94705 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
94706 name, size, err);
94707
94708- s->refcount = -1; /* Exempt from merging for now */
94709+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
94710 }
94711
94712 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
94713@@ -318,7 +328,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
94714
94715 create_boot_cache(s, name, size, flags);
94716 list_add(&s->list, &slab_caches);
94717- s->refcount = 1;
94718+ atomic_set(&s->refcount, 1);
94719 return s;
94720 }
94721
94722@@ -330,6 +340,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
94723 EXPORT_SYMBOL(kmalloc_dma_caches);
94724 #endif
94725
94726+#ifdef CONFIG_PAX_USERCOPY_SLABS
94727+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
94728+EXPORT_SYMBOL(kmalloc_usercopy_caches);
94729+#endif
94730+
94731 /*
94732 * Conversion table for small slabs sizes / 8 to the index in the
94733 * kmalloc array. This is necessary for slabs < 192 since we have non power
94734@@ -394,6 +409,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
94735 return kmalloc_dma_caches[index];
94736
94737 #endif
94738+
94739+#ifdef CONFIG_PAX_USERCOPY_SLABS
94740+ if (unlikely((flags & GFP_USERCOPY)))
94741+ return kmalloc_usercopy_caches[index];
94742+
94743+#endif
94744+
94745 return kmalloc_caches[index];
94746 }
94747
94748@@ -450,7 +472,7 @@ void __init create_kmalloc_caches(unsigned long flags)
94749 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
94750 if (!kmalloc_caches[i]) {
94751 kmalloc_caches[i] = create_kmalloc_cache(NULL,
94752- 1 << i, flags);
94753+ 1 << i, SLAB_USERCOPY | flags);
94754 }
94755
94756 /*
94757@@ -459,10 +481,10 @@ void __init create_kmalloc_caches(unsigned long flags)
94758 * earlier power of two caches
94759 */
94760 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
94761- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
94762+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
94763
94764 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
94765- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
94766+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
94767 }
94768
94769 /* Kmalloc array is now usable */
94770@@ -495,6 +517,23 @@ void __init create_kmalloc_caches(unsigned long flags)
94771 }
94772 }
94773 #endif
94774+
94775+#ifdef CONFIG_PAX_USERCOPY_SLABS
94776+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
94777+ struct kmem_cache *s = kmalloc_caches[i];
94778+
94779+ if (s) {
94780+ int size = kmalloc_size(i);
94781+ char *n = kasprintf(GFP_NOWAIT,
94782+ "usercopy-kmalloc-%d", size);
94783+
94784+ BUG_ON(!n);
94785+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
94786+ size, SLAB_USERCOPY | flags);
94787+ }
94788+ }
94789+#endif
94790+
94791 }
94792 #endif /* !CONFIG_SLOB */
94793
94794@@ -535,6 +574,9 @@ void print_slabinfo_header(struct seq_file *m)
94795 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
94796 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
94797 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
94798+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94799+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
94800+#endif
94801 #endif
94802 seq_putc(m, '\n');
94803 }
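
kmem_cache_destroy() above replaces the bare s->refcount--; if (!s->refcount) pair with atomic_dec_and_test(), so exactly one caller observes the transition to zero and tears the cache down. A C11-atomics rendition of that contract; the thread count is arbitrary:

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int refcount = 4;
static atomic_int destroyed;

static void *put(void *arg)
{
	(void)arg;
	/* atomic_dec_and_test(): true only for the final decrement */
	if (atomic_fetch_sub(&refcount, 1) == 1)
		atomic_fetch_add(&destroyed, 1);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, put, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	/* exactly one thread wins the right to destroy */
	assert(atomic_load(&refcount) == 0 && atomic_load(&destroyed) == 1);
	return 0;
}
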
94804diff --git a/mm/slob.c b/mm/slob.c
94805index 4bf8809..98a6914 100644
94806--- a/mm/slob.c
94807+++ b/mm/slob.c
94808@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
94809 /*
94810 * Return the size of a slob block.
94811 */
94812-static slobidx_t slob_units(slob_t *s)
94813+static slobidx_t slob_units(const slob_t *s)
94814 {
94815 if (s->units > 0)
94816 return s->units;
94817@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
94818 /*
94819 * Return the next free slob block pointer after this one.
94820 */
94821-static slob_t *slob_next(slob_t *s)
94822+static slob_t *slob_next(const slob_t *s)
94823 {
94824 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
94825 slobidx_t next;
94826@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
94827 /*
94828 * Returns true if s is the last free block in its page.
94829 */
94830-static int slob_last(slob_t *s)
94831+static int slob_last(const slob_t *s)
94832 {
94833 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
94834 }
94835
94836-static void *slob_new_pages(gfp_t gfp, int order, int node)
94837+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
94838 {
94839- void *page;
94840+ struct page *page;
94841
94842 #ifdef CONFIG_NUMA
94843 if (node != NUMA_NO_NODE)
94844@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
94845 if (!page)
94846 return NULL;
94847
94848- return page_address(page);
94849+ __SetPageSlab(page);
94850+ return page;
94851 }
94852
94853-static void slob_free_pages(void *b, int order)
94854+static void slob_free_pages(struct page *sp, int order)
94855 {
94856 if (current->reclaim_state)
94857 current->reclaim_state->reclaimed_slab += 1 << order;
94858- free_pages((unsigned long)b, order);
94859+ __ClearPageSlab(sp);
94860+ page_mapcount_reset(sp);
94861+ sp->private = 0;
94862+ __free_pages(sp, order);
94863 }
94864
94865 /*
94866@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
94867
94868 /* Not enough space: must allocate a new page */
94869 if (!b) {
94870- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
94871- if (!b)
94872+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
94873+ if (!sp)
94874 return NULL;
94875- sp = virt_to_page(b);
94876- __SetPageSlab(sp);
94877+ b = page_address(sp);
94878
94879 spin_lock_irqsave(&slob_lock, flags);
94880 sp->units = SLOB_UNITS(PAGE_SIZE);
94881 sp->freelist = b;
94882+ sp->private = 0;
94883 INIT_LIST_HEAD(&sp->list);
94884 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
94885 set_slob_page_free(sp, slob_list);
94886@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
94887 if (slob_page_free(sp))
94888 clear_slob_page_free(sp);
94889 spin_unlock_irqrestore(&slob_lock, flags);
94890- __ClearPageSlab(sp);
94891- page_mapcount_reset(sp);
94892- slob_free_pages(b, 0);
94893+ slob_free_pages(sp, 0);
94894 return;
94895 }
94896
94897+#ifdef CONFIG_PAX_MEMORY_SANITIZE
94898+ if (pax_sanitize_slab)
94899+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
94900+#endif
94901+
94902 if (!slob_page_free(sp)) {
94903 /* This slob page is about to become partially free. Easy! */
94904 sp->units = units;
94905@@ -424,11 +431,10 @@ out:
94906 */
94907
94908 static __always_inline void *
94909-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
94910+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
94911 {
94912- unsigned int *m;
94913- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94914- void *ret;
94915+ slob_t *m;
94916+ void *ret = NULL;
94917
94918 gfp &= gfp_allowed_mask;
94919
94920@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
94921
94922 if (!m)
94923 return NULL;
94924- *m = size;
94925+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
94926+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
94927+ m[0].units = size;
94928+ m[1].units = align;
94929 ret = (void *)m + align;
94930
94931 trace_kmalloc_node(caller, ret,
94932 size, size + align, gfp, node);
94933 } else {
94934 unsigned int order = get_order(size);
94935+ struct page *page;
94936
94937 if (likely(order))
94938 gfp |= __GFP_COMP;
94939- ret = slob_new_pages(gfp, order, node);
94940+ page = slob_new_pages(gfp, order, node);
94941+ if (page) {
94942+ ret = page_address(page);
94943+ page->private = size;
94944+ }
94945
94946 trace_kmalloc_node(caller, ret,
94947 size, PAGE_SIZE << order, gfp, node);
94948 }
94949
94950- kmemleak_alloc(ret, size, 1, gfp);
94951+ return ret;
94952+}
94953+
94954+static __always_inline void *
94955+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
94956+{
94957+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94958+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
94959+
94960+ if (!ZERO_OR_NULL_PTR(ret))
94961+ kmemleak_alloc(ret, size, 1, gfp);
94962 return ret;
94963 }
94964
94965@@ -493,34 +517,112 @@ void kfree(const void *block)
94966 return;
94967 kmemleak_free(block);
94968
94969+ VM_BUG_ON(!virt_addr_valid(block));
94970 sp = virt_to_page(block);
94971- if (PageSlab(sp)) {
94972+ VM_BUG_ON(!PageSlab(sp));
94973+ if (!sp->private) {
94974 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
94975- unsigned int *m = (unsigned int *)(block - align);
94976- slob_free(m, *m + align);
94977- } else
94978+ slob_t *m = (slob_t *)(block - align);
94979+ slob_free(m, m[0].units + align);
94980+ } else {
94981+ __ClearPageSlab(sp);
94982+ page_mapcount_reset(sp);
94983+ sp->private = 0;
94984 __free_pages(sp, compound_order(sp));
94985+ }
94986 }
94987 EXPORT_SYMBOL(kfree);
94988
94989+bool is_usercopy_object(const void *ptr)
94990+{
94991+ if (!slab_is_available())
94992+ return false;
94993+
94994+ // PAX: TODO
94995+
94996+ return false;
94997+}
94998+
94999+#ifdef CONFIG_PAX_USERCOPY
95000+const char *check_heap_object(const void *ptr, unsigned long n)
95001+{
95002+ struct page *page;
95003+ const slob_t *free;
95004+ const void *base;
95005+ unsigned long flags;
95006+
95007+ if (ZERO_OR_NULL_PTR(ptr))
95008+ return "<null>";
95009+
95010+ if (!virt_addr_valid(ptr))
95011+ return NULL;
95012+
95013+ page = virt_to_head_page(ptr);
95014+ if (!PageSlab(page))
95015+ return NULL;
95016+
95017+ if (page->private) {
95018+ base = page;
95019+ if (base <= ptr && n <= page->private - (ptr - base))
95020+ return NULL;
95021+ return "<slob>";
95022+ }
95023+
95024+ /* some tricky double walking to find the chunk */
95025+ spin_lock_irqsave(&slob_lock, flags);
95026+ base = (void *)((unsigned long)ptr & PAGE_MASK);
95027+ free = page->freelist;
95028+
95029+ while (!slob_last(free) && (void *)free <= ptr) {
95030+ base = free + slob_units(free);
95031+ free = slob_next(free);
95032+ }
95033+
95034+ while (base < (void *)free) {
95035+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
95036+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
95037+ int offset;
95038+
95039+ if (ptr < base + align)
95040+ break;
95041+
95042+ offset = ptr - base - align;
95043+ if (offset >= m) {
95044+ base += size;
95045+ continue;
95046+ }
95047+
95048+ if (n > m - offset)
95049+ break;
95050+
95051+ spin_unlock_irqrestore(&slob_lock, flags);
95052+ return NULL;
95053+ }
95054+
95055+ spin_unlock_irqrestore(&slob_lock, flags);
95056+ return "<slob>";
95057+}
95058+#endif
95059+
95060 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
95061 size_t ksize(const void *block)
95062 {
95063 struct page *sp;
95064 int align;
95065- unsigned int *m;
95066+ slob_t *m;
95067
95068 BUG_ON(!block);
95069 if (unlikely(block == ZERO_SIZE_PTR))
95070 return 0;
95071
95072 sp = virt_to_page(block);
95073- if (unlikely(!PageSlab(sp)))
95074- return PAGE_SIZE << compound_order(sp);
95075+ VM_BUG_ON(!PageSlab(sp));
95076+ if (sp->private)
95077+ return sp->private;
95078
95079 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
95080- m = (unsigned int *)(block - align);
95081- return SLOB_UNITS(*m) * SLOB_UNIT;
95082+ m = (slob_t *)(block - align);
95083+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
95084 }
95085 EXPORT_SYMBOL(ksize);
95086
95087@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
95088
95089 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
95090 {
95091- void *b;
95092+ void *b = NULL;
95093
95094 flags &= gfp_allowed_mask;
95095
95096 lockdep_trace_alloc(flags);
95097
95098+#ifdef CONFIG_PAX_USERCOPY_SLABS
95099+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
95100+#else
95101 if (c->size < PAGE_SIZE) {
95102 b = slob_alloc(c->size, flags, c->align, node);
95103 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
95104 SLOB_UNITS(c->size) * SLOB_UNIT,
95105 flags, node);
95106 } else {
95107- b = slob_new_pages(flags, get_order(c->size), node);
95108+ struct page *sp;
95109+
95110+ sp = slob_new_pages(flags, get_order(c->size), node);
95111+ if (sp) {
95112+ b = page_address(sp);
95113+ sp->private = c->size;
95114+ }
95115 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
95116 PAGE_SIZE << get_order(c->size),
95117 flags, node);
95118 }
95119+#endif
95120
95121 if (b && c->ctor)
95122 c->ctor(b);
95123@@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
95124
95125 static void __kmem_cache_free(void *b, int size)
95126 {
95127- if (size < PAGE_SIZE)
95128+ struct page *sp;
95129+
95130+ sp = virt_to_page(b);
95131+ BUG_ON(!PageSlab(sp));
95132+ if (!sp->private)
95133 slob_free(b, size);
95134 else
95135- slob_free_pages(b, get_order(size));
95136+ slob_free_pages(sp, get_order(size));
95137 }
95138
95139 static void kmem_rcu_free(struct rcu_head *head)
95140@@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
95141
95142 void kmem_cache_free(struct kmem_cache *c, void *b)
95143 {
95144+ int size = c->size;
95145+
95146+#ifdef CONFIG_PAX_USERCOPY_SLABS
95147+ if (size + c->align < PAGE_SIZE) {
95148+ size += c->align;
95149+ b -= c->align;
95150+ }
95151+#endif
95152+
95153 kmemleak_free_recursive(b, c->flags);
95154 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
95155 struct slob_rcu *slob_rcu;
95156- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
95157- slob_rcu->size = c->size;
95158+ slob_rcu = b + (size - sizeof(struct slob_rcu));
95159+ slob_rcu->size = size;
95160 call_rcu(&slob_rcu->head, kmem_rcu_free);
95161 } else {
95162- __kmem_cache_free(b, c->size);
95163+ __kmem_cache_free(b, size);
95164 }
95165
95166+#ifdef CONFIG_PAX_USERCOPY_SLABS
95167+ trace_kfree(_RET_IP_, b);
95168+#else
95169 trace_kmem_cache_free(_RET_IP_, b);
95170+#endif
95171+
95172 }
95173 EXPORT_SYMBOL(kmem_cache_free);
95174
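
Note on the mm/slob.c hunks above: under PAX_USERCOPY the slob allocator learns to answer "may this region be copied to/from userland?" — large kmem_cache objects record their size in page->private, and check_heap_object() walks slob units to validate the requested window. A minimal compilable sketch of the bounds rule those hunks apply (userspace model, not kernel code):

#include <stdbool.h>
#include <stddef.h>

/* Accept an n-byte copy at `offset` into an object of obj_size bytes.
 * Once offset <= obj_size holds, obj_size - offset cannot underflow. */
static bool copy_window_ok(size_t obj_size, size_t offset, size_t n)
{
	return offset <= obj_size && n <= obj_size - offset;
}

Anything outside that window makes check_heap_object() return a cache name ("<slob>" here), which the usercopy core treats as a violation.
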
95175diff --git a/mm/slub.c b/mm/slub.c
95176index 89490d9..c7b226a 100644
95177--- a/mm/slub.c
95178+++ b/mm/slub.c
95179@@ -207,7 +207,7 @@ struct track {
95180
95181 enum track_item { TRACK_ALLOC, TRACK_FREE };
95182
95183-#ifdef CONFIG_SYSFS
95184+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95185 static int sysfs_slab_add(struct kmem_cache *);
95186 static int sysfs_slab_alias(struct kmem_cache *, const char *);
95187 static void sysfs_slab_remove(struct kmem_cache *);
95188@@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
95189 if (!t->addr)
95190 return;
95191
95192- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
95193+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
95194 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
95195 #ifdef CONFIG_STACKTRACE
95196 {
95197@@ -2643,6 +2643,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
95198
95199 slab_free_hook(s, x);
95200
95201+#ifdef CONFIG_PAX_MEMORY_SANITIZE
95202+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
95203+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
95204+ if (s->ctor)
95205+ s->ctor(x);
95206+ }
95207+#endif
95208+
95209 redo:
95210 /*
95211 * Determine the current cpu's per cpu slab.
95212@@ -2710,7 +2718,7 @@ static int slub_min_objects;
95213 * Merge control. If this is set then no merging of slab caches will occur.
95214 * (Could be removed. This was introduced to pacify the merge skeptics.)
95215 */
95216-static int slub_nomerge;
95217+static int slub_nomerge = 1;
95218
95219 /*
95220 * Calculate the order of allocation given an slab object size.
95221@@ -2987,6 +2995,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
95222 s->inuse = size;
95223
95224 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
95225+#ifdef CONFIG_PAX_MEMORY_SANITIZE
95226+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
95227+#endif
95228 s->ctor)) {
95229 /*
95230 * Relocate free pointer after the object if it is not
95231@@ -3332,6 +3343,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
95232 EXPORT_SYMBOL(__kmalloc_node);
95233 #endif
95234
95235+bool is_usercopy_object(const void *ptr)
95236+{
95237+ struct page *page;
95238+ struct kmem_cache *s;
95239+
95240+ if (ZERO_OR_NULL_PTR(ptr))
95241+ return false;
95242+
95243+ if (!slab_is_available())
95244+ return false;
95245+
95246+ if (!virt_addr_valid(ptr))
95247+ return false;
95248+
95249+ page = virt_to_head_page(ptr);
95250+
95251+ if (!PageSlab(page))
95252+ return false;
95253+
95254+ s = page->slab_cache;
95255+ return s->flags & SLAB_USERCOPY;
95256+}
95257+
95258+#ifdef CONFIG_PAX_USERCOPY
95259+const char *check_heap_object(const void *ptr, unsigned long n)
95260+{
95261+ struct page *page;
95262+ struct kmem_cache *s;
95263+ unsigned long offset;
95264+
95265+ if (ZERO_OR_NULL_PTR(ptr))
95266+ return "<null>";
95267+
95268+ if (!virt_addr_valid(ptr))
95269+ return NULL;
95270+
95271+ page = virt_to_head_page(ptr);
95272+
95273+ if (!PageSlab(page))
95274+ return NULL;
95275+
95276+ s = page->slab_cache;
95277+ if (!(s->flags & SLAB_USERCOPY))
95278+ return s->name;
95279+
95280+ offset = (ptr - page_address(page)) % s->size;
95281+ if (offset <= s->object_size && n <= s->object_size - offset)
95282+ return NULL;
95283+
95284+ return s->name;
95285+}
95286+#endif
95287+
95288 size_t ksize(const void *object)
95289 {
95290 struct page *page;
95291@@ -3360,6 +3424,7 @@ void kfree(const void *x)
95292 if (unlikely(ZERO_OR_NULL_PTR(x)))
95293 return;
95294
95295+ VM_BUG_ON(!virt_addr_valid(x));
95296 page = virt_to_head_page(x);
95297 if (unlikely(!PageSlab(page))) {
95298 BUG_ON(!PageCompound(page));
95299@@ -3665,7 +3730,7 @@ static int slab_unmergeable(struct kmem_cache *s)
95300 /*
95301 * We may have set a slab to be unmergeable during bootstrap.
95302 */
95303- if (s->refcount < 0)
95304+ if (atomic_read(&s->refcount) < 0)
95305 return 1;
95306
95307 return 0;
95308@@ -3723,7 +3788,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
95309
95310 s = find_mergeable(memcg, size, align, flags, name, ctor);
95311 if (s) {
95312- s->refcount++;
95313+ atomic_inc(&s->refcount);
95314 /*
95315 * Adjust the object sizes so that we clear
95316 * the complete object on kzalloc.
95317@@ -3732,7 +3797,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
95318 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
95319
95320 if (sysfs_slab_alias(s, name)) {
95321- s->refcount--;
95322+ atomic_dec(&s->refcount);
95323 s = NULL;
95324 }
95325 }
95326@@ -3852,7 +3917,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
95327 }
95328 #endif
95329
95330-#ifdef CONFIG_SYSFS
95331+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95332 static int count_inuse(struct page *page)
95333 {
95334 return page->inuse;
95335@@ -4241,12 +4306,12 @@ static void resiliency_test(void)
95336 validate_slab_cache(kmalloc_caches[9]);
95337 }
95338 #else
95339-#ifdef CONFIG_SYSFS
95340+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95341 static void resiliency_test(void) {};
95342 #endif
95343 #endif
95344
95345-#ifdef CONFIG_SYSFS
95346+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95347 enum slab_stat_type {
95348 SL_ALL, /* All slabs */
95349 SL_PARTIAL, /* Only partially allocated slabs */
95350@@ -4492,7 +4557,7 @@ SLAB_ATTR_RO(ctor);
95351
95352 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
95353 {
95354- return sprintf(buf, "%d\n", s->refcount - 1);
95355+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
95356 }
95357 SLAB_ATTR_RO(aliases);
95358
95359@@ -4580,6 +4645,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
95360 SLAB_ATTR_RO(cache_dma);
95361 #endif
95362
95363+#ifdef CONFIG_PAX_USERCOPY_SLABS
95364+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
95365+{
95366+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
95367+}
95368+SLAB_ATTR_RO(usercopy);
95369+#endif
95370+
95371 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
95372 {
95373 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
95374@@ -4914,6 +4987,9 @@ static struct attribute *slab_attrs[] = {
95375 #ifdef CONFIG_ZONE_DMA
95376 &cache_dma_attr.attr,
95377 #endif
95378+#ifdef CONFIG_PAX_USERCOPY_SLABS
95379+ &usercopy_attr.attr,
95380+#endif
95381 #ifdef CONFIG_NUMA
95382 &remote_node_defrag_ratio_attr.attr,
95383 #endif
95384@@ -5146,6 +5222,7 @@ static char *create_unique_id(struct kmem_cache *s)
95385 return name;
95386 }
95387
95388+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95389 static int sysfs_slab_add(struct kmem_cache *s)
95390 {
95391 int err;
95392@@ -5169,7 +5246,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
95393 }
95394
95395 s->kobj.kset = slab_kset;
95396- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
95397+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
95398 if (err) {
95399 kobject_put(&s->kobj);
95400 return err;
95401@@ -5203,6 +5280,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
95402 kobject_del(&s->kobj);
95403 kobject_put(&s->kobj);
95404 }
95405+#endif
95406
95407 /*
95408 * Need to buffer aliases during bootup until sysfs becomes
95409@@ -5216,6 +5294,7 @@ struct saved_alias {
95410
95411 static struct saved_alias *alias_list;
95412
95413+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
95414 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
95415 {
95416 struct saved_alias *al;
95417@@ -5238,6 +5317,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
95418 alias_list = al;
95419 return 0;
95420 }
95421+#endif
95422
95423 static int __init slab_sysfs_init(void)
95424 {
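
Two themes run through the mm/slub.c diff above: kmem_cache::refcount moves from a plain int to atomic_t so alias creation and removal cannot race, and the slab sysfs/debug interfaces are compiled out when GRKERNSEC_PROC_ADD hides them. A rough userspace model of the refcount conversion, using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>

struct cache { atomic_int refcount; };

static void cache_alias_get(struct cache *c)
{
	/* mirrors atomic_inc(&s->refcount) in __kmem_cache_alias() */
	atomic_fetch_add_explicit(&c->refcount, 1, memory_order_relaxed);
}

static void cache_alias_put(struct cache *c)
{
	/* mirrors atomic_dec(&s->refcount) on the sysfs failure path */
	atomic_fetch_sub_explicit(&c->refcount, 1, memory_order_relaxed);
}
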
95425diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
95426index 27eeab3..7c3f7f2 100644
95427--- a/mm/sparse-vmemmap.c
95428+++ b/mm/sparse-vmemmap.c
95429@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
95430 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
95431 if (!p)
95432 return NULL;
95433- pud_populate(&init_mm, pud, p);
95434+ pud_populate_kernel(&init_mm, pud, p);
95435 }
95436 return pud;
95437 }
95438@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
95439 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
95440 if (!p)
95441 return NULL;
95442- pgd_populate(&init_mm, pgd, p);
95443+ pgd_populate_kernel(&init_mm, pgd, p);
95444 }
95445 return pgd;
95446 }
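
The pud_populate_kernel()/pgd_populate_kernel() substitutions above exist because KERNEXEC keeps init_mm page tables write-protected; the _kernel variants briefly open a write window around the update. A userspace analogue of that open-write-close pattern, using mprotect() where the kernel would toggle its own protection (illustrative only, return values unchecked):

#include <stddef.h>
#include <sys/mman.h>

static void protected_store(void *page, size_t len, void (*update)(void *))
{
	mprotect(page, len, PROT_READ | PROT_WRITE);	/* open the window */
	update(page);					/* perform the write */
	mprotect(page, len, PROT_READ);			/* re-protect */
}
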
95447diff --git a/mm/sparse.c b/mm/sparse.c
95448index 8cc7be0..d0f7d7a 100644
95449--- a/mm/sparse.c
95450+++ b/mm/sparse.c
95451@@ -745,7 +745,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
95452
95453 for (i = 0; i < PAGES_PER_SECTION; i++) {
95454 if (PageHWPoison(&memmap[i])) {
95455- atomic_long_sub(1, &num_poisoned_pages);
95456+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
95457 ClearPageHWPoison(&memmap[i]);
95458 }
95459 }
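
atomic_long_sub_unchecked() above is part of a patch-wide split: under PAX_REFCOUNT, ordinary atomic ops trap on signed overflow to stop refcount-overflow exploits, so pure statistics counters that may legitimately wrap are moved to *_unchecked variants. A userspace model of the two behaviours:

#include <limits.h>
#include <stdlib.h>

static int checked_add(int v, int d)	/* models a REFCOUNT-protected op */
{
	if ((d > 0 && v > INT_MAX - d) || (d < 0 && v < INT_MIN - d))
		abort();		/* stands in for the overflow trap */
	return v + d;
}

static int unchecked_add(int v, int d)	/* models the *_unchecked variant */
{
	return (int)((unsigned int)v + (unsigned int)d);	/* wrap is fine */
}

The same reasoning explains the swapfile.c, vmstat.c, batman-adv and ATM counter conversions later in this patch.
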
95460diff --git a/mm/swap.c b/mm/swap.c
95461index 84b26aa..ce39899 100644
95462--- a/mm/swap.c
95463+++ b/mm/swap.c
95464@@ -77,6 +77,8 @@ static void __put_compound_page(struct page *page)
95465
95466 __page_cache_release(page);
95467 dtor = get_compound_page_dtor(page);
95468+ if (!PageHuge(page))
95469+ BUG_ON(dtor != free_compound_page);
95470 (*dtor)(page);
95471 }
95472
95473diff --git a/mm/swapfile.c b/mm/swapfile.c
95474index 461fce2..363ae44 100644
95475--- a/mm/swapfile.c
95476+++ b/mm/swapfile.c
95477@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
95478
95479 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
95480 /* Activity counter to indicate that a swapon or swapoff has occurred */
95481-static atomic_t proc_poll_event = ATOMIC_INIT(0);
95482+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
95483
95484 static inline unsigned char swap_count(unsigned char ent)
95485 {
95486@@ -1958,7 +1958,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
95487 spin_unlock(&swap_lock);
95488
95489 err = 0;
95490- atomic_inc(&proc_poll_event);
95491+ atomic_inc_unchecked(&proc_poll_event);
95492 wake_up_interruptible(&proc_poll_wait);
95493
95494 out_dput:
95495@@ -1975,8 +1975,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
95496
95497 poll_wait(file, &proc_poll_wait, wait);
95498
95499- if (seq->poll_event != atomic_read(&proc_poll_event)) {
95500- seq->poll_event = atomic_read(&proc_poll_event);
95501+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
95502+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
95503 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
95504 }
95505
95506@@ -2074,7 +2074,7 @@ static int swaps_open(struct inode *inode, struct file *file)
95507 return ret;
95508
95509 seq = file->private_data;
95510- seq->poll_event = atomic_read(&proc_poll_event);
95511+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
95512 return 0;
95513 }
95514
95515@@ -2533,7 +2533,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
95516 (frontswap_map) ? "FS" : "");
95517
95518 mutex_unlock(&swapon_mutex);
95519- atomic_inc(&proc_poll_event);
95520+ atomic_inc_unchecked(&proc_poll_event);
95521 wake_up_interruptible(&proc_poll_wait);
95522
95523 if (S_ISREG(inode->i_mode))
95524diff --git a/mm/util.c b/mm/util.c
95525index 808f375..e4764b5 100644
95526--- a/mm/util.c
95527+++ b/mm/util.c
95528@@ -297,6 +297,12 @@ done:
95529 void arch_pick_mmap_layout(struct mm_struct *mm)
95530 {
95531 mm->mmap_base = TASK_UNMAPPED_BASE;
95532+
95533+#ifdef CONFIG_PAX_RANDMMAP
95534+ if (mm->pax_flags & MF_PAX_RANDMMAP)
95535+ mm->mmap_base += mm->delta_mmap;
95536+#endif
95537+
95538 mm->get_unmapped_area = arch_get_unmapped_area;
95539 }
95540 #endif
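
The mm/util.c hunk folds a per-process random delta into the mmap base when PAX_RANDMMAP is active. A compilable sketch of the idea — glibc's getrandom() stands in for the kernel entropy source, and rand_bits is assumed to be below 32:

#include <stdint.h>
#include <sys/random.h>

#define PAGE_SHIFT 12

static uintptr_t randomize_base(uintptr_t base, unsigned int rand_bits)
{
	uint32_t r = 0;

	getrandom(&r, sizeof(r), 0);
	r &= (1u << rand_bits) - 1;			/* keep rand_bits of entropy */
	return base + ((uintptr_t)r << PAGE_SHIFT);	/* stay page-aligned */
}
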
95541diff --git a/mm/vmalloc.c b/mm/vmalloc.c
95542index 0fdf968..d6686e8 100644
95543--- a/mm/vmalloc.c
95544+++ b/mm/vmalloc.c
95545@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
95546
95547 pte = pte_offset_kernel(pmd, addr);
95548 do {
95549- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
95550- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
95551+
95552+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
95553+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
95554+ BUG_ON(!pte_exec(*pte));
95555+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
95556+ continue;
95557+ }
95558+#endif
95559+
95560+ {
95561+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
95562+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
95563+ }
95564 } while (pte++, addr += PAGE_SIZE, addr != end);
95565 }
95566
95567@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
95568 pte = pte_alloc_kernel(pmd, addr);
95569 if (!pte)
95570 return -ENOMEM;
95571+
95572+ pax_open_kernel();
95573 do {
95574 struct page *page = pages[*nr];
95575
95576- if (WARN_ON(!pte_none(*pte)))
95577+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
95578+ if (pgprot_val(prot) & _PAGE_NX)
95579+#endif
95580+
95581+ if (!pte_none(*pte)) {
95582+ pax_close_kernel();
95583+ WARN_ON(1);
95584 return -EBUSY;
95585- if (WARN_ON(!page))
95586+ }
95587+ if (!page) {
95588+ pax_close_kernel();
95589+ WARN_ON(1);
95590 return -ENOMEM;
95591+ }
95592 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
95593 (*nr)++;
95594 } while (pte++, addr += PAGE_SIZE, addr != end);
95595+ pax_close_kernel();
95596 return 0;
95597 }
95598
95599@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
95600 pmd_t *pmd;
95601 unsigned long next;
95602
95603- pmd = pmd_alloc(&init_mm, pud, addr);
95604+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
95605 if (!pmd)
95606 return -ENOMEM;
95607 do {
95608@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
95609 pud_t *pud;
95610 unsigned long next;
95611
95612- pud = pud_alloc(&init_mm, pgd, addr);
95613+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
95614 if (!pud)
95615 return -ENOMEM;
95616 do {
95617@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
95618 if (addr >= MODULES_VADDR && addr < MODULES_END)
95619 return 1;
95620 #endif
95621+
95622+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
95623+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
95624+ return 1;
95625+#endif
95626+
95627 return is_vmalloc_addr(x);
95628 }
95629
95630@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
95631
95632 if (!pgd_none(*pgd)) {
95633 pud_t *pud = pud_offset(pgd, addr);
95634+#ifdef CONFIG_X86
95635+ if (!pud_large(*pud))
95636+#endif
95637 if (!pud_none(*pud)) {
95638 pmd_t *pmd = pmd_offset(pud, addr);
95639+#ifdef CONFIG_X86
95640+ if (!pmd_large(*pmd))
95641+#endif
95642 if (!pmd_none(*pmd)) {
95643 pte_t *ptep, pte;
95644
95645@@ -1309,6 +1345,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
95646 struct vm_struct *area;
95647
95648 BUG_ON(in_interrupt());
95649+
95650+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
95651+ if (flags & VM_KERNEXEC) {
95652+ if (start != VMALLOC_START || end != VMALLOC_END)
95653+ return NULL;
95654+ start = (unsigned long)MODULES_EXEC_VADDR;
95655+ end = (unsigned long)MODULES_EXEC_END;
95656+ }
95657+#endif
95658+
95659 if (flags & VM_IOREMAP)
95660 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
95661
95662@@ -1534,6 +1580,11 @@ void *vmap(struct page **pages, unsigned int count,
95663 if (count > totalram_pages)
95664 return NULL;
95665
95666+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
95667+ if (!(pgprot_val(prot) & _PAGE_NX))
95668+ flags |= VM_KERNEXEC;
95669+#endif
95670+
95671 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
95672 __builtin_return_address(0));
95673 if (!area)
95674@@ -1634,6 +1685,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
95675 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
95676 goto fail;
95677
95678+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
95679+ if (!(pgprot_val(prot) & _PAGE_NX))
95680+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
95681+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
95682+ else
95683+#endif
95684+
95685 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
95686 start, end, node, gfp_mask, caller);
95687 if (!area)
95688@@ -1810,10 +1868,9 @@ EXPORT_SYMBOL(vzalloc_node);
95689 * For tight control over page level allocator and protection flags
95690 * use __vmalloc() instead.
95691 */
95692-
95693 void *vmalloc_exec(unsigned long size)
95694 {
95695- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
95696+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
95697 NUMA_NO_NODE, __builtin_return_address(0));
95698 }
95699
95700@@ -2120,6 +2177,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
95701 {
95702 struct vm_struct *area;
95703
95704+ BUG_ON(vma->vm_mirror);
95705+
95706 size = PAGE_ALIGN(size);
95707
95708 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
95709@@ -2602,7 +2661,11 @@ static int s_show(struct seq_file *m, void *p)
95710 v->addr, v->addr + v->size, v->size);
95711
95712 if (v->caller)
95713+#ifdef CONFIG_GRKERNSEC_HIDESYM
95714+ seq_printf(m, " %pK", v->caller);
95715+#else
95716 seq_printf(m, " %pS", v->caller);
95717+#endif
95718
95719 if (v->nr_pages)
95720 seq_printf(m, " pages=%d", v->nr_pages);
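
The mm/vmalloc.c changes steer any mapping that will be executable into the module text range (VM_KERNEXEC), letting KERNEXEC keep the rest of vmalloc space non-executable; the pte loops are additionally bracketed with pax_open_kernel()/pax_close_kernel(). A standalone sketch of the region selection — the addresses are placeholders, not real layout constants:

#include <stdbool.h>
#include <stdint.h>

struct range { uintptr_t start, end; };

static struct range pick_region(bool executable)
{
	static const struct range modules = { 0xa0000000u, 0xb0000000u };
	static const struct range vmalloc = { 0xc0000000u, 0xf0000000u };

	return executable ? modules : vmalloc;	/* NX mappings stay in vmalloc */
}
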
95721diff --git a/mm/vmstat.c b/mm/vmstat.c
95722index 7249614..2639fc7 100644
95723--- a/mm/vmstat.c
95724+++ b/mm/vmstat.c
95725@@ -20,6 +20,7 @@
95726 #include <linux/writeback.h>
95727 #include <linux/compaction.h>
95728 #include <linux/mm_inline.h>
95729+#include <linux/grsecurity.h>
95730
95731 #include "internal.h"
95732
95733@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
95734 *
95735 * vm_stat contains the global counters
95736 */
95737-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
95738+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
95739 EXPORT_SYMBOL(vm_stat);
95740
95741 #ifdef CONFIG_SMP
95742@@ -423,7 +424,7 @@ static inline void fold_diff(int *diff)
95743
95744 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
95745 if (diff[i])
95746- atomic_long_add(diff[i], &vm_stat[i]);
95747+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
95748 }
95749
95750 /*
95751@@ -455,7 +456,7 @@ static void refresh_cpu_vm_stats(void)
95752 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
95753 if (v) {
95754
95755- atomic_long_add(v, &zone->vm_stat[i]);
95756+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
95757 global_diff[i] += v;
95758 #ifdef CONFIG_NUMA
95759 /* 3 seconds idle till flush */
95760@@ -517,7 +518,7 @@ void cpu_vm_stats_fold(int cpu)
95761
95762 v = p->vm_stat_diff[i];
95763 p->vm_stat_diff[i] = 0;
95764- atomic_long_add(v, &zone->vm_stat[i]);
95765+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
95766 global_diff[i] += v;
95767 }
95768 }
95769@@ -537,8 +538,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
95770 if (pset->vm_stat_diff[i]) {
95771 int v = pset->vm_stat_diff[i];
95772 pset->vm_stat_diff[i] = 0;
95773- atomic_long_add(v, &zone->vm_stat[i]);
95774- atomic_long_add(v, &vm_stat[i]);
95775+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
95776+ atomic_long_add_unchecked(v, &vm_stat[i]);
95777 }
95778 }
95779 #endif
95780@@ -1148,10 +1149,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
95781 stat_items_size += sizeof(struct vm_event_state);
95782 #endif
95783
95784- v = kmalloc(stat_items_size, GFP_KERNEL);
95785+ v = kzalloc(stat_items_size, GFP_KERNEL);
95786 m->private = v;
95787 if (!v)
95788 return ERR_PTR(-ENOMEM);
95789+
95790+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95791+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
95792+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
95793+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
95794+ && !in_group_p(grsec_proc_gid)
95795+#endif
95796+ )
95797+ return (unsigned long *)m->private + *pos;
95798+#endif
95799+#endif
95800+
95801 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
95802 v[i] = global_page_state(i);
95803 v += NR_VM_ZONE_STAT_ITEMS;
95804@@ -1300,10 +1313,16 @@ static int __init setup_vmstat(void)
95805 put_online_cpus();
95806 #endif
95807 #ifdef CONFIG_PROC_FS
95808- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
95809- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
95810- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
95811- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
95812+ {
95813+ mode_t gr_mode = S_IRUGO;
95814+#ifdef CONFIG_GRKERNSEC_PROC_ADD
95815+ gr_mode = S_IRUSR;
95816+#endif
95817+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
95818+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
95819+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
95820+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
95821+ }
95822 #endif
95823 return 0;
95824 }
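
The mm/vmstat.c hunks do two things: the seq buffer is zero-initialised (kzalloc) and handed back early — still zeroed — to readers who fail the GRKERNSEC_PROC_ADD check, and buddyinfo/pagetypeinfo/zoneinfo become root-only while /proc/vmstat itself stays world-readable. The mode selection, modelled in one line:

#include <sys/stat.h>

static mode_t proc_mode(int restrict_proc)
{
	return restrict_proc ? S_IRUSR : (S_IRUSR | S_IRGRP | S_IROTH);
}
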
95825diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
95826index b3d17d1..e8e4cdd 100644
95827--- a/net/8021q/vlan.c
95828+++ b/net/8021q/vlan.c
95829@@ -472,7 +472,7 @@ out:
95830 return NOTIFY_DONE;
95831 }
95832
95833-static struct notifier_block vlan_notifier_block __read_mostly = {
95834+static struct notifier_block vlan_notifier_block = {
95835 .notifier_call = vlan_device_event,
95836 };
95837
95838@@ -547,8 +547,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
95839 err = -EPERM;
95840 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
95841 break;
95842- if ((args.u.name_type >= 0) &&
95843- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
95844+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
95845 struct vlan_net *vn;
95846
95847 vn = net_generic(net, vlan_net_id);
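
The vlan.c hunk drops a tautological test: name_type is an unsigned field of struct vlan_ioctl_args, so `>= 0` is always true and only the upper bound carries information. Compilers will say as much:

#include <stdio.h>

int main(void)
{
	unsigned int name_type = 0;

	/* gcc -Wextra (-Wtype-limits): "comparison of unsigned
	 * expression >= 0 is always true" */
	if (name_type >= 0)
		puts("always reached");
	return 0;
}
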
95848diff --git a/net/9p/client.c b/net/9p/client.c
95849index ee8fd6b..0469d50 100644
95850--- a/net/9p/client.c
95851+++ b/net/9p/client.c
95852@@ -588,7 +588,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
95853 len - inline_len);
95854 } else {
95855 err = copy_from_user(ename + inline_len,
95856- uidata, len - inline_len);
95857+ (char __force_user *)uidata, len - inline_len);
95858 if (err) {
95859 err = -EFAULT;
95860 goto out_err;
95861@@ -1563,7 +1563,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
95862 kernel_buf = 1;
95863 indata = data;
95864 } else
95865- indata = (__force char *)udata;
95866+ indata = (__force_kernel char *)udata;
95867 /*
95868 * response header len is 11
95869 * PDU Header(7) + IO Size (4)
95870@@ -1638,7 +1638,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
95871 kernel_buf = 1;
95872 odata = data;
95873 } else
95874- odata = (char *)udata;
95875+ odata = (char __force_kernel *)udata;
95876 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
95877 P9_ZC_HDR_SZ, kernel_buf, "dqd",
95878 fid->fid, offset, rsize);
95879diff --git a/net/9p/mod.c b/net/9p/mod.c
95880index 6ab36ae..6f1841b 100644
95881--- a/net/9p/mod.c
95882+++ b/net/9p/mod.c
95883@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
95884 void v9fs_register_trans(struct p9_trans_module *m)
95885 {
95886 spin_lock(&v9fs_trans_lock);
95887- list_add_tail(&m->list, &v9fs_trans_list);
95888+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
95889 spin_unlock(&v9fs_trans_lock);
95890 }
95891 EXPORT_SYMBOL(v9fs_register_trans);
95892@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
95893 void v9fs_unregister_trans(struct p9_trans_module *m)
95894 {
95895 spin_lock(&v9fs_trans_lock);
95896- list_del_init(&m->list);
95897+ pax_list_del_init((struct list_head *)&m->list);
95898 spin_unlock(&v9fs_trans_lock);
95899 }
95900 EXPORT_SYMBOL(v9fs_unregister_trans);
95901diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
95902index 9321a77..ed2f256 100644
95903--- a/net/9p/trans_fd.c
95904+++ b/net/9p/trans_fd.c
95905@@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
95906 oldfs = get_fs();
95907 set_fs(get_ds());
95908 /* The cast to a user pointer is valid due to the set_fs() */
95909- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
95910+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
95911 set_fs(oldfs);
95912
95913 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
95914diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
95915index 876fbe8..8bbea9f 100644
95916--- a/net/atm/atm_misc.c
95917+++ b/net/atm/atm_misc.c
95918@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
95919 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
95920 return 1;
95921 atm_return(vcc, truesize);
95922- atomic_inc(&vcc->stats->rx_drop);
95923+ atomic_inc_unchecked(&vcc->stats->rx_drop);
95924 return 0;
95925 }
95926 EXPORT_SYMBOL(atm_charge);
95927@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
95928 }
95929 }
95930 atm_return(vcc, guess);
95931- atomic_inc(&vcc->stats->rx_drop);
95932+ atomic_inc_unchecked(&vcc->stats->rx_drop);
95933 return NULL;
95934 }
95935 EXPORT_SYMBOL(atm_alloc_charge);
95936@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
95937
95938 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
95939 {
95940-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
95941+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
95942 __SONET_ITEMS
95943 #undef __HANDLE_ITEM
95944 }
95945@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
95946
95947 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
95948 {
95949-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
95950+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
95951 __SONET_ITEMS
95952 #undef __HANDLE_ITEM
95953 }
95954diff --git a/net/atm/lec.c b/net/atm/lec.c
95955index f23916b..dd4d26b 100644
95956--- a/net/atm/lec.c
95957+++ b/net/atm/lec.c
95958@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
95959 }
95960
95961 static struct lane2_ops lane2_ops = {
95962- lane2_resolve, /* resolve, spec 3.1.3 */
95963- lane2_associate_req, /* associate_req, spec 3.1.4 */
95964- NULL /* associate indicator, spec 3.1.5 */
95965+ .resolve = lane2_resolve,
95966+ .associate_req = lane2_associate_req,
95967+ .associate_indicator = NULL
95968 };
95969
95970 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
95971diff --git a/net/atm/lec.h b/net/atm/lec.h
95972index 4149db1..f2ab682 100644
95973--- a/net/atm/lec.h
95974+++ b/net/atm/lec.h
95975@@ -48,7 +48,7 @@ struct lane2_ops {
95976 const u8 *tlvs, u32 sizeoftlvs);
95977 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
95978 const u8 *tlvs, u32 sizeoftlvs);
95979-};
95980+} __no_const;
95981
95982 /*
95983 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
95984diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
95985index d1b2d9a..d549f7f 100644
95986--- a/net/atm/mpoa_caches.c
95987+++ b/net/atm/mpoa_caches.c
95988@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
95989
95990
95991 static struct in_cache_ops ingress_ops = {
95992- in_cache_add_entry, /* add_entry */
95993- in_cache_get, /* get */
95994- in_cache_get_with_mask, /* get_with_mask */
95995- in_cache_get_by_vcc, /* get_by_vcc */
95996- in_cache_put, /* put */
95997- in_cache_remove_entry, /* remove_entry */
95998- cache_hit, /* cache_hit */
95999- clear_count_and_expired, /* clear_count */
96000- check_resolving_entries, /* check_resolving */
96001- refresh_entries, /* refresh */
96002- in_destroy_cache /* destroy_cache */
96003+ .add_entry = in_cache_add_entry,
96004+ .get = in_cache_get,
96005+ .get_with_mask = in_cache_get_with_mask,
96006+ .get_by_vcc = in_cache_get_by_vcc,
96007+ .put = in_cache_put,
96008+ .remove_entry = in_cache_remove_entry,
96009+ .cache_hit = cache_hit,
96010+ .clear_count = clear_count_and_expired,
96011+ .check_resolving = check_resolving_entries,
96012+ .refresh = refresh_entries,
96013+ .destroy_cache = in_destroy_cache
96014 };
96015
96016 static struct eg_cache_ops egress_ops = {
96017- eg_cache_add_entry, /* add_entry */
96018- eg_cache_get_by_cache_id, /* get_by_cache_id */
96019- eg_cache_get_by_tag, /* get_by_tag */
96020- eg_cache_get_by_vcc, /* get_by_vcc */
96021- eg_cache_get_by_src_ip, /* get_by_src_ip */
96022- eg_cache_put, /* put */
96023- eg_cache_remove_entry, /* remove_entry */
96024- update_eg_cache_entry, /* update */
96025- clear_expired, /* clear_expired */
96026- eg_destroy_cache /* destroy_cache */
96027+ .add_entry = eg_cache_add_entry,
96028+ .get_by_cache_id = eg_cache_get_by_cache_id,
96029+ .get_by_tag = eg_cache_get_by_tag,
96030+ .get_by_vcc = eg_cache_get_by_vcc,
96031+ .get_by_src_ip = eg_cache_get_by_src_ip,
96032+ .put = eg_cache_put,
96033+ .remove_entry = eg_cache_remove_entry,
96034+ .update = update_eg_cache_entry,
96035+ .clear_expired = clear_expired,
96036+ .destroy_cache = eg_destroy_cache
96037 };
96038
96039
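
The two mpoa_caches.c hunks above (and the lec.c one before them) convert positional ops-table initializers to C99 designated initializers, which bind by name and so stay correct if the struct layout ever changes. Standalone illustration:

struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int do_open(void)  { return 0; }
static int do_close(void) { return 0; }

/* positional: silently misbinds if struct members are reordered */
static struct ops positional = { do_open, do_close };

/* designated: binds by field name, independent of declaration order */
static struct ops designated = { .open = do_open, .close = do_close };
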
96040diff --git a/net/atm/proc.c b/net/atm/proc.c
96041index bbb6461..cf04016 100644
96042--- a/net/atm/proc.c
96043+++ b/net/atm/proc.c
96044@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
96045 const struct k_atm_aal_stats *stats)
96046 {
96047 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
96048- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
96049- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
96050- atomic_read(&stats->rx_drop));
96051+ atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
96052+ atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
96053+ atomic_read_unchecked(&stats->rx_drop));
96054 }
96055
96056 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
96057diff --git a/net/atm/resources.c b/net/atm/resources.c
96058index 0447d5d..3cf4728 100644
96059--- a/net/atm/resources.c
96060+++ b/net/atm/resources.c
96061@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
96062 static void copy_aal_stats(struct k_atm_aal_stats *from,
96063 struct atm_aal_stats *to)
96064 {
96065-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
96066+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
96067 __AAL_STAT_ITEMS
96068 #undef __HANDLE_ITEM
96069 }
96070@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
96071 static void subtract_aal_stats(struct k_atm_aal_stats *from,
96072 struct atm_aal_stats *to)
96073 {
96074-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
96075+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
96076 __AAL_STAT_ITEMS
96077 #undef __HANDLE_ITEM
96078 }
96079diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
96080index 919a5ce..cc6b444 100644
96081--- a/net/ax25/sysctl_net_ax25.c
96082+++ b/net/ax25/sysctl_net_ax25.c
96083@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
96084 {
96085 char path[sizeof("net/ax25/") + IFNAMSIZ];
96086 int k;
96087- struct ctl_table *table;
96088+ ctl_table_no_const *table;
96089
96090 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
96091 if (!table)
96092diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
96093index f7270b9..cd0d879 100644
96094--- a/net/batman-adv/bat_iv_ogm.c
96095+++ b/net/batman-adv/bat_iv_ogm.c
96096@@ -307,7 +307,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
96097
96098 /* randomize initial seqno to avoid collision */
96099 get_random_bytes(&random_seqno, sizeof(random_seqno));
96100- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
96101+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
96102
96103 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
96104 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
96105@@ -894,9 +894,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
96106 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
96107
96108 /* change sequence number to network order */
96109- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
96110+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
96111 batadv_ogm_packet->seqno = htonl(seqno);
96112- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
96113+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
96114
96115 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
96116 batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
96117@@ -1261,7 +1261,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
96118 return;
96119
96120 /* could be changed by schedule_own_packet() */
96121- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
96122+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
96123
96124 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
96125 has_directlink_flag = 1;
96126diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
96127index 6ddb614..ca7e886 100644
96128--- a/net/batman-adv/fragmentation.c
96129+++ b/net/batman-adv/fragmentation.c
96130@@ -447,7 +447,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
96131 frag_header.packet_type = BATADV_UNICAST_FRAG;
96132 frag_header.version = BATADV_COMPAT_VERSION;
96133 frag_header.ttl = BATADV_TTL;
96134- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
96135+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
96136 frag_header.reserved = 0;
96137 frag_header.no = 0;
96138 frag_header.total_size = htons(skb->len);
96139diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
96140index a8f99d1..11797ef 100644
96141--- a/net/batman-adv/soft-interface.c
96142+++ b/net/batman-adv/soft-interface.c
96143@@ -278,7 +278,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
96144 primary_if->net_dev->dev_addr, ETH_ALEN);
96145
96146 /* set broadcast sequence number */
96147- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
96148+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
96149 bcast_packet->seqno = htonl(seqno);
96150
96151 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
96152@@ -688,7 +688,7 @@ static int batadv_softif_init_late(struct net_device *dev)
96153 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
96154
96155 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
96156- atomic_set(&bat_priv->bcast_seqno, 1);
96157+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
96158 atomic_set(&bat_priv->tt.vn, 0);
96159 atomic_set(&bat_priv->tt.local_changes, 0);
96160 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
96161@@ -700,7 +700,7 @@ static int batadv_softif_init_late(struct net_device *dev)
96162
96163 /* randomize initial seqno to avoid collision */
96164 get_random_bytes(&random_seqno, sizeof(random_seqno));
96165- atomic_set(&bat_priv->frag_seqno, random_seqno);
96166+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
96167
96168 bat_priv->primary_if = NULL;
96169 bat_priv->num_ifaces = 0;
96170diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
96171index 91dd369..9c25750 100644
96172--- a/net/batman-adv/types.h
96173+++ b/net/batman-adv/types.h
96174@@ -56,7 +56,7 @@
96175 struct batadv_hard_iface_bat_iv {
96176 unsigned char *ogm_buff;
96177 int ogm_buff_len;
96178- atomic_t ogm_seqno;
96179+ atomic_unchecked_t ogm_seqno;
96180 };
96181
96182 /**
96183@@ -673,7 +673,7 @@ struct batadv_priv {
96184 atomic_t bonding;
96185 atomic_t fragmentation;
96186 atomic_t packet_size_max;
96187- atomic_t frag_seqno;
96188+ atomic_unchecked_t frag_seqno;
96189 #ifdef CONFIG_BATMAN_ADV_BLA
96190 atomic_t bridge_loop_avoidance;
96191 #endif
96192@@ -687,7 +687,7 @@ struct batadv_priv {
96193 #ifdef CONFIG_BATMAN_ADV_DEBUG
96194 atomic_t log_level;
96195 #endif
96196- atomic_t bcast_seqno;
96197+ atomic_unchecked_t bcast_seqno;
96198 atomic_t bcast_queue_left;
96199 atomic_t batman_queue_left;
96200 char num_ifaces;
96201diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
96202index 7552f9e..074ce29 100644
96203--- a/net/bluetooth/hci_sock.c
96204+++ b/net/bluetooth/hci_sock.c
96205@@ -1052,7 +1052,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
96206 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
96207 }
96208
96209- len = min_t(unsigned int, len, sizeof(uf));
96210+ len = min((size_t)len, sizeof(uf));
96211 if (copy_from_user(&uf, optval, len)) {
96212 err = -EFAULT;
96213 break;
96214diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
96215index 4af3821..f2ba46c 100644
96216--- a/net/bluetooth/l2cap_core.c
96217+++ b/net/bluetooth/l2cap_core.c
96218@@ -3500,8 +3500,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
96219 break;
96220
96221 case L2CAP_CONF_RFC:
96222- if (olen == sizeof(rfc))
96223- memcpy(&rfc, (void *)val, olen);
96224+ if (olen != sizeof(rfc))
96225+ break;
96226+
96227+ memcpy(&rfc, (void *)val, olen);
96228
96229 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
96230 rfc.mode != chan->mode)
96231diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
96232index 7cc24d2..e83f531 100644
96233--- a/net/bluetooth/l2cap_sock.c
96234+++ b/net/bluetooth/l2cap_sock.c
96235@@ -545,7 +545,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
96236 struct sock *sk = sock->sk;
96237 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
96238 struct l2cap_options opts;
96239- int len, err = 0;
96240+ int err = 0;
96241+ size_t len = optlen;
96242 u32 opt;
96243
96244 BT_DBG("sk %p", sk);
96245@@ -567,7 +568,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
96246 opts.max_tx = chan->max_tx;
96247 opts.txwin_size = chan->tx_win;
96248
96249- len = min_t(unsigned int, sizeof(opts), optlen);
96250+ len = min(sizeof(opts), len);
96251 if (copy_from_user((char *) &opts, optval, len)) {
96252 err = -EFAULT;
96253 break;
96254@@ -647,7 +648,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
96255 struct bt_security sec;
96256 struct bt_power pwr;
96257 struct l2cap_conn *conn;
96258- int len, err = 0;
96259+ int err = 0;
96260+ size_t len = optlen;
96261 u32 opt;
96262
96263 BT_DBG("sk %p", sk);
96264@@ -670,7 +672,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
96265
96266 sec.level = BT_SECURITY_LOW;
96267
96268- len = min_t(unsigned int, sizeof(sec), optlen);
96269+ len = min(sizeof(sec), len);
96270 if (copy_from_user((char *) &sec, optval, len)) {
96271 err = -EFAULT;
96272 break;
96273@@ -770,7 +772,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
96274
96275 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
96276
96277- len = min_t(unsigned int, sizeof(pwr), optlen);
96278+ len = min(sizeof(pwr), len);
96279 if (copy_from_user((char *) &pwr, optval, len)) {
96280 err = -EFAULT;
96281 break;
96282diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
96283index 3c2d3e4..884855a 100644
96284--- a/net/bluetooth/rfcomm/sock.c
96285+++ b/net/bluetooth/rfcomm/sock.c
96286@@ -672,7 +672,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
96287 struct sock *sk = sock->sk;
96288 struct bt_security sec;
96289 int err = 0;
96290- size_t len;
96291+ size_t len = optlen;
96292 u32 opt;
96293
96294 BT_DBG("sk %p", sk);
96295@@ -694,7 +694,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
96296
96297 sec.level = BT_SECURITY_LOW;
96298
96299- len = min_t(unsigned int, sizeof(sec), optlen);
96300+ len = min(sizeof(sec), len);
96301 if (copy_from_user((char *) &sec, optval, len)) {
96302 err = -EFAULT;
96303 break;
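
The bluetooth setsockopt hunks above replace `int len` plus min_t(unsigned int, ...) with a single size_t clamped by min(), so every length comparison happens in one unsigned type. A small demo of why that discipline matters when a sign-confused length arrives:

#include <stdio.h>

static size_t clamp_len(size_t want, size_t max)
{
	return want < max ? want : max;
}

int main(void)
{
	int optlen = -1;		/* hostile, sign-confused length */
	size_t len = (size_t)optlen;	/* becomes SIZE_MAX, not -1 */

	printf("%zu\n", clamp_len(len, 16));	/* prints 16: clamp holds */
	return 0;
}
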
96304diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
96305index 84fcf9f..e389b27 100644
96306--- a/net/bluetooth/rfcomm/tty.c
96307+++ b/net/bluetooth/rfcomm/tty.c
96308@@ -684,7 +684,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
96309 BT_DBG("tty %p id %d", tty, tty->index);
96310
96311 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
96312- dev->channel, dev->port.count);
96313+ dev->channel, atomic_read(&dev->port.count));
96314
96315 err = tty_port_open(&dev->port, tty, filp);
96316 if (err)
96317@@ -707,7 +707,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
96318 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
96319
96320 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
96321- dev->port.count);
96322+ atomic_read(&dev->port.count));
96323
96324 tty_port_close(&dev->port, tty, filp);
96325 }
96326diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
96327index ac78024..161a80c 100644
96328--- a/net/bridge/netfilter/ebtables.c
96329+++ b/net/bridge/netfilter/ebtables.c
96330@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
96331 tmp.valid_hooks = t->table->valid_hooks;
96332 }
96333 mutex_unlock(&ebt_mutex);
96334- if (copy_to_user(user, &tmp, *len) != 0){
96335+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
96336 BUGPRINT("c2u Didn't work\n");
96337 ret = -EFAULT;
96338 break;
96339@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
96340 goto out;
96341 tmp.valid_hooks = t->valid_hooks;
96342
96343- if (copy_to_user(user, &tmp, *len) != 0) {
96344+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
96345 ret = -EFAULT;
96346 break;
96347 }
96348@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
96349 tmp.entries_size = t->table->entries_size;
96350 tmp.valid_hooks = t->table->valid_hooks;
96351
96352- if (copy_to_user(user, &tmp, *len) != 0) {
96353+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
96354 ret = -EFAULT;
96355 break;
96356 }
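
The ebtables.c hunks bound the user-supplied *len against sizeof(tmp) before copy_to_user(), so the reply copy can never read past the on-stack structure (a kernel stack infoleak otherwise). Userspace model of the guard, with memcpy() standing in for copy_to_user() and the struct layout invented for illustration:

#include <errno.h>
#include <string.h>

struct reply { char name[32]; unsigned int entries_size; };

static int send_reply(void *ubuf, size_t ulen, const struct reply *r)
{
	if (ulen > sizeof(*r))
		return -EFAULT;		/* refuse to over-read the stack */
	memcpy(ubuf, r, ulen);
	return 0;
}
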
96357diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
96358index 0f45522..dab651f 100644
96359--- a/net/caif/cfctrl.c
96360+++ b/net/caif/cfctrl.c
96361@@ -10,6 +10,7 @@
96362 #include <linux/spinlock.h>
96363 #include <linux/slab.h>
96364 #include <linux/pkt_sched.h>
96365+#include <linux/sched.h>
96366 #include <net/caif/caif_layer.h>
96367 #include <net/caif/cfpkt.h>
96368 #include <net/caif/cfctrl.h>
96369@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
96370 memset(&dev_info, 0, sizeof(dev_info));
96371 dev_info.id = 0xff;
96372 cfsrvl_init(&this->serv, 0, &dev_info, false);
96373- atomic_set(&this->req_seq_no, 1);
96374- atomic_set(&this->rsp_seq_no, 1);
96375+ atomic_set_unchecked(&this->req_seq_no, 1);
96376+ atomic_set_unchecked(&this->rsp_seq_no, 1);
96377 this->serv.layer.receive = cfctrl_recv;
96378 sprintf(this->serv.layer.name, "ctrl");
96379 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
96380@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
96381 struct cfctrl_request_info *req)
96382 {
96383 spin_lock_bh(&ctrl->info_list_lock);
96384- atomic_inc(&ctrl->req_seq_no);
96385- req->sequence_no = atomic_read(&ctrl->req_seq_no);
96386+ atomic_inc_unchecked(&ctrl->req_seq_no);
96387+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
96388 list_add_tail(&req->list, &ctrl->list);
96389 spin_unlock_bh(&ctrl->info_list_lock);
96390 }
96391@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
96392 if (p != first)
96393 pr_warn("Requests are not received in order\n");
96394
96395- atomic_set(&ctrl->rsp_seq_no,
96396+ atomic_set_unchecked(&ctrl->rsp_seq_no,
96397 p->sequence_no);
96398 list_del(&p->list);
96399 goto out;
96400diff --git a/net/can/af_can.c b/net/can/af_can.c
96401index a27f8aa..67174a3 100644
96402--- a/net/can/af_can.c
96403+++ b/net/can/af_can.c
96404@@ -863,7 +863,7 @@ static const struct net_proto_family can_family_ops = {
96405 };
96406
96407 /* notifier block for netdevice event */
96408-static struct notifier_block can_netdev_notifier __read_mostly = {
96409+static struct notifier_block can_netdev_notifier = {
96410 .notifier_call = can_notifier,
96411 };
96412
96413diff --git a/net/can/gw.c b/net/can/gw.c
96414index 3f9b0f3..fc6d4fa 100644
96415--- a/net/can/gw.c
96416+++ b/net/can/gw.c
96417@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
96418 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
96419
96420 static HLIST_HEAD(cgw_list);
96421-static struct notifier_block notifier;
96422
96423 static struct kmem_cache *cgw_cache __read_mostly;
96424
96425@@ -954,6 +953,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
96426 return err;
96427 }
96428
96429+static struct notifier_block notifier = {
96430+ .notifier_call = cgw_notifier
96431+};
96432+
96433 static __init int cgw_module_init(void)
96434 {
96435 /* sanitize given module parameter */
96436@@ -969,7 +972,6 @@ static __init int cgw_module_init(void)
96437 return -ENOMEM;
96438
96439 /* set notifier */
96440- notifier.notifier_call = cgw_notifier;
96441 register_netdevice_notifier(&notifier);
96442
96443 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
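
The can/gw.c hunk replaces a runtime `notifier.notifier_call = cgw_notifier` assignment with a compile-time designated initializer; structures initialised this way never need a runtime write, which is what allows similar notifier_blocks to be constified elsewhere in this patch. Minimal illustration (names hypothetical):

struct notifier { int (*call)(void *); };

static int cgw_event(void *p) { (void)p; return 0; }

/* before: a writable global written once at module init */
/* after: initialised at compile time, never written at runtime */
static struct notifier cgw_notifier_blk = { .call = cgw_event };
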
96444diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
96445index 4a5df7b..9ad1f1d 100644
96446--- a/net/ceph/messenger.c
96447+++ b/net/ceph/messenger.c
96448@@ -186,7 +186,7 @@ static void con_fault(struct ceph_connection *con);
96449 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
96450
96451 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
96452-static atomic_t addr_str_seq = ATOMIC_INIT(0);
96453+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
96454
96455 static struct page *zero_page; /* used in certain error cases */
96456
96457@@ -197,7 +197,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
96458 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
96459 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
96460
96461- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
96462+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
96463 s = addr_str[i];
96464
96465 switch (ss->ss_family) {
96466diff --git a/net/compat.c b/net/compat.c
96467index f50161f..94fa415 100644
96468--- a/net/compat.c
96469+++ b/net/compat.c
96470@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
96471 return -EFAULT;
96472 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
96473 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
96474- kmsg->msg_name = compat_ptr(tmp1);
96475- kmsg->msg_iov = compat_ptr(tmp2);
96476- kmsg->msg_control = compat_ptr(tmp3);
96477+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
96478+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
96479+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
96480 return 0;
96481 }
96482
96483@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
96484
96485 if (kern_msg->msg_namelen) {
96486 if (mode == VERIFY_READ) {
96487- int err = move_addr_to_kernel(kern_msg->msg_name,
96488+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
96489 kern_msg->msg_namelen,
96490 kern_address);
96491 if (err < 0)
96492@@ -99,7 +99,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
96493 kern_msg->msg_name = NULL;
96494
96495 tot_len = iov_from_user_compat_to_kern(kern_iov,
96496- (struct compat_iovec __user *)kern_msg->msg_iov,
96497+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
96498 kern_msg->msg_iovlen);
96499 if (tot_len >= 0)
96500 kern_msg->msg_iov = kern_iov;
96501@@ -119,20 +119,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
96502
96503 #define CMSG_COMPAT_FIRSTHDR(msg) \
96504 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
96505- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
96506+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
96507 (struct compat_cmsghdr __user *)NULL)
96508
96509 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
96510 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
96511 (ucmlen) <= (unsigned long) \
96512 ((mhdr)->msg_controllen - \
96513- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
96514+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
96515
96516 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
96517 struct compat_cmsghdr __user *cmsg, int cmsg_len)
96518 {
96519 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
96520- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
96521+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
96522 msg->msg_controllen)
96523 return NULL;
96524 return (struct compat_cmsghdr __user *)ptr;
96525@@ -222,7 +222,7 @@ Efault:
96526
96527 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
96528 {
96529- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
96530+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
96531 struct compat_cmsghdr cmhdr;
96532 struct compat_timeval ctv;
96533 struct compat_timespec cts[3];
96534@@ -278,7 +278,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
96535
96536 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
96537 {
96538- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
96539+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
96540 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
96541 int fdnum = scm->fp->count;
96542 struct file **fp = scm->fp->fp;
96543@@ -366,7 +366,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
96544 return -EFAULT;
96545 old_fs = get_fs();
96546 set_fs(KERNEL_DS);
96547- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
96548+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
96549 set_fs(old_fs);
96550
96551 return err;
96552@@ -427,7 +427,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
96553 len = sizeof(ktime);
96554 old_fs = get_fs();
96555 set_fs(KERNEL_DS);
96556- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
96557+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
96558 set_fs(old_fs);
96559
96560 if (!err) {
96561@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
96562 case MCAST_JOIN_GROUP:
96563 case MCAST_LEAVE_GROUP:
96564 {
96565- struct compat_group_req __user *gr32 = (void *)optval;
96566+ struct compat_group_req __user *gr32 = (void __user *)optval;
96567 struct group_req __user *kgr =
96568 compat_alloc_user_space(sizeof(struct group_req));
96569 u32 interface;
96570@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
96571 case MCAST_BLOCK_SOURCE:
96572 case MCAST_UNBLOCK_SOURCE:
96573 {
96574- struct compat_group_source_req __user *gsr32 = (void *)optval;
96575+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
96576 struct group_source_req __user *kgsr = compat_alloc_user_space(
96577 sizeof(struct group_source_req));
96578 u32 interface;
96579@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
96580 }
96581 case MCAST_MSFILTER:
96582 {
96583- struct compat_group_filter __user *gf32 = (void *)optval;
96584+ struct compat_group_filter __user *gf32 = (void __user *)optval;
96585 struct group_filter __user *kgf;
96586 u32 interface, fmode, numsrc;
96587
96588@@ -650,7 +650,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
96589 char __user *optval, int __user *optlen,
96590 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
96591 {
96592- struct compat_group_filter __user *gf32 = (void *)optval;
96593+ struct compat_group_filter __user *gf32 = (void __user *)optval;
96594 struct group_filter __user *kgf;
96595 int __user *koptlen;
96596 u32 interface, fmode, numsrc;
96597@@ -803,7 +803,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
96598
96599 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
96600 return -EINVAL;
96601- if (copy_from_user(a, args, nas[call]))
96602+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
96603 return -EFAULT;
96604 a0 = a[0];
96605 a1 = a[1];
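
compat_sys_socketcall() above gains a `nas[call] > sizeof a` check before copying the argument block, converting a potential overwrite of the on-stack array into a clean -EFAULT should the per-call size table ever disagree with the buffer. Userspace model, memcpy() in place of copy_from_user():

#include <errno.h>
#include <string.h>

static int fetch_args(unsigned long *a, size_t a_size,
		      const void *uargs, size_t want)
{
	if (want > a_size)
		return -EFAULT;		/* never write past the array */
	memcpy(a, uargs, want);
	return 0;
}
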
96606diff --git a/net/core/datagram.c b/net/core/datagram.c
96607index a16ed7b..eb44d17 100644
96608--- a/net/core/datagram.c
96609+++ b/net/core/datagram.c
96610@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
96611 }
96612
96613 kfree_skb(skb);
96614- atomic_inc(&sk->sk_drops);
96615+ atomic_inc_unchecked(&sk->sk_drops);
96616 sk_mem_reclaim_partial(sk);
96617
96618 return err;
96619diff --git a/net/core/dev.c b/net/core/dev.c
96620index 616eccf..31832d38 100644
96621--- a/net/core/dev.c
96622+++ b/net/core/dev.c
96623@@ -1684,14 +1684,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
96624 {
96625 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
96626 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
96627- atomic_long_inc(&dev->rx_dropped);
96628+ atomic_long_inc_unchecked(&dev->rx_dropped);
96629 kfree_skb(skb);
96630 return NET_RX_DROP;
96631 }
96632 }
96633
96634 if (unlikely(!is_skb_forwardable(dev, skb))) {
96635- atomic_long_inc(&dev->rx_dropped);
96636+ atomic_long_inc_unchecked(&dev->rx_dropped);
96637 kfree_skb(skb);
96638 return NET_RX_DROP;
96639 }
96640@@ -2434,7 +2434,7 @@ static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
96641
96642 struct dev_gso_cb {
96643 void (*destructor)(struct sk_buff *skb);
96644-};
96645+} __no_const;
96646
96647 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
96648
96649@@ -3224,7 +3224,7 @@ enqueue:
96650
96651 local_irq_restore(flags);
96652
96653- atomic_long_inc(&skb->dev->rx_dropped);
96654+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
96655 kfree_skb(skb);
96656 return NET_RX_DROP;
96657 }
96658@@ -3296,7 +3296,7 @@ int netif_rx_ni(struct sk_buff *skb)
96659 }
96660 EXPORT_SYMBOL(netif_rx_ni);
96661
96662-static void net_tx_action(struct softirq_action *h)
96663+static __latent_entropy void net_tx_action(void)
96664 {
96665 struct softnet_data *sd = &__get_cpu_var(softnet_data);
96666
96667@@ -3630,7 +3630,7 @@ ncls:
96668 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
96669 } else {
96670 drop:
96671- atomic_long_inc(&skb->dev->rx_dropped);
96672+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
96673 kfree_skb(skb);
96674 /* Jamal, now you will not able to escape explaining
96675 * me how you were going to use this. :-)
96676@@ -4290,7 +4290,7 @@ void netif_napi_del(struct napi_struct *napi)
96677 }
96678 EXPORT_SYMBOL(netif_napi_del);
96679
96680-static void net_rx_action(struct softirq_action *h)
96681+static __latent_entropy void net_rx_action(void)
96682 {
96683 struct softnet_data *sd = &__get_cpu_var(softnet_data);
96684 unsigned long time_limit = jiffies + 2;
96685@@ -6179,7 +6179,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
96686 } else {
96687 netdev_stats_to_stats64(storage, &dev->stats);
96688 }
96689- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
96690+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
96691 return storage;
96692 }
96693 EXPORT_SYMBOL(dev_get_stats);
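
net_rx_action()/net_tx_action() above are retyped and tagged __latent_entropy: a GCC plugin instruments such hot, interrupt-driven functions so each run stirs cheap state into an entropy pool. A hand-written approximation of the effect — not the plugin's actual codegen:

#include <stdint.h>

static uint64_t latent_entropy_pool;

static void stir_latent_entropy(uint64_t local_state)
{
	/* cheap, non-cryptographic accumulation on every hot-path pass */
	latent_entropy_pool ^= local_state * 0x9e3779b97f4a7c15ull;
}

The rx_dropped counters in the same file move to atomic_long_*_unchecked for the PAX_REFCOUNT reasons noted after the mm/sparse.c hunk.
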
96694diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
96695index 5b7d0e1..cb960fc 100644
96696--- a/net/core/dev_ioctl.c
96697+++ b/net/core/dev_ioctl.c
96698@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
96699 if (no_module && capable(CAP_NET_ADMIN))
96700 no_module = request_module("netdev-%s", name);
96701 if (no_module && capable(CAP_SYS_MODULE)) {
96702+#ifdef CONFIG_GRKERNSEC_MODHARDEN
96703+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
96704+#else
96705 if (!request_module("%s", name))
96706 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
96707 name);
96708+#endif
96709 }
96710 }
96711 EXPORT_SYMBOL(dev_load);
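The dev_load() hunk routes the CAP_SYS_MODULE fallback through ___request_module() with a "grsec_modharden_netdev" tag, so MODHARDEN policy can log or refuse module auto-loading on a user-supplied interface name. A hypothetical sketch of such a gate (the names and policy below are invented for illustration, not the grsecurity API):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* stub standing in for the kernel's request_module() */
static int request_module_stub(const char *name)
{
	printf("would load module %s\n", name);
	return 0;
}

/* hypothetical policy: only allow names carrying the netdev- alias prefix */
static int modharden_denies(const char *name)
{
	return strncmp(name, "netdev-", 7) != 0;
}

static int request_module_gated(const char *name)
{
	if (modharden_denies(name))
		return -EPERM;
	return request_module_stub(name);
}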
96712diff --git a/net/core/filter.c b/net/core/filter.c
96713index ad30d62..c2757df 100644
96714--- a/net/core/filter.c
96715+++ b/net/core/filter.c
96716@@ -679,7 +679,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
96717 fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
96718 if (!fp)
96719 return -ENOMEM;
96720- memcpy(fp->insns, fprog->filter, fsize);
96721+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
96722
96723 atomic_set(&fp->refcnt, 1);
96724 fp->len = fprog->len;
96725diff --git a/net/core/flow.c b/net/core/flow.c
96726index dfa602c..3103d88 100644
96727--- a/net/core/flow.c
96728+++ b/net/core/flow.c
96729@@ -61,7 +61,7 @@ struct flow_cache {
96730 struct timer_list rnd_timer;
96731 };
96732
96733-atomic_t flow_cache_genid = ATOMIC_INIT(0);
96734+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
96735 EXPORT_SYMBOL(flow_cache_genid);
96736 static struct flow_cache flow_cache_global;
96737 static struct kmem_cache *flow_cachep __read_mostly;
96738@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
96739
96740 static int flow_entry_valid(struct flow_cache_entry *fle)
96741 {
96742- if (atomic_read(&flow_cache_genid) != fle->genid)
96743+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
96744 return 0;
96745 if (fle->object && !fle->object->ops->check(fle->object))
96746 return 0;
96747@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
96748 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
96749 fcp->hash_count++;
96750 }
96751- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
96752+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
96753 flo = fle->object;
96754 if (!flo)
96755 goto ret_object;
96756@@ -279,7 +279,7 @@ nocache:
96757 }
96758 flo = resolver(net, key, family, dir, flo, ctx);
96759 if (fle) {
96760- fle->genid = atomic_read(&flow_cache_genid);
96761+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
96762 if (!IS_ERR(flo))
96763 fle->object = flo;
96764 else
96765diff --git a/net/core/iovec.c b/net/core/iovec.c
96766index b618694..192bbba 100644
96767--- a/net/core/iovec.c
96768+++ b/net/core/iovec.c
96769@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
96770 if (m->msg_namelen) {
96771 if (mode == VERIFY_READ) {
96772 void __user *namep;
96773- namep = (void __user __force *) m->msg_name;
96774+ namep = (void __force_user *) m->msg_name;
96775 err = move_addr_to_kernel(namep, m->msg_namelen,
96776 address);
96777 if (err < 0)
96778@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
96779 }
96780
96781 size = m->msg_iovlen * sizeof(struct iovec);
96782- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
96783+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
96784 return -EFAULT;
96785
96786 m->msg_iov = iov;
96787diff --git a/net/core/neighbour.c b/net/core/neighbour.c
96788index 43128dd..e4d4311 100644
96789--- a/net/core/neighbour.c
96790+++ b/net/core/neighbour.c
96791@@ -2775,7 +2775,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
96792 void __user *buffer, size_t *lenp, loff_t *ppos)
96793 {
96794 int size, ret;
96795- struct ctl_table tmp = *ctl;
96796+ ctl_table_no_const tmp = *ctl;
96797
96798 tmp.extra1 = &zero;
96799 tmp.extra2 = &unres_qlen_max;
96800@@ -2983,11 +2983,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
96801 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
96802 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
96803 } else {
96804+ struct neigh_table *ntable = container_of(p, struct neigh_table, parms);
96805 dev_name_source = "default";
96806- t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
96807- t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
96808- t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
96809- t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
96810+ t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &ntable->gc_interval;
96811+ t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &ntable->gc_thresh1;
96812+ t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &ntable->gc_thresh2;
96813+ t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &ntable->gc_thresh3;
96814 }
96815
96816
96817diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
96818index 2bf8329..7960607 100644
96819--- a/net/core/net-procfs.c
96820+++ b/net/core/net-procfs.c
96821@@ -283,8 +283,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
96822 else
96823 seq_printf(seq, "%04x", ntohs(pt->type));
96824
96825+#ifdef CONFIG_GRKERNSEC_HIDESYM
96826+ seq_printf(seq, " %-8s %pf\n",
96827+ pt->dev ? pt->dev->name : "", NULL);
96828+#else
96829 seq_printf(seq, " %-8s %pf\n",
96830 pt->dev ? pt->dev->name : "", pt->func);
96831+#endif
96832 }
96833
96834 return 0;
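Under GRKERNSEC_HIDESYM the %pf argument above is replaced with NULL, so /proc/net/ptype no longer discloses kernel text addresses via symbolized handler pointers. A small sketch of the same conditional disclosure (illustrative; hide_symbols stands in for the config option):

#include <stdio.h>

static void show_handler(FILE *f, const char *dev, void (*func)(void), int hide_symbols)
{
	/* print the slot, but never the real address when hiding symbols */
	fprintf(f, " %-8s %p\n", dev, hide_symbols ? NULL : (void *)func);
}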
96835diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
96836index f3edf96..3cd8b40 100644
96837--- a/net/core/net-sysfs.c
96838+++ b/net/core/net-sysfs.c
96839@@ -1358,7 +1358,7 @@ void netdev_class_remove_file_ns(struct class_attribute *class_attr,
96840 }
96841 EXPORT_SYMBOL(netdev_class_remove_file_ns);
96842
96843-int netdev_kobject_init(void)
96844+int __init netdev_kobject_init(void)
96845 {
96846 kobj_ns_type_register(&net_ns_type_operations);
96847 return class_register(&net_class);
96848diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
96849index 81d3a9a..a0bd7a8 100644
96850--- a/net/core/net_namespace.c
96851+++ b/net/core/net_namespace.c
96852@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
96853 int error;
96854 LIST_HEAD(net_exit_list);
96855
96856- list_add_tail(&ops->list, list);
96857+ pax_list_add_tail((struct list_head *)&ops->list, list);
96858 if (ops->init || (ops->id && ops->size)) {
96859 for_each_net(net) {
96860 error = ops_init(ops, net);
96861@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
96862
96863 out_undo:
96864 /* If I have an error cleanup all namespaces I initialized */
96865- list_del(&ops->list);
96866+ pax_list_del((struct list_head *)&ops->list);
96867 ops_exit_list(ops, &net_exit_list);
96868 ops_free_list(ops, &net_exit_list);
96869 return error;
96870@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
96871 struct net *net;
96872 LIST_HEAD(net_exit_list);
96873
96874- list_del(&ops->list);
96875+ pax_list_del((struct list_head *)&ops->list);
96876 for_each_net(net)
96877 list_add_tail(&net->exit_list, &net_exit_list);
96878 ops_exit_list(ops, &net_exit_list);
96879@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
96880 mutex_lock(&net_mutex);
96881 error = register_pernet_operations(&pernet_list, ops);
96882 if (!error && (first_device == &pernet_list))
96883- first_device = &ops->list;
96884+ first_device = (struct list_head *)&ops->list;
96885 mutex_unlock(&net_mutex);
96886 return error;
96887 }
96888diff --git a/net/core/netpoll.c b/net/core/netpoll.c
96889index 81975f2..9ef3531 100644
96890--- a/net/core/netpoll.c
96891+++ b/net/core/netpoll.c
96892@@ -435,7 +435,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
96893 struct udphdr *udph;
96894 struct iphdr *iph;
96895 struct ethhdr *eth;
96896- static atomic_t ip_ident;
96897+ static atomic_unchecked_t ip_ident;
96898 struct ipv6hdr *ip6h;
96899
96900 udp_len = len + sizeof(*udph);
96901@@ -506,7 +506,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
96902 put_unaligned(0x45, (unsigned char *)iph);
96903 iph->tos = 0;
96904 put_unaligned(htons(ip_len), &(iph->tot_len));
96905- iph->id = htons(atomic_inc_return(&ip_ident));
96906+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
96907 iph->frag_off = 0;
96908 iph->ttl = 64;
96909 iph->protocol = IPPROTO_UDP;
96910diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
96911index cf67144..12bf94c 100644
96912--- a/net/core/rtnetlink.c
96913+++ b/net/core/rtnetlink.c
96914@@ -58,7 +58,7 @@ struct rtnl_link {
96915 rtnl_doit_func doit;
96916 rtnl_dumpit_func dumpit;
96917 rtnl_calcit_func calcit;
96918-};
96919+} __no_const;
96920
96921 static DEFINE_MUTEX(rtnl_mutex);
96922
96923@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
96924 if (rtnl_link_ops_get(ops->kind))
96925 return -EEXIST;
96926
96927- if (!ops->dellink)
96928- ops->dellink = unregister_netdevice_queue;
96929+ if (!ops->dellink) {
96930+ pax_open_kernel();
96931+ *(void **)&ops->dellink = unregister_netdevice_queue;
96932+ pax_close_kernel();
96933+ }
96934
96935- list_add_tail(&ops->list, &link_ops);
96936+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
96937 return 0;
96938 }
96939 EXPORT_SYMBOL_GPL(__rtnl_link_register);
96940@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
96941 for_each_net(net) {
96942 __rtnl_kill_links(net, ops);
96943 }
96944- list_del(&ops->list);
96945+ pax_list_del((struct list_head *)&ops->list);
96946 }
96947 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
96948
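rtnl_link_ops loses __read_mostly here because the constify plugin moves such ops structures into read-only memory; the one legitimate write (defaulting ->dellink) is then bracketed by pax_open_kernel()/pax_close_kernel(), and list linkage goes through pax_list_add_tail() for the same reason. A loose userspace analogy using mprotect() (the real primitives toggle CPU write protection, not page permissions, and this assumes the object sits within one page):

#include <sys/mman.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>

static int write_protected(void *dst, const void *src, size_t len)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	void *base = (void *)((uintptr_t)dst & ~((uintptr_t)pagesz - 1));

	if (mprotect(base, pagesz, PROT_READ | PROT_WRITE))	/* "pax_open_kernel" */
		return -1;
	memcpy(dst, src, len);					/* the one sanctioned write */
	return mprotect(base, pagesz, PROT_READ);		/* "pax_close_kernel" */
}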
96949diff --git a/net/core/scm.c b/net/core/scm.c
96950index b442e7e..6f5b5a2 100644
96951--- a/net/core/scm.c
96952+++ b/net/core/scm.c
96953@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
96954 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
96955 {
96956 struct cmsghdr __user *cm
96957- = (__force struct cmsghdr __user *)msg->msg_control;
96958+ = (struct cmsghdr __force_user *)msg->msg_control;
96959 struct cmsghdr cmhdr;
96960 int cmlen = CMSG_LEN(len);
96961 int err;
96962@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
96963 err = -EFAULT;
96964 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
96965 goto out;
96966- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
96967+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
96968 goto out;
96969 cmlen = CMSG_SPACE(len);
96970 if (msg->msg_controllen < cmlen)
96971@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
96972 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
96973 {
96974 struct cmsghdr __user *cm
96975- = (__force struct cmsghdr __user*)msg->msg_control;
96976+ = (struct cmsghdr __force_user *)msg->msg_control;
96977
96978 int fdmax = 0;
96979 int fdnum = scm->fp->count;
96980@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
96981 if (fdnum < fdmax)
96982 fdmax = fdnum;
96983
96984- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
96985+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
96986 i++, cmfptr++)
96987 {
96988 struct socket *sock;
96989diff --git a/net/core/skbuff.c b/net/core/skbuff.c
96990index deffb37..213db0a 100644
96991--- a/net/core/skbuff.c
96992+++ b/net/core/skbuff.c
96993@@ -2006,7 +2006,7 @@ EXPORT_SYMBOL(__skb_checksum);
96994 __wsum skb_checksum(const struct sk_buff *skb, int offset,
96995 int len, __wsum csum)
96996 {
96997- const struct skb_checksum_ops ops = {
96998+ static const struct skb_checksum_ops ops = {
96999 .update = csum_partial_ext,
97000 .combine = csum_block_add_ext,
97001 };
97002@@ -3119,13 +3119,15 @@ void __init skb_init(void)
97003 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
97004 sizeof(struct sk_buff),
97005 0,
97006- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
97007+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
97008+ SLAB_NO_SANITIZE,
97009 NULL);
97010 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
97011 (2*sizeof(struct sk_buff)) +
97012 sizeof(atomic_t),
97013 0,
97014- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
97015+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
97016+ SLAB_NO_SANITIZE,
97017 NULL);
97018 }
97019
97020diff --git a/net/core/sock.c b/net/core/sock.c
97021index fbc5cfb..6d7e8c3 100644
97022--- a/net/core/sock.c
97023+++ b/net/core/sock.c
97024@@ -393,7 +393,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
97025 struct sk_buff_head *list = &sk->sk_receive_queue;
97026
97027 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
97028- atomic_inc(&sk->sk_drops);
97029+ atomic_inc_unchecked(&sk->sk_drops);
97030 trace_sock_rcvqueue_full(sk, skb);
97031 return -ENOMEM;
97032 }
97033@@ -403,7 +403,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
97034 return err;
97035
97036 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
97037- atomic_inc(&sk->sk_drops);
97038+ atomic_inc_unchecked(&sk->sk_drops);
97039 return -ENOBUFS;
97040 }
97041
97042@@ -423,7 +423,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
97043 skb_dst_force(skb);
97044
97045 spin_lock_irqsave(&list->lock, flags);
97046- skb->dropcount = atomic_read(&sk->sk_drops);
97047+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
97048 __skb_queue_tail(list, skb);
97049 spin_unlock_irqrestore(&list->lock, flags);
97050
97051@@ -443,7 +443,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
97052 skb->dev = NULL;
97053
97054 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
97055- atomic_inc(&sk->sk_drops);
97056+ atomic_inc_unchecked(&sk->sk_drops);
97057 goto discard_and_relse;
97058 }
97059 if (nested)
97060@@ -461,7 +461,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
97061 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
97062 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
97063 bh_unlock_sock(sk);
97064- atomic_inc(&sk->sk_drops);
97065+ atomic_inc_unchecked(&sk->sk_drops);
97066 goto discard_and_relse;
97067 }
97068
97069@@ -950,12 +950,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
97070 struct timeval tm;
97071 } v;
97072
97073- int lv = sizeof(int);
97074- int len;
97075+ unsigned int lv = sizeof(int);
97076+ unsigned int len;
97077
97078 if (get_user(len, optlen))
97079 return -EFAULT;
97080- if (len < 0)
97081+ if (len > INT_MAX)
97082 return -EINVAL;
97083
97084 memset(&v, 0, sizeof(v));
97085@@ -1107,11 +1107,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
97086
97087 case SO_PEERNAME:
97088 {
97089- char address[128];
97090+ char address[_K_SS_MAXSIZE];
97091
97092 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
97093 return -ENOTCONN;
97094- if (lv < len)
97095+ if (lv < len || sizeof address < len)
97096 return -EINVAL;
97097 if (copy_to_user(optval, address, len))
97098 return -EFAULT;
97099@@ -1188,7 +1188,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
97100
97101 if (len > lv)
97102 len = lv;
97103- if (copy_to_user(optval, &v, len))
97104+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
97105 return -EFAULT;
97106 lenout:
97107 if (put_user(len, optlen))
97108@@ -2353,7 +2353,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
97109 */
97110 smp_wmb();
97111 atomic_set(&sk->sk_refcnt, 1);
97112- atomic_set(&sk->sk_drops, 0);
97113+ atomic_set_unchecked(&sk->sk_drops, 0);
97114 }
97115 EXPORT_SYMBOL(sock_init_data);
97116
97117@@ -2478,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
97118 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
97119 int level, int type)
97120 {
97121+ struct sock_extended_err ee;
97122 struct sock_exterr_skb *serr;
97123 struct sk_buff *skb, *skb2;
97124 int copied, err;
97125@@ -2499,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
97126 sock_recv_timestamp(msg, sk, skb);
97127
97128 serr = SKB_EXT_ERR(skb);
97129- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
97130+ ee = serr->ee;
97131+ put_cmsg(msg, level, type, sizeof ee, &ee);
97132
97133 msg->msg_flags |= MSG_ERRQUEUE;
97134 err = copied;
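The sock_getsockopt() changes above make the user-supplied length unsigned (so a negative int is caught as > INT_MAX), cap every copy against the kernel object's size, and snapshot serr->ee onto the stack before handing it to put_cmsg(). A compact sketch of the bounded copy-out idiom (memcpy stands in for copy_to_user(); sizes are illustrative):

#include <string.h>
#include <limits.h>
#include <errno.h>

static int copyout_bounded(void *uptr, unsigned int ulen,
			   const void *obj, size_t objlen)
{
	unsigned char tmp[64];

	if (ulen > INT_MAX)		/* was a negative int from userspace */
		return -EINVAL;
	if (objlen > sizeof(tmp))
		return -EFAULT;
	if (ulen > objlen)		/* never read past the kernel object */
		ulen = objlen;
	memcpy(tmp, obj, objlen);	/* snapshot, so the source can't change mid-copy */
	memcpy(uptr, tmp, ulen);	/* the kernel uses copy_to_user() here */
	return 0;
}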
97135diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
97136index a0e9cf6..ef7f9ed 100644
97137--- a/net/core/sock_diag.c
97138+++ b/net/core/sock_diag.c
97139@@ -9,26 +9,33 @@
97140 #include <linux/inet_diag.h>
97141 #include <linux/sock_diag.h>
97142
97143-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
97144+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
97145 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
97146 static DEFINE_MUTEX(sock_diag_table_mutex);
97147
97148 int sock_diag_check_cookie(void *sk, __u32 *cookie)
97149 {
97150+#ifndef CONFIG_GRKERNSEC_HIDESYM
97151 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
97152 cookie[1] != INET_DIAG_NOCOOKIE) &&
97153 ((u32)(unsigned long)sk != cookie[0] ||
97154 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
97155 return -ESTALE;
97156 else
97157+#endif
97158 return 0;
97159 }
97160 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
97161
97162 void sock_diag_save_cookie(void *sk, __u32 *cookie)
97163 {
97164+#ifdef CONFIG_GRKERNSEC_HIDESYM
97165+ cookie[0] = 0;
97166+ cookie[1] = 0;
97167+#else
97168 cookie[0] = (u32)(unsigned long)sk;
97169 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
97170+#endif
97171 }
97172 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
97173
97174@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
97175 mutex_lock(&sock_diag_table_mutex);
97176 if (sock_diag_handlers[hndl->family])
97177 err = -EBUSY;
97178- else
97179+ else {
97180+ pax_open_kernel();
97181 sock_diag_handlers[hndl->family] = hndl;
97182+ pax_close_kernel();
97183+ }
97184 mutex_unlock(&sock_diag_table_mutex);
97185
97186 return err;
97187@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
97188
97189 mutex_lock(&sock_diag_table_mutex);
97190 BUG_ON(sock_diag_handlers[family] != hnld);
97191+ pax_open_kernel();
97192 sock_diag_handlers[family] = NULL;
97193+ pax_close_kernel();
97194 mutex_unlock(&sock_diag_table_mutex);
97195 }
97196 EXPORT_SYMBOL_GPL(sock_diag_unregister);
97197diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
97198index cca4441..5e616de 100644
97199--- a/net/core/sysctl_net_core.c
97200+++ b/net/core/sysctl_net_core.c
97201@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
97202 {
97203 unsigned int orig_size, size;
97204 int ret, i;
97205- struct ctl_table tmp = {
97206+ ctl_table_no_const tmp = {
97207 .data = &size,
97208 .maxlen = sizeof(size),
97209 .mode = table->mode
97210@@ -199,7 +199,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
97211 void __user *buffer, size_t *lenp, loff_t *ppos)
97212 {
97213 char id[IFNAMSIZ];
97214- struct ctl_table tbl = {
97215+ ctl_table_no_const tbl = {
97216 .data = id,
97217 .maxlen = IFNAMSIZ,
97218 };
97219@@ -378,13 +378,12 @@ static struct ctl_table netns_core_table[] = {
97220
97221 static __net_init int sysctl_core_net_init(struct net *net)
97222 {
97223- struct ctl_table *tbl;
97224+ ctl_table_no_const *tbl = NULL;
97225
97226 net->core.sysctl_somaxconn = SOMAXCONN;
97227
97228- tbl = netns_core_table;
97229 if (!net_eq(net, &init_net)) {
97230- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
97231+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
97232 if (tbl == NULL)
97233 goto err_dup;
97234
97235@@ -394,17 +393,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
97236 if (net->user_ns != &init_user_ns) {
97237 tbl[0].procname = NULL;
97238 }
97239- }
97240-
97241- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
97242+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
97243+ } else
97244+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
97245 if (net->core.sysctl_hdr == NULL)
97246 goto err_reg;
97247
97248 return 0;
97249
97250 err_reg:
97251- if (tbl != netns_core_table)
97252- kfree(tbl);
97253+ kfree(tbl);
97254 err_dup:
97255 return -ENOMEM;
97256 }
97257@@ -419,7 +417,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
97258 kfree(tbl);
97259 }
97260
97261-static __net_initdata struct pernet_operations sysctl_core_ops = {
97262+static __net_initconst struct pernet_operations sysctl_core_ops = {
97263 .init = sysctl_core_net_init,
97264 .exit = sysctl_core_net_exit,
97265 };
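The sysctl rework above is the recurring ctl_table_no_const pattern seen throughout this patch: the template table stays const, only non-init namespaces get a writable kmemdup() copy, init_net registers the original directly, and because tbl starts out NULL the error path can kfree() it unconditionally. A self-contained sketch of the same registration shape (struct and function names here are invented stand-ins, not the kernel API):

#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct entry { const char *name; int mode; };
struct ns { const void *hdr; };

/* stub standing in for register_net_sysctl() */
static const void *register_table(struct ns *ns, const struct entry *tbl)
{
	(void)ns;
	return tbl;
}

static int register_ns_table(struct ns *ns, const struct entry *template,
			     size_t len, int is_default_ns)
{
	struct entry *dup = NULL;

	if (!is_default_ns) {
		dup = malloc(len);
		if (!dup)
			return -ENOMEM;
		memcpy(dup, template, len);
		ns->hdr = register_table(ns, dup);	/* private, patchable copy */
	} else {
		ns->hdr = register_table(ns, template);	/* const original, never freed */
	}
	if (!ns->hdr) {
		free(dup);				/* safe: NULL for the default ns */
		return -ENOMEM;
	}
	return 0;
}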
97266diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
97267index dd4d506..fb2fb87 100644
97268--- a/net/decnet/af_decnet.c
97269+++ b/net/decnet/af_decnet.c
97270@@ -465,6 +465,7 @@ static struct proto dn_proto = {
97271 .sysctl_rmem = sysctl_decnet_rmem,
97272 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
97273 .obj_size = sizeof(struct dn_sock),
97274+ .slab_flags = SLAB_USERCOPY,
97275 };
97276
97277 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
97278diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
97279index dd0dfb2..fdbc764 100644
97280--- a/net/decnet/dn_dev.c
97281+++ b/net/decnet/dn_dev.c
97282@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
97283 .extra1 = &min_t3,
97284 .extra2 = &max_t3
97285 },
97286- {0}
97287+ { }
97288 },
97289 };
97290
97291diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
97292index 5325b54..a0d4d69 100644
97293--- a/net/decnet/sysctl_net_decnet.c
97294+++ b/net/decnet/sysctl_net_decnet.c
97295@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
97296
97297 if (len > *lenp) len = *lenp;
97298
97299- if (copy_to_user(buffer, addr, len))
97300+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
97301 return -EFAULT;
97302
97303 *lenp = len;
97304@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
97305
97306 if (len > *lenp) len = *lenp;
97307
97308- if (copy_to_user(buffer, devname, len))
97309+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
97310 return -EFAULT;
97311
97312 *lenp = len;
97313diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
97314index 1865fdf..581a595 100644
97315--- a/net/ieee802154/dgram.c
97316+++ b/net/ieee802154/dgram.c
97317@@ -315,8 +315,9 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
97318 if (saddr) {
97319 saddr->family = AF_IEEE802154;
97320 saddr->addr = mac_cb(skb)->sa;
97321+ }
97322+ if (addr_len)
97323 *addr_len = sizeof(*saddr);
97324- }
97325
97326 if (flags & MSG_TRUNC)
97327 copied = skb->len;
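The dgram_recvmsg() reordering above initializes *addr_len whenever the caller asked for it, instead of only when msg_name was filled in; leaving it unset is the same class of bug behind the 2013-era recvmsg info-leak fixes, where stale data was reported back as an address length. A sketch of the fixed ordering (types are simplified stand-ins):

#include <string.h>

struct sockaddr_sketch { unsigned short family; unsigned char data[14]; };

static int recv_fill_addr(struct sockaddr_sketch *saddr, int *addr_len)
{
	if (addr_len)
		*addr_len = sizeof(*saddr);	/* always defined, even if saddr == NULL */
	if (saddr) {
		memset(saddr, 0, sizeof(*saddr));
		saddr->family = 1;		/* placeholder family value */
	}
	return 0;
}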
97328diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
97329index e4d96d4..e1651da 100644
97330--- a/net/ipv4/af_inet.c
97331+++ b/net/ipv4/af_inet.c
97332@@ -1686,13 +1686,9 @@ static int __init inet_init(void)
97333
97334 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
97335
97336- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
97337- if (!sysctl_local_reserved_ports)
97338- goto out;
97339-
97340 rc = proto_register(&tcp_prot, 1);
97341 if (rc)
97342- goto out_free_reserved_ports;
97343+ goto out;
97344
97345 rc = proto_register(&udp_prot, 1);
97346 if (rc)
97347@@ -1799,8 +1795,6 @@ out_unregister_udp_proto:
97348 proto_unregister(&udp_prot);
97349 out_unregister_tcp_proto:
97350 proto_unregister(&tcp_prot);
97351-out_free_reserved_ports:
97352- kfree(sysctl_local_reserved_ports);
97353 goto out;
97354 }
97355
97356diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
97357index f4b34d8..c54a163 100644
97358--- a/net/ipv4/devinet.c
97359+++ b/net/ipv4/devinet.c
97360@@ -1534,7 +1534,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
97361 idx = 0;
97362 head = &net->dev_index_head[h];
97363 rcu_read_lock();
97364- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
97365+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
97366 net->dev_base_seq;
97367 hlist_for_each_entry_rcu(dev, head, index_hlist) {
97368 if (idx < s_idx)
97369@@ -1845,7 +1845,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
97370 idx = 0;
97371 head = &net->dev_index_head[h];
97372 rcu_read_lock();
97373- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
97374+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
97375 net->dev_base_seq;
97376 hlist_for_each_entry_rcu(dev, head, index_hlist) {
97377 if (idx < s_idx)
97378@@ -2070,7 +2070,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
97379 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
97380 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
97381
97382-static struct devinet_sysctl_table {
97383+static const struct devinet_sysctl_table {
97384 struct ctl_table_header *sysctl_header;
97385 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
97386 } devinet_sysctl = {
97387@@ -2192,7 +2192,7 @@ static __net_init int devinet_init_net(struct net *net)
97388 int err;
97389 struct ipv4_devconf *all, *dflt;
97390 #ifdef CONFIG_SYSCTL
97391- struct ctl_table *tbl = ctl_forward_entry;
97392+ ctl_table_no_const *tbl = NULL;
97393 struct ctl_table_header *forw_hdr;
97394 #endif
97395
97396@@ -2210,7 +2210,7 @@ static __net_init int devinet_init_net(struct net *net)
97397 goto err_alloc_dflt;
97398
97399 #ifdef CONFIG_SYSCTL
97400- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
97401+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
97402 if (tbl == NULL)
97403 goto err_alloc_ctl;
97404
97405@@ -2230,7 +2230,10 @@ static __net_init int devinet_init_net(struct net *net)
97406 goto err_reg_dflt;
97407
97408 err = -ENOMEM;
97409- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
97410+ if (!net_eq(net, &init_net))
97411+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
97412+ else
97413+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
97414 if (forw_hdr == NULL)
97415 goto err_reg_ctl;
97416 net->ipv4.forw_hdr = forw_hdr;
97417@@ -2246,8 +2249,7 @@ err_reg_ctl:
97418 err_reg_dflt:
97419 __devinet_sysctl_unregister(all);
97420 err_reg_all:
97421- if (tbl != ctl_forward_entry)
97422- kfree(tbl);
97423+ kfree(tbl);
97424 err_alloc_ctl:
97425 #endif
97426 if (dflt != &ipv4_devconf_dflt)
97427diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
97428index c7539e2..b455e51 100644
97429--- a/net/ipv4/fib_frontend.c
97430+++ b/net/ipv4/fib_frontend.c
97431@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
97432 #ifdef CONFIG_IP_ROUTE_MULTIPATH
97433 fib_sync_up(dev);
97434 #endif
97435- atomic_inc(&net->ipv4.dev_addr_genid);
97436+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
97437 rt_cache_flush(dev_net(dev));
97438 break;
97439 case NETDEV_DOWN:
97440 fib_del_ifaddr(ifa, NULL);
97441- atomic_inc(&net->ipv4.dev_addr_genid);
97442+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
97443 if (ifa->ifa_dev->ifa_list == NULL) {
97444 /* Last address was deleted from this interface.
97445 * Disable IP.
97446@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
97447 #ifdef CONFIG_IP_ROUTE_MULTIPATH
97448 fib_sync_up(dev);
97449 #endif
97450- atomic_inc(&net->ipv4.dev_addr_genid);
97451+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
97452 rt_cache_flush(net);
97453 break;
97454 case NETDEV_DOWN:
97455diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
97456index e63f47a..e5c531d 100644
97457--- a/net/ipv4/fib_semantics.c
97458+++ b/net/ipv4/fib_semantics.c
97459@@ -766,7 +766,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
97460 nh->nh_saddr = inet_select_addr(nh->nh_dev,
97461 nh->nh_gw,
97462 nh->nh_parent->fib_scope);
97463- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
97464+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
97465
97466 return nh->nh_saddr;
97467 }
97468diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
97469index fc0e649..febfa65 100644
97470--- a/net/ipv4/inet_connection_sock.c
97471+++ b/net/ipv4/inet_connection_sock.c
97472@@ -29,7 +29,7 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
97473 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
97474 #endif
97475
97476-unsigned long *sysctl_local_reserved_ports;
97477+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
97478 EXPORT_SYMBOL(sysctl_local_reserved_ports);
97479
97480 void inet_get_local_port_range(struct net *net, int *low, int *high)
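Turning sysctl_local_reserved_ports into a statically sized bitmap removes a boot-time allocation that could fail and lets the ipv4_table entry reference it at compile time, which is what the later sysctl_net_ipv4.c hunk relies on. A sketch of the fixed-size bitmap (sizes mirror the 65536-port space; the helper name is illustrative):

#include <limits.h>

#define PORT_SPACE 65536
#define BITS_PER_LONG_SKETCH (sizeof(unsigned long) * CHAR_BIT)

static unsigned long reserved_ports[PORT_SPACE / BITS_PER_LONG_SKETCH];

static inline int port_is_reserved(unsigned int port)
{
	return (reserved_ports[port / BITS_PER_LONG_SKETCH]
		>> (port % BITS_PER_LONG_SKETCH)) & 1UL;
}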
97481diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
97482index bb075fc..322dceb 100644
97483--- a/net/ipv4/inet_fragment.c
97484+++ b/net/ipv4/inet_fragment.c
97485@@ -278,9 +278,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
97486
97487 atomic_inc(&qp->refcnt);
97488 hlist_add_head(&qp->list, &hb->chain);
97489- spin_unlock(&hb->chain_lock);
97490- read_unlock(&f->lock);
97491 inet_frag_lru_add(nf, qp);
97492+ spin_unlock(&hb->chain_lock);
97493+ read_unlock(&f->lock);
97494+
97495 return qp;
97496 }
97497
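The inet_frag_intern() reordering above moves inet_frag_lru_add() back under the chain lock, so a freshly interned queue joins the LRU before the locks are dropped, closing the window in which it was hash-reachable but not yet evictable. A pthread sketch of publishing to both structures under one lock (types are illustrative; the real LRU has its own internal locking):

#include <pthread.h>

struct obj { struct obj *hash_next, *lru_next; };
struct bucket { pthread_mutex_t lock; struct obj *chain; };
struct lru { struct obj *head; };

static void intern_object(struct obj *q, struct bucket *hb, struct lru *lru)
{
	pthread_mutex_lock(&hb->lock);
	q->hash_next = hb->chain;
	hb->chain = q;			/* becomes reachable via the hash... */
	q->lru_next = lru->head;
	lru->head = q;			/* ...and joins the LRU before the lock drops */
	pthread_mutex_unlock(&hb->lock);
}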
97498diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
97499index 8b9cf27..0d8d592 100644
97500--- a/net/ipv4/inet_hashtables.c
97501+++ b/net/ipv4/inet_hashtables.c
97502@@ -18,6 +18,7 @@
97503 #include <linux/sched.h>
97504 #include <linux/slab.h>
97505 #include <linux/wait.h>
97506+#include <linux/security.h>
97507
97508 #include <net/inet_connection_sock.h>
97509 #include <net/inet_hashtables.h>
97510@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
97511 return inet_ehashfn(net, laddr, lport, faddr, fport);
97512 }
97513
97514+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
97515+
97516 /*
97517 * Allocate and initialize a new local port bind bucket.
97518 * The bindhash mutex for snum's hash chain must be held here.
97519@@ -554,6 +557,8 @@ ok:
97520 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
97521 spin_unlock(&head->lock);
97522
97523+ gr_update_task_in_ip_table(current, inet_sk(sk));
97524+
97525 if (tw) {
97526 inet_twsk_deschedule(tw, death_row);
97527 while (twrefcnt) {
97528diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
97529index 33d5537..da337a4 100644
97530--- a/net/ipv4/inetpeer.c
97531+++ b/net/ipv4/inetpeer.c
97532@@ -503,8 +503,8 @@ relookup:
97533 if (p) {
97534 p->daddr = *daddr;
97535 atomic_set(&p->refcnt, 1);
97536- atomic_set(&p->rid, 0);
97537- atomic_set(&p->ip_id_count,
97538+ atomic_set_unchecked(&p->rid, 0);
97539+ atomic_set_unchecked(&p->ip_id_count,
97540 (daddr->family == AF_INET) ?
97541 secure_ip_id(daddr->addr.a4) :
97542 secure_ipv6_id(daddr->addr.a6));
97543diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
97544index 2481993..2d9a7a7 100644
97545--- a/net/ipv4/ip_fragment.c
97546+++ b/net/ipv4/ip_fragment.c
97547@@ -283,7 +283,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
97548 return 0;
97549
97550 start = qp->rid;
97551- end = atomic_inc_return(&peer->rid);
97552+ end = atomic_inc_return_unchecked(&peer->rid);
97553 qp->rid = end;
97554
97555 rc = qp->q.fragments && (end - start) > max;
97556@@ -760,12 +760,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
97557
97558 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
97559 {
97560- struct ctl_table *table;
97561+ ctl_table_no_const *table = NULL;
97562 struct ctl_table_header *hdr;
97563
97564- table = ip4_frags_ns_ctl_table;
97565 if (!net_eq(net, &init_net)) {
97566- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
97567+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
97568 if (table == NULL)
97569 goto err_alloc;
97570
97571@@ -776,9 +775,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
97572 /* Don't export sysctls to unprivileged users */
97573 if (net->user_ns != &init_user_ns)
97574 table[0].procname = NULL;
97575- }
97576+ hdr = register_net_sysctl(net, "net/ipv4", table);
97577+ } else
97578+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
97579
97580- hdr = register_net_sysctl(net, "net/ipv4", table);
97581 if (hdr == NULL)
97582 goto err_reg;
97583
97584@@ -786,8 +786,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
97585 return 0;
97586
97587 err_reg:
97588- if (!net_eq(net, &init_net))
97589- kfree(table);
97590+ kfree(table);
97591 err_alloc:
97592 return -ENOMEM;
97593 }
97594diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
97595index d306360..1c1a1f1 100644
97596--- a/net/ipv4/ip_gre.c
97597+++ b/net/ipv4/ip_gre.c
97598@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
97599 module_param(log_ecn_error, bool, 0644);
97600 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
97601
97602-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
97603+static struct rtnl_link_ops ipgre_link_ops;
97604 static int ipgre_tunnel_init(struct net_device *dev);
97605
97606 static int ipgre_net_id __read_mostly;
97607@@ -732,7 +732,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
97608 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
97609 };
97610
97611-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
97612+static struct rtnl_link_ops ipgre_link_ops = {
97613 .kind = "gre",
97614 .maxtype = IFLA_GRE_MAX,
97615 .policy = ipgre_policy,
97616@@ -746,7 +746,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
97617 .fill_info = ipgre_fill_info,
97618 };
97619
97620-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
97621+static struct rtnl_link_ops ipgre_tap_ops = {
97622 .kind = "gretap",
97623 .maxtype = IFLA_GRE_MAX,
97624 .policy = ipgre_policy,
97625diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
97626index ddf32a6..3fdeea9 100644
97627--- a/net/ipv4/ip_sockglue.c
97628+++ b/net/ipv4/ip_sockglue.c
97629@@ -1172,7 +1172,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
97630 len = min_t(unsigned int, len, opt->optlen);
97631 if (put_user(len, optlen))
97632 return -EFAULT;
97633- if (copy_to_user(optval, opt->__data, len))
97634+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
97635+ copy_to_user(optval, opt->__data, len))
97636 return -EFAULT;
97637 return 0;
97638 }
97639@@ -1303,7 +1304,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
97640 if (sk->sk_type != SOCK_STREAM)
97641 return -ENOPROTOOPT;
97642
97643- msg.msg_control = optval;
97644+ msg.msg_control = (void __force_kernel *)optval;
97645 msg.msg_controllen = len;
97646 msg.msg_flags = flags;
97647
97648diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
97649index 52b802a..b725179 100644
97650--- a/net/ipv4/ip_vti.c
97651+++ b/net/ipv4/ip_vti.c
97652@@ -44,7 +44,7 @@
97653 #include <net/net_namespace.h>
97654 #include <net/netns/generic.h>
97655
97656-static struct rtnl_link_ops vti_link_ops __read_mostly;
97657+static struct rtnl_link_ops vti_link_ops;
97658
97659 static int vti_net_id __read_mostly;
97660 static int vti_tunnel_init(struct net_device *dev);
97661@@ -360,7 +360,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
97662 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
97663 };
97664
97665-static struct rtnl_link_ops vti_link_ops __read_mostly = {
97666+static struct rtnl_link_ops vti_link_ops = {
97667 .kind = "vti",
97668 .maxtype = IFLA_VTI_MAX,
97669 .policy = vti_policy,
97670diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
97671index efa1138..20dbba0 100644
97672--- a/net/ipv4/ipconfig.c
97673+++ b/net/ipv4/ipconfig.c
97674@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
97675
97676 mm_segment_t oldfs = get_fs();
97677 set_fs(get_ds());
97678- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
97679+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
97680 set_fs(oldfs);
97681 return res;
97682 }
97683@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
97684
97685 mm_segment_t oldfs = get_fs();
97686 set_fs(get_ds());
97687- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
97688+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
97689 set_fs(oldfs);
97690 return res;
97691 }
97692@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
97693
97694 mm_segment_t oldfs = get_fs();
97695 set_fs(get_ds());
97696- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
97697+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
97698 set_fs(oldfs);
97699 return res;
97700 }
97701diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
97702index fe3e9f7..4956990 100644
97703--- a/net/ipv4/ipip.c
97704+++ b/net/ipv4/ipip.c
97705@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
97706 static int ipip_net_id __read_mostly;
97707
97708 static int ipip_tunnel_init(struct net_device *dev);
97709-static struct rtnl_link_ops ipip_link_ops __read_mostly;
97710+static struct rtnl_link_ops ipip_link_ops;
97711
97712 static int ipip_err(struct sk_buff *skb, u32 info)
97713 {
97714@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
97715 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
97716 };
97717
97718-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
97719+static struct rtnl_link_ops ipip_link_ops = {
97720 .kind = "ipip",
97721 .maxtype = IFLA_IPTUN_MAX,
97722 .policy = ipip_policy,
97723diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
97724index 59da7cd..e318de1 100644
97725--- a/net/ipv4/netfilter/arp_tables.c
97726+++ b/net/ipv4/netfilter/arp_tables.c
97727@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
97728 #endif
97729
97730 static int get_info(struct net *net, void __user *user,
97731- const int *len, int compat)
97732+ int len, int compat)
97733 {
97734 char name[XT_TABLE_MAXNAMELEN];
97735 struct xt_table *t;
97736 int ret;
97737
97738- if (*len != sizeof(struct arpt_getinfo)) {
97739- duprintf("length %u != %Zu\n", *len,
97740+ if (len != sizeof(struct arpt_getinfo)) {
97741+ duprintf("length %u != %Zu\n", len,
97742 sizeof(struct arpt_getinfo));
97743 return -EINVAL;
97744 }
97745@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
97746 info.size = private->size;
97747 strcpy(info.name, name);
97748
97749- if (copy_to_user(user, &info, *len) != 0)
97750+ if (copy_to_user(user, &info, len) != 0)
97751 ret = -EFAULT;
97752 else
97753 ret = 0;
97754@@ -1688,7 +1688,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
97755
97756 switch (cmd) {
97757 case ARPT_SO_GET_INFO:
97758- ret = get_info(sock_net(sk), user, len, 1);
97759+ ret = get_info(sock_net(sk), user, *len, 1);
97760 break;
97761 case ARPT_SO_GET_ENTRIES:
97762 ret = compat_get_entries(sock_net(sk), user, len);
97763@@ -1733,7 +1733,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
97764
97765 switch (cmd) {
97766 case ARPT_SO_GET_INFO:
97767- ret = get_info(sock_net(sk), user, len, 0);
97768+ ret = get_info(sock_net(sk), user, *len, 0);
97769 break;
97770
97771 case ARPT_SO_GET_ENTRIES:
97772diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
97773index 718dfbd..cef4152 100644
97774--- a/net/ipv4/netfilter/ip_tables.c
97775+++ b/net/ipv4/netfilter/ip_tables.c
97776@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
97777 #endif
97778
97779 static int get_info(struct net *net, void __user *user,
97780- const int *len, int compat)
97781+ int len, int compat)
97782 {
97783 char name[XT_TABLE_MAXNAMELEN];
97784 struct xt_table *t;
97785 int ret;
97786
97787- if (*len != sizeof(struct ipt_getinfo)) {
97788- duprintf("length %u != %zu\n", *len,
97789+ if (len != sizeof(struct ipt_getinfo)) {
97790+ duprintf("length %u != %zu\n", len,
97791 sizeof(struct ipt_getinfo));
97792 return -EINVAL;
97793 }
97794@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
97795 info.size = private->size;
97796 strcpy(info.name, name);
97797
97798- if (copy_to_user(user, &info, *len) != 0)
97799+ if (copy_to_user(user, &info, len) != 0)
97800 ret = -EFAULT;
97801 else
97802 ret = 0;
97803@@ -1971,7 +1971,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
97804
97805 switch (cmd) {
97806 case IPT_SO_GET_INFO:
97807- ret = get_info(sock_net(sk), user, len, 1);
97808+ ret = get_info(sock_net(sk), user, *len, 1);
97809 break;
97810 case IPT_SO_GET_ENTRIES:
97811 ret = compat_get_entries(sock_net(sk), user, len);
97812@@ -2018,7 +2018,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
97813
97814 switch (cmd) {
97815 case IPT_SO_GET_INFO:
97816- ret = get_info(sock_net(sk), user, len, 0);
97817+ ret = get_info(sock_net(sk), user, *len, 0);
97818 break;
97819
97820 case IPT_SO_GET_ENTRIES:
97821diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
97822index 242e7f4..a084e95 100644
97823--- a/net/ipv4/ping.c
97824+++ b/net/ipv4/ping.c
97825@@ -55,7 +55,7 @@
97826
97827
97828 struct ping_table ping_table;
97829-struct pingv6_ops pingv6_ops;
97830+struct pingv6_ops *pingv6_ops;
97831 EXPORT_SYMBOL_GPL(pingv6_ops);
97832
97833 static u16 ping_port_rover;
97834@@ -334,7 +334,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
97835 return -ENODEV;
97836 }
97837 }
97838- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
97839+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
97840 scoped);
97841 rcu_read_unlock();
97842
97843@@ -542,7 +542,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
97844 }
97845 #if IS_ENABLED(CONFIG_IPV6)
97846 } else if (skb->protocol == htons(ETH_P_IPV6)) {
97847- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
97848+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
97849 #endif
97850 }
97851
97852@@ -560,7 +560,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
97853 info, (u8 *)icmph);
97854 #if IS_ENABLED(CONFIG_IPV6)
97855 } else if (family == AF_INET6) {
97856- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
97857+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
97858 info, (u8 *)icmph);
97859 #endif
97860 }
97861@@ -830,6 +830,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97862 {
97863 struct inet_sock *isk = inet_sk(sk);
97864 int family = sk->sk_family;
97865+ struct sockaddr_in *sin;
97866+ struct sockaddr_in6 *sin6;
97867 struct sk_buff *skb;
97868 int copied, err;
97869
97870@@ -839,12 +841,19 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97871 if (flags & MSG_OOB)
97872 goto out;
97873
97874+ if (addr_len) {
97875+ if (family == AF_INET)
97876+ *addr_len = sizeof(*sin);
97877+ else if (family == AF_INET6 && addr_len)
97877+ else if (family == AF_INET6)
97878+ *addr_len = sizeof(*sin6);
97879+ }
97880+
97881 if (flags & MSG_ERRQUEUE) {
97882 if (family == AF_INET) {
97883 return ip_recv_error(sk, msg, len, addr_len);
97884 #if IS_ENABLED(CONFIG_IPV6)
97885 } else if (family == AF_INET6) {
97886- return pingv6_ops.ipv6_recv_error(sk, msg, len,
97887+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
97888 addr_len);
97889 #endif
97890 }
97891@@ -876,7 +885,6 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97892 sin->sin_port = 0 /* skb->h.uh->source */;
97893 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
97894 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
97895- *addr_len = sizeof(*sin);
97896 }
97897
97898 if (isk->cmsg_flags)
97899@@ -899,11 +907,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97900 sin6->sin6_scope_id =
97901 ipv6_iface_scope_id(&sin6->sin6_addr,
97902 IP6CB(skb)->iif);
97903- *addr_len = sizeof(*sin6);
97904 }
97905
97906 if (inet6_sk(sk)->rxopt.all)
97907- pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
97908+ pingv6_ops->ip6_datagram_recv_ctl(sk, msg, skb);
97909 #endif
97910 } else {
97911 BUG();
97912@@ -1093,7 +1100,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
97913 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
97914 0, sock_i_ino(sp),
97915 atomic_read(&sp->sk_refcnt), sp,
97916- atomic_read(&sp->sk_drops));
97917+ atomic_read_unchecked(&sp->sk_drops));
97918 }
97919
97920 static int ping_v4_seq_show(struct seq_file *seq, void *v)
97921diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
97922index 23c3e5b..cdb8b36 100644
97923--- a/net/ipv4/raw.c
97924+++ b/net/ipv4/raw.c
97925@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
97926 int raw_rcv(struct sock *sk, struct sk_buff *skb)
97927 {
97928 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
97929- atomic_inc(&sk->sk_drops);
97930+ atomic_inc_unchecked(&sk->sk_drops);
97931 kfree_skb(skb);
97932 return NET_RX_DROP;
97933 }
97934@@ -696,6 +696,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97935 if (flags & MSG_OOB)
97936 goto out;
97937
97938+ if (addr_len)
97939+ *addr_len = sizeof(*sin);
97940+
97941 if (flags & MSG_ERRQUEUE) {
97942 err = ip_recv_error(sk, msg, len, addr_len);
97943 goto out;
97944@@ -723,7 +726,6 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
97945 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
97946 sin->sin_port = 0;
97947 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
97948- *addr_len = sizeof(*sin);
97949 }
97950 if (inet->cmsg_flags)
97951 ip_cmsg_recv(msg, skb);
97952@@ -748,16 +750,20 @@ static int raw_init(struct sock *sk)
97953
97954 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
97955 {
97956+ struct icmp_filter filter;
97957+
97958 if (optlen > sizeof(struct icmp_filter))
97959 optlen = sizeof(struct icmp_filter);
97960- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
97961+ if (copy_from_user(&filter, optval, optlen))
97962 return -EFAULT;
97963+ raw_sk(sk)->filter = filter;
97964 return 0;
97965 }
97966
97967 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
97968 {
97969 int len, ret = -EFAULT;
97970+ struct icmp_filter filter;
97971
97972 if (get_user(len, optlen))
97973 goto out;
97974@@ -767,8 +773,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
97975 if (len > sizeof(struct icmp_filter))
97976 len = sizeof(struct icmp_filter);
97977 ret = -EFAULT;
97978- if (put_user(len, optlen) ||
97979- copy_to_user(optval, &raw_sk(sk)->filter, len))
97980+ filter = raw_sk(sk)->filter;
97981+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
97982 goto out;
97983 ret = 0;
97984 out: return ret;
97985@@ -997,7 +1003,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
97986 0, 0L, 0,
97987 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
97988 0, sock_i_ino(sp),
97989- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
97990+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
97991 }
97992
97993 static int raw_seq_show(struct seq_file *seq, void *v)
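raw_seticmpfilter()/raw_geticmpfilter() now stage the filter in a stack struct icmp_filter and bound len against it, so a clamped or oversized copy can no longer touch adjacent fields of the raw socket through &raw_sk(sk)->filter. A sketch of the staging idiom (memcpy stands in for the user-copy helpers; types simplified):

#include <string.h>

struct icmp_filter_sketch { unsigned int data; };
struct raw_sock_sketch { struct icmp_filter_sketch filter; /* ...other fields... */ };

static int set_filter(struct raw_sock_sketch *rs, const void *optval, size_t optlen)
{
	struct icmp_filter_sketch f;

	memset(&f, 0, sizeof(f));	/* defined contents even for short writes (sketch-level choice) */
	if (optlen > sizeof(f))
		optlen = sizeof(f);
	memcpy(&f, optval, optlen);	/* user data lands in a bounded stack object */
	rs->filter = f;			/* single well-typed store into the socket */
	return 0;
}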
97994diff --git a/net/ipv4/route.c b/net/ipv4/route.c
97995index e611651f..0c17263 100644
97996--- a/net/ipv4/route.c
97997+++ b/net/ipv4/route.c
97998@@ -2621,34 +2621,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
97999 .maxlen = sizeof(int),
98000 .mode = 0200,
98001 .proc_handler = ipv4_sysctl_rtcache_flush,
98002+ .extra1 = &init_net,
98003 },
98004 { },
98005 };
98006
98007 static __net_init int sysctl_route_net_init(struct net *net)
98008 {
98009- struct ctl_table *tbl;
98010+ ctl_table_no_const *tbl = NULL;
98011
98012- tbl = ipv4_route_flush_table;
98013 if (!net_eq(net, &init_net)) {
98014- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
98015+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
98016 if (tbl == NULL)
98017 goto err_dup;
98018
98019 /* Don't export sysctls to unprivileged users */
98020 if (net->user_ns != &init_user_ns)
98021 tbl[0].procname = NULL;
98022- }
98023- tbl[0].extra1 = net;
98024+ tbl[0].extra1 = net;
98025+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
98026+ } else
98027+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
98028
98029- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
98030 if (net->ipv4.route_hdr == NULL)
98031 goto err_reg;
98032 return 0;
98033
98034 err_reg:
98035- if (tbl != ipv4_route_flush_table)
98036- kfree(tbl);
98037+ kfree(tbl);
98038 err_dup:
98039 return -ENOMEM;
98040 }
98041@@ -2671,8 +2671,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
98042
98043 static __net_init int rt_genid_init(struct net *net)
98044 {
98045- atomic_set(&net->ipv4.rt_genid, 0);
98046- atomic_set(&net->fnhe_genid, 0);
98047+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
98048+ atomic_set_unchecked(&net->fnhe_genid, 0);
98049 get_random_bytes(&net->ipv4.dev_addr_genid,
98050 sizeof(net->ipv4.dev_addr_genid));
98051 return 0;
98052diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
98053index 3d69ec8..57207b4 100644
98054--- a/net/ipv4/sysctl_net_ipv4.c
98055+++ b/net/ipv4/sysctl_net_ipv4.c
98056@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
98057 container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
98058 int ret;
98059 int range[2];
98060- struct ctl_table tmp = {
98061+ ctl_table_no_const tmp = {
98062 .data = &range,
98063 .maxlen = sizeof(range),
98064 .mode = table->mode,
98065@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
98066 int ret;
98067 gid_t urange[2];
98068 kgid_t low, high;
98069- struct ctl_table tmp = {
98070+ ctl_table_no_const tmp = {
98071 .data = &urange,
98072 .maxlen = sizeof(urange),
98073 .mode = table->mode,
98074@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
98075 void __user *buffer, size_t *lenp, loff_t *ppos)
98076 {
98077 char val[TCP_CA_NAME_MAX];
98078- struct ctl_table tbl = {
98079+ ctl_table_no_const tbl = {
98080 .data = val,
98081 .maxlen = TCP_CA_NAME_MAX,
98082 };
98083@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
98084 void __user *buffer, size_t *lenp,
98085 loff_t *ppos)
98086 {
98087- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
98088+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
98089 int ret;
98090
98091 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
98092@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
98093 void __user *buffer, size_t *lenp,
98094 loff_t *ppos)
98095 {
98096- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
98097+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
98098 int ret;
98099
98100 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
98101@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
98102 void __user *buffer, size_t *lenp,
98103 loff_t *ppos)
98104 {
98105- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
98106+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
98107 struct tcp_fastopen_context *ctxt;
98108 int ret;
98109 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
98110@@ -445,7 +445,7 @@ static struct ctl_table ipv4_table[] = {
98111 },
98112 {
98113 .procname = "ip_local_reserved_ports",
98114- .data = NULL, /* initialized in sysctl_ipv4_init */
98115+ .data = sysctl_local_reserved_ports,
98116 .maxlen = 65536,
98117 .mode = 0644,
98118 .proc_handler = proc_do_large_bitmap,
98119@@ -827,13 +827,12 @@ static struct ctl_table ipv4_net_table[] = {
98120
98121 static __net_init int ipv4_sysctl_init_net(struct net *net)
98122 {
98123- struct ctl_table *table;
98124+ ctl_table_no_const *table = NULL;
98125
98126- table = ipv4_net_table;
98127 if (!net_eq(net, &init_net)) {
98128 int i;
98129
98130- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
98131+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
98132 if (table == NULL)
98133 goto err_alloc;
98134
98135@@ -856,15 +855,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
98136 net->ipv4.sysctl_local_ports.range[0] = 32768;
98137 net->ipv4.sysctl_local_ports.range[1] = 61000;
98138
98139- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
98140+ if (!net_eq(net, &init_net))
98141+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
98142+ else
98143+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
98144 if (net->ipv4.ipv4_hdr == NULL)
98145 goto err_reg;
98146
98147 return 0;
98148
98149 err_reg:
98150- if (!net_eq(net, &init_net))
98151- kfree(table);
98152+ kfree(table);
98153 err_alloc:
98154 return -ENOMEM;
98155 }
98156@@ -886,16 +887,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
98157 static __init int sysctl_ipv4_init(void)
98158 {
98159 struct ctl_table_header *hdr;
98160- struct ctl_table *i;
98161-
98162- for (i = ipv4_table; i->procname; i++) {
98163- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
98164- i->data = sysctl_local_reserved_ports;
98165- break;
98166- }
98167- }
98168- if (!i->procname)
98169- return -EINVAL;
98170
98171 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
98172 if (hdr == NULL)
98173diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
98174index c53b7f3..a89aadd 100644
98175--- a/net/ipv4/tcp_input.c
98176+++ b/net/ipv4/tcp_input.c
98177@@ -759,7 +759,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
98178 * without any lock. We want to make sure compiler wont store
98179 * intermediate values in this location.
98180 */
98181- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
98182+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
98183 sk->sk_max_pacing_rate);
98184 }
98185
98186@@ -4482,7 +4482,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
98187 * simplifies code)
98188 */
98189 static void
98190-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
98191+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
98192 struct sk_buff *head, struct sk_buff *tail,
98193 u32 start, u32 end)
98194 {
98195@@ -5559,6 +5559,7 @@ discard:
98196 tcp_paws_reject(&tp->rx_opt, 0))
98197 goto discard_and_undo;
98198
98199+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
98200 if (th->syn) {
98201 /* We see SYN without ACK. It is attempt of
98202 * simultaneous connect with crossed SYNs.
98203@@ -5609,6 +5610,7 @@ discard:
98204 goto discard;
98205 #endif
98206 }
98207+#endif
98208 /* "fifth, if neither of the SYN or RST bits is set then
98209 * drop the segment and return."
98210 */
98211@@ -5655,7 +5657,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
98212 goto discard;
98213
98214 if (th->syn) {
98215- if (th->fin)
98216+ if (th->fin || th->urg || th->psh)
98217 goto discard;
98218 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
98219 return 1;
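The tcp_rcv_state_process() change above tightens the listen-state sanity check: a connection-opening SYN that also carries FIN, URG or PSH is discarded rather than handed to conn_request(). A one-liner sketch of the stricter predicate:

#include <stdbool.h>

struct tcp_flags_sketch { unsigned int syn:1, fin:1, urg:1, psh:1; };

static bool syn_acceptable(const struct tcp_flags_sketch *th)
{
	return th->syn && !th->fin && !th->urg && !th->psh;
}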
98220diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
98221index 0672139..cacc17d 100644
98222--- a/net/ipv4/tcp_ipv4.c
98223+++ b/net/ipv4/tcp_ipv4.c
98224@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
98225 EXPORT_SYMBOL(sysctl_tcp_low_latency);
98226
98227
98228+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98229+extern int grsec_enable_blackhole;
98230+#endif
98231+
98232 #ifdef CONFIG_TCP_MD5SIG
98233 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
98234 __be32 daddr, __be32 saddr, const struct tcphdr *th);
98235@@ -1830,6 +1834,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
98236 return 0;
98237
98238 reset:
98239+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98240+ if (!grsec_enable_blackhole)
98241+#endif
98242 tcp_v4_send_reset(rsk, skb);
98243 discard:
98244 kfree_skb(skb);
98245@@ -1975,12 +1982,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
98246 TCP_SKB_CB(skb)->sacked = 0;
98247
98248 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
98249- if (!sk)
98250+ if (!sk) {
98251+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98252+ ret = 1;
98253+#endif
98254 goto no_tcp_socket;
98255-
98256+ }
98257 process:
98258- if (sk->sk_state == TCP_TIME_WAIT)
98259+ if (sk->sk_state == TCP_TIME_WAIT) {
98260+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98261+ ret = 2;
98262+#endif
98263 goto do_time_wait;
98264+ }
98265
98266 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
98267 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
98268@@ -2034,6 +2048,10 @@ csum_error:
98269 bad_packet:
98270 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
98271 } else {
98272+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98273+ if (!grsec_enable_blackhole || (ret == 1 &&
98274+ (skb->dev->flags & IFF_LOOPBACK)))
98275+#endif
98276 tcp_v4_send_reset(NULL, skb);
98277 }
98278
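The GRKERNSEC_BLACKHOLE hunks in tcp_ipv4.c funnel every would-be RST
through one policy: with blackholing enabled, a segment that matched no
socket (ret == 1) only earns a reset if it arrived on a loopback
device, and everything else is dropped silently. The tcp_minisocks.c
hunk just below applies the same gate to embryonic-connection resets.
The predicate, with names assumed for illustration:

    /* non-zero when a RST should still be sent */
    static int want_reset(int blackhole, int no_socket, int on_loopback)
    {
            return !blackhole || (no_socket && on_loopback);
    }

The loopback carve-out keeps local diagnostics (connecting to a closed
port on 127.0.0.1) behaving normally while remote port scans see
nothing.
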
98279diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
98280index 97b6841..0893357 100644
98281--- a/net/ipv4/tcp_minisocks.c
98282+++ b/net/ipv4/tcp_minisocks.c
98283@@ -27,6 +27,10 @@
98284 #include <net/inet_common.h>
98285 #include <net/xfrm.h>
98286
98287+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98288+extern int grsec_enable_blackhole;
98289+#endif
98290+
98291 int sysctl_tcp_syncookies __read_mostly = 1;
98292 EXPORT_SYMBOL(sysctl_tcp_syncookies);
98293
98294@@ -708,7 +712,10 @@ embryonic_reset:
98295 * avoid becoming vulnerable to outside attack aiming at
98296 * resetting legit local connections.
98297 */
98298- req->rsk_ops->send_reset(sk, skb);
98299+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98300+ if (!grsec_enable_blackhole)
98301+#endif
98302+ req->rsk_ops->send_reset(sk, skb);
98303 } else if (fastopen) { /* received a valid RST pkt */
98304 reqsk_fastopen_remove(sk, req, true);
98305 tcp_reset(sk);
98306diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
98307index 8b97d71..9d7ccf5 100644
98308--- a/net/ipv4/tcp_probe.c
98309+++ b/net/ipv4/tcp_probe.c
98310@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
98311 if (cnt + width >= len)
98312 break;
98313
98314- if (copy_to_user(buf + cnt, tbuf, width))
98315+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
98316 return -EFAULT;
98317 cnt += width;
98318 }
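The tcp_probe.c change adds a hard bound before copy_to_user(): even if
the record-formatting logic ever miscomputed width, the copy can no
longer overrun the on-stack tbuf. The same belt-and-braces guard
appears again in rc80211_pid_debugfs.c further down. Its essence, with
a memcpy standing in for copy_to_user():

    #include <string.h>

    static char tbuf[64];

    static int copy_record(char *dst, size_t width)
    {
            if (width > sizeof(tbuf))
                    return -1;              /* refuse, as -EFAULT does */
            memcpy(dst, tbuf, width);       /* copy_to_user() stand-in */
            return 0;
    }
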
98319diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
98320index 64f0354..a81b39d 100644
98321--- a/net/ipv4/tcp_timer.c
98322+++ b/net/ipv4/tcp_timer.c
98323@@ -22,6 +22,10 @@
98324 #include <linux/gfp.h>
98325 #include <net/tcp.h>
98326
98327+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98328+extern int grsec_lastack_retries;
98329+#endif
98330+
98331 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
98332 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
98333 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
98334@@ -189,6 +193,13 @@ static int tcp_write_timeout(struct sock *sk)
98335 }
98336 }
98337
98338+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98339+ if ((sk->sk_state == TCP_LAST_ACK) &&
98340+ (grsec_lastack_retries > 0) &&
98341+ (grsec_lastack_retries < retry_until))
98342+ retry_until = grsec_lastack_retries;
98343+#endif
98344+
98345 if (retransmits_timed_out(sk, retry_until,
98346 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
98347 /* Has it gone just too far? */
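The tcp_timer.c hunk lets the blackhole sysctl family cap how long a
socket may sit in LAST-ACK retransmitting its FIN; a peer that never
answers can otherwise pin the socket for the full retry budget. The
clamp reduces to (illustrative signature):

    static int effective_retries(int in_last_ack, int lastack_retries,
                                 int retry_until)
    {
            if (in_last_ack && lastack_retries > 0 &&
                lastack_retries < retry_until)
                    return lastack_retries;
            return retry_until;
    }
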
98348diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
98349index a7e4729..2758946 100644
98350--- a/net/ipv4/udp.c
98351+++ b/net/ipv4/udp.c
98352@@ -87,6 +87,7 @@
98353 #include <linux/types.h>
98354 #include <linux/fcntl.h>
98355 #include <linux/module.h>
98356+#include <linux/security.h>
98357 #include <linux/socket.h>
98358 #include <linux/sockios.h>
98359 #include <linux/igmp.h>
98360@@ -113,6 +114,10 @@
98361 #include <net/busy_poll.h>
98362 #include "udp_impl.h"
98363
98364+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98365+extern int grsec_enable_blackhole;
98366+#endif
98367+
98368 struct udp_table udp_table __read_mostly;
98369 EXPORT_SYMBOL(udp_table);
98370
98371@@ -615,6 +620,9 @@ found:
98372 return s;
98373 }
98374
98375+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
98376+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
98377+
98378 /*
98379 * This routine is called by the ICMP module when it gets some
98380 * sort of error condition. If err < 0 then the socket should
98381@@ -914,9 +922,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98382 dport = usin->sin_port;
98383 if (dport == 0)
98384 return -EINVAL;
98385+
98386+ err = gr_search_udp_sendmsg(sk, usin);
98387+ if (err)
98388+ return err;
98389 } else {
98390 if (sk->sk_state != TCP_ESTABLISHED)
98391 return -EDESTADDRREQ;
98392+
98393+ err = gr_search_udp_sendmsg(sk, NULL);
98394+ if (err)
98395+ return err;
98396+
98397 daddr = inet->inet_daddr;
98398 dport = inet->inet_dport;
98399 /* Open fast path for connected socket.
98400@@ -1163,7 +1180,7 @@ static unsigned int first_packet_length(struct sock *sk)
98401 IS_UDPLITE(sk));
98402 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
98403 IS_UDPLITE(sk));
98404- atomic_inc(&sk->sk_drops);
98405+ atomic_inc_unchecked(&sk->sk_drops);
98406 __skb_unlink(skb, rcvq);
98407 __skb_queue_tail(&list_kill, skb);
98408 }
98409@@ -1234,6 +1251,12 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
98410 int is_udplite = IS_UDPLITE(sk);
98411 bool slow;
98412
98413+ /*
98414+ * Check any passed addresses
98415+ */
98416+ if (addr_len)
98417+ *addr_len = sizeof(*sin);
98418+
98419 if (flags & MSG_ERRQUEUE)
98420 return ip_recv_error(sk, msg, len, addr_len);
98421
98422@@ -1243,6 +1266,10 @@ try_again:
98423 if (!skb)
98424 goto out;
98425
98426+ err = gr_search_udp_recvmsg(sk, skb);
98427+ if (err)
98428+ goto out_free;
98429+
98430 ulen = skb->len - sizeof(struct udphdr);
98431 copied = len;
98432 if (copied > ulen)
98433@@ -1276,7 +1303,7 @@ try_again:
98434 if (unlikely(err)) {
98435 trace_kfree_skb(skb, udp_recvmsg);
98436 if (!peeked) {
98437- atomic_inc(&sk->sk_drops);
98438+ atomic_inc_unchecked(&sk->sk_drops);
98439 UDP_INC_STATS_USER(sock_net(sk),
98440 UDP_MIB_INERRORS, is_udplite);
98441 }
98442@@ -1295,7 +1322,6 @@ try_again:
98443 sin->sin_port = udp_hdr(skb)->source;
98444 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
98445 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
98446- *addr_len = sizeof(*sin);
98447 }
98448 if (inet->cmsg_flags)
98449 ip_cmsg_recv(msg, skb);
98450@@ -1566,7 +1592,7 @@ csum_error:
98451 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
98452 drop:
98453 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
98454- atomic_inc(&sk->sk_drops);
98455+ atomic_inc_unchecked(&sk->sk_drops);
98456 kfree_skb(skb);
98457 return -1;
98458 }
98459@@ -1585,7 +1611,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
98460 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
98461
98462 if (!skb1) {
98463- atomic_inc(&sk->sk_drops);
98464+ atomic_inc_unchecked(&sk->sk_drops);
98465 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
98466 IS_UDPLITE(sk));
98467 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
98468@@ -1786,6 +1812,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
98469 goto csum_error;
98470
98471 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
98472+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
98473+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
98474+#endif
98475 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
98476
98477 /*
98478@@ -2350,7 +2379,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
98479 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
98480 0, sock_i_ino(sp),
98481 atomic_read(&sp->sk_refcnt), sp,
98482- atomic_read(&sp->sk_drops));
98483+ atomic_read_unchecked(&sp->sk_drops));
98484 }
98485
98486 int udp4_seq_show(struct seq_file *seq, void *v)
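Besides wiring in the gr_search_udp_* hooks and moving the sk_drops
statistic to the unchecked atomic flavour, the udp.c hunks hoist the
*addr_len store to the top of udp_recvmsg() and delete the old store at
the bottom. With the store at the bottom, any early return (the
MSG_ERRQUEUE path, or the new gr_search_udp_recvmsg() rejection) left
the caller's addr_len untouched. The contract after the change, in
sketch form (demo types, not the kernel's msghdr plumbing):

    struct demo_addr { int family; };

    int demo_recvmsg(struct demo_addr *out, int *addr_len, int fail_early)
    {
            if (addr_len)
                    *addr_len = sizeof(*out);  /* set before any return */
            if (fail_early)
                    return -1;                 /* addr_len already valid */
            /* ... fill *out ... */
            return 0;
    }
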
98487diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
98488index e1a6393..f634ce5 100644
98489--- a/net/ipv4/xfrm4_policy.c
98490+++ b/net/ipv4/xfrm4_policy.c
98491@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
98492 fl4->flowi4_tos = iph->tos;
98493 }
98494
98495-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
98496+static int xfrm4_garbage_collect(struct dst_ops *ops)
98497 {
98498 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
98499
98500- xfrm4_policy_afinfo.garbage_collect(net);
98501+ xfrm_garbage_collect_deferred(net);
98502 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
98503 }
98504
98505@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
98506
98507 static int __net_init xfrm4_net_init(struct net *net)
98508 {
98509- struct ctl_table *table;
98510+ ctl_table_no_const *table = NULL;
98511 struct ctl_table_header *hdr;
98512
98513- table = xfrm4_policy_table;
98514 if (!net_eq(net, &init_net)) {
98515- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
98516+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
98517 if (!table)
98518 goto err_alloc;
98519
98520 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
98521- }
98522-
98523- hdr = register_net_sysctl(net, "net/ipv4", table);
98524+ hdr = register_net_sysctl(net, "net/ipv4", table);
98525+ } else
98526+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
98527 if (!hdr)
98528 goto err_reg;
98529
98530@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
98531 return 0;
98532
98533 err_reg:
98534- if (!net_eq(net, &init_net))
98535- kfree(table);
98536+ kfree(table);
98537 err_alloc:
98538 return -ENOMEM;
98539 }
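The xfrm4_net_init() rewrite is the ctl_table_no_const pattern used
throughout this patch: the template table is registered directly for
init_net and only kmemdup()'d for other network namespaces, which is
what allows the template itself to be constified. The error paths can
then drop their net_eq() guards, because table stays NULL in the
init_net case and kfree(NULL) is a defined no-op. A userspace analogue
of the control flow:

    #include <stdlib.h>

    int register_demo(int is_init_net, int reg_fails)
    {
            char *table = NULL;

            if (!is_init_net) {
                    table = malloc(16);        /* kmemdup() stand-in */
                    if (!table)
                            goto err_alloc;
            }
            if (reg_fails)
                    goto err_reg;
            return 0;   /* registered: table, if any, stays live */

    err_reg:
            free(table);        /* free(NULL) does nothing */
    err_alloc:
            return -1;
    }
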
98540diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
98541index 9c05d77..9cfa714 100644
98542--- a/net/ipv6/addrconf.c
98543+++ b/net/ipv6/addrconf.c
98544@@ -589,7 +589,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
98545 idx = 0;
98546 head = &net->dev_index_head[h];
98547 rcu_read_lock();
98548- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
98549+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
98550 net->dev_base_seq;
98551 hlist_for_each_entry_rcu(dev, head, index_hlist) {
98552 if (idx < s_idx)
98553@@ -2334,7 +2334,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
98554 p.iph.ihl = 5;
98555 p.iph.protocol = IPPROTO_IPV6;
98556 p.iph.ttl = 64;
98557- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
98558+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
98559
98560 if (ops->ndo_do_ioctl) {
98561 mm_segment_t oldfs = get_fs();
98562@@ -3964,7 +3964,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
98563 s_ip_idx = ip_idx = cb->args[2];
98564
98565 rcu_read_lock();
98566- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
98567+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
98568 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
98569 idx = 0;
98570 head = &net->dev_index_head[h];
98571@@ -4571,7 +4571,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
98572 dst_free(&ifp->rt->dst);
98573 break;
98574 }
98575- atomic_inc(&net->ipv6.dev_addr_genid);
98576+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
98577 rt_genid_bump_ipv6(net);
98578 }
98579
98580@@ -4592,7 +4592,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
98581 int *valp = ctl->data;
98582 int val = *valp;
98583 loff_t pos = *ppos;
98584- struct ctl_table lctl;
98585+ ctl_table_no_const lctl;
98586 int ret;
98587
98588 /*
98589@@ -4677,7 +4677,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
98590 int *valp = ctl->data;
98591 int val = *valp;
98592 loff_t pos = *ppos;
98593- struct ctl_table lctl;
98594+ ctl_table_no_const lctl;
98595 int ret;
98596
98597 /*
98598diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
98599index 4fbdb70..f6411f2 100644
98600--- a/net/ipv6/af_inet6.c
98601+++ b/net/ipv6/af_inet6.c
98602@@ -776,7 +776,7 @@ static int __net_init inet6_net_init(struct net *net)
98603
98604 net->ipv6.sysctl.bindv6only = 0;
98605 net->ipv6.sysctl.icmpv6_time = 1*HZ;
98606- atomic_set(&net->ipv6.rt_genid, 0);
98607+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
98608
98609 err = ipv6_init_mibs(net);
98610 if (err)
98611diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
98612index 93b1aa3..e902855 100644
98613--- a/net/ipv6/datagram.c
98614+++ b/net/ipv6/datagram.c
98615@@ -906,5 +906,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
98616 0,
98617 sock_i_ino(sp),
98618 atomic_read(&sp->sk_refcnt), sp,
98619- atomic_read(&sp->sk_drops));
98620+ atomic_read_unchecked(&sp->sk_drops));
98621 }
98622diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
98623index eef8d94..cfa1852 100644
98624--- a/net/ipv6/icmp.c
98625+++ b/net/ipv6/icmp.c
98626@@ -997,7 +997,7 @@ struct ctl_table ipv6_icmp_table_template[] = {
98627
98628 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
98629 {
98630- struct ctl_table *table;
98631+ ctl_table_no_const *table;
98632
98633 table = kmemdup(ipv6_icmp_table_template,
98634 sizeof(ipv6_icmp_table_template),
98635diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
98636index 8acb286..840dd06 100644
98637--- a/net/ipv6/ip6_gre.c
98638+++ b/net/ipv6/ip6_gre.c
98639@@ -74,7 +74,7 @@ struct ip6gre_net {
98640 struct net_device *fb_tunnel_dev;
98641 };
98642
98643-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
98644+static struct rtnl_link_ops ip6gre_link_ops;
98645 static int ip6gre_tunnel_init(struct net_device *dev);
98646 static void ip6gre_tunnel_setup(struct net_device *dev);
98647 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
98648@@ -1294,7 +1294,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
98649 }
98650
98651
98652-static struct inet6_protocol ip6gre_protocol __read_mostly = {
98653+static struct inet6_protocol ip6gre_protocol = {
98654 .handler = ip6gre_rcv,
98655 .err_handler = ip6gre_err,
98656 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
98657@@ -1637,7 +1637,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
98658 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
98659 };
98660
98661-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
98662+static struct rtnl_link_ops ip6gre_link_ops = {
98663 .kind = "ip6gre",
98664 .maxtype = IFLA_GRE_MAX,
98665 .policy = ip6gre_policy,
98666@@ -1650,7 +1650,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
98667 .fill_info = ip6gre_fill_info,
98668 };
98669
98670-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
98671+static struct rtnl_link_ops ip6gre_tap_ops = {
98672 .kind = "ip6gretap",
98673 .maxtype = IFLA_GRE_MAX,
98674 .policy = ip6gre_policy,
98675diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
98676index 7881965..9cf62c4 100644
98677--- a/net/ipv6/ip6_tunnel.c
98678+++ b/net/ipv6/ip6_tunnel.c
98679@@ -89,7 +89,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
98680
98681 static int ip6_tnl_dev_init(struct net_device *dev);
98682 static void ip6_tnl_dev_setup(struct net_device *dev);
98683-static struct rtnl_link_ops ip6_link_ops __read_mostly;
98684+static struct rtnl_link_ops ip6_link_ops;
98685
98686 static int ip6_tnl_net_id __read_mostly;
98687 struct ip6_tnl_net {
98688@@ -1717,7 +1717,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
98689 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
98690 };
98691
98692-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
98693+static struct rtnl_link_ops ip6_link_ops = {
98694 .kind = "ip6tnl",
98695 .maxtype = IFLA_IPTUN_MAX,
98696 .policy = ip6_tnl_policy,
98697diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
98698index 7b42d5e..1eff693 100644
98699--- a/net/ipv6/ip6_vti.c
98700+++ b/net/ipv6/ip6_vti.c
98701@@ -63,7 +63,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
98702
98703 static int vti6_dev_init(struct net_device *dev);
98704 static void vti6_dev_setup(struct net_device *dev);
98705-static struct rtnl_link_ops vti6_link_ops __read_mostly;
98706+static struct rtnl_link_ops vti6_link_ops;
98707
98708 static int vti6_net_id __read_mostly;
98709 struct vti6_net {
98710@@ -902,7 +902,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
98711 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
98712 };
98713
98714-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
98715+static struct rtnl_link_ops vti6_link_ops = {
98716 .kind = "vti6",
98717 .maxtype = IFLA_VTI_MAX,
98718 .policy = vti6_policy,
98719diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
98720index 1c6ce31..299e566 100644
98721--- a/net/ipv6/ipv6_sockglue.c
98722+++ b/net/ipv6/ipv6_sockglue.c
98723@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
98724 if (sk->sk_type != SOCK_STREAM)
98725 return -ENOPROTOOPT;
98726
98727- msg.msg_control = optval;
98728+ msg.msg_control = (void __force_kernel *)optval;
98729 msg.msg_controllen = len;
98730 msg.msg_flags = flags;
98731
98732diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
98733index 710238f..0fd1816 100644
98734--- a/net/ipv6/netfilter/ip6_tables.c
98735+++ b/net/ipv6/netfilter/ip6_tables.c
98736@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
98737 #endif
98738
98739 static int get_info(struct net *net, void __user *user,
98740- const int *len, int compat)
98741+ int len, int compat)
98742 {
98743 char name[XT_TABLE_MAXNAMELEN];
98744 struct xt_table *t;
98745 int ret;
98746
98747- if (*len != sizeof(struct ip6t_getinfo)) {
98748- duprintf("length %u != %zu\n", *len,
98749+ if (len != sizeof(struct ip6t_getinfo)) {
98750+ duprintf("length %u != %zu\n", len,
98751 sizeof(struct ip6t_getinfo));
98752 return -EINVAL;
98753 }
98754@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
98755 info.size = private->size;
98756 strcpy(info.name, name);
98757
98758- if (copy_to_user(user, &info, *len) != 0)
98759+ if (copy_to_user(user, &info, len) != 0)
98760 ret = -EFAULT;
98761 else
98762 ret = 0;
98763@@ -1981,7 +1981,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
98764
98765 switch (cmd) {
98766 case IP6T_SO_GET_INFO:
98767- ret = get_info(sock_net(sk), user, len, 1);
98768+ ret = get_info(sock_net(sk), user, *len, 1);
98769 break;
98770 case IP6T_SO_GET_ENTRIES:
98771 ret = compat_get_entries(sock_net(sk), user, len);
98772@@ -2028,7 +2028,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
98773
98774 switch (cmd) {
98775 case IP6T_SO_GET_INFO:
98776- ret = get_info(sock_net(sk), user, len, 0);
98777+ ret = get_info(sock_net(sk), user, *len, 0);
98778 break;
98779
98780 case IP6T_SO_GET_ENTRIES:
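get_info() now takes the length by value instead of through a kernel
pointer. The value is snapshotted once at the two call sites (*len), so
the size check and the later copy_to_user() can never observe different
lengths, and the signature makes it plain that get_info() never writes
the length back. The double-fetch shape it removes, reduced to a toy:

    /* before: two reads of the same location */
    int get_info_old(const volatile int *len, int expected)
    {
            if (*len != expected)
                    return -1;
            return *len;        /* may differ from the checked value */
    }

    /* after: one snapshot, passed by value */
    int get_info_new(int len, int expected)
    {
            return (len == expected) ? len : -1;
    }
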
98781diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
98782index 767ab8d..c5ec70a 100644
98783--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
98784+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
98785@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
98786
98787 static int nf_ct_frag6_sysctl_register(struct net *net)
98788 {
98789- struct ctl_table *table;
98790+ ctl_table_no_const *table = NULL;
98791 struct ctl_table_header *hdr;
98792
98793- table = nf_ct_frag6_sysctl_table;
98794 if (!net_eq(net, &init_net)) {
98795- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
98796+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
98797 GFP_KERNEL);
98798 if (table == NULL)
98799 goto err_alloc;
98800@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
98801 table[0].data = &net->nf_frag.frags.timeout;
98802 table[1].data = &net->nf_frag.frags.low_thresh;
98803 table[2].data = &net->nf_frag.frags.high_thresh;
98804- }
98805-
98806- hdr = register_net_sysctl(net, "net/netfilter", table);
98807+ hdr = register_net_sysctl(net, "net/netfilter", table);
98808+ } else
98809+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
98810 if (hdr == NULL)
98811 goto err_reg;
98812
98813@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
98814 return 0;
98815
98816 err_reg:
98817- if (!net_eq(net, &init_net))
98818- kfree(table);
98819+ kfree(table);
98820 err_alloc:
98821 return -ENOMEM;
98822 }
98823diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
98824index 827f795..7e28e82 100644
98825--- a/net/ipv6/output_core.c
98826+++ b/net/ipv6/output_core.c
98827@@ -9,8 +9,8 @@
98828
98829 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
98830 {
98831- static atomic_t ipv6_fragmentation_id;
98832- int old, new;
98833+ static atomic_unchecked_t ipv6_fragmentation_id;
98834+ int id;
98835
98836 #if IS_ENABLED(CONFIG_IPV6)
98837 if (rt && !(rt->dst.flags & DST_NOPEER)) {
98838@@ -26,13 +26,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
98839 }
98840 }
98841 #endif
98842- do {
98843- old = atomic_read(&ipv6_fragmentation_id);
98844- new = old + 1;
98845- if (!new)
98846- new = 1;
98847- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
98848- fhdr->identification = htonl(new);
98849+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
98850+ if (!id)
98851+ id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
98852+ fhdr->identification = htonl(id);
98853 }
98854 EXPORT_SYMBOL(ipv6_select_ident);
98855
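ipv6_select_ident() swaps a cmpxchg retry loop for a single increment
of an atomic_unchecked_t: fragment IDs are supposed to wrap, so
exempting this counter from PAX_REFCOUNT's overflow checking is the
whole point. Zero is still skipped, matching the old loop's behaviour.
The same logic in portable C11:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t frag_id;

    static uint32_t next_frag_id(void)
    {
            uint32_t id = atomic_fetch_add(&frag_id, 1) + 1;
            if (!id)    /* wrapped to 0: take one more, as the patch does */
                    id = atomic_fetch_add(&frag_id, 1) + 1;
            return id;
    }
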
98856diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
98857index a83243c..a1ca589 100644
98858--- a/net/ipv6/ping.c
98859+++ b/net/ipv6/ping.c
98860@@ -246,6 +246,22 @@ static struct pernet_operations ping_v6_net_ops = {
98861 };
98862 #endif
98863
98864+static struct pingv6_ops real_pingv6_ops = {
98865+ .ipv6_recv_error = ipv6_recv_error,
98866+ .ip6_datagram_recv_ctl = ip6_datagram_recv_ctl,
98867+ .icmpv6_err_convert = icmpv6_err_convert,
98868+ .ipv6_icmp_error = ipv6_icmp_error,
98869+ .ipv6_chk_addr = ipv6_chk_addr,
98870+};
98871+
98872+static struct pingv6_ops dummy_pingv6_ops = {
98873+ .ipv6_recv_error = dummy_ipv6_recv_error,
98874+ .ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl,
98875+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
98876+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
98877+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
98878+};
98879+
98880 int __init pingv6_init(void)
98881 {
98882 #ifdef CONFIG_PROC_FS
98883@@ -253,11 +269,7 @@ int __init pingv6_init(void)
98884 if (ret)
98885 return ret;
98886 #endif
98887- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
98888- pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
98889- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
98890- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
98891- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
98892+ pingv6_ops = &real_pingv6_ops;
98893 return inet6_register_protosw(&pingv6_protosw);
98894 }
98895
98896@@ -266,11 +278,7 @@ int __init pingv6_init(void)
98897 */
98898 void pingv6_exit(void)
98899 {
98900- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
98901- pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
98902- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
98903- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
98904- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
98905+ pingv6_ops = &dummy_pingv6_ops;
98906 #ifdef CONFIG_PROC_FS
98907 unregister_pernet_subsys(&ping_v6_net_ops);
98908 #endif
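In ping.c, five separate stores into writable pingv6_ops members become
a single pointer swap between two statically initialized ops tables;
the only writable word left is the pointer itself, which is what lets
the rest of the patch constify such ops structures. The idiom, in
miniature:

    struct demo_ops { int (*recv_error)(void); };

    static int real_recv_error(void)  { return 0;  }
    static int dummy_recv_error(void) { return -1; }

    static const struct demo_ops real_ops  = { .recv_error = real_recv_error  };
    static const struct demo_ops dummy_ops = { .recv_error = dummy_recv_error };

    static const struct demo_ops *ops = &dummy_ops;

    void demo_init(void) { ops = &real_ops;  }
    void demo_exit(void) { ops = &dummy_ops; }
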
98909diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
98910index b6bb87e..06cc9ed 100644
98911--- a/net/ipv6/raw.c
98912+++ b/net/ipv6/raw.c
98913@@ -384,7 +384,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
98914 {
98915 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
98916 skb_checksum_complete(skb)) {
98917- atomic_inc(&sk->sk_drops);
98918+ atomic_inc_unchecked(&sk->sk_drops);
98919 kfree_skb(skb);
98920 return NET_RX_DROP;
98921 }
98922@@ -412,7 +412,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
98923 struct raw6_sock *rp = raw6_sk(sk);
98924
98925 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
98926- atomic_inc(&sk->sk_drops);
98927+ atomic_inc_unchecked(&sk->sk_drops);
98928 kfree_skb(skb);
98929 return NET_RX_DROP;
98930 }
98931@@ -436,7 +436,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
98932
98933 if (inet->hdrincl) {
98934 if (skb_checksum_complete(skb)) {
98935- atomic_inc(&sk->sk_drops);
98936+ atomic_inc_unchecked(&sk->sk_drops);
98937 kfree_skb(skb);
98938 return NET_RX_DROP;
98939 }
98940@@ -465,6 +465,9 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
98941 if (flags & MSG_OOB)
98942 return -EOPNOTSUPP;
98943
98944+ if (addr_len)
98945+ *addr_len=sizeof(*sin6);
98946+
98947 if (flags & MSG_ERRQUEUE)
98948 return ipv6_recv_error(sk, msg, len, addr_len);
98949
98950@@ -503,7 +506,6 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
98951 sin6->sin6_flowinfo = 0;
98952 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
98953 IP6CB(skb)->iif);
98954- *addr_len = sizeof(*sin6);
98955 }
98956
98957 sock_recv_ts_and_drops(msg, sk, skb);
98958@@ -606,7 +608,7 @@ out:
98959 return err;
98960 }
98961
98962-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
98963+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
98964 struct flowi6 *fl6, struct dst_entry **dstp,
98965 unsigned int flags)
98966 {
98967@@ -918,12 +920,15 @@ do_confirm:
98968 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
98969 char __user *optval, int optlen)
98970 {
98971+ struct icmp6_filter filter;
98972+
98973 switch (optname) {
98974 case ICMPV6_FILTER:
98975 if (optlen > sizeof(struct icmp6_filter))
98976 optlen = sizeof(struct icmp6_filter);
98977- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
98978+ if (copy_from_user(&filter, optval, optlen))
98979 return -EFAULT;
98980+ raw6_sk(sk)->filter = filter;
98981 return 0;
98982 default:
98983 return -ENOPROTOOPT;
98984@@ -936,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
98985 char __user *optval, int __user *optlen)
98986 {
98987 int len;
98988+ struct icmp6_filter filter;
98989
98990 switch (optname) {
98991 case ICMPV6_FILTER:
98992@@ -947,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
98993 len = sizeof(struct icmp6_filter);
98994 if (put_user(len, optlen))
98995 return -EFAULT;
98996- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
98997+ filter = raw6_sk(sk)->filter;
98998+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
98999 return -EFAULT;
99000 return 0;
99001 default:
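rawv6_seticmpfilter() used to copy_from_user() straight into
raw6_sk(sk)->filter, so a faulting copy could leave the socket's filter
half-written; the patch bounces through a stack copy and installs it
with one structure assignment. The getter side gains a length bound
before copying out, like tcp_probe.c earlier. The bounce-buffer idiom,
sketched with a memcpy stand-in:

    #include <string.h>

    struct filter { unsigned int data[8]; };

    static int fake_copy_from_user(void *dst, const void *src, size_t n)
    {
            memcpy(dst, src, n);
            return 0;           /* non-zero would mean a fault */
    }

    static int set_filter(struct filter *target, const void *uptr, size_t len)
    {
            struct filter tmp = *target; /* short writes keep the tail */

            if (len > sizeof(tmp))
                    len = sizeof(tmp);
            if (fake_copy_from_user(&tmp, uptr, len))
                    return -14;          /* -EFAULT */
            *target = tmp;               /* one coherent update */
            return 0;
    }
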
99002diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
99003index cc85a9b..526a133 100644
99004--- a/net/ipv6/reassembly.c
99005+++ b/net/ipv6/reassembly.c
99006@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
99007
99008 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
99009 {
99010- struct ctl_table *table;
99011+ ctl_table_no_const *table = NULL;
99012 struct ctl_table_header *hdr;
99013
99014- table = ip6_frags_ns_ctl_table;
99015 if (!net_eq(net, &init_net)) {
99016- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
99017+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
99018 if (table == NULL)
99019 goto err_alloc;
99020
99021@@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
99022 /* Don't export sysctls to unprivileged users */
99023 if (net->user_ns != &init_user_ns)
99024 table[0].procname = NULL;
99025- }
99026+ hdr = register_net_sysctl(net, "net/ipv6", table);
99027+ } else
99028+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
99029
99030- hdr = register_net_sysctl(net, "net/ipv6", table);
99031 if (hdr == NULL)
99032 goto err_reg;
99033
99034@@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
99035 return 0;
99036
99037 err_reg:
99038- if (!net_eq(net, &init_net))
99039- kfree(table);
99040+ kfree(table);
99041 err_alloc:
99042 return -ENOMEM;
99043 }
99044diff --git a/net/ipv6/route.c b/net/ipv6/route.c
99045index 4b4944c..d346b14 100644
99046--- a/net/ipv6/route.c
99047+++ b/net/ipv6/route.c
99048@@ -1495,7 +1495,7 @@ int ip6_route_add(struct fib6_config *cfg)
99049 if (!table)
99050 goto out;
99051
99052- rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
99053+ rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
99054
99055 if (!rt) {
99056 err = -ENOMEM;
99057@@ -2954,7 +2954,7 @@ struct ctl_table ipv6_route_table_template[] = {
99058
99059 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
99060 {
99061- struct ctl_table *table;
99062+ ctl_table_no_const *table;
99063
99064 table = kmemdup(ipv6_route_table_template,
99065 sizeof(ipv6_route_table_template),
99066diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
99067index d3005b3..b36df4a 100644
99068--- a/net/ipv6/sit.c
99069+++ b/net/ipv6/sit.c
99070@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
99071 static void ipip6_dev_free(struct net_device *dev);
99072 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
99073 __be32 *v4dst);
99074-static struct rtnl_link_ops sit_link_ops __read_mostly;
99075+static struct rtnl_link_ops sit_link_ops;
99076
99077 static int sit_net_id __read_mostly;
99078 struct sit_net {
99079@@ -1664,7 +1664,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
99080 unregister_netdevice_queue(dev, head);
99081 }
99082
99083-static struct rtnl_link_ops sit_link_ops __read_mostly = {
99084+static struct rtnl_link_ops sit_link_ops = {
99085 .kind = "sit",
99086 .maxtype = IFLA_IPTUN_MAX,
99087 .policy = ipip6_policy,
99088diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
99089index 107b2f1..72741a9 100644
99090--- a/net/ipv6/sysctl_net_ipv6.c
99091+++ b/net/ipv6/sysctl_net_ipv6.c
99092@@ -40,7 +40,7 @@ static struct ctl_table ipv6_rotable[] = {
99093
99094 static int __net_init ipv6_sysctl_net_init(struct net *net)
99095 {
99096- struct ctl_table *ipv6_table;
99097+ ctl_table_no_const *ipv6_table;
99098 struct ctl_table *ipv6_route_table;
99099 struct ctl_table *ipv6_icmp_table;
99100 int err;
99101diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
99102index f67033b..6f974fc 100644
99103--- a/net/ipv6/tcp_ipv6.c
99104+++ b/net/ipv6/tcp_ipv6.c
99105@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
99106 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
99107 }
99108
99109+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99110+extern int grsec_enable_blackhole;
99111+#endif
99112+
99113 static void tcp_v6_hash(struct sock *sk)
99114 {
99115 if (sk->sk_state != TCP_CLOSE) {
99116@@ -1397,6 +1401,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
99117 return 0;
99118
99119 reset:
99120+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99121+ if (!grsec_enable_blackhole)
99122+#endif
99123 tcp_v6_send_reset(sk, skb);
99124 discard:
99125 if (opt_skb)
99126@@ -1479,12 +1486,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
99127 TCP_SKB_CB(skb)->sacked = 0;
99128
99129 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
99130- if (!sk)
99131+ if (!sk) {
99132+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99133+ ret = 1;
99134+#endif
99135 goto no_tcp_socket;
99136+ }
99137
99138 process:
99139- if (sk->sk_state == TCP_TIME_WAIT)
99140+ if (sk->sk_state == TCP_TIME_WAIT) {
99141+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99142+ ret = 2;
99143+#endif
99144 goto do_time_wait;
99145+ }
99146
99147 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
99148 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
99149@@ -1536,6 +1551,10 @@ csum_error:
99150 bad_packet:
99151 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
99152 } else {
99153+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99154+ if (!grsec_enable_blackhole || (ret == 1 &&
99155+ (skb->dev->flags & IFF_LOOPBACK)))
99156+#endif
99157 tcp_v6_send_reset(NULL, skb);
99158 }
99159
99160diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
99161index 089c741..cfee117 100644
99162--- a/net/ipv6/udp.c
99163+++ b/net/ipv6/udp.c
99164@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
99165 udp_ipv6_hash_secret + net_hash_mix(net));
99166 }
99167
99168+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99169+extern int grsec_enable_blackhole;
99170+#endif
99171+
99172 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
99173 {
99174 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
99175@@ -392,6 +396,9 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
99176 int is_udp4;
99177 bool slow;
99178
99179+ if (addr_len)
99180+ *addr_len = sizeof(struct sockaddr_in6);
99181+
99182 if (flags & MSG_ERRQUEUE)
99183 return ipv6_recv_error(sk, msg, len, addr_len);
99184
99185@@ -435,7 +442,7 @@ try_again:
99186 if (unlikely(err)) {
99187 trace_kfree_skb(skb, udpv6_recvmsg);
99188 if (!peeked) {
99189- atomic_inc(&sk->sk_drops);
99190+ atomic_inc_unchecked(&sk->sk_drops);
99191 if (is_udp4)
99192 UDP_INC_STATS_USER(sock_net(sk),
99193 UDP_MIB_INERRORS,
99194@@ -477,7 +484,7 @@ try_again:
99195 ipv6_iface_scope_id(&sin6->sin6_addr,
99196 IP6CB(skb)->iif);
99197 }
99198- *addr_len = sizeof(*sin6);
99199+
99200 }
99201 if (is_udp4) {
99202 if (inet->cmsg_flags)
99203@@ -685,7 +692,7 @@ csum_error:
99204 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
99205 drop:
99206 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
99207- atomic_inc(&sk->sk_drops);
99208+ atomic_inc_unchecked(&sk->sk_drops);
99209 kfree_skb(skb);
99210 return -1;
99211 }
99212@@ -742,7 +749,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
99213 if (likely(skb1 == NULL))
99214 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
99215 if (!skb1) {
99216- atomic_inc(&sk->sk_drops);
99217+ atomic_inc_unchecked(&sk->sk_drops);
99218 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
99219 IS_UDPLITE(sk));
99220 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
99221@@ -881,6 +888,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
99222 goto csum_error;
99223
99224 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
99225+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
99226+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
99227+#endif
99228 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
99229
99230 kfree_skb(skb);
99231diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
99232index 5f8e128..865d38e 100644
99233--- a/net/ipv6/xfrm6_policy.c
99234+++ b/net/ipv6/xfrm6_policy.c
99235@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
99236 }
99237 }
99238
99239-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
99240+static int xfrm6_garbage_collect(struct dst_ops *ops)
99241 {
99242 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
99243
99244- xfrm6_policy_afinfo.garbage_collect(net);
99245+ xfrm_garbage_collect_deferred(net);
99246 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
99247 }
99248
99249@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
99250
99251 static int __net_init xfrm6_net_init(struct net *net)
99252 {
99253- struct ctl_table *table;
99254+ ctl_table_no_const *table = NULL;
99255 struct ctl_table_header *hdr;
99256
99257- table = xfrm6_policy_table;
99258 if (!net_eq(net, &init_net)) {
99259- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
99260+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
99261 if (!table)
99262 goto err_alloc;
99263
99264 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
99265- }
99266+ hdr = register_net_sysctl(net, "net/ipv6", table);
99267+ } else
99268+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
99269
99270- hdr = register_net_sysctl(net, "net/ipv6", table);
99271 if (!hdr)
99272 goto err_reg;
99273
99274@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
99275 return 0;
99276
99277 err_reg:
99278- if (!net_eq(net, &init_net))
99279- kfree(table);
99280+ kfree(table);
99281 err_alloc:
99282 return -ENOMEM;
99283 }
99284diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
99285index 41ac7938..75e3bb1 100644
99286--- a/net/irda/ircomm/ircomm_tty.c
99287+++ b/net/irda/ircomm/ircomm_tty.c
99288@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
99289 add_wait_queue(&port->open_wait, &wait);
99290
99291 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
99292- __FILE__, __LINE__, tty->driver->name, port->count);
99293+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
99294
99295 spin_lock_irqsave(&port->lock, flags);
99296 if (!tty_hung_up_p(filp))
99297- port->count--;
99298+ atomic_dec(&port->count);
99299 port->blocked_open++;
99300 spin_unlock_irqrestore(&port->lock, flags);
99301
99302@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
99303 }
99304
99305 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
99306- __FILE__, __LINE__, tty->driver->name, port->count);
99307+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
99308
99309 schedule();
99310 }
99311@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
99312
99313 spin_lock_irqsave(&port->lock, flags);
99314 if (!tty_hung_up_p(filp))
99315- port->count++;
99316+ atomic_inc(&port->count);
99317 port->blocked_open--;
99318 spin_unlock_irqrestore(&port->lock, flags);
99319
99320 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
99321- __FILE__, __LINE__, tty->driver->name, port->count);
99322+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
99323
99324 if (!retval)
99325 port->flags |= ASYNC_NORMAL_ACTIVE;
99326@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
99327
99328 /* ++ is not atomic, so this should be protected - Jean II */
99329 spin_lock_irqsave(&self->port.lock, flags);
99330- self->port.count++;
99331+ atomic_inc(&self->port.count);
99332 spin_unlock_irqrestore(&self->port.lock, flags);
99333 tty_port_tty_set(&self->port, tty);
99334
99335 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
99336- self->line, self->port.count);
99337+ self->line, atomic_read(&self->port.count));
99338
99339 /* Not really used by us, but lets do it anyway */
99340 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
99341@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
99342 tty_kref_put(port->tty);
99343 }
99344 port->tty = NULL;
99345- port->count = 0;
99346+ atomic_set(&port->count, 0);
99347 spin_unlock_irqrestore(&port->lock, flags);
99348
99349 wake_up_interruptible(&port->open_wait);
99350@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
99351 seq_putc(m, '\n');
99352
99353 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
99354- seq_printf(m, "Open count: %d\n", self->port.count);
99355+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
99356 seq_printf(m, "Max data size: %d\n", self->max_data_size);
99357 seq_printf(m, "Max header size: %d\n", self->max_header_size);
99358
99359diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
99360index c4b7218..3e83259 100644
99361--- a/net/iucv/af_iucv.c
99362+++ b/net/iucv/af_iucv.c
99363@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
99364
99365 write_lock_bh(&iucv_sk_list.lock);
99366
99367- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
99368+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
99369 while (__iucv_get_sock_by_name(name)) {
99370 sprintf(name, "%08x",
99371- atomic_inc_return(&iucv_sk_list.autobind_name));
99372+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
99373 }
99374
99375 write_unlock_bh(&iucv_sk_list.lock);
99376diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
99377index cd5b8ec..f205e6b 100644
99378--- a/net/iucv/iucv.c
99379+++ b/net/iucv/iucv.c
99380@@ -690,7 +690,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
99381 return NOTIFY_OK;
99382 }
99383
99384-static struct notifier_block __refdata iucv_cpu_notifier = {
99385+static struct notifier_block iucv_cpu_notifier = {
99386 .notifier_call = iucv_cpu_notify,
99387 };
99388
99389diff --git a/net/key/af_key.c b/net/key/af_key.c
99390index 545f047..9757a9d 100644
99391--- a/net/key/af_key.c
99392+++ b/net/key/af_key.c
99393@@ -3041,10 +3041,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
99394 static u32 get_acqseq(void)
99395 {
99396 u32 res;
99397- static atomic_t acqseq;
99398+ static atomic_unchecked_t acqseq;
99399
99400 do {
99401- res = atomic_inc_return(&acqseq);
99402+ res = atomic_inc_return_unchecked(&acqseq);
99403 } while (!res);
99404 return res;
99405 }
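get_acqseq() in af_key.c (and iucv's autobind counter above) is another
deliberately wrapping sequence counter moved to atomic_unchecked_t:
under PAX_REFCOUNT, plain atomic_t increments are overflow-checked to
catch refcount bugs, so counters whose wraparound is harmless must opt
out via the _unchecked variants. The function's logic, mirrored in C11:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t acqseq;

    /* wraps freely, but never hands out 0 */
    static uint32_t get_acqseq_demo(void)
    {
            uint32_t res;
            do {
                    res = atomic_fetch_add(&acqseq, 1) + 1;
            } while (!res);
            return res;
    }
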
99406diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
99407index da1a1ce..571db8d 100644
99408--- a/net/l2tp/l2tp_ip.c
99409+++ b/net/l2tp/l2tp_ip.c
99410@@ -518,6 +518,9 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
99411 if (flags & MSG_OOB)
99412 goto out;
99413
99414+ if (addr_len)
99415+ *addr_len = sizeof(*sin);
99416+
99417 skb = skb_recv_datagram(sk, flags, noblock, &err);
99418 if (!skb)
99419 goto out;
99420@@ -540,7 +543,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
99421 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
99422 sin->sin_port = 0;
99423 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
99424- *addr_len = sizeof(*sin);
99425 }
99426 if (inet->cmsg_flags)
99427 ip_cmsg_recv(msg, skb);
99428diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
99429index b4b61b2..ac84a257 100644
99430--- a/net/mac80211/cfg.c
99431+++ b/net/mac80211/cfg.c
99432@@ -826,7 +826,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
99433 ret = ieee80211_vif_use_channel(sdata, chandef,
99434 IEEE80211_CHANCTX_EXCLUSIVE);
99435 }
99436- } else if (local->open_count == local->monitors) {
99437+ } else if (local_read(&local->open_count) == local->monitors) {
99438 local->_oper_chandef = *chandef;
99439 ieee80211_hw_config(local, 0);
99440 }
99441@@ -3311,7 +3311,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
99442 else
99443 local->probe_req_reg--;
99444
99445- if (!local->open_count)
99446+ if (!local_read(&local->open_count))
99447 break;
99448
99449 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
99450@@ -3774,8 +3774,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
99451 if (chanctx_conf) {
99452 *chandef = chanctx_conf->def;
99453 ret = 0;
99454- } else if (local->open_count > 0 &&
99455- local->open_count == local->monitors &&
99456+ } else if (local_read(&local->open_count) > 0 &&
99457+ local_read(&local->open_count) == local->monitors &&
99458 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
99459 if (local->use_chanctx)
99460 *chandef = local->monitor_chandef;
99461diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
99462index 4aea4e7..9e698d1 100644
99463--- a/net/mac80211/ieee80211_i.h
99464+++ b/net/mac80211/ieee80211_i.h
99465@@ -28,6 +28,7 @@
99466 #include <net/ieee80211_radiotap.h>
99467 #include <net/cfg80211.h>
99468 #include <net/mac80211.h>
99469+#include <asm/local.h>
99470 #include "key.h"
99471 #include "sta_info.h"
99472 #include "debug.h"
99473@@ -961,7 +962,7 @@ struct ieee80211_local {
99474 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
99475 spinlock_t queue_stop_reason_lock;
99476
99477- int open_count;
99478+ local_t open_count;
99479 int monitors, cooked_mntrs;
99480 /* number of interfaces with corresponding FIF_ flags */
99481 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
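mac80211's open_count changes from a bare int to local_t, and every
reader and writer in the iface.c, main.c, pm.c, rate.c and util.c hunks
below switches to local_read()/local_inc()/local_dec(). The plausible
motive is the same as elsewhere: give the counter an explicit atomic
accessor discipline without dragging it into the overflow-checked
atomic_t class. A rough stand-in for the accessor shape (the kernel's
local_t is per-CPU-optimised, not a C11 atomic):

    #include <stdatomic.h>

    typedef struct { _Atomic long v; } demo_local_t;

    static long demo_local_read(demo_local_t *l) { return atomic_load(&l->v); }
    static void demo_local_inc(demo_local_t *l)  { atomic_fetch_add(&l->v, 1); }
    static void demo_local_dec(demo_local_t *l)  { atomic_fetch_sub(&l->v, 1); }
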
99482diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
99483index a075791..1d0027f 100644
99484--- a/net/mac80211/iface.c
99485+++ b/net/mac80211/iface.c
99486@@ -519,7 +519,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
99487 break;
99488 }
99489
99490- if (local->open_count == 0) {
99491+ if (local_read(&local->open_count) == 0) {
99492 res = drv_start(local);
99493 if (res)
99494 goto err_del_bss;
99495@@ -566,7 +566,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
99496 res = drv_add_interface(local, sdata);
99497 if (res)
99498 goto err_stop;
99499- } else if (local->monitors == 0 && local->open_count == 0) {
99500+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
99501 res = ieee80211_add_virtual_monitor(local);
99502 if (res)
99503 goto err_stop;
99504@@ -675,7 +675,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
99505 atomic_inc(&local->iff_promiscs);
99506
99507 if (coming_up)
99508- local->open_count++;
99509+ local_inc(&local->open_count);
99510
99511 if (hw_reconf_flags)
99512 ieee80211_hw_config(local, hw_reconf_flags);
99513@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
99514 err_del_interface:
99515 drv_remove_interface(local, sdata);
99516 err_stop:
99517- if (!local->open_count)
99518+ if (!local_read(&local->open_count))
99519 drv_stop(local);
99520 err_del_bss:
99521 sdata->bss = NULL;
99522@@ -856,7 +856,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
99523 }
99524
99525 if (going_down)
99526- local->open_count--;
99527+ local_dec(&local->open_count);
99528
99529 switch (sdata->vif.type) {
99530 case NL80211_IFTYPE_AP_VLAN:
99531@@ -923,7 +923,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
99532 }
99533 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
99534
99535- if (local->open_count == 0)
99536+ if (local_read(&local->open_count) == 0)
99537 ieee80211_clear_tx_pending(local);
99538
99539 /*
99540@@ -963,7 +963,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
99541
99542 ieee80211_recalc_ps(local, -1);
99543
99544- if (local->open_count == 0) {
99545+ if (local_read(&local->open_count) == 0) {
99546 ieee80211_stop_device(local);
99547
99548 /* no reconfiguring after stop! */
99549@@ -974,7 +974,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
99550 ieee80211_configure_filter(local);
99551 ieee80211_hw_config(local, hw_reconf_flags);
99552
99553- if (local->monitors == local->open_count)
99554+ if (local->monitors == local_read(&local->open_count))
99555 ieee80211_add_virtual_monitor(local);
99556 }
99557
99558diff --git a/net/mac80211/main.c b/net/mac80211/main.c
99559index 7d1c3ac..b62dd29 100644
99560--- a/net/mac80211/main.c
99561+++ b/net/mac80211/main.c
99562@@ -172,7 +172,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
99563 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
99564 IEEE80211_CONF_CHANGE_POWER);
99565
99566- if (changed && local->open_count) {
99567+ if (changed && local_read(&local->open_count)) {
99568 ret = drv_config(local, changed);
99569 /*
99570 * Goal:
99571diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
99572index 3401262..d5cd68d 100644
99573--- a/net/mac80211/pm.c
99574+++ b/net/mac80211/pm.c
99575@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
99576 struct ieee80211_sub_if_data *sdata;
99577 struct sta_info *sta;
99578
99579- if (!local->open_count)
99580+ if (!local_read(&local->open_count))
99581 goto suspend;
99582
99583 ieee80211_scan_cancel(local);
99584@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
99585 cancel_work_sync(&local->dynamic_ps_enable_work);
99586 del_timer_sync(&local->dynamic_ps_timer);
99587
99588- local->wowlan = wowlan && local->open_count;
99589+ local->wowlan = wowlan && local_read(&local->open_count);
99590 if (local->wowlan) {
99591 int err = drv_suspend(local, wowlan);
99592 if (err < 0) {
99593@@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
99594 WARN_ON(!list_empty(&local->chanctx_list));
99595
99596 /* stop hardware - this must stop RX */
99597- if (local->open_count)
99598+ if (local_read(&local->open_count))
99599 ieee80211_stop_device(local);
99600
99601 suspend:
99602diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
99603index 22b223f..ab70070 100644
99604--- a/net/mac80211/rate.c
99605+++ b/net/mac80211/rate.c
99606@@ -734,7 +734,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
99607
99608 ASSERT_RTNL();
99609
99610- if (local->open_count)
99611+ if (local_read(&local->open_count))
99612 return -EBUSY;
99613
99614 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
99615diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
99616index 6ff1346..936ca9a 100644
99617--- a/net/mac80211/rc80211_pid_debugfs.c
99618+++ b/net/mac80211/rc80211_pid_debugfs.c
99619@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
99620
99621 spin_unlock_irqrestore(&events->lock, status);
99622
99623- if (copy_to_user(buf, pb, p))
99624+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
99625 return -EFAULT;
99626
99627 return p;
99628diff --git a/net/mac80211/util.c b/net/mac80211/util.c
99629index 9f9b9bd..d6fcf59 100644
99630--- a/net/mac80211/util.c
99631+++ b/net/mac80211/util.c
99632@@ -1474,7 +1474,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
99633 }
99634 #endif
99635 /* everything else happens only if HW was up & running */
99636- if (!local->open_count)
99637+ if (!local_read(&local->open_count))
99638 goto wake_up;
99639
99640 /*
99641@@ -1699,7 +1699,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
99642 local->in_reconfig = false;
99643 barrier();
99644
99645- if (local->monitors == local->open_count && local->monitors > 0)
99646+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
99647 ieee80211_add_virtual_monitor(local);
99648
99649 /*
99650diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
99651index c3398cd..98ad3b4 100644
99652--- a/net/netfilter/Kconfig
99653+++ b/net/netfilter/Kconfig
99654@@ -1002,6 +1002,16 @@ config NETFILTER_XT_MATCH_ESP
99655
99656 To compile it as a module, choose M here. If unsure, say N.
99657
99658+config NETFILTER_XT_MATCH_GRADM
99659+ tristate '"gradm" match support'
99660+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
99661+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
99662+ ---help---
99663+ The gradm match allows to match on grsecurity RBAC being enabled.
99664+ It is useful when iptables rules are applied early on bootup to
99665+ prevent connections to the machine (except from a trusted host)
99666+ while the RBAC system is disabled.
99667+
99668 config NETFILTER_XT_MATCH_HASHLIMIT
99669 tristate '"hashlimit" match support'
99670 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
99671diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
99672index 394483b..ed51f2d 100644
99673--- a/net/netfilter/Makefile
99674+++ b/net/netfilter/Makefile
99675@@ -130,6 +130,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
99676 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
99677 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
99678 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
99679+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
99680 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
99681 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
99682 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
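These Kconfig and Makefile hunks register the xt_gradm match whose
source lives elsewhere in the patch; its job is simply to report
whether grsecurity's RBAC system is engaged, so an early-boot ruleset
can drop traffic until gradm loads a policy. A plausible reduction of
the match logic (shape assumed, with a stub standing in for
grsecurity's gr_acl_is_enabled()):

    /* stub so the sketch is self-contained */
    static int gr_acl_is_enabled(void) { return 1; }

    /* invert corresponds to matching on "RBAC disabled" */
    static int gradm_match(int invert)
    {
            int enabled = gr_acl_is_enabled();
            return invert ? !enabled : enabled;
    }
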
99683diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
99684index bac7e01..1d7a31a 100644
99685--- a/net/netfilter/ipset/ip_set_core.c
99686+++ b/net/netfilter/ipset/ip_set_core.c
99687@@ -1950,7 +1950,7 @@ done:
99688 return ret;
99689 }
99690
99691-static struct nf_sockopt_ops so_set __read_mostly = {
99692+static struct nf_sockopt_ops so_set = {
99693 .pf = PF_INET,
99694 .get_optmin = SO_IP_SET,
99695 .get_optmax = SO_IP_SET + 1,
99696diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
99697index 4c8e5c0..5a79b4d 100644
99698--- a/net/netfilter/ipvs/ip_vs_conn.c
99699+++ b/net/netfilter/ipvs/ip_vs_conn.c
99700@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
99701 /* Increase the refcnt counter of the dest */
99702 ip_vs_dest_hold(dest);
99703
99704- conn_flags = atomic_read(&dest->conn_flags);
99705+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
99706 if (cp->protocol != IPPROTO_UDP)
99707 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
99708 flags = cp->flags;
99709@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
99710
99711 cp->control = NULL;
99712 atomic_set(&cp->n_control, 0);
99713- atomic_set(&cp->in_pkts, 0);
99714+ atomic_set_unchecked(&cp->in_pkts, 0);
99715
99716 cp->packet_xmit = NULL;
99717 cp->app = NULL;
99718@@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
99719
99720 /* Don't drop the entry if its number of incoming packets is not
99721 located in [0, 8] */
99722- i = atomic_read(&cp->in_pkts);
99723+ i = atomic_read_unchecked(&cp->in_pkts);
99724 if (i > 8 || i < 0) return 0;
99725
99726 if (!todrop_rate[i]) return 0;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f26ee4..6a9d7c3 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
 /* do not touch skb anymore */

- atomic_inc(&cp->in_pkts);
+ atomic_inc_unchecked(&cp->in_pkts);
 ip_vs_conn_put(cp);
 return ret;
 }
@@ -1706,7 +1706,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
 pkts = sysctl_sync_threshold(ipvs);
 else
- pkts = atomic_add_return(1, &cp->in_pkts);
+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);

 if (ipvs->sync_state & IP_VS_STATE_MASTER)
 ip_vs_sync_conn(net, cp, pkts);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 35be035..50f8834 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 */
 ip_vs_rs_hash(ipvs, dest);
 }
- atomic_set(&dest->conn_flags, conn_flags);
+ atomic_set_unchecked(&dest->conn_flags, conn_flags);

 /* bind the service */
 old_svc = rcu_dereference_protected(dest->svc, 1);
@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
 * align with netns init in ip_vs_control_net_init()
 */

-static struct ctl_table vs_vars[] = {
+static ctl_table_no_const vs_vars[] __read_only = {
 {
 .procname = "amemthresh",
 .maxlen = sizeof(int),
@@ -2075,7 +2075,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
 " %-7s %-6d %-10d %-10d\n",
 &dest->addr.in6,
 ntohs(dest->port),
- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
 atomic_read(&dest->weight),
 atomic_read(&dest->activeconns),
 atomic_read(&dest->inactconns));
@@ -2086,7 +2086,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
 "%-7s %-6d %-10d %-10d\n",
 ntohl(dest->addr.ip),
 ntohs(dest->port),
- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
 atomic_read(&dest->weight),
 atomic_read(&dest->activeconns),
 atomic_read(&dest->inactconns));
@@ -2564,7 +2564,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,

 entry.addr = dest->addr.ip;
 entry.port = dest->port;
- entry.conn_flags = atomic_read(&dest->conn_flags);
+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
 entry.weight = atomic_read(&dest->weight);
 entry.u_threshold = dest->u_threshold;
 entry.l_threshold = dest->l_threshold;
@@ -3107,7 +3107,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
- (atomic_read(&dest->conn_flags) &
+ (atomic_read_unchecked(&dest->conn_flags) &
 IP_VS_CONN_F_FWD_MASK)) ||
 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
 atomic_read(&dest->weight)) ||
@@ -3580,7 +3580,7 @@ out:
 }


-static const struct genl_ops ip_vs_genl_ops[] __read_mostly = {
+static const struct genl_ops ip_vs_genl_ops[] = {
 {
 .cmd = IPVS_CMD_NEW_SERVICE,
 .flags = GENL_ADMIN_PERM,
@@ -3697,7 +3697,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
 {
 int idx;
 struct netns_ipvs *ipvs = net_ipvs(net);
- struct ctl_table *tbl;
+ ctl_table_no_const *tbl;

 atomic_set(&ipvs->dropentry, 0);
 spin_lock_init(&ipvs->dropentry_lock);
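
The ctl_table changes above follow PaX's constification scheme: sysctl
tables are made const at build time by the constify gcc plugin, so tables
that must still be written (entries patched during init, or duplicated
per-netns with kmemdup()) are declared with a non-const typedef, and static
instances are placed in a write-protected section via __read_only. Roughly,
paraphrasing the definitions added elsewhere in this patch:

    /* __no_const tells the constify plugin to leave the type writable */
    typedef struct ctl_table __no_const ctl_table_no_const;

    /* objects in .data..read_only are mapped read-only after boot;
     * later writes need an explicit pax_open_kernel() window */
    #define __read_only __attribute__((__section__(".data..read_only")))
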
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index ca056a3..9cf01ef 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
 * IPVS LBLC sysctl table
 */
 #ifdef CONFIG_SYSCTL
-static struct ctl_table vs_vars_table[] = {
+static ctl_table_no_const vs_vars_table[] __read_only = {
 {
 .procname = "lblc_expiration",
 .data = NULL,
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 3f21a2f..a112e85 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
 * IPVS LBLCR sysctl table
 */

-static struct ctl_table vs_vars_table[] = {
+static ctl_table_no_const vs_vars_table[] __read_only = {
 {
 .procname = "lblcr_expiration",
 .data = NULL,
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index f63c238..1b87f8a 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
 cp = cp->control;
 if (cp) {
 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
- pkts = atomic_add_return(1, &cp->in_pkts);
+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
 else
 pkts = sysctl_sync_threshold(ipvs);
 ip_vs_sync_conn(net, cp->control, pkts);
@@ -771,7 +771,7 @@ control:
 if (!cp)
 return;
 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
- pkts = atomic_add_return(1, &cp->in_pkts);
+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
 else
 pkts = sysctl_sync_threshold(ipvs);
 goto sloop;
@@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,

 if (opt)
 memcpy(&cp->in_seq, opt, sizeof(*opt));
- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
 cp->state = state;
 cp->old_state = cp->state;
 /*
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index c47444e..b0961c6 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 else
 rc = NF_ACCEPT;
 /* do not touch skb anymore */
- atomic_inc(&cp->in_pkts);
+ atomic_inc_unchecked(&cp->in_pkts);
 goto out;
 }

@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 else
 rc = NF_ACCEPT;
 /* do not touch skb anymore */
- atomic_inc(&cp->in_pkts);
+ atomic_inc_unchecked(&cp->in_pkts);
 goto out;
 }

diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index a4b5e2a..13b1de3 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_acct_init_sysctl(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table;

 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
 GFP_KERNEL);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 43549eb..0bbeace 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1605,6 +1605,10 @@ void nf_conntrack_init_end(void)
 #define DYING_NULLS_VAL ((1<<30)+1)
 #define TEMPLATE_NULLS_VAL ((1<<30)+2)

+#ifdef CONFIG_GRKERNSEC_HIDESYM
+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
+#endif
+
 int nf_conntrack_init_net(struct net *net)
 {
 int ret;
@@ -1619,7 +1623,11 @@ int nf_conntrack_init_net(struct net *net)
 goto err_stat;
 }

+#ifdef CONFIG_GRKERNSEC_HIDESYM
+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
+#else
 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+#endif
 if (!net->ct.slabname) {
 ret = -ENOMEM;
 goto err_slabname;
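
The slabname hunk above closes an address leak: naming the per-netns
conntrack cache "nf_conntrack_%p" embeds a kernel pointer in the cache
name, which then surfaces in /proc/slabinfo and the /sys/kernel/slab
hierarchy. Under GRKERNSEC_HIDESYM the name is taken from a monotonic
counter instead, so no address escapes. In outline, with a hypothetical
counter value:

    unsigned long id = atomic_inc_return_unchecked(&conntrack_cache_id);

    /* yields e.g. "nf_conntrack_00000001" rather than a raw pointer */
    net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", id);
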
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 1df1761..ce8b88a 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_event_init_sysctl(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table;

 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
 GFP_KERNEL);
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 974a2a4..52cc6ff 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {

 static int nf_conntrack_helper_init_sysctl(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table;

 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
 GFP_KERNEL);
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index ce30041..3861b5d 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,

 static void
 nf_ct_unregister_sysctl(struct ctl_table_header **header,
- struct ctl_table **table,
+ ctl_table_no_const **table,
 unsigned int users)
 {
 if (users > 0)
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index a99b6c3..cb372f9 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
 const char *msg;
 u_int8_t state;

- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
 BUG_ON(dh == NULL);

 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
 out_invalid:
 if (LOG_INVALID(net, IPPROTO_DCCP))
 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
- NULL, msg);
+ NULL, "%s", msg);
 return false;
 }

@@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
 u_int8_t type, old_state, new_state;
 enum ct_dccp_roles role;

- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
 BUG_ON(dh == NULL);
 type = dh->dccph_type;

@@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
 unsigned int cscov;
 const char *msg;

- dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
+ dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
 if (dh == NULL) {
 msg = "nf_ct_dccp: short packet ";
 goto out_invalid;
@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,

 out_invalid:
 if (LOG_INVALID(net, IPPROTO_DCCP))
- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
 return -NF_ACCEPT;
 }

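The nf_conntrack_proto_dccp.c hunks above are two plain bug fixes rather
than hardening conversions. First, skb_header_pointer() must be given the
address of the on-stack header buffer (&_dh); passing &dh hands it the
address of the pointer variable itself, so a non-linear skb would be copied
over the pointer instead of into the buffer. Second, the msg strings were
passed in nf_log_packet()'s printf-style format position; routing them
through a constant "%s" format keeps any % sequences in the message from
being interpreted. Condensed, using the same declarations as the file:

    struct dccp_hdr _dh, *dh;

    /* copies the header into _dh when the skb data is non-linear */
    dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);

    /* msg is data, never a format string */
    nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
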
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f641751..d3c5b51 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {

 static int nf_conntrack_standalone_init_sysctl(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table;

 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
 GFP_KERNEL);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index 7a394df..bd91a8a 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
 #ifdef CONFIG_SYSCTL
 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
 {
- struct ctl_table *table;
+ ctl_table_no_const *table;

 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
 GFP_KERNEL);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 85296d4..8becdec 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {

 #ifdef CONFIG_SYSCTL
 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;

 static int nf_log_proc_dostring(struct ctl_table *table, int write,
 void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
 mutex_unlock(&nf_log_mutex);
 } else {
+ ctl_table_no_const nf_log_table = *table;
+
 mutex_lock(&nf_log_mutex);
 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
 lockdep_is_held(&nf_log_mutex));
 if (!logger)
- table->data = "NONE";
+ nf_log_table.data = "NONE";
 else
- table->data = logger->name;
- r = proc_dostring(table, write, buffer, lenp, ppos);
+ nf_log_table.data = logger->name;
+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
 mutex_unlock(&nf_log_mutex);
 }

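Because nf_log_sysctl_table is now __read_only, the read path above can no
longer point table->data at the current logger name in place. The pattern
used instead, here and in several other sysctl handlers in this patch, is
to take a stack copy of the ctl_table entry, redirect the copy's .data, and
hand the copy to proc_dostring(), leaving the protected original untouched.
In outline:

    ctl_table_no_const tmp = *table;        /* writable stack copy */

    tmp.data = logger ? (void *)logger->name : "NONE";
    r = proc_dostring(&tmp, write, buffer, lenp, ppos);
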
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index f042ae5..30ea486 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
 }
 }

- list_add(&reg->list, &nf_sockopts);
+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
 out:
 mutex_unlock(&nf_sockopt_mutex);
 return ret;
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
 {
 mutex_lock(&nf_sockopt_mutex);
- list_del(&reg->list);
+ pax_list_del((struct list_head *)&reg->list);
 mutex_unlock(&nf_sockopt_mutex);
 }
 EXPORT_SYMBOL(nf_unregister_sockopt);
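
nf_sockopt_ops instances are constified elsewhere in this patch, so the
list_head embedded in them lives in read-only memory and a plain
list_add()/list_del() would fault. The pax_list_* helpers wrap the list
operation in a transient kernel write window. Their shape is roughly the
following (paraphrased; the real helpers also sanity-check the list
pointers before linking):

    void pax_list_add(struct list_head *new, struct list_head *head)
    {
            pax_open_kernel();      /* lift write protection briefly */
            __list_add(new, head, head->next);
            pax_close_kernel();
    }
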
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index a155d19..726b0f2 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
 struct nfnl_log_net {
 spinlock_t instances_lock;
 struct hlist_head instance_table[INSTANCE_BUCKETS];
- atomic_t global_seq;
+ atomic_unchecked_t global_seq;
 };

 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
@@ -564,7 +564,7 @@ __build_packet_message(struct nfnl_log_net *log,
 /* global sequence number */
 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
- htonl(atomic_inc_return(&log->global_seq))))
+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
 goto nla_put_failure;

 if (data_len) {
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index da0c1f4..f79737a 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -216,7 +216,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
 /* We want to reuse existing compat_to_user */
 old_fs = get_fs();
 set_fs(KERNEL_DS);
- t->compat_to_user(out, in);
+ t->compat_to_user((void __force_user *)out, in);
 set_fs(old_fs);
 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
 kfree(out);
@@ -403,7 +403,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
 /* We want to reuse existing compat_to_user */
 old_fs = get_fs();
 set_fs(KERNEL_DS);
- m->compat_to_user(out, in);
+ m->compat_to_user((void __force_user *)out, in);
 set_fs(old_fs);
 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
 kfree(out);
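
The nft_compat.c hunks above annotate the set_fs(KERNEL_DS) trick:
compat_to_user() is typed to take a __user destination, but here it is
pointed at a kernel buffer while the address limit is raised. PaX's
stricter user/kernel pointer separation flags that mismatch, so the cast
is spelled out with __force_user, meaning "treat this kernel pointer as a
user pointer on purpose". Paraphrasing the compiler.h additions made
earlier in this patch:

    /* sparse-style escape hatches for deliberate crossings */
    #define __force_user    __force __user
    #define __force_kernel  __force __kernel
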
diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
new file mode 100644
index 0000000..c566332
--- /dev/null
+++ b/net/netfilter/xt_gradm.c
@@ -0,0 +1,51 @@
+/*
+ * gradm match for netfilter